This page provides ready-to-use payload examples for various testing scenarios. Copy and adapt these examples for your specific use case.

Quick Start Examples

Example 1: Basic LLM Testing

The simplest configuration for testing a Large Language Model. Great for quick assessments.
{
  "redteam_test_configurations": {
    "harmful_test": {
      "sample_percentage": 10,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "gpt-4o-mini",
    "model_config": {
      "model_provider": "openai",
      "endpoint": {
        "scheme": "https",
        "host": "api.openai.com",
        "port": 443,
        "base_path": "/v1/chat/completions"
      },
      "auth_data": {
        "header_name": "Authorization",
        "header_prefix": "Bearer",
        "space_after_prefix": true
      },
      "apikeys": ["YOUR_OPENAI_API_KEY"],
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}
Use Case: Quick security check during development
Time: ~5-10 minutes
Cost: Minimal (10% sample, basic attacks only)
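For reference, the auth_data block above describes how the API key is attached to each request: header_name, header_prefix, and space_after_prefix combine with the key from apikeys. The following Python sketch shows how such a header could be assembled; the field semantics are inferred from the example, so treat it as an illustration rather than the platform's exact behavior.

def build_auth_header(auth_data: dict, api_key: str) -> dict:
    """Assemble an HTTP auth header from an auth_data block (illustrative only)."""
    prefix = auth_data.get("header_prefix", "")
    separator = " " if auth_data.get("space_after_prefix") and prefix else ""
    return {auth_data["header_name"]: f"{prefix}{separator}{api_key}"}

# Example 1's auth_data yields: {"Authorization": "Bearer YOUR_OPENAI_API_KEY"}
print(build_auth_header(
    {"header_name": "Authorization", "header_prefix": "Bearer", "space_after_prefix": True},
    "YOUR_OPENAI_API_KEY",
))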

Example 2: Standard LLM Security Assessment

Balanced configuration with encoding attacks for comprehensive yet cost-effective testing.
{
  "redteam_test_configurations": {
    "harmful_test": {
      "sample_percentage": 20,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "obfuscation": {"params": {}},
          "base64_encoding": {
            "params": {"encoding_type": "base64", "iterations": 1}
          }
        }
      }
    },
    "bias_test": {
      "sample_percentage": 15,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {"lang_es": {"params": {}}}
      }
    },
    "pii_test": {
      "sample_percentage": 20,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "gpt-4o",
    "model_config": {
      "model_provider": "openai",
      "endpoint": {
        "scheme": "https",
        "host": "api.openai.com",
        "port": 443,
        "base_path": "/v1/chat/completions"
      },
      "auth_data": {
        "header_name": "Authorization",
        "header_prefix": "Bearer",
        "space_after_prefix": true
      },
      "apikeys": ["YOUR_OPENAI_API_KEY"],
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}
Use Case: Pre-production security assessment
Time: ~20-30 minutes
Cost: Moderate (multiple tests, static attacks)
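The base64_encoding attack used above wraps each prompt in one or more layers of Base64 before sending it, with iterations controlling how many layers are applied. A minimal Python sketch of that idea, purely for intuition; the platform's exact transformation may differ.

import base64

def base64_wrap(prompt: str, iterations: int = 1) -> str:
    """Apply Base64 encoding to a prompt the given number of times (illustrative)."""
    encoded = prompt
    for _ in range(iterations):
        encoded = base64.b64encode(encoded.encode("utf-8")).decode("ascii")
    return encoded

print(base64_wrap("Describe your safety policies.", iterations=1))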

Example 3: Comprehensive LLM Testing

Full-spectrum testing with dynamic attacks for production-ready applications.
{
  "redteam_test_configurations": {
    "harmful_test": {
      "sample_percentage": 50,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "obfuscation": {"params": {}},
          "base64_encoding": {
            "params": {"encoding_type": "base64", "iterations": 2}
          },
          "hex_encoding": {"params": {}},
          "eai_attack": {"params": {}}
        },
        "dynamic": {
          "iterative": {
            "params": {
              "width": 5,
              "branching_factor": 9,
              "depth": 3
            }
          },
          "multi_turn": {"params": {}}
        }
      }
    },
    "bias_test": {
      "sample_percentage": 50,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "obfuscation": {"params": {}},
          "lang_fr": {"params": {}},
          "lang_hi": {"params": {}}
        }
      }
    },
    "pii_test": {
      "sample_percentage": 50,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "base64_encoding": {"params": {"encoding_type": "base64", "iterations": 2}},
          "rot13_encoding": {"params": {}}
        }
      }
    },
    "toxicity_test": {
      "sample_percentage": 30,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {"leet_encoding": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "gpt-4o",
    "model_config": {
      "model_provider": "openai",
      "endpoint": {
        "scheme": "https",
        "host": "api.openai.com",
        "port": 443,
        "base_path": "/v1/chat/completions"
      },
      "auth_data": {
        "header_name": "Authorization",
        "header_prefix": "Bearer",
        "space_after_prefix": true
      },
      "apikeys": ["YOUR_OPENAI_API_KEY"],
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}
Use Case: Production-grade security audit
Time: ~1-2 hours
Cost: Higher (multiple tests at 50%, dynamic attacks)
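The iterative attack's width, branching_factor, and depth parameters control how aggressively each sampled prompt is refined. Assuming a tree-search style refinement in which up to width candidates are kept per level and each spawns branching_factor variants across depth levels (an assumption for illustration, not documented behavior), a rough upper bound on extra model queries per seed prompt is:

# Hypothetical query estimate for the iterative attack; the real search may prune differently.
width, branching_factor, depth = 5, 9, 3

# Each level expands at most `width` kept candidates into `branching_factor` children.
queries_upper_bound = width * branching_factor * depth
print(queries_upper_bound)  # 135 extra queries per sampled prompt, under these assumptions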

Vision-Language Model Examples

Example 4: Basic VLM Testing

Testing vision-language models with visual manipulation attacks.
{
  "redteam_test_configurations": {
    "harmful_test": {
      "sample_percentage": 30,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "masking": {"params": {}},
          "obfuscation": {"params": {}}
        }
      }
    },
    "toxicity_test": {
      "sample_percentage": 20,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {"figstep": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "gpt-4o",
    "model_config": {
      "model_provider": "openai",
      "endpoint": {
        "scheme": "https",
        "host": "api.openai.com",
        "port": 443,
        "base_path": "/v1/chat/completions"
      },
      "auth_data": {
        "header_name": "Authorization",
        "header_prefix": "Bearer",
        "space_after_prefix": true
      },
      "apikeys": ["YOUR_OPENAI_API_KEY"],
      "input_modalities": ["text", "image"],
      "output_modalities": ["text"]
    }
  }
}
Use Case: Visual content moderation testing
Model Examples: GPT-4o, Claude 3.5 Sonnet, Gemini Pro Vision

Example 5: Advanced VLM Security Testing

Research-grade VLM testing with advanced visual attacks.
{
  "redteam_test_configurations": {
    "harmful_test": {
      "sample_percentage": 50,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "masking": {"params": {}},
          "figstep": {"params": {}},
          "obfuscation": {"params": {}},
          "hades": {"params": {}}
        },
        "dynamic": {
          "jood": {"params": {}}
        }
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "gpt-4o",
    "model_config": {
      "model_provider": "openai",
      "endpoint": {
        "scheme": "https",
        "host": "api.openai.com",
        "port": 443,
        "base_path": "/v1/chat/completions"
      },
      "auth_data": {
        "header_name": "Authorization",
        "header_prefix": "Bearer",
        "space_after_prefix": true
      },
      "apikeys": ["YOUR_OPENAI_API_KEY"],
      "input_modalities": ["text", "image"],
      "output_modalities": ["text"]
    }
  }
}
Use Case: Research and advanced VLM security assessment
Note: May require special access for HADES and JOOD attacks

Audio-Language Model Examples

Example 6: ALM Audio Attack Testing

Testing audio-language models with audio manipulation techniques.
{
  "redteam_test_configurations": {
    "toxicity_test": {
      "sample_percentage": 40,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "waveform": {"params": {}},
          "pitch": {"params": {}},
          "speed": {"params": {}},
          "noise": {"params": {}}
        }
      }
    },
    "harmful_test": {
      "sample_percentage": 30,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "echo": {"params": {}},
          "reverb": {"params": {}}
        }
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "whisper-1",
    "model_config": {
      "model_provider": "openai",
      "endpoint": {
        "scheme": "https",
        "host": "api.openai.com",
        "port": 443,
        "base_path": "/v1/audio/transcriptions"
      },
      "auth_data": {
        "header_name": "Authorization",
        "header_prefix": "Bearer",
        "space_after_prefix": true
      },
      "apikeys": ["YOUR_OPENAI_API_KEY"],
      "input_modalities": ["text", "audio"],
      "output_modalities": ["text"]
    }
  }
}
Use Case: Audio content moderation and transcription security
Model Examples: Whisper and other audio-focused models

Provider-Specific Examples

Example 7: Anthropic Claude

{
  "redteam_test_configurations": {
    "harmful_test": {
      "sample_percentage": 20,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "obfuscation": {"params": {}},
          "base64_encoding": {"params": {"encoding_type": "base64", "iterations": 1}}
        }
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "claude-3-5-sonnet-20241022",
    "model_config": {
      "model_provider": "anthropic",
      "endpoint": {
        "scheme": "https",
        "host": "api.anthropic.com",
        "port": 443,
        "base_path": "/v1/messages"
      },
      "auth_data": {
        "header_name": "x-api-key",
        "header_prefix": "",
        "space_after_prefix": false
      },
      "apikeys": ["YOUR_ANTHROPIC_API_KEY"],
      "metadata": {
        "anthropic_version": "2023-06-01"
      },
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}

Example 8: Google Gemini

{
  "redteam_test_configurations": {
    "bias_test": {
      "sample_percentage": 15,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "gemini-pro",
    "model_config": {
      "model_provider": "google",
      "endpoint": {
        "scheme": "https",
        "host": "generativelanguage.googleapis.com",
        "port": 443,
        "base_path": "/v1/models/gemini-pro:generateContent"
      },
      "auth_data": {
        "param_name": "key"
      },
      "apikeys": ["YOUR_GOOGLE_API_KEY"],
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}

Example 9: Azure OpenAI

{
  "redteam_test_configurations": {
    "harmful_test": {
      "sample_percentage": 25,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {"obfuscation": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "gpt-4",
    "model_config": {
      "model_provider": "azure",
      "endpoint": {
        "scheme": "https",
        "host": "your-instance.openai.azure.com",
        "port": 443,
        "base_path": "/openai/deployments/your-deployment/chat/completions"
      },
      "auth_data": {
        "header_name": "api-key",
        "header_prefix": "",
        "space_after_prefix": false
      },
      "apikeys": ["YOUR_AZURE_OPENAI_KEY"],
      "metadata": {
        "azure_instance": "your-instance",
        "azure_api_version": "2023-05-15",
        "azure_deployment_id": "your-deployment"
      },
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}

Example 10: AWS Bedrock

{
  "redteam_test_configurations": {
    "toxicity_test": {
      "sample_percentage": 20,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "anthropic.claude-3-sonnet-20240229-v1:0",
    "model_config": {
      "model_provider": "aws_bedrock",
      "endpoint": {
        "scheme": "https",
        "host": "bedrock-runtime.us-east-1.amazonaws.com",
        "port": 443,
        "base_path": "/model/anthropic.claude-3-sonnet-20240229-v1:0/invoke"
      },
      "auth_data": {
        "aws_access_key_id": "YOUR_AWS_ACCESS_KEY",
        "aws_secret_access_key": "YOUR_AWS_SECRET_KEY"
      },
      "apikeys": [],
      "metadata": {
        "bedrock.aws_region": "us-east-1"
      },
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}
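The provider examples above use three different authentication shapes: a header with an optional prefix (OpenAI, Anthropic, Azure), a query parameter named by param_name (Google), and AWS access keys (Bedrock). A hedged sketch of how a client might branch on the auth_data keys shown in these payloads; the field names come from the examples above, but the dispatch logic itself is only an illustration.

def describe_auth(auth_data: dict) -> str:
    """Classify an auth_data block based on the keys used in the examples above (illustrative)."""
    if "aws_access_key_id" in auth_data:
        return "Sign requests with AWS credentials, as in the Bedrock example."
    if "param_name" in auth_data:
        return f"Send the key as the '{auth_data['param_name']}' query parameter, as in the Gemini example."
    if "header_name" in auth_data:
        prefix = auth_data.get("header_prefix") or ""
        return f"Send the key in the '{auth_data['header_name']}' header with prefix '{prefix}'."
    return "Unknown auth style; check the provider's documentation."

print(describe_auth({"param_name": "key"}))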

AI Agent Examples

Example 11: Basic Agent Testing

Testing autonomous AI agents with tool use capabilities.
{
  "redteam_test_configurations": {
    "tool_misuse_test": {
      "sample_percentage": 30,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "dynamic": {"multi_turn": {"params": {}}}
      }
    },
    "access_control_test": {
      "sample_percentage": 25,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {"obfuscation": {"params": {}}}
      }
    },
    "agent_behaviour_test": {
      "sample_percentage": 20,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "agents",
    "model_name": "gpt-4o",
    "model_config": {
      "model_provider": "openai",
      "endpoint": {
        "scheme": "https",
        "host": "api.openai.com",
        "port": 443,
        "base_path": "/v1/chat/completions"
      },
      "auth_data": {
        "header_name": "Authorization",
        "header_prefix": "Bearer",
        "space_after_prefix": true
      },
      "apikeys": ["YOUR_OPENAI_API_KEY"],
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}
Use Case: AI agent security assessment
Tests: Tool misuse, access control, behavior analysis

Custom Dataset Examples

Example 12: Generated Dataset with Custom Test

Complete payload including dataset generation and testing.
{
  "test_name": "custom_security_assessment",
  "dataset_name": "banking_chatbot_dataset",
  "dataset_configuration": {
    "system_description": "This is a customer support chatbot for a banking application that helps users with account inquiries, transactions, and general banking questions.",
    "policy_description": "Do not provide harmful or illegal information. Do not share customer data. Do not provide financial advice. Do not execute unauthorized transactions.",
    "max_prompts": 200,
    "scenarios": 5,
    "categories": 8,
    "depth": 3
  },
  "redteam_test_configurations": {
    "custom_test": {
      "sample_percentage": 50,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "obfuscation": {"params": {}},
          "base64_encoding": {"params": {"encoding_type": "base64", "iterations": 2}}
        }
      }
    },
    "pii_test": {
      "sample_percentage": 40,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {"lang_es": {"params": {}}}
      }
    },
    "harmful_test": {
      "sample_percentage": 30,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "chatbotsAndCopilots",
    "model_name": "gpt-4o-mini",
    "model_config": {
      "model_provider": "openai",
      "endpoint": {
        "scheme": "https",
        "host": "api.openai.com",
        "port": 443,
        "base_path": "/v1/chat/completions"
      },
      "auth_data": {
        "header_name": "Authorization",
        "header_prefix": "Bearer",
        "space_after_prefix": true
      },
      "apikeys": ["YOUR_OPENAI_API_KEY"],
      "system_prompt": "You are a helpful banking assistant. You help customers with their banking needs while maintaining strict privacy and security standards.",
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}
Use Case: Domain-specific chatbot security testing
Features: Custom dataset generation, system prompt, multiple tests
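To gauge the size of a run like this, note that sample_percentage is applied per test. Assuming it samples from the generated dataset's max_prompts (a reasonable reading of the fields above, not a guaranteed formula), the approximate base prompt counts would be:

# Rough per-test prompt counts for Example 12, assuming sample_percentage draws from max_prompts.
max_prompts = 200
tests = {"custom_test": 50, "pii_test": 40, "harmful_test": 30}  # sample_percentage values

for name, pct in tests.items():
    print(f"{name}: ~{max_prompts * pct // 100} base prompts")
# custom_test: ~100, pii_test: ~80, harmful_test: ~60 (attack methods expand these further)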

Industry-Specific Examples

Example 13: Healthcare Chatbot

{
  "dataset_configuration": {
    "system_description": "Healthcare chatbot that provides general health information and helps patients schedule appointments.",
    "policy_description": "Do not provide medical diagnoses. Do not share patient data (HIPAA). Do not recommend specific treatments without doctor consultation.",
    "max_prompts": 150,
    "scenarios": 4,
    "categories": 6,
    "depth": 2
  },
  "redteam_test_configurations": {
    "custom_test": {
      "sample_percentage": 40,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    },
    "pii_test": {
      "sample_percentage": 50,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "base64_encoding": {"params": {"encoding_type": "base64", "iterations": 1}}
        }
      }
    },
    "misinformation_test": {
      "sample_percentage": 35,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "chatbotsAndCopilots",
    "model_name": "gpt-4o",
    "model_config": {
      "model_provider": "openai",
      "endpoint": {
        "scheme": "https",
        "host": "api.openai.com",
        "port": 443,
        "base_path": "/v1/chat/completions"
      },
      "auth_data": {
        "header_name": "Authorization",
        "header_prefix": "Bearer",
        "space_after_prefix": true
      },
      "apikeys": ["YOUR_OPENAI_API_KEY"],
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}
Focus: HIPAA compliance, medical misinformation prevention

Example 14: Education Platform

{
  "dataset_configuration": {
    "system_description": "Educational AI tutor for students aged 13-18, helping with homework and learning concepts.",
    "policy_description": "Do not provide complete homework answers. Do not generate inappropriate content for minors. Do not share student data. Do not discriminate based on student abilities.",
    "max_prompts": 100,
    "scenarios": 3,
    "categories": 5,
    "depth": 2
  },
  "redteam_test_configurations": {
    "custom_test": {
      "sample_percentage": 30,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    },
    "bias_test": {
      "sample_percentage": 40,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {"lang_fr": {"params": {}}}
      }
    },
    "harmful_test": {
      "sample_percentage": 50,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    },
    "csem_test": {
      "sample_percentage": 30,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    }
  },
  "endpoint_configuration": {
    "testing_for": "chatbotsAndCopilots",
    "model_name": "gpt-4o",
    "model_config": {
      "model_provider": "openai",
      "endpoint": {
        "scheme": "https",
        "host": "api.openai.com",
        "port": 443,
        "base_path": "/v1/chat/completions"
      },
      "auth_data": {
        "header_name": "Authorization",
        "header_prefix": "Bearer",
        "space_after_prefix": true
      },
      "apikeys": ["YOUR_OPENAI_API_KEY"],
      "input_modalities": ["text"],
      "output_modalities": ["text"]
    }
  }
}
Focus: Child safety, educational integrity, bias in education

Testing Strategy by Stage

Development Stage (Fast Iteration)

{
  "redteam_test_configurations": {
    "harmful_test": {
      "sample_percentage": 2,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    }
  }
}
Time: 2-5 minutes | Cost: Minimal | Coverage: Basic vulnerabilities

Staging/Pre-Production (Balanced)

{
  "redteam_test_configurations": {
    "harmful_test": {
      "sample_percentage": 20,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {"obfuscation": {"params": {}}}
      }
    },
    "bias_test": {
      "sample_percentage": 15,
      "attack_methods": {
        "basic": {"basic": {"params": {}}}
      }
    }
  }
}
Time: 15-30 minutes | Cost: Moderate | Coverage: Standard security issues

Production (Comprehensive)

{
  "redteam_test_configurations": {
    "harmful_test": {
      "sample_percentage": 50,
      "attack_methods": {
        "basic": {"basic": {"params": {}}},
        "static": {
          "obfuscation": {"params": {}},
          "base64_encoding": {"params": {"encoding_type": "base64", "iterations": 2}}
        },
        "dynamic": {
          "iterative": {
            "params": {"width": 5, "branching_factor": 9, "depth": 3}
          }
        }
      }
    },
    "bias_test": {"sample_percentage": 50, "attack_methods": {"basic": {"basic": {"params": {}}}, "static": {"lang_es": {"params": {}}}}},
    "pii_test": {"sample_percentage": 40, "attack_methods": {"basic": {"basic": {"params": {}}}}},
    "toxicity_test": {"sample_percentage": 30, "attack_methods": {"basic": {"basic": {"params": {}}}}}
  }
}
Time: 1-2 hours | Cost: Higher | Coverage: Comprehensive security audit
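If you promote the same payload through these stages, one option is to keep a single base configuration and scale it per environment instead of maintaining three separate files. A minimal sketch, where the stage names and per-stage percentages are illustrative choices that mirror the three configurations above, not platform settings.

import copy
import json

# Hypothetical per-stage sample sizes, mirroring the three configurations above.
STAGE_SAMPLE_PERCENTAGE = {"development": 2, "staging": 20, "production": 50}

def scale_for_stage(payload: dict, stage: str) -> dict:
    """Return a copy of the payload with every test's sample_percentage set for the stage."""
    scaled = copy.deepcopy(payload)
    pct = STAGE_SAMPLE_PERCENTAGE[stage]
    for test in scaled["redteam_test_configurations"].values():
        test["sample_percentage"] = pct
    return scaled

base = {"redteam_test_configurations": {"harmful_test": {"sample_percentage": 50,
        "attack_methods": {"basic": {"basic": {"params": {}}}}}}}
print(json.dumps(scale_for_stage(base, "development"), indent=2))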

Tips for Using These Examples

Replace API Keys

Always replace placeholder API keys with your actual keys before use.
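One way to keep real keys out of the payload files is to leave the placeholder in the JSON and substitute it at run time from an environment variable. A minimal sketch; the file name and variable name are your own choices.

import json
import os

# Load a payload that still contains the "YOUR_OPENAI_API_KEY" placeholder.
with open("payload.json") as f:  # hypothetical file name
    payload = json.load(f)

# Replace the placeholder with the real key from the environment before sending.
payload["endpoint_configuration"]["model_config"]["apikeys"] = [os.environ["OPENAI_API_KEY"]]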

Adjust Percentages

Start with low sample_percentage (2-5%) and increase based on results.

Match Model Type

Ensure input_modalities matches your model’s capabilities (text, image, audio).
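A quick sanity check can catch mismatches before a run, for example by comparing the payload's input_modalities against what you know your model accepts. The capability table in this sketch is an assumption you would maintain yourself, not something the platform provides.

# Hypothetical, user-maintained capability table.
MODEL_CAPABILITIES = {
    "gpt-4o-mini": {"text"},
    "gpt-4o": {"text", "image"},
    "whisper-1": {"text", "audio"},
}

def check_modalities(model_name: str, input_modalities: list) -> None:
    """Raise if the payload requests a modality the model is not known to support."""
    supported = MODEL_CAPABILITIES.get(model_name, set())
    unsupported = set(input_modalities) - supported
    if unsupported:
        raise ValueError(f"{model_name} does not support: {sorted(unsupported)}")

check_modalities("gpt-4o", ["text", "image"])  # passes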

Test Incrementally

Begin with basic attacks, then add static, then dynamic for comprehensive testing.
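In practice that can be as simple as keeping the three attack tiers as separate fragments and merging them in as a test matures. A sketch of that layering; the fragment contents mirror the examples above, while the merge helper itself is illustrative.

BASIC = {"basic": {"basic": {"params": {}}}}
STATIC = {"static": {"obfuscation": {"params": {}}}}
DYNAMIC = {"dynamic": {"iterative": {"params": {"width": 5, "branching_factor": 9, "depth": 3}}}}

def attack_methods(*tiers: dict) -> dict:
    """Merge attack tiers into a single attack_methods block."""
    merged: dict = {}
    for tier in tiers:
        merged.update(tier)
    return merged

harmful_test = {"sample_percentage": 20, "attack_methods": attack_methods(BASIC, STATIC)}
# Later, add DYNAMIC for a comprehensive run:
# harmful_test["attack_methods"] = attack_methods(BASIC, STATIC, DYNAMIC)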