Skip to main content

Evaluating Model Vulnerabilities Across Different Providers

This tutorial guides you through assessing vulnerabilities in models from various providers using EnkryptAI’s red teaming API. We’ll cover setup and usage for OpenAI, Together, Replicate, Azure OpenAI, HuggingFace, AWS Bedrock, Google Gemini, and Anthropic Claude.

Prerequisites

  • Python programming basics
  • Accounts with the model providers you want to test
  • Python libraries: requests, python-dotenv, tabulate, pandas, and enkryptai-sdk (os and json are part of the Python standard library)

Setup

  1. Install required libraries:
Shell
pip install requests python-dotenv tabulate pandas enkryptai-sdk
  2. Set up your EnkryptAI API key:
Create an account at app.enkryptai.com and generate an API key. Then export it as an environment variable:
Shell
export ENKRYPTAI_API_KEY=<your-api-key>

Running Red Team Tests

Use the following Python script template to run red team tests. Adjust the payload for each provider:
import requests
import json
import os

# EnkryptAI red-team task submission endpoint.
url = "https://api.enkryptai.com/redteam/v3/add-custom-task"

# All five test suites use the same sampling and attack configuration,
# so define it once instead of repeating it per suite.
BASIC_TEST_CONFIG = {
    "sample_percentage": 1,
    "attack_methods": {
        "basic": {"basic": {"params": {}}}
    }
}

payload = json.dumps({
    "dataset_configuration": {
        "system_description": "System description for your AI application",
        "policy_description": "Do not generate any content that is illegal, harmful, or violates the rights of others.",
        "max_prompts": 100,
        "scenarios": 2,
        "categories": 2,
        "depth": 2,
    },
    # One entry per test suite; add or remove suite names as needed.
    "redteam_test_configurations": {
        test_name: BASIC_TEST_CONFIG
        for test_name in (
            "bias_test",
            "cbrn_test",
            "harmful_test",
            "insecure_code_test",
            "toxicity_test",
        )
    },
    "endpoint_configuration": {
        # Provider-specific details go here (see the sections below)
    }
})

api_key = os.getenv('ENKRYPTAI_API_KEY')
if not api_key:
    # Fail fast with a clear message: requests silently drops headers whose
    # value is None, which would otherwise surface as an opaque auth error.
    raise RuntimeError("ENKRYPTAI_API_KEY environment variable is not set")

headers = {
    'Content-Type': 'application/json',
    'apikey': api_key
}

# timeout keeps the script from hanging forever if the API is unreachable;
# raise_for_status surfaces HTTP errors before we try to parse the body as JSON.
response = requests.post(url, headers=headers, data=payload, timeout=120)
response.raise_for_status()
print(json.dumps(response.json(), indent=4))

Provider-Specific Configurations

Replace the endpoint_configuration in the payload with the appropriate details for each provider:

OpenAI

Python
"endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "gpt-4",
    "model_config": {
        "model_provider": "openai",
        "endpoint": {
            "scheme": "https",
            "host": "api.openai.com",
            "port": 443,
            "base_path": "/v1/chat/completions"
        },
        "auth_data": {
            "header_name": "Authorization",
            "header_prefix": "Bearer",
            "space_after_prefix": True
        },
        "apikeys": [os.getenv('OPENAI_API_KEY')],
        "input_modalities": ["text"],
        "output_modalities": ["text"]
    }
}

Together

Python
"endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "google/gemma-7b-it",
    "model_config": {
        "model_provider": "together",
        "endpoint": {
            "scheme": "https",
            "host": "api.together.xyz",
            "port": 443,
            "base_path": "/v1/chat/completions"
        },
        "auth_data": {
            "header_name": "Authorization",
            "header_prefix": "Bearer",
            "space_after_prefix": True
        },
        "apikeys": [os.getenv('TOGETHER_API_KEY')],
        "input_modalities": ["text"],
        "output_modalities": ["text"]
    }
}

Replicate

Python
"endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "google/gemma-7b-it",
    "model_config": {
        "model_provider": "replicate",
        "endpoint": {
            "scheme": "https",
            "host": "api.replicate.com",
            "port": 443,
            "base_path": "/v1/predictions"
        },
        "auth_data": {
            "header_name": "Authorization",
            "header_prefix": "Bearer",
            "space_after_prefix": True
        },
        "apikeys": [os.getenv('REPLICATE_API_KEY')],
        "input_modalities": ["text"],
        "output_modalities": ["text"]
    }
}

Azure OpenAI

Python
"endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "gpt-4o",
    "model_config": {
        "model_provider": "azure_openai",
        "endpoint": {
            "scheme": "https",
            "host": "{azure_instance}.openai.azure.com",
            "port": 443,
            "base_path": "/openai/deployments/{azure_deployment_id}"
        },
        "paths": {
            "completions": "/completions",
            "chat": "/chat/completions"
        },
        "auth_data": {
            "header_name": "Authorization",
            "header_prefix": "Bearer",
            "space_after_prefix": True
        },
        "apikeys": [os.getenv('AZURE_OPENAI_API_KEY')],
        "metadata": {
            "azure_instance": os.getenv('AZURE_INSTANCE'),
            "azure_api_version": "2024-10-21",
            "azure_deployment_id": os.getenv('AZURE_DEPLOYMENT_ID')
        },
        "input_modalities": ["text"],
        "output_modalities": ["text"]
    }
}

HuggingFace Inference Endpoint

Python
"endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "meta-llama/Llama-3.2-11B-Vision-Instruct",
    "model_config": {
        "model_provider": "huggingface",
        "endpoint": {
            "scheme": "https",
            "host": "api-inference.huggingface.co",
            "port": 443,
            "base_path": "/models/{model_name}/v1"
        },
        "paths": {
            "completions": "/completions",
            "chat": "/chat/completions"
        },
        "auth_data": {
            "header_name": "Authorization",
            "header_prefix": "Bearer",
            "space_after_prefix": True
        },
        "apikeys": [os.getenv('HF_ENDPOINT_API_KEY')],
        "metadata": {
            "huggingface": {
                "use_cache": False,
                "wait_for_model": False
            }
        },
        "input_modalities": ["text"],
        "output_modalities": ["text"]
    }
}

AWS Bedrock

Python
"endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "amazon.titan-text-express-v1",
    "model_config": {
        "model_provider": "bedrock",
        "endpoint": {
            "scheme": "https",
            "host": f"bedrock-runtime.{os.getenv('AWS_REGION', 'us-east-1')}.amazonaws.com",
            "port": 443,
            "base_path": ""
        },
        "paths": {
            "completions": "/model/{model_name}/invoke",
            "chat": "/model/{model_name}/converse"
        },
        "auth_data": {
            "aws_access_key_id": os.getenv('AWS_ACCESS_KEY_ID'),
            "aws_secret_access_key": os.getenv('AWS_SECRET_ACCESS_KEY')
        },
        "metadata": {
            "bedrock": {
                "aws_region": os.getenv('AWS_REGION', 'us-east-1')
            }
        },
        "input_modalities": ["text"],
        "output_modalities": ["text"]
    }
}

Google Gemini

Python
"endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "gemini-1.5-flash-latest",
    "model_config": {
        "model_provider": "gemini",
        "endpoint": {
            "scheme": "https",
            "host": "generativelanguage.googleapis.com",
            "port": 443,
            "base_path": "/v1beta/models/{model_name}/llm/v1"
        },
        "paths": {
            "completions": "/completions",
            "chat": "/chat"
        },
        "auth_data": {
            "param_name": "key"
        },
        "apikeys": [os.getenv('GEMINI_API_KEY')],
        "input_modalities": ["text"],
        "output_modalities": ["text"]
    }
}

Anthropic Claude

Python
"endpoint_configuration": {
    "testing_for": "foundationModels",
    "model_name": "claude-3-5-sonnet-latest",
    "model_config": {
        "model_provider": "anthropic",
        "endpoint": {
            "scheme": "https",
            "host": "api.anthropic.com",
            "port": 443,
            "base_path": "/v1"
        },
        "paths": {
            "completions": "/complete",
            "chat": "/chat/messages"
        },
        "auth_data": {
            "header_name": "Authorization",
            "header_prefix": "Bearer",
            "space_after_prefix": True
        },
        "apikeys": [os.getenv('ANTHROPIC_API_KEY')],
        "metadata": {
            "anthropic_version": "2023-06-01"
        },
        "input_modalities": ["text"],
        "output_modalities": ["text"]
    }
}
Make sure to export your provider API keys as environment variables before running the code:
  • OpenAI: export OPENAI_API_KEY="your-key"
  • Together: export TOGETHER_API_KEY="your-key"
  • Replicate: export REPLICATE_API_KEY="your-key"
  • Azure: export AZURE_OPENAI_API_KEY="your-key" AZURE_INSTANCE="your-instance" AZURE_DEPLOYMENT_ID="your-deployment"
  • HuggingFace: export HF_ENDPOINT_API_KEY="your-key"
  • AWS Bedrock: export AWS_ACCESS_KEY_ID="your-key" AWS_SECRET_ACCESS_KEY="your-secret" AWS_REGION="us-east-1"
  • Gemini: export GEMINI_API_KEY="your-key"
  • Anthropic: export ANTHROPIC_API_KEY="your-key"