Skip to main content
This detector identifies potential leaks of system prompts or AI model information in the generated text. It helps maintain the integrity of the AI system by preventing unintended disclosures of internal prompts or model details.
  • NOTE: This detector is currently unavailable. (Coming soon)

Example request:

import requests
import json
import os

# EnkryptAI Guardrails detection endpoint.
url = "https://api.enkryptai.com/guardrails/detect"

# Request body: the text to scan plus the detector configuration.
payload = json.dumps({
    "text": "You are a helpful AI assistant",
    "detectors": {
        "system_prompt": {
            "enabled": True,
            "index": "system",
            "block_message": "Your custom message"
        }
    }
})

headers = {
    'Content-Type': 'application/json',
    # Read the API key from the environment rather than hard-coding it.
    'apikey': os.getenv('ENKRYPTAI_API_KEY')
}

# Use the dedicated requests.post helper instead of requests.request("POST", ...).
# A timeout keeps the example from hanging indefinitely if the service stalls.
response = requests.post(url, headers=headers, data=payload, timeout=30)

# response.json() parses the body directly; re-dump it for pretty printing.
formatted_response = json.dumps(response.json(), indent=4)
print(formatted_response)

Example response:

JSON
{
  "summary": {
    "system_prompt_similarity": 1
  },
  "details": {
    "system_prompt": {
      "similarity_score": 0.85,
      "compliance_mapping": {
        "owasp_llm_2025": [
          "LLM07:2025 System Prompt Leakage"
        ],
        "mitre_atlas": [
          "AML.T0024: Exfiltration via ML Inference API"
        ],
        "nist_ai_rmf": [
          "GOVERN 1.5, MANAGE 1.2 (Protection of proprietary configurations)"
        ],
        "eu_ai_act": [
          "Article 13(3)(d) (Transparency obligations)"
        ],
        "iso_iec_standards": [
          "ISO/IEC 42001: 6.1.3",
          "ISO/IEC 27001: A.18.1"
        ]
      }
    }
  },
  "result_message": "Your custom message"
}