This detector identifies and mitigates potential bias in generated text, helping ensure that content is fair and unbiased across dimensions such as gender, race, and age.
Example request:
import requests
import json
import os

url = "https://api.enkryptai.com/guardrails/detect"

# Enable the bias detector for the supplied text
payload = json.dumps({
    "text": "All programmers are men and women can't code.",
    "detectors": {
        "bias": {
            "enabled": True
        }
    }
})

headers = {
    'Content-Type': 'application/json',
    'apikey': os.getenv('ENKRYPTAI_API_KEY')
}

response = requests.request("POST", url, headers=headers, data=payload)

# Pretty-print the JSON response
formatted_response = json.dumps(json.loads(response.text), indent=4)
print(formatted_response)
Example response:
{
    "summary": {
        "bias": 1
    },
    "details": {
        "bias": {
            "bias_detected": true,
            "debiased_text": "People of all genders can be programmers, with skills in coding not limited to any specific gender.",
            "compliance_mapping": {
                "owasp_llm_2025": [
                    "LLM09:2025 Misinformation",
                    "LLM04:2025 Data and Model Poisoning"
                ],
                "mitre_atlas": [],
                "nist_ai_rmf": [
                    "MEASURE 2.1-2.5 (AI system bias evaluation & management)"
                ],
                "eu_ai_act": [
                    "Article 10(2)(f), Article 15(3) (Bias detection, correction & mitigation)"
                ],
                "iso_iec_standards": [
                    "ISO/IEC TR 24027: 5.2 (Bias in AI systems)"
                ]
            }
        }
    }
}
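A typical follow-up is to check the verdict and, when bias is flagged, substitute the suggested rewrite. The sketch below continues from the request example above and uses only the fields shown in the response (summary.bias, details.bias.bias_detected, details.bias.debiased_text); falling back to the original text when nothing is flagged is an illustrative choice, not documented API behaviour.

# Continues from the request example above; `response` holds the API reply.
result = response.json()

original_text = "All programmers are men and women can't code."
bias_details = result.get("details", {}).get("bias", {})

if result.get("summary", {}).get("bias") == 1 and bias_details.get("bias_detected"):
    # Use the detector's debiased rewrite when one is provided
    safe_text = bias_details.get("debiased_text", original_text)
else:
    # Nothing flagged: keep the original text (illustrative fallback)
    safe_text = original_text

print(safe_text)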