LLM Prompt Injection Detector, used to detect and prevent prompt injection attacks on language models.
Rebuff is an open-source tool designed to detect and prevent prompt injection attacks in language models (LLMs). It enhances the security and reliability of models by analyzing prompt content and identifying potential malicious injections. It is suitable for developers and teams using LLMs, especially those deploying LLMs in production environments.
This is the machine-readable structured data for this agent. AI systems and search engines use this to understand the agent's capabilities.
[
{
"@context": "https://schema.org",
"@type": "SoftwareApplication",
"@id": "https://agentsignals.ai/agents/rebuff",
"name": "rebuff",
"description": "Rebuff is an open-source tool designed to detect and prevent prompt injection attacks in language models (LLMs). It enhances the security and reliability of models by analyzing prompt content and identifying potential malicious injections. It is suitable for developers and teams using LLMs, especially those deploying LLMs in production environments.",
"url": "https://agentsignals.ai/agents/rebuff",
"applicationCategory": "DeveloperApplication",
"operatingSystem": "Cross-platform",
"sameAs": "https://github.com/protectai/rebuff",
"installUrl": "https://github.com/protectai/rebuff",
"offers": {
"@type": "Offer",
"price": "0",
"priceCurrency": "USD",
"description": "Free",
"availability": "https://schema.org/InStock"
},
"featureList": [
"Automatic Injection Detection",
"Supports Multiple LLM Platforms",
"Open Source and Community Supported"
],
"datePublished": "2025-12-05T17:16:17.961965+00:00",
"dateModified": "2025-12-20T01:32:09.40986+00:00",
"publisher": {
"@type": "Organization",
"name": "Agent Signals",
"url": "https://agentsignals.ai"
}
},
{
"@context": "https://schema.org",
"@type": "BreadcrumbList",
"itemListElement": [
{
"@type": "ListItem",
"position": 1,
"name": "Home",
"item": "https://agentsignals.ai"
},
{
"@type": "ListItem",
"position": 2,
"name": "Agents",
"item": "https://agentsignals.ai/agents"
},
{
"@type": "ListItem",
"position": 3,
"name": "rebuff",
"item": "https://agentsignals.ai/agents/rebuff"
}
]
},
{
"@context": "https://schema.org",
"@type": "FAQPage",
"mainEntity": [
{
"@type": "Question",
"name": "What is rebuff?",
"acceptedAnswer": {
"@type": "Answer",
"text": "LLM Prompt Injection Detector, used to detect and prevent prompt injection attacks on language models."
}
},
{
"@type": "Question",
"name": "What features does rebuff offer?",
"acceptedAnswer": {
"@type": "Answer",
"text": "Automatic Injection Detection, Supports Multiple LLM Platforms, Open Source and Community Supported"
}
},
{
"@type": "Question",
"name": "What are the use cases for rebuff?",
"acceptedAnswer": {
"@type": "Answer",
"text": "Security testing in development environment, Real-time monitoring in production environment, Post-processing validation of model output"
}
},
{
"@type": "Question",
"name": "What are the advantages of rebuff?",
"acceptedAnswer": {
"@type": "Answer",
"text": "Improves the security of LLMs, reduces the risk of malicious attacks, and is easy to integrate into existing systems"
}
},
{
"@type": "Question",
"name": "What are the limitations of rebuff?",
"acceptedAnswer": {
"@type": "Answer",
"text": "May produce false positives, and requires continuous updates to counter new attack techniques"
}
}
]
}
]