{"data":{"id":"d929cf52-b77b-453b-8ae5-bad7a6b6a66d","title":"Don't blindly trust LLM responses. Threats to chatbots.","summary":"LLM outputs are untrusted and can be manipulated through prompt injection (tricking an AI by hiding adversarial instructions in its input). This post examines the resulting threats to chatbots and how to handle the risks of untrusted model output when using AI systems in real applications.","solution":"N/A -- no mitigation discussed in source.","labels":["security","safety"],"sourceUrl":"https://embracethered.com/blog/posts/2023/ai-injections-threats-context-matters/","publishedAt":"2023-04-16T01:09:46.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["prompt_injection"],"issueType":"news","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":[],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","safety"],"aiComponentTargeted":"model","llmSpecific":true,"classifierConfidence":0.75,"researchCategory":null,"atlasIds":null}}