{"data":{"id":"ae4dcbda-cac2-4acd-ad0d-a8284424f977","title":"Ekoparty Talk - Prompt Injections in the Wild","summary":"A security researcher presented at Ekoparty 2023 about prompt injections (attacks where malicious instructions are hidden in inputs to trick an AI into misbehaving) found in real-world LLM applications and chatbots like ChatGPT, Bing Chat, and Google Bard, demonstrating various exploits and discussing mitigations. The talk covered both basic LLM concepts and deep dives into how these attacks work across different AI platforms.","solution":"N/A -- general mitigations are discussed in the talk, but no single specific fix applies.","labels":["security","research"],"sourceUrl":"https://embracethered.com/blog/posts/2023/ekoparty-prompt-injection-talk/","publishedAt":"2023-11-29T00:00:33.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["prompt_injection"],"issueType":"news","affectedPackages":null,"affectedVendors":["OpenAI","Anthropic","Google","Microsoft","Amazon"],"affectedVendorsRaw":["Bing Chat","ChatGPT","Anthropic Claude","Azure AI","GCP Vertex AI","Google Bard"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality"],"aiComponentTargeted":"api","llmSpecific":true,"classifierConfidence":0.85,"researchCategory":null,"atlasIds":null}}