{"data":{"id":"b7b48cba-baa7-4a96-9601-5736b07cdf52","title":"Video: Prompt Injections - An Introduction","summary":"Prompt injection (tricking an AI by hiding instructions in its input) is a widespread vulnerability in AI education, with indirect prompt injections being particularly dangerous because they allow untrusted data to secretly take control of an LLM (large language model) and change its goals and behavior. Since attack payloads use natural language, attackers can craft many creative variations to bypass input validation (checking that data meets safety rules) and web application firewalls (security systems that filter harmful requests).","solution":"N/A -- no mitigation discussed in source.","labels":["security","safety"],"sourceUrl":"https://embracethered.com/blog/posts/2023/prompt-injection-an-introduction-video/","publishedAt":"2023-05-10T14:00:40.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["prompt_injection"],"issueType":"news","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":[],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"trivial","impactType":["integrity","safety"],"aiComponentTargeted":"api","llmSpecific":true,"classifierConfidence":0.85,"researchCategory":null,"atlasIds":null}}