{"data":{"id":"84434f07-45f9-4a59-a0a3-88142c2a3dbf","title":"Adversarial Prompting: Tutorial and Lab","summary":"This resource is a tutorial and lab (an interactive learning environment for hands-on practice) that teaches prompt injection, which is a technique for tricking AI systems by embedding hidden instructions in their input. The tutorial covers examples ranging from simple prompt engineering (getting an AI to change its output) to more complex attacks like injecting malicious code (HTML/XSS, which runs unwanted scripts in web browsers) and stealing data from AI systems.","solution":"N/A -- no mitigation discussed in source.","labels":["security","research"],"sourceUrl":"https://embracethered.com/blog/posts/2023/adversarial-prompting-tutorial-and-lab/","publishedAt":"2023-05-12T05:09:43.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["prompt_injection"],"issueType":"news","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":[],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["confidentiality","integrity"],"aiComponentTargeted":"api","llmSpecific":true,"classifierConfidence":0.85,"researchCategory":null,"atlasIds":null}}