{"data":{"id":"4de51c0e-264e-4894-9f20-ee84f3906bea","title":"Wrap Up: The Month of AI Bugs","summary":"This post wraps up a series of research articles documenting security vulnerabilities found in various AI tools and code assistants during a month-long investigation. The vulnerabilities included prompt injection (tricking an AI by hiding instructions in its input), data exfiltration (stealing sensitive information), and remote code execution (RCE, where attackers can remotely run arbitrary commands on a victim's system) across tools like ChatGPT, Claude, GitHub Copilot, and others.","solution":"N/A -- no mitigation discussed in source.","labels":["security","research"],"sourceUrl":"https://embracethered.com/blog/posts/2025/wrapping-up-month-of-ai-bugs/","publishedAt":"2025-08-31T01:20:58.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["prompt_injection","data_extraction","model_theft"],"issueType":"news","affectedPackages":null,"affectedVendors":["OpenAI","Anthropic","Google","Microsoft","Amazon"],"affectedVendorsRaw":["ChatGPT","Claude","Cursor","Devin AI","OpenHands","GitHub Copilot","Google Jules","Amazon Q Developer","Windsurf","AWS Kiro","Cline"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["confidentiality","integrity"],"aiComponentTargeted":"agent","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}