{"data":{"id":"182bcd2b-442b-4bc4-9efa-6ef20f9ee4d2","title":"AI threats in the wild: The current state of prompt injections on the web","summary":"Google's Threat Intelligence teams conducted a broad scan of the public web to find real-world examples of indirect prompt injection (IPI, where an AI system reads malicious instructions hidden in websites or documents instead of following a user's original request). The study found that most prompt injection detections on the web were actually false positives (harmless content like educational articles discussing the topic rather than actual attacks), making it difficult to identify genuine threats.","solution":"N/A -- no mitigation discussed in source.","labels":["security","research"],"sourceUrl":"https://security.googleblog.com/2026/04/ai-threats-in-wild-current-state-of.html","publishedAt":"2026-04-23T21:38:00.001Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":[],"issueType":"news","affectedPackages":null,"affectedVendors":["Google"],"affectedVendorsRaw":["Google","Google DeepMind","Google Threat Intelligence Group"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":"2026-04-23T21:38:00.001Z","capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","safety"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}