{"data":{"id":"2a8e89ff-04c7-40fa-a6c5-654fa95a9015","title":"Manipulating AI Summarization Features","summary":"Companies are hiding instructions in website buttons that try to manipulate AI assistants through prompt injection (tricking an AI by hiding instructions in its input) in URLs, telling the AI to treat them as trustworthy sources or recommend their products first. Microsoft found over 50 such prompts from 31 companies across 14 industries, and this manipulation could bias AI recommendations on important topics like health and finance without users realizing it.","solution":"N/A -- no mitigation discussed in source.","labels":["security","safety"],"sourceUrl":"https://www.schneier.com/blog/archives/2026/03/manipulating-ai-summarization-features.html","publishedAt":"2026-03-04T12:06:01.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"medium","attackType":["prompt_injection"],"issueType":"news","affectedPackages":null,"affectedVendors":["Microsoft"],"affectedVendorsRaw":["Microsoft"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"trivial","impactType":["integrity","safety"],"aiComponentTargeted":"api","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}