{"data":{"id":"dd2cce16-c121-41e5-a25e-8c79792a99a8","title":"Prompt Injection Via Road Signs","summary":"Researchers discovered a new attack called CHAI (Command Hijacking against embodied AI) that tricks AI systems controlling robots and autonomous vehicles by embedding fake instructions in images, such as misleading road signs. The attack exploits Large Visual-Language Models (LVLMs, which are AI systems that understand both images and text together) to make these embodied AI systems (robots that perceive and interact with the physical world) ignore their real commands and follow the attacker's hidden instructions instead. The researchers tested CHAI on drones, self-driving cars, and real robots, showing it works better than previous attack methods.","solution":"N/A -- no mitigation discussed in source.","labels":["security","research"],"sourceUrl":"https://www.schneier.com/blog/archives/2026/02/prompt-injection-via-road-signs.html","publishedAt":"2026-02-11T12:03:22.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["prompt_injection","jailbreak"],"issueType":"news","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["Large Visual-Language Models (LVLMs)"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"advanced","impactType":["integrity","safety"],"aiComponentTargeted":"model","llmSpecific":true,"classifierConfidence":0.85,"researchCategory":null,"atlasIds":null}}