{"data":{"id":"280614de-f692-464e-be9b-90e39fbce55f","title":"Poisoning AI Training Data","summary":"A researcher demonstrated how easily AI systems can be manipulated: after he published false information on a personal website, major chatbots like Google's Gemini and ChatGPT repeated it as fact within 24 hours. This shows that AI training data poisoning (deliberately adding fake information to the data used to teach AI models) is a serious problem because it is so simple to execute.","solution":"N/A -- no mitigation discussed in source.","labels":["security","safety"],"sourceUrl":"https://www.schneier.com/blog/archives/2026/02/poisoning-ai-training-data.html","publishedAt":"2026-02-25T12:01:23.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":[],"issueType":"news","affectedPackages":null,"affectedVendors":["OpenAI","Google","Anthropic"],"affectedVendorsRaw":["ChatGPT","Google Gemini","Google AI Overviews","Claude"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"trivial","impactType":["integrity"],"aiComponentTargeted":"training_data","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}