{"data":{"id":"43e3efbf-d297-4f9c-84f5-5406080752c8","title":"Anthropic says it ‘cannot in good conscience’ allow Pentagon to remove AI checks","summary":"Anthropic refused a Pentagon demand to remove safety precautions (safeguards built into AI systems to prevent harmful outputs) from its Claude AI model and allow unrestricted military use, despite threats to cancel a $200 million contract and damage the company's reputation. The Department of Defense demanded compliance by Friday, threatening to label Anthropic a 'supply chain risk,' a designation that could harm the company financially.","solution":"N/A -- no mitigation discussed in source.","labels":["policy","safety"],"sourceUrl":"https://www.theguardian.com/us-news/2026/feb/26/anthropic-pentagon-claude","publishedAt":"2026-02-26T23:28:14.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":[],"issueType":"news","affectedPackages":null,"affectedVendors":["Anthropic"],"affectedVendorsRaw":["Anthropic","Claude","Pentagon","Department of Defense"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["safety"],"aiComponentTargeted":"model","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}