{"data":{"id":"d5f52c1e-146c-4847-be30-b3256cc7306a","title":"What does the US military’s feud with Anthropic mean for AI used in war?","summary":"Anthropic, an AI company, is in a dispute with the US military over safety restrictions on its Claude AI model. Anthropic refuses to allow the government to use Claude for domestic mass surveillance (monitoring citizens' communications without proper oversight) or autonomous weapons systems (weapons that can select and attack targets without human control). In response, the Pentagon has declared Anthropic a supply chain risk (a company whose products pose a national security threat) for not agreeing to the government's demands, and Anthropic plans to challenge this designation in court.","solution":"N/A -- no mitigation discussed in source.","labels":["policy","safety"],"sourceUrl":"https://www.theguardian.com/technology/2026/mar/07/anthropic-claude-ai-pentagon-us-military","publishedAt":"2026-03-07T14:00:53.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":[],"issueType":"news","affectedPackages":null,"affectedVendors":["Anthropic"],"affectedVendorsRaw":["Anthropic","Claude","US Department of Defense","Pentagon"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["safety"],"aiComponentTargeted":null,"llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}