{"data":{"id":"36cd61a4-8c84-4415-a16e-d0dbe30c4fed","title":"Federal judge sides with Anthropic in first round of standoff with Pentagon","summary":"Anthropic won a temporary legal victory when a federal judge ordered a pause on the Department of Defense's punishment of the company, which had refused to let the military use its Claude AI model in autonomous weapons systems (systems that can make attack decisions without human control). Anthropic claimed the government violated its free speech rights by declaring it a supply chain risk (a company whose products could be exploited to harm national security) and blocking agencies from using its technology.","solution":"N/A -- no mitigation discussed in source.","labels":["policy"],"sourceUrl":"https://www.theguardian.com/us-news/2026/mar/26/anthropic-ai-pentagon","publishedAt":"2026-03-26T23:17:34.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":[],"issueType":"news","affectedPackages":null,"affectedVendors":["Anthropic"],"affectedVendorsRaw":["Anthropic","Claude"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":"2026-03-26T23:17:34.000Z","capecIds":null,"crossRefCount":0,"attackSophistication":"trivial","impactType":null,"aiComponentTargeted":null,"llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}