{"data":{"id":"5e0cf75c-d70c-47a0-a234-c72504785fc8","title":"CVE-2025-32444: vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs. Versions starting from 0.6.5 and p","summary":"vLLM (a system for running AI models efficiently) versions 0.6.5 through 0.8.4 have a critical vulnerability when using mooncake integration. Attackers can execute arbitrary code remotely because the system uses pickle (an unsafe method for converting data into a format that can be transmitted) over unencrypted ZeroMQ sockets (communication channels) that listen on all network interfaces, making them easily accessible from the internet.","solution":"Update to vLLM version 0.8.5 or later, which has patched this vulnerability.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-32444","publishedAt":"2025-04-30T05:15:51.953Z","cveId":"CVE-2025-32444","cweIds":["CWE-502"],"cvssScore":"10","cvssSeverity":"critical","severity":"critical","attackType":["supply_chain"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.02477,"patchAvailable":null,"disclosureDate":null,"capecIds":["CAPEC-586"],"crossRefCount":0,"attackSophistication":"moderate","impactType":["confidentiality","integrity","availability"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":null}}