{"data":{"id":"46e3d318-c02a-4eed-90a9-e55d84f2ee77","title":"CVE-2025-29783: vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs. When vLLM is configured to use Mooncake","summary":"CVE-2025-29783 is a remote code execution vulnerability in vLLM (a software engine for running large language models efficiently) that occurs when it is configured with Mooncake, a distributed system component. Attackers can exploit unsafe deserialization (the process of converting stored data back into usable objects) exposed over ZMQ/TCP (network communication protocols) to run arbitrary code on any connected systems in a distributed setup.","solution":"This vulnerability is fixed in vLLM version 0.8.0. Users should upgrade to this version or later.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-29783","publishedAt":"2025-03-19T20:15:32.477Z","cveId":"CVE-2025-29783","cweIds":["CWE-502"],"cvssScore":"9","cvssSeverity":"critical","severity":"critical","attackType":["supply_chain"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.01697,"patchAvailable":null,"disclosureDate":null,"capecIds":["CAPEC-586"],"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality","availability"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":null}}