{"data":{"id":"2387989b-b211-409d-9c0f-ce08fd7b3175","title":"CVE-2025-30202: vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs. Versions starting from 0.5.2 and p","summary":"vLLM versions 0.5.2 through 0.8.4 have a security vulnerability in multi-node deployments where a ZeroMQ socket (a tool for sending messages between different computers) is left open to all network interfaces. An attacker with network access can connect to this socket to see internal vLLM data or deliberately slow down the system by connecting repeatedly without reading the data, causing a denial of service (making the system unavailable or very slow).","solution":"This issue has been patched in version 0.8.5. Update vLLM to version 0.8.5 or later.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-30202","publishedAt":"2025-04-30T05:15:51.800Z","cveId":"CVE-2025-30202","cweIds":["CWE-770"],"cvssScore":"7.5","cvssSeverity":"high","severity":"high","attackType":["denial_of_service","data_extraction"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.00447,"patchAvailable":null,"disclosureDate":null,"capecIds":["CAPEC-130"],"crossRefCount":0,"attackSophistication":"trivial","impactType":["confidentiality","availability"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":null}}