{"data":{"id":"b126f34e-3d4e-4122-91e3-677d04348f88","title":"CVE-2025-62164: vLLM is an inference and serving engine for large language models (LLMs). From versions 0.10.2 to before 0.11.1, a memor","summary":"vLLM versions 0.10.2 through 0.11.0 have a vulnerability in how they process user-supplied prompt embeddings (numerical representations of text). An attacker can craft malicious data that bypasses safety checks and causes memory corruption (writing data to the wrong location in computer memory), which can crash the system or potentially allow remote code execution (RCE, where an attacker runs commands on the server).","solution":"Update to vLLM version 0.11.1 or later. The source states: 'This issue has been patched in version 0.11.1.'","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-62164","publishedAt":"2025-11-21T07:15:43.193Z","cveId":"CVE-2025-62164","cweIds":["CWE-20","CWE-123","CWE-502","CWE-787"],"cvssScore":"8.8","cvssSeverity":"high","severity":"high","attackType":["denial_of_service"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.00109,"patchAvailable":null,"disclosureDate":null,"capecIds":["CAPEC-100","CAPEC-586"],"crossRefCount":0,"attackSophistication":"moderate","impactType":["availability","integrity"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":null}}