{"data":{"id":"eb9a26d8-3b5b-4540-8ed9-839f58fe1d11","title":"CVE-2025-46570: vLLM is an inference and serving engine for large language models (LLMs). Prior to version 0.9.0, when a new prompt is processed","summary":"vLLM, an inference and serving engine for large language models, had a vulnerability in versions before 0.9.0 where timing differences in the PageAttention mechanism (a feature that speeds up processing by reusing matching text chunks) were large enough that attackers could detect and exploit them. This type of attack is called a timing side-channel attack, where an attacker learns information by measuring how long operations take.","solution":"Update vLLM to version 0.9.0 or later. The issue has been patched in version 0.9.0.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-46570","publishedAt":"2025-05-29T21:15:21.327Z","cveId":"CVE-2025-46570","cweIds":["CWE-208","CWE-203"],"cvssScore":"2.6","cvssSeverity":"low","severity":"low","attackType":["data_extraction"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.00058,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["confidentiality"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.85,"researchCategory":null,"atlasIds":null}}