{"data":{"id":"140be3d5-1cec-40f6-bedc-6c1f38ab6819","title":"CVE-2025-25183: vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs. Maliciously constructed statements can lead to hash collisions","summary":"vLLM, a system for running large language models efficiently, has a vulnerability where attackers can craft malicious input to cause hash collisions (when two different inputs produce the same fingerprint value), allowing them to reuse cached data (stored computation results) from previous requests and corrupt subsequent responses. Python 3.12 made hash values predictable, making this attack easier to execute intentionally.","solution":"This issue has been addressed in version 0.7.2 and all users are advised to upgrade. There are no known workarounds for this vulnerability.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-25183","publishedAt":"2025-02-08T01:15:34.083Z","cveId":"CVE-2025-25183","cweIds":["CWE-354"],"cvssScore":"2.6","cvssSeverity":"low","severity":"low","attackType":["rag_poisoning"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.00064,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}