{"data":{"id":"940d3644-1c49-4259-abd1-5b22dd1ae994","title":"CVE-2026-24779: vLLM is an inference and serving engine for large language models (LLMs). Prior to version 0.14.1, a Server-Side Request Forgery (SSRF) vulnerability exists in its multimodal feature.","summary":"vLLM, a system for running and serving large language models, has a Server-Side Request Forgery vulnerability (SSRF, where an attacker tricks a server into making requests to unintended targets) in its multimodal feature before version 0.14.1. The bug exists because two different Python libraries interpret backslashes differently, allowing attackers to bypass security checks and force the vLLM server to send requests to internal network systems, potentially stealing data or causing failures.","solution":"Update to version 0.14.1, which contains a patch for the issue.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2026-24779","publishedAt":"2026-01-28T03:15:57.280Z","cveId":"CVE-2026-24779","cweIds":["CWE-918"],"cvssScore":"7.1","cvssSeverity":"high","severity":"high","attackType":["supply_chain"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.00016,"patchAvailable":null,"disclosureDate":null,"capecIds":["CAPEC-664"],"crossRefCount":0,"attackSophistication":"moderate","impactType":["confidentiality","integrity","availability"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":null}}