{"data":{"id":"064a6ec1-84d2-4986-b63f-15d024dedeb7","title":"CVE-2025-48956: vLLM is an inference and serving engine for large language models (LLMs). From 0.1.0 to before 0.10.1.1, a Denial of Service vulnerability","summary":"CVE-2025-48956 is a Denial of Service vulnerability (a type of attack that makes a service unavailable) in vLLM, an inference and serving engine for large language models. Versions 0.1.0 through 0.10.1.0 are vulnerable to crashing when someone sends an HTTP GET request with an extremely large header, which exhausts the server's memory. This attack requires no authentication, so anyone on the internet can trigger it.","solution":"This vulnerability is fixed in vLLM version 0.10.1.1. Users should upgrade to this version or later.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-48956","publishedAt":"2025-08-21T19:15:32.230Z","cveId":"CVE-2025-48956","cweIds":["CWE-400"],"cvssScore":"7.5","cvssSeverity":"high","severity":"high","attackType":["denial_of_service"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.0034,"patchAvailable":null,"disclosureDate":null,"capecIds":["CAPEC-125","CAPEC-130"],"crossRefCount":0,"attackSophistication":"trivial","impactType":["availability"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":null}}