{"data":{"id":"7458be3c-d4de-4b4b-88bf-92135aa5281b","title":"CVE-2025-48943: vLLM is an inference and serving engine for large language models (LLMs). Versions 0.8.0 up to but excluding 0.9.0 have a Denial of Service vulnerability: an invalid regex supplied for structured output can crash the server.","summary":"CVE-2025-48943 is a Denial of Service vulnerability (a type of attack that crashes a system) in vLLM versions 0.8.0 through 0.8.x that causes the server to crash when given an invalid regex (a pattern used to match text). This happens specifically when using the structured output feature, which lets the AI format responses in a specific way.","solution":"Upgrade to version 0.9.0, which fixes the issue. A patch is available at https://github.com/vllm-project/vllm/commit/08bf7840780980c7568c573c70a6a8db94fd45ff.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-48943","publishedAt":"2025-05-30T23:15:30.280Z","cveId":"CVE-2025-48943","cweIds":["CWE-248"],"cvssScore":"6.5","cvssSeverity":"medium","severity":"medium","attackType":["denial_of_service"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.00083,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"trivial","impactType":["availability"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":null}}