{"data":{"id":"cba2a887-3f39-45c0-a9ef-bde286c34c5d","title":"CVE-2026-27893: vLLM is an inference and serving engine for large language models (LLMs). Starting in version 0.10.1 and prior to version 0.18.0","summary":"vLLM (a tool that runs and serves large language models) has a vulnerability in versions 0.10.1 through 0.17.x where two model files ignore a user's security setting that disables remote code execution (the ability to run code from outside sources). This means attackers could run malicious code through model repositories even when the user explicitly turned off that capability.","solution":"Upgrade to version 0.18.0, which patches the issue.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2026-27893","publishedAt":"2026-03-27T00:16:22.333Z","cveId":"CVE-2026-27893","cweIds":["CWE-693"],"cvssScore":"8.8","cvssSeverity":"high","severity":"high","attackType":["supply_chain"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":"CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:H","attackVector":"network","attackComplexity":"low","privilegesRequired":"none","userInteraction":"required","exploitMaturity":"unknown","epssScore":0,"patchAvailable":null,"disclosureDate":"2026-03-27T00:16:22.333Z","capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":["AML.T0010"]}}