{"data":{"id":"0f16f139-cafd-4327-889a-2bb9c918e2c9","title":"CVE-2025-66448: vLLM is an inference and serving engine for large language models (LLMs). Prior to 0.11.1, vllm has a critical remote code execution vulnerability","summary":"vLLM (a tool for running large language models) versions before 0.11.1 have a critical security flaw where loading a model configuration can execute malicious code from the internet without the user's permission. An attacker can create a fake model that appears safe but secretly downloads and runs harmful code from another location, even when users try to block remote code by setting trust_remote_code=False (a security setting meant to prevent exactly this).","solution":"This vulnerability is fixed in vLLM version 0.11.1. Users should update to this version or later.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-66448","publishedAt":"2025-12-02T04:15:54.213Z","cveId":"CVE-2025-66448","cweIds":["CWE-94"],"cvssScore":"7.1","cvssSeverity":"high","severity":"high","attackType":["supply_chain"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":["HuggingFace"],"affectedVendorsRaw":["vLLM","HuggingFace"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.00205,"patchAvailable":null,"disclosureDate":null,"capecIds":["CAPEC-242"],"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality","availability"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":null}}