{"data":{"id":"0b03bbd2-e169-4a8c-9d99-70ac0997b4de","title":"CVE-2025-24357: vLLM is a library for LLM inference and serving. vllm/model_executor/weight_utils.py implements hf_model_weights_iterator","summary":"vLLM is a library that loads AI models from HuggingFace using a function that calls torch.load, a PyTorch function for loading model data. The problem is that torch.load is set to accept untrusted data without verification, which means if someone provides a malicious model file, it could run harmful code on the system during the loading process (deserialization of untrusted data, where code runs while converting saved data back into usable form).","solution":"This vulnerability is fixed in v0.7.0. Users should upgrade to this version or later.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-24357","publishedAt":"2025-01-27T23:15:41.523Z","cveId":"CVE-2025-24357","cweIds":["CWE-502"],"cvssScore":"7.5","cvssSeverity":"high","severity":"high","attackType":["model_poisoning","supply_chain"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["vLLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.0087,"patchAvailable":null,"disclosureDate":null,"capecIds":["CAPEC-586"],"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality"],"aiComponentTargeted":"framework","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":null}}