{"data":{"id":"51bef7fe-66a6-49b3-a932-f30ae8ad87f6","title":"CVE-2026-6859: A flaw was found in InstructLab. The `linux_train.py` script hardcodes `trust_remote_code=True` when loading models from HuggingFace","summary":"InstructLab has a security flaw in its `linux_train.py` script that automatically trusts code from external model sources without verification (trust_remote_code=True). An attacker could trick users into downloading a malicious model from HuggingFace (a popular AI model repository) and running training commands, allowing the attacker to execute arbitrary Python code and take over the entire system.","solution":"N/A -- no mitigation discussed in source.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2026-6859","publishedAt":"2026-04-22T14:17:07.687Z","cveId":"CVE-2026-6859","cweIds":["CWE-829"],"cvssScore":"8.8","cvssSeverity":"high","severity":"high","attackType":["supply_chain"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":["HuggingFace"],"affectedVendorsRaw":["InstructLab","HuggingFace"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":"CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:H","attackVector":"network","attackComplexity":"low","privilegesRequired":"none","userInteraction":"required","exploitMaturity":"unknown","epssScore":0,"patchAvailable":null,"disclosureDate":"2026-04-22T14:17:07.687Z","capecIds":["CAPEC-437"],"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality","availability"],"aiComponentTargeted":"framework","llmSpecific":false,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":["AML.T0010"]}}