{"data":{"id":"7f43d791-58d4-4732-8ed8-36ff248e5126","title":"CVE-2025-9906: The Keras Model.load_model method can be exploited to achieve arbitrary code execution, even with safe_mode=True.","summary":"A vulnerability in Keras (a machine learning library) allows attackers to run arbitrary code on a system by creating a malicious .keras model file that tricks the load_model function into disabling its safety protections, even when safe_mode is enabled. The attacker does this by embedding a command in the model's configuration file that turns off safe mode, then hiding executable code in a Lambda layer (a Keras feature that can contain custom Python code), allowing the malicious code to run when the model is loaded.","solution":"N/A -- no mitigation discussed in source.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-9906","publishedAt":"2025-09-19T13:15:36.353Z","cveId":"CVE-2025-9906","cweIds":["CWE-502"],"cvssScore":"7.3","cvssSeverity":"high","severity":"high","attackType":["model_theft"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":["HuggingFace"],"affectedVendorsRaw":["Keras","TensorFlow"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.00076,"patchAvailable":null,"disclosureDate":null,"capecIds":["CAPEC-586"],"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality"],"aiComponentTargeted":"model","llmSpecific":false,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}