{"data":{"id":"8f4ae7e5-d40b-4b45-8b16-30260414d108","title":"CVE-2025-9905: The Keras Model.load_model method can be exploited to achieve arbitrary code execution, even with safe_mode=True.","summary":"A vulnerability exists in Keras' Model.load_model method where specially crafted .h5 or .hdf5 model files (archive formats that store trained AI models) can execute arbitrary code on a system, even when safe_mode is enabled to prevent this. The attack works by embedding malicious pickled code (serialized Python code) in a Lambda layer, a Keras feature that allows custom Python functions, which bypasses the intended security protection.","solution":"N/A -- no mitigation discussed in source.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2025-9905","publishedAt":"2025-09-19T13:15:36.033Z","cveId":"CVE-2025-9905","cweIds":["CWE-913"],"cvssScore":"7.3","cvssSeverity":"high","severity":"high","attackType":["model_theft"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":["HuggingFace"],"affectedVendorsRaw":["Keras","TensorFlow"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.00005,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality"],"aiComponentTargeted":"model","llmSpecific":false,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}