{"data":{"id":"4a02655f-32f1-4950-8d1a-a7a52ca5be72","title":"Machine Learning Attack Series: Backdooring Keras Models and How to Detect It","summary":"This post examines how attackers can insert hidden malicious code into machine learning models (a technique called backdooring) through supply chain attacks, specifically targeting Keras models (a popular framework for building AI systems). The authors demonstrate this attack and then explore tools that can detect when a model has been compromised in this way.","solution":"Scan models with dedicated detection tooling before loading them; the source demonstrates tools that can identify backdoored Keras models.","labels":["security","research"],"sourceUrl":"https://embracethered.com/blog/posts/2024/machine-learning-attack-series-keras-backdoor-model/","publishedAt":"2024-05-18T23:00:00.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["model_poisoning","supply_chain"],"issueType":"news","affectedPackages":null,"affectedVendors":["HuggingFace"],"affectedVendorsRaw":["Keras","HuggingFace","Python Pickle"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality"],"aiComponentTargeted":"model","llmSpecific":false,"classifierConfidence":0.75,"researchCategory":null,"atlasIds":null}}