{"data":{"id":"7aadc042-396b-423e-add7-44e166d2509e","title":"Using Microsoft Counterfit to create adversarial examples for Husky AI","summary":"This post describes Microsoft Counterfit, a tool for testing machine learning models against adversarial attacks (subtle modifications to input data designed to fool AI systems). The author demonstrates how to set up Counterfit, create a custom target for a husky image classifier, and use the tool's built-in attack modules to test the model's robustness.","solution":"N/A -- no mitigation discussed in source.","labels":["security","research"],"sourceUrl":"https://embracethered.com/blog/posts/2021/huskyai-using-azure-counterfit/","publishedAt":"2021-08-16T17:00:26.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["model_evasion"],"issueType":"news","affectedPackages":null,"affectedVendors":["HuggingFace"],"affectedVendorsRaw":["Microsoft Counterfit","HuggingFace","IBM Adversarial Robustness Toolbox","TextAttack","Keras","TensorFlow"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","safety"],"aiComponentTargeted":"model","llmSpecific":false,"classifierConfidence":0.75,"researchCategory":null,"atlasIds":null}}