{"data":{"id":"3bc69e54-0321-4890-8523-87b2677669fb","title":"PPOM-Attack: A Substitute Model-Free Perturbation Prediction and Optimization Method for Black-Box Adversarial Attack Against Face Recognition","summary":"Researchers developed PPOM-Attack, a method to fool face recognition (FR) systems by generating adversarial images (slightly altered photos that trick AI into misidentifying someone). Unlike earlier attacks that used substitute models (simpler AI systems trained to mimic the target system), PPOM-Attack directly queries the real FR system to learn how to create effective perturbations (tiny pixel changes), achieving 21.7% higher success rates while keeping the altered images looking natural.","solution":"N/A -- no mitigation discussed in source.","labels":["security","research"],"sourceUrl":"http://ieeexplore.ieee.org/document/11406187","publishedAt":"2026-02-23T13:19:07.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["model_evasion"],"issueType":"research","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":[],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":"2026-02-23T13:19:07.000Z","capecIds":null,"crossRefCount":0,"attackSophistication":"advanced","impactType":["integrity","safety"],"aiComponentTargeted":"model","llmSpecific":false,"classifierConfidence":0.85,"researchCategory":"peer_reviewed","atlasIds":null}}