{"data":{"id":"77242ccc-2f8e-4d6c-82b5-a8dc1587e4d4","title":"FORCE: Byzantine-Resilient Decentralized Federated Learning via Game-Theoretic Contribution Aggregation","summary":"Decentralized Federated Learning (DFL, a way for multiple computers to train AI models together without a central server) is vulnerable to Byzantine attacks (when malicious participants send bad data to sabotage the learning process). The paper proposes FORCE, a new method that uses game theory concepts (mathematical models of strategy and fairness) to identify and exclude malicious clients by checking their model loss (how well their models perform) instead of checking gradients (the direction to improve), making DFL more resistant to these attacks.","solution":"The paper itself proposes the mitigation: FORCE, a game-theoretic contribution-aggregation scheme that identifies and excludes Byzantine (malicious) clients by evaluating model loss rather than inspecting gradients.","labels":["security","research"],"sourceUrl":"http://ieeexplore.ieee.org/document/11436077","publishedAt":"2026-03-17T13:28:37.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["model_poisoning"],"issueType":"research","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":[],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":"2026-03-17T13:28:37.000Z","capecIds":null,"crossRefCount":0,"attackSophistication":"advanced","impactType":["integrity"],"aiComponentTargeted":"training_data","llmSpecific":false,"classifierConfidence":0.85,"researchCategory":"peer_reviewed","atlasIds":null}}