{"data":{"id":"9e88b078-a9ee-4d13-8b43-a3819f292971","title":"Risk-Aware Privacy Preservation for LLM Inference","summary":"When users send prompts to LLM services like ChatGPT, sensitive personal information (such as names, addresses, or ID numbers) can leak out, even when basic privacy protections are used. This paper presents Rap-LI, a framework that identifies which parts of a user's input contain sensitive data and applies stronger privacy protection to those specific parts, rather than treating all data equally.","solution":"N/A -- no mitigation discussed in source.","labels":["security","privacy"],"sourceUrl":"https://ieeexplore.ieee.org/document/11409403","publishedAt":"2026-02-24T13:17:14.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["pii_leakage","data_extraction"],"issueType":"research","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["ChatGPT","LLM inference services"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":"2026-02-24T13:17:14.000Z","capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["confidentiality"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":"peer_reviewed","atlasIds":null}}