{"data":{"id":"43f90437-dc50-4f4f-9b56-76b67c2c33c0","title":"Exfiltrating Your ChatGPT Chat History and Memories With Prompt Injection","summary":"A researcher discovered that ChatGPT's 'safe URL' feature, which is supposed to prevent data theft, can be bypassed using prompt injection (tricking an AI by hiding malicious instructions in its input). By exploiting this bypass, an attacker can trick ChatGPT into sending sensitive information like your chat history and memories to a server they control, especially if you ask ChatGPT to process untrusted content like PDFs or websites.","solution":"N/A -- no mitigation discussed in the source.","labels":["security","privacy"],"sourceUrl":"https://embracethered.com/blog/posts/2025/chatgpt-chat-history-data-exfiltration/","publishedAt":"2025-08-01T15:00:58.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"high","attackType":["prompt_injection","data_extraction"],"issueType":"news","affectedPackages":null,"affectedVendors":["OpenAI"],"affectedVendorsRaw":["OpenAI","ChatGPT","Azure"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["confidentiality"],"aiComponentTargeted":"api","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}