{"data":{"id":"a7c7171f-4d3a-4801-bd0c-44498416a0b4","title":"Bobby Tables but with LLM Apps - Google NotebookLM Data Exfiltration","summary":"Google's NotebookLM is a tool that lets users upload files for an AI to analyze, but it's vulnerable to prompt injection (tricking the AI by hiding instructions in uploaded files) that can manipulate the AI's responses and expose what users see. The tool also has a data exfiltration vulnerability (attackers stealing information) when processing untrusted files, and there is currently no known way to prevent these attacks, meaning users cannot fully trust the AI's responses when working with files from unknown sources.","solution":"N/A -- no mitigation discussed in source.","labels":["security","safety"],"sourceUrl":"https://embracethered.com/blog/posts/2024/google-notebook-ml-data-exfiltration/","publishedAt":"2024-04-15T15:11:30.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"medium","attackType":["prompt_injection","data_extraction"],"issueType":"news","affectedPackages":null,"affectedVendors":["Google"],"affectedVendorsRaw":["Google NotebookLM"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["confidentiality","integrity"],"aiComponentTargeted":"inference","llmSpecific":true,"classifierConfidence":0.85,"researchCategory":null,"atlasIds":null}}