{"data":{"id":"4794b3fa-0ef2-4b93-b64f-7274229ea58a","title":"HITCON CMT 2023 - LLM Security Presentation and Trip Report","summary":"This article is a trip report from HITCON CMT 2023, a security conference in Taiwan, where the author attended talks on various topics including LLM security, reverse engineering with AI, and application exploits. Key presentations covered indirect prompt injections (attacks where malicious instructions are hidden in data fed to an AI system), Electron app vulnerabilities, and PHP security issues. The author gave a talk on indirect prompt injections and noted that this technique could become a significant attack vector for AI-integrated applications like chatbots.","solution":"N/A -- no mitigation discussed in source.","labels":["security","research"],"sourceUrl":"https://embracethered.com/blog/posts/2023/hitcon-llm-security-presentation-and-trip-report/","publishedAt":"2023-09-18T10:24:51.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["prompt_injection"],"issueType":"news","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["Google","Trend Micro"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity"],"aiComponentTargeted":"api","llmSpecific":true,"classifierConfidence":0.82,"researchCategory":null,"atlasIds":null}}