{"data":{"id":"72d699e8-da33-4d8a-a75c-036cdadb87a8","title":"CVE-2023-37275: Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model.","summary":"Auto-GPT is an experimental application that uses GPT-4 (a large language model) to demonstrate AI capabilities through a command-line interface. Before version 0.4.3, malicious websites could trick Auto-GPT's language model into outputting specially encoded text (ANSI escape sequences, which are hidden commands that control console display) that would create fake or misleading messages on the user's screen, potentially causing them to run unintended commands.","solution":"The issue has been patched in release version 0.4.3.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2023-37275","publishedAt":"2023-07-13T23:15:10.890Z","cveId":"CVE-2023-37275","cweIds":["CWE-117"],"cvssScore":"3.1","cvssSeverity":"low","severity":"low","attackType":["jailbreak"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":["OpenAI"],"affectedVendorsRaw":["Auto-GPT","GPT-4","OpenAI"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.00064,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity"],"aiComponentTargeted":"agent","llmSpecific":true,"classifierConfidence":0.85,"researchCategory":null,"atlasIds":null}}