{"data":{"id":"b11ca9f9-9a66-4061-b337-1e8fac5927e2","title":"CVE-2024-10950: In binary-husky/gpt_academic version <= 3.83, the plugin `CodeInterpreter` is vulnerable to code injection caused by prompt injection","summary":"In gpt_academic version 3.83 and earlier, the CodeInterpreter plugin has a vulnerability where prompt injection (tricking an AI by hiding instructions in its input) allows attackers to inject malicious code. Because the application executes LLM-generated code without a sandbox (a restricted environment that isolates code from the main system), attackers can achieve RCE (remote code execution, where an attacker can run commands on a system they don't own) and potentially take over the backend server.","solution":"N/A -- no mitigation discussed in source.","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2024-10950","publishedAt":"2025-03-20T10:15:22.110Z","cveId":"CVE-2024-10950","cweIds":["CWE-94"],"cvssScore":null,"cvssSeverity":null,"severity":"high","attackType":["prompt_injection"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["GPT Academic","binary-husky/gpt_academic"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.01252,"patchAvailable":null,"disclosureDate":null,"capecIds":["CAPEC-242"],"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality","availability"],"aiComponentTargeted":"plugin","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}