{"data":{"id":"3382b9a9-8903-4c6b-b336-2d738c1735d4","title":"CVE-2023-29374: In LangChain through 0.0.131, the LLMMathChain chain allows prompt injection attacks that can execute arbitrary code via the Python exec method","summary":"CVE-2023-29374 is a vulnerability in LangChain versions up to 0.0.131 where the LLMMathChain component is vulnerable to prompt injection attacks (tricking an AI by hiding instructions in its input), allowing attackers to execute arbitrary code through Python's exec method. This is a code execution vulnerability that could allow an attacker to run malicious commands on a system running the affected software.","solution":"A patch is available at https://github.com/hwchase17/langchain/pull/1119","labels":["security"],"sourceUrl":"https://nvd.nist.gov/vuln/detail/CVE-2023-29374","publishedAt":"2023-04-05T06:15:37.340Z","cveId":"CVE-2023-29374","cweIds":["CWE-74"],"cvssScore":"9.8","cvssSeverity":"critical","severity":"critical","attackType":["prompt_injection"],"issueType":"vulnerability","affectedPackages":null,"affectedVendors":["LangChain"],"affectedVendorsRaw":["LangChain"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":"unknown","epssScore":0.04452,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","confidentiality"],"aiComponentTargeted":"agent","llmSpecific":true,"classifierConfidence":0.95,"researchCategory":null,"atlasIds":null}}