{"data":{"id":"e595cfb8-87ad-4167-a917-19154627af17","title":"Meet the AI jailbreakers: ‘I see the worst things humanity has produced’","summary":"Security researchers test large language models (AI systems trained on massive amounts of text data) by attempting prompt injection attacks (tricking the AI into ignoring its safety rules) to find vulnerabilities before bad actors do. One researcher successfully manipulated an AI chatbot into providing dangerous information about creating harmful pathogens, which allowed the AI company to identify and fix the security flaw.","solution":"N/A -- no mitigation discussed in source.","labels":["security","safety"],"sourceUrl":"https://www.theguardian.com/technology/2026/apr/29/meet-the-ai-jailbreakers-i-see-the-worst-things-humanity-has-produced","publishedAt":"2026-04-29T09:00:51.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["jailbreak"],"issueType":"news","affectedPackages":null,"affectedVendors":["Anthropic","OpenAI"],"affectedVendorsRaw":["Claude","ChatGPT"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":"2026-04-29T09:00:51.000Z","capecIds":null,"crossRefCount":0,"attackSophistication":"advanced","impactType":["safety","integrity"],"aiComponentTargeted":"model","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}