{"data":{"id":"af2c7ef6-b5f7-4979-8c68-c23a79a77249","title":"GPT-5.5 Bio Bug Bounty","summary":"OpenAI is running a bug bounty program called the Bio Bug Bounty for GPT-5.5, inviting security researchers to find universal jailbreaks (methods to bypass safety restrictions with a single prompt) that can defeat five biology safety questions. The program offers $25,000 for the first successful universal jailbreak and smaller awards for partial results, with applications open from April 23 to June 22, 2026 and testing running through July 27, 2026.","solution":"N/A -- no mitigation discussed in source.","labels":["security","safety"],"sourceUrl":"https://openai.com/index/gpt-5-5-bio-bug-bounty","publishedAt":"2026-04-23T00:00:00.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["jailbreak"],"issueType":"news","affectedPackages":null,"affectedVendors":["OpenAI"],"affectedVendorsRaw":["OpenAI","GPT-5.5","ChatGPT"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":"2026-04-23T00:00:00.000Z","capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["safety"],"aiComponentTargeted":"model","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":null,"atlasIds":null}}