{"data":{"id":"777bfe88-cb8c-42f3-86c9-d224a2f9804f","title":"GHSA-fvfv-ppw4-7h2w: n8n has a Guardrail Node Bypass","summary":"A security flaw in n8n's Guardrail node (a component that enforces safety rules on AI outputs) allows users to craft inputs that bypass its default safety instructions. This means someone could trick the guardrail into allowing outputs it should have blocked.","solution":"The issue has been fixed in n8n version 2.10.0. Users should upgrade to this version or later to remediate the vulnerability. If upgrading is not immediately possible, administrators can limit access to trusted users and review the practical impact of guardrail bypasses in their workflows, then adjust accordingly (though these workarounds do not fully remediate the risk and should only be used as short-term mitigation).","labels":["security"],"sourceUrl":"https://github.com/advisories/GHSA-fvfv-ppw4-7h2w","publishedAt":"2026-02-26T22:46:42.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":"medium","severity":"medium","attackType":["jailbreak"],"issueType":"vulnerability","affectedPackages":["n8n@< 2.10.0 (fixed: 2.10.0)"],"affectedVendors":[],"affectedVendorsRaw":["n8n"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":null,"capecIds":null,"crossRefCount":0,"attackSophistication":"trivial","impactType":["safety"],"aiComponentTargeted":"agent","llmSpecific":false,"classifierConfidence":0.85,"researchCategory":null,"atlasIds":null}}