{"data":{"id":"7b70ab0c-cc71-4e14-b62f-6c46383121a6","title":"Security Spotlight: Securing Cloud & AI Products with Guardrails","summary":"This article collection discusses security challenges in AI and cloud systems, particularly focusing on agentic AI (AI systems that can take autonomous actions). Key risks include jailbreaks (tricking AI systems into ignoring safety rules), prompt injection (hidden malicious instructions in AI inputs), and tool misuse by autonomous agents, which require contextual red teaming (security testing designed for specific use cases) rather than generic testing to identify real vulnerabilities.","solution":"N/A -- no mitigation discussed in source.","labels":["security","safety"],"sourceUrl":"https://protectai.com/blog/security-spotlight-securing-ai-with-guardrails","publishedAt":"2025-05-28T19:37:57.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":["prompt_injection","jailbreak"],"issueType":"news","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":["Palo Alto Networks","Prisma AIRS","Glean"],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":"2025-05-28T19:37:57.000Z","capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["integrity","safety"],"aiComponentTargeted":"agent","llmSpecific":false,"classifierConfidence":0.75,"researchCategory":null,"atlasIds":null}}