{"data":{"id":"a8bf847d-57ec-4d38-8242-c8785d3f4d83","title":"\nUQLM: A Python Package for Uncertainty Quantification in Large Language Models\n","summary":"Hallucinations (instances where Large Language Models generate false or misleading content) are a safety problem for AI applications. The paper introduces UQLM, a Python package that uses uncertainty quantification (UQ, a statistical technique for measuring how confident a model is in its answer) to detect when an LLM is likely hallucinating by assigning confidence scores between 0 and 1 to responses.","solution":"The source describes UQLM as 'an off-the-shelf solution for UQ-based hallucination detection that can be easily integrated to enhance the reliability of LLM outputs.' No specific implementation steps, code examples, or version details are provided in the source text.","labels":["research","safety"],"sourceUrl":"\nhttp://jmlr.org/papers/v27/25-1557.html\n","publishedAt":"2026-01-01T00:00:00.000Z","cveId":null,"cweIds":null,"cvssScore":null,"cvssSeverity":null,"severity":"info","attackType":[],"issueType":"research","affectedPackages":null,"affectedVendors":[],"affectedVendorsRaw":[],"classifierModel":"claude-haiku-4-5-20251001","classifierPromptVersion":"v3","cvssVector":null,"attackVector":null,"attackComplexity":null,"privilegesRequired":null,"userInteraction":null,"exploitMaturity":null,"epssScore":null,"patchAvailable":null,"disclosureDate":"2026-01-01T00:00:00.000Z","capecIds":null,"crossRefCount":0,"attackSophistication":"moderate","impactType":["safety"],"aiComponentTargeted":"model","llmSpecific":true,"classifierConfidence":0.92,"researchCategory":"peer_reviewed","atlasIds":null}}