@article{M428DA3A0,
  title    = "Integrating Self-Verification with External Knowledge Retrieval for Reducing Hallucination in Large Language Models",
  journal  = "Journal of KIISE, JOK",
  year     = "2026",
  issn     = "2383-630X",
  doi      = "10.5626/JOK.2026.53.1.58",
  author   = "Yerin Park and Junsu Cho",
  keywords = "large language models, hallucination mitigation, chain-of-verification, retrieval-augmented generation",
  abstract = "Large Language Models (LLMs) have transformed the field of natural language processing. However, they face a significant challenge known as hallucination: the generation of text that appears plausible but is factually incorrect. Previous approaches to reducing hallucination have notable limitations: they often rely solely on the LLM's inherent knowledge, which can provide insufficient information, and they fail to fully utilize the reasoning capabilities of LLMs. To overcome these challenges, we introduce CoVe-RAG, a novel technique that combines Chain-of-Verification (CoVe) with Retrieval-Augmented Generation (RAG). CoVe-RAG improves the factual accuracy of LLM outputs by integrating external knowledge retrieval into a self-verification process. Our experiments demonstrate that CoVe-RAG significantly outperforms both CoVe and RAG in reducing hallucinations. Our main contributions are: (1) identifying the limitations of CoVe in mitigating hallucinations, (2) presenting CoVe-RAG as a more effective solution, and (3) providing empirical evidence of the synergistic benefits of combining self-verification with external knowledge retrieval. Based on these findings, we highlight the complementary relationship between self-verification and external information in enhancing the reliability of LLMs."
}
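
The abstract describes the method only at a high level. As a rough illustration, and not the authors' implementation, the sketch below assumes CoVe's usual draft / plan-verification / revise loop and treats `llm` and `retrieve` as hypothetical callables supplied by the caller (any text-in/text-out model and any passage retriever); the only change relative to plain CoVe is that each verification question is answered against retrieved passages rather than the model's internal knowledge alone, which is how the abstract characterizes the RAG integration.

# Hypothetical sketch of a CoVe-RAG-style pipeline (not the authors' code).
# `llm` is any text-in/text-out model call; `retrieve` is any search function
# returning evidence passages for a query (e.g., BM25 or a dense retriever).
from typing import Callable, List

def cove_rag(
    query: str,
    llm: Callable[[str], str],
    retrieve: Callable[[str], List[str]],
    num_questions: int = 3,
) -> str:
    # 1. Baseline draft: answer the query directly.
    draft = llm(f"Answer the question.\nQuestion: {query}\nAnswer:")

    # 2. Plan verification questions targeting factual claims in the draft.
    plan = llm(
        f"List {num_questions} short questions that would verify the factual "
        f"claims in the following answer, one per line.\nAnswer: {draft}"
    )
    questions = [q.strip() for q in plan.splitlines() if q.strip()][:num_questions]

    # 3. Answer each verification question against retrieved evidence
    #    (the external-knowledge step that plain CoVe lacks).
    verified = []
    for q in questions:
        passages = "\n".join(retrieve(q))
        answer = llm(
            f"Using only the evidence below, answer the question.\n"
            f"Evidence:\n{passages}\nQuestion: {q}\nAnswer:"
        )
        verified.append(f"Q: {q}\nA: {answer}")

    # 4. Revise the draft so it is consistent with the verified answers.
    return llm(
        f"Original question: {query}\n"
        f"Draft answer: {draft}\n"
        f"Verified facts:\n" + "\n".join(verified) + "\n"
        "Rewrite the draft answer so it is consistent with the verified facts."
    )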