@article{M23B62286,
  title    = "OCR post-processing, Korean OCR error correction, Prompt engineering, LLM",
  journal  = "Journal of KIISE, JOK",
  year     = "2025",
  issn     = "2383-630X",
  doi      = "10.5626/JOK.2025.52.11.948",
  author   = "Hyunsun Hwang and Youngjun Jung and Changki Lee",
  keywords = "Semantic Role Labeling, Large Language Model, In-context learning, example selection, example reordering",
  abstract = "Recent large language models use In-context Learning (ICL), a technique that performs a task by inserting examples into the prompt, without any additional training, by leveraging the language understanding acquired during pre-training on massive datasets. However, such example-based ICL relies on few-shot examples, so performance varies significantly with the selection and ordering of the examples in the prompt. This paper proposes methods for improving example selection and reordering when applying ICL to Semantic Role Labeling (SRL), a challenging task that requires outputting semantic structures. In particular, we found that simply ordering the examples in reverse similarity order achieves performance close to that of the optimal example ordering for SRL."
}
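
The reverse-similarity reordering described in the abstract can be sketched roughly as follows. This is a minimal illustration under stated assumptions, not the authors' implementation: the embedding model (sentence-transformers, all-MiniLM-L6-v2), the SRL demonstration format, and the reading of "reverse similarity order" as most-similar-example-last (placed closest to the query) are all assumptions.

# Sketch: similarity-based example selection and reverse-similarity reordering
# for in-context learning. Assumed: the embedding model, the demo format, and
# that "reverse similarity order" means the most similar example goes last.
from sentence_transformers import SentenceTransformer
import numpy as np

model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed encoder, not from the paper

def build_icl_prompt(query, pool, k=4):
    """Pick the k pool examples most similar to the query, then order them
    so the most similar example appears last, immediately before the query."""
    emb = model.encode([query] + [ex["sentence"] for ex in pool],
                       normalize_embeddings=True)
    sims = emb[1:] @ emb[0]              # cosine similarity of each example to the query
    top = np.argsort(-sims)[:k]          # indices of the k most similar examples
    ordered = top[::-1]                  # reverse: least similar first, most similar last
    demos = "\n\n".join(
        f"Sentence: {pool[i]['sentence']}\nRoles: {pool[i]['roles']}"
        for i in ordered
    )
    return f"{demos}\n\nSentence: {query}\nRoles:"

# Hypothetical SRL demonstrations in a bracketed PropBank-style format.
pool = [
    {"sentence": "The chef cooked dinner.",
     "roles": "[ARG0 The chef] [V cooked] [ARG1 dinner]"},
    {"sentence": "Mary gave John a book.",
     "roles": "[ARG0 Mary] [V gave] [ARG2 John] [ARG1 a book]"},
]
print(build_icl_prompt("The teacher wrote a letter.", pool, k=2))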