@article{M7C0F8B49,
  title    = "PrefixLM for Korean Text Summarization",
  journal  = "Journal of KIISE, JOK",
  year     = "2022",
  issn     = "2383-630X",
  doi      = "10.5626/JOK.2022.49.6.475",
  author   = "Kun-Hui Lee and Seung-Hoon Na and Joon-Ho Lim and Tae-Hyeong Kim and Du-Seong Chang",
  keywords = "language model, text generation, document summarization, PrefixLM",
  abstract = "In this paper, we examine the effectiveness of PrefixLM, which consists of half of the parameters of T5's encoder-decoder architecture, for Korean text generation tasks. Unlike T5, where the input and output sequences are provided separately, the transformer block of PrefixLM takes a single sequence that concatenates the input and output sequences. By designing the attention mask, PrefixLM performs bi-directional attention on the input sequence and uni-directional attention on the output sequence, thereby enabling a single transformer block to serve as both encoder and decoder. Experimental results on a Korean abstractive document summarization task show that PrefixLM yields Rouge-F1 improvements of 2.17 and 2.78 points (both more than 2) over BART and T5, respectively, implying that PrefixLM is promising for Korean text generation tasks."
}
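
The abstract describes PrefixLM's key mechanism: a single transformer block over the concatenated input+output sequence, with an attention mask that is bi-directional over the input (prefix) and causal over the output. Below is a minimal, hedged sketch of such a mask in PyTorch; the function name and shapes are illustrative assumptions, not code from the cited paper.

import torch

def prefix_lm_attention_mask(prefix_len: int, total_len: int) -> torch.Tensor:
    # Illustrative sketch of the standard PrefixLM mask, not the paper's implementation.
    # Positions [0, prefix_len) are the input (prefix) and attend bidirectionally
    # to each other; positions [prefix_len, total_len) are the output and attend
    # causally (to the full prefix and to earlier output positions only).
    # Returns a (total_len, total_len) boolean mask where True = attention allowed.
    mask = torch.tril(torch.ones(total_len, total_len, dtype=torch.bool))
    # Allow full bidirectional attention within the prefix block; the causal
    # lower-triangular base already prevents the prefix from seeing the output.
    mask[:prefix_len, :prefix_len] = True
    return mask

if __name__ == "__main__":
    # Example: a 3-token input concatenated with a 2-token output (5 tokens total).
    print(prefix_lm_attention_mask(prefix_len=3, total_len=5).int())

This single mask is what lets one transformer block play both roles: the prefix portion behaves like an encoder (full self-attention), while the output portion behaves like a decoder (causal attention plus access to the prefix).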