@article{M1B93B139,
  title    = "Korean Dependency Parsing using Subtree Linking based on Machine Reading Comprehension",
  journal  = "Journal of KIISE, JOK",
  year     = "2022",
  issn     = "2383-630X",
  doi      = "10.5626/JOK.2022.49.8.617",
  author   = "Jinwoo Min and Seung-Hoon Na and Jong-Hoon Shin and Young-Kil Kim and Kangil Kim",
  keywords = "dependency parsing, machine reading comprehension, subtree, subtree linking, biaffine attention",
  abstract = "In Korean dependency parsing, biaffine attention models have shown state-of-the-art performance: they first obtain head-level and modifier-level representations by applying two multi-layer perceptrons (MLPs) to the encoded contextualized word representations, perform attention by regarding the modifier-level representation as a query and the head-level one as a key, and take the resulting attention score as the probability of forming a dependency arc between the corresponding two words. However, given two target words (i.e., a candidate head and modifier), biaffine attention methods are essentially limited to their word-level representations and are not aware of the explicit boundaries of their phrases or subtrees. Thus, without relying on semantically and syntactically enriched phrase-level and subtree-level representations, biaffine attention methods might not be effective in cases where determining a dependency arc is complicated, such as identifying a dependency between “far-distant” words, as these cases often require subtree- or phrase-level information surrounding the target words. To address this drawback, this paper presents a dependency parsing framework based on machine reading comprehension (MRC) that explicitly utilizes subtree-level information by mapping a given child subtree and its parent subtree to a question and an answer, respectively. The experimental results on standard datasets for Korean dependency parsing show that the MRC-based dependency parsing outperforms the biaffine attention model. In particular, the results further show that the improvements in performance are particularly strong for long sentences compared to short ones."
}