@article{M4EAA5D23,
  title    = "PGB: Permutation and Grouping for BERT Pruning",
  journal  = "Journal of KIISE, JOK",
  year     = "2023",
  issn     = "2383-630X",
  doi      = "10.5626/JOK.2023.50.6.503",
  author   = "Hye-Min Lim and Dong-Wan Choi",
  keywords = "BERT compression, task-specific pruning, structured pruning, head pruning",
  abstract = "Recently, pre-trained Transformer-based models have been actively used for various artificial intelligence tasks, such as natural language processing and image recognition. However, these models have billions of parameters, requiring significant computation for inference, which limits their use in resource-constrained environments. To address this problem, we propose PGB (Permutation Grouped BERT pruning), a new group-based structured pruning method for Transformer models. PGB effectively finds an optimal permutation of attention heads under given resource constraints and prunes unnecessary heads based on their importance, minimizing the information loss in the model. In various comparative experiments, PGB outperforms existing structured pruning methods for the pre-trained BERT model in terms of both inference speed and accuracy loss."
}