@article{M07766182,
  author   = {Ko, Gihyuk and Lim, Gyumin and Cho, Homook},
  title    = {Survey on Feature Attribution Methods in Explainable {AI}},
  journal  = {Journal of {KIISE}, {JOK}},
  year     = {2020},
  volume   = {47},
  number   = {12},
  issn     = {2383-630X},
  doi      = {10.5626/JOK.2020.47.12.1181},
  keywords = {feature attribution, input feature importance, XAI, artificial intelligence, survey categorization},
  abstract = {As artificial intelligence (AI)-based technologies are increasingly being used in areas that can have big socioeconomic effects, there is a growing effort to explain decisions made by AI models. One important direction in such eXplainable AI (XAI) is the `feature attribution' method, which explains AI models by assigning a contribution score to each input feature. In this work, we surveyed nine recently developed feature attribution methods and categorized them using four different criteria. Based on the categorizations, we found that the current methods focused only on specific settings such as generating local, white-box explanations of neural networks and lacked theoretical foundations such as axiomatic definitions. We suggest future research directions toward a unified feature attribution method based on our findings.},
}