@article{M22CFC3EC,
  author   = {Yang, Seokhwan and Woo, Woontack},
  title    = {{VR} Headset User's Blendshape-driven Facial Expression Tracking for Parametric Model and Photo-realistic Avatar Reconstruction},
  journal  = {Journal of KIISE, JOK},
  year     = {2026},
  issn     = {2383-630X},
  doi      = {10.5626/JOK.2026.53.2.124},
  keywords = {Virtual reality, photo-realistic avatar, facial expression tracking, VR headset},
  abstract = {This study introduces a novel method for representing the facial expressions of VR headset users through a parametric mesh model and a photo-realistic avatar. While previous research has concentrated on optimizing mesh parameters using video input, such methods are not feasible for VR users due to the absence of accessible RGB imagery. To overcome this limitation, our approach indirectly tracks user expressions by utilizing the blendshape-based avatar expressions provided by the VR headset. Additionally, we create a proxy avatar that resembles the user's appearance and remove loss terms sensitive to facial geometry, which enhances expression modeling. This method is particularly suitable for VR environments, as it relies solely on headset input, while achieving expression representations comparable to those derived from video-based methods. Ultimately, this approach bridges the gap between realistic mesh-based facial avatars and the expressions of users wearing VR headsets.},
}