@article{M7F091633,
  author   = {Joo, Youngseok and Lee, Minsu and Zhang, Byoung-Tak},
  title    = {Training {Liquid State Machine} using {Reward-Modulated} {Spike-Timing-Dependent} Plasticity},
  journal  = {Journal of KIISE, JOK},
  year     = {2026},
  issn     = {2383-630X},
  doi      = {10.5626/JOK.2026.53.1.32},
  keywords = {liquid state machine, spiking neural network, reward-modulated STDP, reinforcement learning},
  abstract = {The Liquid State Machine (LSM) is a recurrent spiking neural network model rooted in computational neuroscience, characterized by its rich temporal dynamics, low training complexity, and biological plausibility. While traditional LSMs typically use fixed reservoir weights, which limits their adaptability, incorporating spike-timing-dependent plasticity (STDP) into the reservoir has been shown to improve performance. Additionally, conventional LSMs employ external classifiers, such as linear regression or gradient-based methods, to train the output layer, which is not suitable for online, real-time learning. In this paper, we introduce a reinforcement learning framework for training an LSM using dopamine-modulated spike-timing-dependent plasticity (DA-STDP). Our system enables a biologically inspired, reward-based learning mechanism that adjusts synaptic weights based on feedback signals. We validate our approach under various training conditions using the MNIST dataset, demonstrating the applicability of DA-STDP as a training mechanism within the LSM framework.},
}