@inproceedings{632309bd302e487abe81cc2ac482faef,
title = "Near-optimal control of motor drives via approximate dynamic programming",
abstract = "Data-driven methods for learning near-optimal control policies through approximate dynamic programming (ADP) have garnered widespread attention. In this paper, we investigate how data-driven control methods can be leveraged to imbue near-optimal performance in a core component in modern factory systems: The electric motor drive. We apply policy iteration-based ADP to an induction motor model in order to construct a state feedback control policy for a given cost functional. Approximate error convergence properties of policy iteration methods imply that the learned control policy is near-optimal. We demonstrate that carefully selecting a cost functional and initial control policy yields a near-optimal control policy that outperforms both a baseline nonlinear control policy based on backstepping, as well as the initial control policy.",
author = "Wang, Yebin and Chakrabarty, Ankush and Zhou, Meng Chu and Zhang, Jinyun",
note = "Publisher Copyright: {\textcopyright} 2019 IEEE.; 2019 IEEE International Conference on Systems, Man and Cybernetics, SMC 2019 ; Conference date: 06-10-2019 Through 09-10-2019",
year = "2019",
month = oct,
doi = "10.1109/SMC.2019.8914595",
language = "English (US)",
series = "Conference Proceedings - IEEE International Conference on Systems, Man and Cybernetics",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "3679--3686",
booktitle = "2019 IEEE International Conference on Systems, Man and Cybernetics, SMC 2019",
}