{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,13]],"date-time":"2026-02-13T06:24:57Z","timestamp":1770963897867,"version":"3.50.1"},"reference-count":39,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/100014718","name":"Innovative Research Group Project of the National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100014718","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of 
China","doi-asserted-by":"publisher","award":["62173232"],"award-info":[{"award-number":["62173232"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62003214"],"award-info":[{"award-number":["62003214"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62203305"],"award-info":[{"award-number":["62203305"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neurocomputing"],"published-print":{"date-parts":[[2025,6]]},"DOI":"10.1016\/j.neucom.2025.129846","type":"journal-article","created":{"date-parts":[[2025,3,5]],"date-time":"2025-03-05T03:03:17Z","timestamp":1741143797000},"page":"129846","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":1,"special_numbering":"C","title":["Model-free extended Q-learning method for\n                    <mml:math xmlns:mml=\"http:\/\/www.w3.org\/1998\/Math\/MathML\" altimg=\"si4.svg\" display=\"inline\" id=\"d1e217\">\n                      <mml:msub>\n                        <mml:mrow>\n                          <mml:mi>H<\/mml:mi>\n                        <\/mml:mrow>\n                        <mml:mrow>\n                          <mml:mi>\u221e<\/mml:mi>\n                        <\/mml:mrow>\n                      <\/mml:msub>\n                    <\/mml:math>\n                    output tracking control of networked control systems with network delays and packet 
loss"],"prefix":"10.1016","volume":"634","author":[{"given":"Longyan","family":"Hao","sequence":"first","affiliation":[]},{"given":"Chaoli","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1245-8997","authenticated-orcid":false,"given":"Dong","family":"Liang","sequence":"additional","affiliation":[]},{"given":"Shihua","family":"Li","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"issue":"2","key":"10.1016\/j.neucom.2025.129846_b1","doi-asserted-by":"crossref","first-page":"301","DOI":"10.1109\/TAC.1981.1102603","article-title":"Feedback and optimal sensitivity: Model reference transformation, multiplicative seminorms, and approximates inverses","volume":"26","author":"Zames","year":"1981","journal-title":"IEEE Trans. Autom. Control"},{"issue":"3","key":"10.1016\/j.neucom.2025.129846_b2","doi-asserted-by":"crossref","first-page":"473","DOI":"10.1016\/j.automatica.2006.09.019","article-title":"Model-free Q-learning designs for linear discrete-time zero-sum games with application to H\u221e control","volume":"43","author":"Al-Tamimi","year":"2007","journal-title":"Automatica"},{"key":"10.1016\/j.neucom.2025.129846_b3","doi-asserted-by":"crossref","first-page":"348","DOI":"10.1016\/j.automatica.2016.05.003","article-title":"Value iteration and adaptive dynamic programming for data-driven adaptive optimal control design","volume":"71","author":"Bian","year":"2016","journal-title":"Automatica"},{"issue":"5","key":"10.1016\/j.neucom.2025.129846_b4","first-page":"1523","article-title":"Output feedback Q-learning control for the discrete-time linear quadratic regulator problem","volume":"30","author":"Asad","year":"2018","journal-title":"IEEE Trans. Neural Netw. Learn. 
Syst."},{"issue":"10","key":"10.1016\/j.neucom.2025.129846_b5","doi-asserted-by":"crossref","first-page":"2699","DOI":"10.1016\/j.automatica.2012.06.096","article-title":"Computational adaptive optimal control for continuous-time linear systems with completely unknown dynamics","volume":"48","author":"Jiang","year":"2012","journal-title":"Automatica"},{"issue":"2","key":"10.1016\/j.neucom.2025.129846_b6","doi-asserted-by":"crossref","first-page":"477","DOI":"10.1016\/j.automatica.2008.08.017","article-title":"Adaptive optimal control for continuous-time linear systems based on policy iteration","volume":"45","author":"Vrabie","year":"2009","journal-title":"Automatica"},{"issue":"11","key":"10.1016\/j.neucom.2025.129846_b7","doi-asserted-by":"crossref","first-page":"2401","DOI":"10.1109\/TCYB.2015.2477810","article-title":"Optimal output-feedback control of unknown continuous-time linear systems using off-policy reinforcement learning","volume":"46","author":"Modares","year":"2016","journal-title":"IEEE Trans. Cybern."},{"issue":"5","key":"10.1016\/j.neucom.2025.129846_b8","doi-asserted-by":"crossref","first-page":"1523","DOI":"10.1109\/TNNLS.2018.2870075","article-title":"Output feedback Q-learning control for the discrete-time linear quadratic regulator problem","volume":"30","author":"Rizvi","year":"2019","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"10.1016\/j.neucom.2025.129846_b9","first-page":"1","article-title":"Optimal output regulation of linear discrete-time systems with unknown dynamics using reinforcement learning","author":"Jiang","year":"2019","journal-title":"IEEE Trans. Cybern."},{"issue":"11","key":"10.1016\/j.neucom.2025.129846_b10","doi-asserted-by":"crossref","first-page":"4423","DOI":"10.1109\/TAC.2019.2905215","article-title":"Reinforcement learning-based adaptive optimal exponential tracking control of linear systems with unknown dynamics","volume":"64","author":"Chen","year":"2019","journal-title":"IEEE Trans. Autom. 
Control"},{"issue":"4","key":"10.1016\/j.neucom.2025.129846_b11","doi-asserted-by":"crossref","first-page":"2391","DOI":"10.1109\/TAC.2022.3172590","article-title":"Robust output regulation and reinforcement learning-based output tracking design for unknown linear discrete-time systems","volume":"68","author":"Chen","year":"2023","journal-title":"IEEE Trans. Autom. Control"},{"key":"10.1016\/j.neucom.2025.129846_b12","doi-asserted-by":"crossref","first-page":"4423","DOI":"10.1109\/TAC.2019.2905215","article-title":"Reinforcement learning-based adaptive optimal exponential tracking control of linear systems with unknown dynamics","author":"Chen","year":"2019","journal-title":"IEEE Trans. Autom. Control"},{"key":"10.1016\/j.neucom.2025.129846_b13","doi-asserted-by":"crossref","first-page":"144","DOI":"10.1016\/j.automatica.2016.12.009","article-title":"H\u221e control of linear discrete-time systems: Off-policy reinforcement learning","volume":"78","author":"Kiumarsi","year":"2017","journal-title":"Automatica"},{"key":"10.1016\/j.neucom.2025.129846_b14","doi-asserted-by":"crossref","first-page":"213","DOI":"10.1016\/j.automatica.2018.05.027","article-title":"Output feedback Q-learning for discrete-time linear zero-sum games with application to the H\u221e control","volume":"95","author":"Rizvi","year":"2018","journal-title":"Automatica"},{"issue":"3","key":"10.1016\/j.neucom.2025.129846_b15","doi-asserted-by":"crossref","first-page":"2361","DOI":"10.1016\/j.jfranklin.2022.10.052","article-title":"Off-policy reinforcement learning for tracking control of discrete-time Markov jump linear systems with completely unknown dynamics","volume":"360","author":"Huang","year":"2023","journal-title":"J. 
Franklin Inst."},{"key":"10.1016\/j.neucom.2025.129846_b16","doi-asserted-by":"crossref","first-page":"48","DOI":"10.1016\/j.neucom.2023.01.050","article-title":"Output feedback Q-learning for discrete-time finite-horizon zero-sum games with application to the H\u221e control","volume":"529","author":"Liu","year":"2023","journal-title":"Neurocomputing"},{"issue":"14","key":"10.1016\/j.neucom.2025.129846_b17","doi-asserted-by":"crossref","first-page":"10564","DOI":"10.1016\/j.jfranklin.2023.08.010","article-title":"Model-free algorithm for consensus of discrete-time multi-agent systems using reinforcement learning method","volume":"360","author":"Long","year":"2023","journal-title":"J. Franklin Inst."},{"issue":"12","key":"10.1016\/j.neucom.2025.129846_b18","doi-asserted-by":"crossref","first-page":"8059","DOI":"10.1016\/j.jfranklin.2020.05.038","article-title":"Off-policy based adaptive dynamic programming method for nonzero-sum games on discrete-time system","volume":"357","author":"Wen","year":"2020","journal-title":"J. Franklin Inst."},{"issue":"10","key":"10.1016\/j.neucom.2025.129846_b19","doi-asserted-by":"crossref","first-page":"2550","DOI":"10.1109\/TNNLS.2015.2441749","article-title":"H\u221e tracking control of completely unknown continuous-time systems via off-policy reinforcement learning","volume":"26","author":"Modares","year":"2015","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"issue":"2","key":"10.1016\/j.neucom.2025.129846_b20","doi-asserted-by":"crossref","first-page":"300","DOI":"10.1002\/acs.2830","article-title":"Output-feedback H\u221e quadratic tracking control of linear systems using reinforcement learning","volume":"33","author":"Moghadam","year":"2019","journal-title":"Internat. J. Adapt. 
Control Signal Process."},{"issue":"11","key":"10.1016\/j.neucom.2025.129846_b21","doi-asserted-by":"crossref","first-page":"4109","DOI":"10.1109\/TSMC.2019.2957000","article-title":"Reinforcement Q-learning algorithm for H\u221e tracking control of unknown discrete-time linear systems","volume":"50","author":"Peng","year":"2020","journal-title":"IEEE Trans. Syst. Man, Cybern.: Syst."},{"issue":"11","key":"10.1016\/j.neucom.2025.129846_b22","doi-asserted-by":"crossref","first-page":"4078","DOI":"10.1109\/TSMC.2019.2946397","article-title":"H\u221e tracking control of discrete-time system with delays via data-based adaptive dynamic programming","volume":"50","author":"Liu","year":"2020","journal-title":"IEEE Trans. Syst. Man, Cybernetics: Syst."},{"key":"10.1016\/j.neucom.2025.129846_b23","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2023.126855","article-title":"Adaptive PI control for H\u221e synchronization of multiple delayed coupled neural networks","volume":"560","author":"Cao","year":"2023","journal-title":"Neurocomputing"},{"key":"10.1016\/j.neucom.2025.129846_b24","article-title":"Observer-based H\u221e control of memristor-based neural networks with unbounded time-varying delays","volume":"549","year":"2023","journal-title":"Neurocomputing"},{"issue":"8","key":"10.1016\/j.neucom.2025.129846_b25","doi-asserted-by":"crossref","first-page":"1177","DOI":"10.1109\/TAC.2005.852550","article-title":"A new method for stabilization of networked control systems with random delays","volume":"50","author":"Zhang","year":"2005","journal-title":"IEEE Trans. Autom. Control"},{"issue":"7","key":"10.1016\/j.neucom.2025.129846_b26","doi-asserted-by":"crossref","first-page":"1575","DOI":"10.1109\/TAC.2009.2015543","article-title":"Stability of networked control systems with uncertain time-varying delays","volume":"54","author":"Cloosterman","year":"2009","journal-title":"IEEE Trans. Autom. 
Control"},{"key":"10.1016\/j.neucom.2025.129846_b27","doi-asserted-by":"crossref","first-page":"85","DOI":"10.1016\/j.automatica.2015.04.006","article-title":"Output tracking control of networked control systems via delay compensation controllers","volume":"57","author":"Zhang","year":"2015","journal-title":"Automatica"},{"key":"10.1016\/j.neucom.2025.129846_b28","doi-asserted-by":"crossref","first-page":"101","DOI":"10.1016\/j.neucom.2015.11.075","article-title":"Delayed observer-based H\u221e control for networked control systems","volume":"179","year":"2016","journal-title":"Neurocomputing"},{"issue":"3","key":"10.1016\/j.neucom.2025.129846_b29","doi-asserted-by":"crossref","first-page":"689","DOI":"10.1109\/TSMCA.2012.2211587","article-title":"H\u221e controller design of networked control systems with Markov packet dropouts","volume":"43","author":"Wang","year":"2013","journal-title":"IEEE Trans. Syst. Man, Cybern.: Syst."},{"issue":"3","key":"10.1016\/j.neucom.2025.129846_b30","doi-asserted-by":"crossref","first-page":"655","DOI":"10.1109\/TAC.2008.919850","article-title":"Network-based H\u221e output tracking control","volume":"53","author":"Gao","year":"2008","journal-title":"IEEE Trans. Autom. Control"},{"issue":"11","key":"10.1016\/j.neucom.2025.129846_b31","doi-asserted-by":"crossref","first-page":"4033","DOI":"10.1109\/TSMC.2019.2946382","article-title":"Model-free optimal output regulation for linear discrete-time lossy networked control systems","volume":"50","author":"Fan","year":"2020","journal-title":"IEEE Trans. Syst. Man, Cybern.: Syst."},{"key":"10.1016\/j.neucom.2025.129846_b32","first-page":"4607","article-title":"Tracking control for linear discrete-time networked control systems with unknown dynamics and dropout","author":"Jiang","year":"2017","journal-title":"IEEE Trans. Neural Netw. Learn. 
Syst."},{"issue":"13","key":"10.1016\/j.neucom.2025.129846_b33","doi-asserted-by":"crossref","first-page":"5649","DOI":"10.1016\/j.jfranklin.2018.06.013","article-title":"Data-driven optimal tracking control for discrete-time systems with delays using adaptive dynamic programming","volume":"355","author":"Liu","year":"2018","journal-title":"J. Franklin Inst."},{"issue":"2","key":"10.1016\/j.neucom.2025.129846_b34","doi-asserted-by":"crossref","first-page":"737","DOI":"10.1109\/TAC.2022.3145632","article-title":"Linear quadratic control using model-free reinforcement learning","volume":"68","author":"Yaghmaie","year":"2023","journal-title":"IEEE Trans. Autom. Control"},{"key":"10.1016\/j.neucom.2025.129846_b35","doi-asserted-by":"crossref","DOI":"10.1016\/j.automatica.2021.109548","article-title":"Low-complexity learning of linear quadratic regulators from noisy data","volume":"128","author":"De Persis","year":"2021","journal-title":"Automatica"},{"issue":"133","key":"10.1016\/j.neucom.2025.129846_b36","article-title":"Model-free H\u221e tracking control for de-oiling hydrocyclone systems via off-policy reinforcement learning","author":"Durdevic","year":"2021","journal-title":"Automatica"},{"issue":"1","key":"10.1016\/j.neucom.2025.129846_b37","doi-asserted-by":"crossref","first-page":"175","DOI":"10.1109\/LCSYS.2020.3001241","article-title":"H\u221e tracking control for linear discrete-time systems: Model-free Q-learning designs","volume":"5","author":"Yang","year":"2021","journal-title":"IEEE Control. Syst. Lett."},{"issue":"11","key":"10.1016\/j.neucom.2025.129846_b38","doi-asserted-by":"crossref","first-page":"3051","DOI":"10.1109\/TAC.2014.2317301","article-title":"Linear quadratic tracking control of partially-unknown continuous-time systems using reinforcement learning","volume":"59","author":"Modares","year":"2014","journal-title":"IEEE Trans. Autom. 
Control"},{"key":"10.1016\/j.neucom.2025.129846_b39","series-title":"Optimal Control","author":"Lewis","year":"2012"}],"container-title":["Neurocomputing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231225005181?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231225005181?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,11,5]],"date-time":"2025-11-05T05:48:14Z","timestamp":1762321694000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0925231225005181"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6]]},"references-count":39,"alternative-id":["S0925231225005181"],"URL":"https:\/\/doi.org\/10.1016\/j.neucom.2025.129846","relation":{},"ISSN":["0925-2312"],"issn-type":[{"value":"0925-2312","type":"print"}],"subject":[],"published":{"date-parts":[[2025,6]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Model-free extended Q-learning method for H\u221e output tracking control of networked control systems with network delays and packet loss","name":"articletitle","label":"Article Title"},{"value":"Neurocomputing","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neucom.2025.129846","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2025 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"129846"}}