{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T05:10:15Z","timestamp":1730265015231,"version":"3.28.0"},"reference-count":10,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,7]]},"DOI":"10.1109\/iiaiaai55812.2022.00068","type":"proceedings-article","created":{"date-parts":[[2022,9,23]],"date-time":"2022-09-23T20:07:41Z","timestamp":1663963661000},"page":"303-306","source":"Crossref","is-referenced-by-count":0,"title":["Hyperheuristic Method Based on Deep Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Hitoshi","family":"Iima","sequence":"first","affiliation":[{"name":"Kyoto Institute of Technology,Information and Human Sciences,Kyoto,Japan"}]},{"given":"Yoshiyuki","family":"Nakamura","sequence":"additional","affiliation":[{"name":"Kyoto Institute of Technology,Information Science,Kyoto,Japan"}]}],"member":"263","reference":[{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2021.3056143"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2014.2332443"},{"key":"ref10","article-title":"Distributed prioritized experience replay","author":"horgan","year":"2018","journal-title":"Proceedings of International Conference on Learning Representations"},{"key":"ref6","article-title":"Q-learning-based hyperheuristic evolutionary algorithm for dynamic task allocation","author":"ji","year":"0","journal-title":"IEEE Transactions on Cybernetics"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2021.3065340"},{"key":"ref8","doi-asserted-by":"crossref","first-page":"354","DOI":"10.1038\/nature24270","article-title":"Mastering the game of go without human knowledge","volume":"550","author":"silver","year":"2017","journal-title":"Nature"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.ejor.2019.07.073"},{"key":"ref9","first-page":"103","article-title":"Makespan minimization scheduling for a truck-drone parcel delivery system: An integer programming formulation","author":"karuno","year":"2019","journal-title":"Proceedings of International Symposium on Scheduling"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1057\/jors.2013.71"}],"event":{"name":"2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)","start":{"date-parts":[[2022,7,2]]},"location":"Kanazawa, Japan","end":{"date-parts":[[2022,7,8]]}},"container-title":["2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9894448\/9894449\/09894564.pdf?arnumber=9894564","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,3]],"date-time":"2022-11-03T22:53:36Z","timestamp":1667516016000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9894564\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7]]},"references-count":10,"URL":"https:\/\/doi.org\/10.1109\/iiaiaai55812.2022.00068","relation":{},"subject":[],"published":{"date-parts":[[2022,7]]}}}