Deep reinforcement learning has shown its success in game playing. However,
2.5D fighting games would be a challenging task to handle due to ambiguity in
visual appearances like height or depth of the characters. Moreover, actions in
such games typically involve particular sequential action orders, which also
makes the network design very difficult. Based on the network of Asynchronous
Advantage Actor-Critic (A3C), we create an OpenAI-gym-like gaming environment
with the game of Little Fighter 2 (LF2), and present a novel A3C+ network for
learning RL agents. The introduced model includes a Recurrent Info network,
which utilizes game-related info features with recurrent layers to observe
combo skills for fighting. In the experiments, we consider LF2 in different
settings, which successfully demonstrates the use of our proposed model for
learning 2.5D fighting games.
Description
[1805.02070] Deep Reinforcement Learning for Playing 2.5D Fighting Games
%0 Conference Paper
%1 li2018RLFightingGame
%A Li, Yu-Jhe
%A Chang, Hsin-Yu
%A Lin, Yu-Jing
%A Wu, Po-Wei
%A Wang, Yu-Chiang Frank
%B 2018 25th IEEE International Conference on Image Processing (ICIP)
%D 2018
%K A3C FightingGame reinforcement_learning
%P 3778-3782
%R 10.1109/ICIP.2018.8451491
%T Deep Reinforcement Learning for Playing 2.5D Fighting Games
%U http://arxiv.org/abs/1805.02070
%X Deep reinforcement learning has shown its success in game playing. However,
2.5D fighting games would be a challenging task to handle due to ambiguity in
visual appearances like height or depth of the characters. Moreover, actions in
such games typically involve particular sequential action orders, which also
makes the network design very difficult. Based on the network of Asynchronous
Advantage Actor-Critic (A3C), we create an OpenAI-gym-like gaming environment
with the game of Little Fighter 2 (LF2), and present a novel A3C+ network for
learning RL agents. The introduced model includes a Recurrent Info network,
which utilizes game-related info features with recurrent layers to observe
combo skills for fighting. In the experiments, we consider LF2 in different
settings, which successfully demonstrates the use of our proposed model for
learning 2.5D fighting games.
@comment{Cleaned export: pages range uses --, arXiv id moved from garbled note
  into eprint/archiveprefix fields, casing-sensitive tokens braced in titles.}
@inproceedings{li2018RLFightingGame,
  abstract      = {Deep reinforcement learning has shown its success in game playing. However,
2.5D fighting games would be a challenging task to handle due to ambiguity in
visual appearances like height or depth of the characters. Moreover, actions in
such games typically involve particular sequential action orders, which also
makes the network design very difficult. Based on the network of Asynchronous
Advantage Actor-Critic (A3C), we create an OpenAI-gym-like gaming environment
with the game of Little Fighter 2 (LF2), and present a novel A3C+ network for
learning RL agents. The introduced model includes a Recurrent Info network,
which utilizes game-related info features with recurrent layers to observe
combo skills for fighting. In the experiments, we consider LF2 in different
settings, which successfully demonstrates the use of our proposed model for
learning 2.5D fighting games.},
  added-at      = {2020-01-24T08:40:50.000+0100},
  archiveprefix = {arXiv},
  author        = {Li, Yu-Jhe and Chang, Hsin-Yu and Lin, Yu-Jing and Wu, Po-Wei and Wang, Yu-Chiang Frank},
  biburl        = {https://www.bibsonomy.org/bibtex/24002b0f95e0072ccae88ff92ba765ef6/lanteunis},
  booktitle     = {2018 25th {IEEE} International Conference on Image Processing ({ICIP})},
  description   = {[1805.02070] Deep Reinforcement Learning for Playing 2.5D Fighting Games},
  doi           = {10.1109/ICIP.2018.8451491},
  eprint        = {1805.02070},
  interhash     = {fbfc6db91ed92dff9ca44bea23d8bbff},
  intrahash     = {4002b0f95e0072ccae88ff92ba765ef6},
  issn          = {2381-8549},
  keywords      = {A3C FightingGame reinforcement_learning},
  month         = oct,
  note          = {Also available as arXiv:1805.02070},
  pages         = {3778--3782},
  timestamp     = {2020-01-25T13:18:20.000+0100},
  title         = {Deep Reinforcement Learning for Playing {2.5D} Fighting Games},
  url           = {http://arxiv.org/abs/1805.02070},
  year          = {2018}
}