We propose a conceptually simple and lightweight framework for deep
reinforcement learning that uses asynchronous gradient descent for optimization
of deep neural network controllers. We present asynchronous variants of four
standard reinforcement learning algorithms and show that parallel
actor-learners have a stabilizing effect on training, allowing all four methods
to successfully train neural network controllers. The best performing method,
an asynchronous variant of actor-critic, surpasses the current state-of-the-art
on the Atari domain while training for half the time on a single multi-core CPU
instead of a GPU. Furthermore, we show that asynchronous actor-critic succeeds
on a wide variety of continuous motor control problems as well as on a new task
of navigating random 3D mazes using a visual input.
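
The core mechanism described above — several parallel actor-learners applying lock-free gradient updates to a shared set of parameters, in the style of Hogwild! — can be sketched in a few lines. The Python snippet below is a toy illustration on a synthetic regression problem, not the paper's actor-critic algorithm: the objective, the worker name actor_learner, the learning rate, and the thread count are all assumptions made for the example.

import threading
import numpy as np

# Toy Hogwild!-style asynchronous gradient descent: several threads
# share one parameter vector and apply lock-free updates, the update
# scheme the paper's parallel actor-learners build on.
rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 4))                 # synthetic inputs
true_w = np.array([1.0, -2.0, 0.5, 3.0])       # target parameters
y = X @ true_w + 0.01 * rng.normal(size=1000)  # noisy targets

w = np.zeros(4)   # shared parameters, deliberately unprotected by a lock
LR = 0.01

def actor_learner(seed, steps=5000):
    # One worker: sample a minibatch, compute a gradient against a
    # possibly stale snapshot of w, then update w in place.
    local_rng = np.random.default_rng(seed)
    for _ in range(steps):
        idx = local_rng.integers(0, len(X), size=32)
        snapshot = w.copy()                    # may be stale by update time
        grad = 2.0 * X[idx].T @ (X[idx] @ snapshot - y[idx]) / len(idx)
        w[:] -= LR * grad                      # lock-free in-place update

threads = [threading.Thread(target=actor_learner, args=(s,)) for s in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print("learned:", np.round(w, 2), "target:", true_w)

In the paper itself, each worker interacts with its own copy of the environment and accumulates n-step actor-critic gradients before updating; the stale-snapshot, lock-free write pattern is the part this sketch reproduces.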
%0 Journal Article
%1 mnih2016asynchronous
%A Mnih, Volodymyr
%A Badia, Adrià Puigdomènech
%A Mirza, Mehdi
%A Graves, Alex
%A Lillicrap, Timothy P.
%A Harley, Tim
%A Silver, David
%A Kavukcuoglu, Koray
%D 2016
%T Asynchronous Methods for Deep Reinforcement Learning
%U http://arxiv.org/abs/1602.01783
@article{mnih2016asynchronous,
author = {Mnih, Volodymyr and Badia, Adrià Puigdomènech and Mirza, Mehdi and Graves, Alex and Lillicrap, Timothy P. and Harley, Tim and Silver, David and Kavukcuoglu, Koray},
note = {cite arxiv:1602.01783},
title = {Asynchronous Methods for Deep Reinforcement Learning},
url = {http://arxiv.org/abs/1602.01783},
year = 2016
}