Reinforcement learning (RL) for robotics is challenging due to the difficulty
in hand-engineering a dense cost function, which can lead to unintended
behavior, and dynamical uncertainty, which makes it hard to enforce constraints
during learning. We address these issues with a new model-based reinforcement
learning algorithm, safety augmented value estimation from demonstrations
(SAVED), which uses supervision that only identifies task completion and a
modest set of suboptimal demonstrations to constrain exploration and learn
efficiently while handling complex constraints. We derive iterative improvement
guarantees for SAVED under known stochastic nonlinear systems. We then compare
SAVED with three state-of-the-art model-based and model-free RL algorithms on six
standard simulation benchmarks involving navigation and manipulation, and on two
real-world tasks on the da Vinci surgical robot. Results suggest that SAVED
outperforms prior methods in terms of success rate, constraint satisfaction,
and sample efficiency, making it feasible to safely learn complex maneuvers
directly on a real robot in less than an hour. For tasks on the robot,
baselines succeed less than 5% of the time, while SAVED has a success rate of
over 75% in the first 50 training iterations.
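
The abstract compresses the method into a single sentence, so a toy sketch may help make the moving parts concrete: a sparse cost that only signals task completion, a value estimate fit to suboptimal demonstrations, and sampling-based MPC whose plans are constrained to end near states from which the task has already been completed. The Python sketch below is a minimal illustration under those assumptions only; the known point-mass dynamics, the nearest-neighbor value estimate, and the distance-based safe set are stand-ins invented here for illustration, not the paper's learned stochastic dynamics model, learned value function, or actual safety mechanism.

import numpy as np

rng = np.random.default_rng(0)
GOAL = np.array([1.0, 1.0])                  # task: drive a 2-D point mass to GOAL
TOL, DT, HORIZON, N_SAMPLES = 0.1, 0.1, 10, 256

def dynamics(x, u):
    # Toy known deterministic point mass; the paper learns a stochastic model instead.
    return x + DT * u

def sparse_cost(x):
    # Supervision only identifies task completion: cost 1 until the goal is reached.
    return 0.0 if np.linalg.norm(x - GOAL) < TOL else 1.0

# Suboptimal "demonstrations": noisy proportional-controller rollouts to the goal.
demo_states, demo_ctg = [], []
for _ in range(10):
    x = np.zeros(2)
    traj = [x]
    while sparse_cost(x) > 0:
        u = np.clip((GOAL - x) + 0.3 * rng.standard_normal(2), -1, 1)
        x = dynamics(x, u)
        traj.append(x)
    for t, s in enumerate(traj):
        demo_states.append(s)
        demo_ctg.append(float(len(traj) - 1 - t))  # steps-to-completion = cost-to-go
demo_states, demo_ctg = np.array(demo_states), np.array(demo_ctg)

def value(x):
    # Nearest-neighbor cost-to-go from demo data (a stand-in for the paper's
    # learned and iteratively refined value function).
    return demo_ctg[np.argmin(np.linalg.norm(demo_states - x, axis=1))]

def in_safe_set(x, radius=0.2):
    # Crude stand-in for the safety constraint: a plan is admissible only if it
    # ends near a state from which task completion has been observed.
    return np.min(np.linalg.norm(demo_states - x, axis=1)) < radius

def mpc_action(x):
    # Random-shooting MPC: sample action sequences, discard those whose terminal
    # state violates the safety constraint, score the rest by accumulated
    # sparse cost plus the terminal value estimate.
    best_u, best_score = None, np.inf
    for _ in range(N_SAMPLES):
        us = rng.uniform(-1, 1, size=(HORIZON, 2))
        xt, score = x, 0.0
        for u in us:
            xt = dynamics(xt, u)
            score += sparse_cost(xt)
        if not in_safe_set(xt):
            continue  # reject plans that leave the region prior successes cover
        score += value(xt)
        if score < best_score:
            best_u, best_score = us[0], score
    return best_u if best_u is not None else rng.uniform(-1, 1, 2)

x = np.zeros(2)
for step in range(200):
    if sparse_cost(x) == 0.0:
        print(f"reached the goal at step {step}")
        break
    x = dynamics(x, mpc_action(x))

Rejecting plans whose terminal state falls outside the demonstration-covered region is what keeps exploration constrained here: the sparse cost alone gives the planner no signal until the goal is reached, so the demonstration-derived value estimate and safe set do the steering.
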
@article{thananjeyan2019extending,
author = {Thananjeyan, Brijen and Balakrishna, Ashwin and Rosolia, Ugo and Li, Felix and McAllister, Rowan and Gonzalez, Joseph E. and Levine, Sergey and Borrelli, Francesco and Goldberg, Ken},
keywords = {reinforcement-learning sampling},
note = {arXiv:1905.13402},
title = {Extending Deep Model Predictive Control with Safety Augmented Value Estimation from Demonstrations},
url = {http://arxiv.org/abs/1905.13402},
year = 2019
}