Soft robots offer many advantages over traditional
rigid robots. However, soft robots can be difficult to
control with standard control methods. Fortunately,
evolutionary algorithms can offer an elegant solution
to this problem. Instead of creating controls to handle
the intricate dynamics of these robots, we can simply
evolve the controls using a simulation to provide an
evaluation function. In this article, we show how such
a control paradigm can be applied to an emerging field
within soft robotics: robots based on tensegrity
structures. We take the model of the Spherical
Underactuated Planetary Exploration Robot ball
(SUPERball), an icosahedron tensegrity robot under
production at NASA Ames Research Center, develop a
rolling locomotion algorithm, and study the learned
behavior using an accurate model of the SUPERball
simulated in the NASA Tensegrity Robotics Toolkit. We
first present the historical-average fitness-shaping
algorithm for coevolutionary algorithms to speed up
learning while favoring robustness over optimality.
Second, we use a distributed control approach by
coevolving open-loop control signals for each
controller. Being simple and distributed, open-loop
controllers can be readily implemented on SUPERball
hardware without the need for sensor information or
precise coordination. We analyze signals of different
complexities and frequencies. Among the learned
policies, we take one of the best and use it to analyze
different aspects of the rolling gait, such as lengths,
tensions, and energy consumption. We also discuss the
correlation between the signals controlling different
parts of the tensegrity robot.
%0 Journal Article
%1 iscen-learning-tensegrity-locomotion-2015
%A Iscen, Atil
%A Caluwaerts, Ken
%A Bruce, Jonathan
%A Agogino, Adrian
%A SunSpiral, Vytas
%A Tumer, Kagan
%D 2015
%I MIT Press - Journals
%J Artificial Life
%K alife
%N 2
%P 119--140
%R 10.1162/artl_a_00163
%T Learning Tensegrity Locomotion Using Open-Loop Control
Signals and Coevolutionary Algorithms
%U http://dx.doi.org/10.1162/ARTL_a_00163
%V 21
%X Soft robots offer many advantages over traditional
rigid robots. However, soft robots can be difficult to
control with standard control methods. Fortunately,
evolutionary algorithms can offer an elegant solution
to this problem. Instead of creating controls to handle
the intricate dynamics of these robots, we can simply
evolve the controls using a simulation to provide an
evaluation function. In this article, we show how such
a control paradigm can be applied to an emerging field
within soft robotics: robots based on tensegrity
structures. We take the model of the Spherical
Underactuated Planetary Exploration Robot ball
(SUPERball), an icosahedron tensegrity robot under
production at NASA Ames Research Center, develop a
rolling locomotion algorithm, and study the learned
behavior using an accurate model of the SUPERball
simulated in the NASA Tensegrity Robotics Toolkit. We
first present the historical-average fitness-shaping
algorithm for coevolutionary algorithms to speed up
learning while favoring robustness over optimality.
Second, we use a distributed control approach by
coevolving open-loop control signals for each
controller. Being simple and distributed, open-loop
controllers can be readily implemented on SUPERball
hardware without the need for sensor information or
precise coordination. We analyze signals of different
complexities and frequencies. Among the learned
policies, we take one of the best and use it to analyze
different aspects of the rolling gait, such as lengths,
tensions, and energy consumption. We also discuss the
correlation between the signals controlling different
parts of the tensegrity robot.
@article{iscen-learning-tensegrity-locomotion-2015,
  abstract  = {Soft robots offer many advantages over traditional
               rigid robots. However, soft robots can be difficult to
               control with standard control methods. Fortunately,
               evolutionary algorithms can offer an elegant solution
               to this problem. Instead of creating controls to handle
               the intricate dynamics of these robots, we can simply
               evolve the controls using a simulation to provide an
               evaluation function. In this article, we show how such
               a control paradigm can be applied to an emerging field
               within soft robotics: robots based on tensegrity
               structures. We take the model of the Spherical
               Underactuated Planetary Exploration Robot ball
               (SUPERball), an icosahedron tensegrity robot under
               production at NASA Ames Research Center, develop a
               rolling locomotion algorithm, and study the learned
               behavior using an accurate model of the SUPERball
               simulated in the NASA Tensegrity Robotics Toolkit. We
               first present the historical-average fitness-shaping
               algorithm for coevolutionary algorithms to speed up
               learning while favoring robustness over optimality.
               Second, we use a distributed control approach by
               coevolving open-loop control signals for each
               controller. Being simple and distributed, open-loop
               controllers can be readily implemented on SUPERball
               hardware without the need for sensor information or
               precise coordination. We analyze signals of different
               complexities and frequencies. Among the learned
               policies, we take one of the best and use it to analyze
               different aspects of the rolling gait, such as lengths,
               tensions, and energy consumption. We also discuss the
               correlation between the signals controlling different
               parts of the tensegrity robot.},
  added-at  = {2015-08-10T16:52:44.000+0200},
  author    = {Iscen, Atil and Caluwaerts, Ken and Bruce, Jonathan and Agogino, Adrian and SunSpiral, Vytas and Tumer, Kagan},
  biburl    = {https://www.bibsonomy.org/bibtex/247fc62d482b81ca8d36ed587ba9c3ddc/mhwombat},
  doi       = {10.1162/artl_a_00163},
  interhash = {33a9b83f46fdd17e923d813d01ec70a6},
  intrahash = {47fc62d482b81ca8d36ed587ba9c3ddc},
  journal   = {Artificial Life},
  keywords  = {alife},
  month     = may,
  number    = {2},
  pages     = {119--140},
  publisher = {{MIT} Press - Journals},
  timestamp = {2016-07-12T19:25:30.000+0200},
  title     = {Learning Tensegrity Locomotion Using Open-Loop Control
               Signals and Coevolutionary Algorithms},
  url       = {https://doi.org/10.1162/artl_a_00163},
  volume    = {21},
  year      = {2015},
}