This paper presents a set of methods by which a learning agent can
learn a sequence of increasingly abstract and powerful interfaces
to control a robot whose sensorimotor apparatus and environment are
initially unknown. The result of the learning is a rich hierarchical
model of the robot's world (its sensorimotor apparatus and environment).
The learning methods rely on generic properties of the robot's world
such as almost-everywhere smooth effects of motor control signals
on sensory features. At the lowest level of the hierarchy, the learning
agent analyzes the effects of its motor control signals in order
to define a new set of control signals, one for each of the robot's
degrees of freedom. It uses a generate-and-test approach to define
sensory features that capture important aspects of the environment.
It uses linear regression to learn models that characterize context-dependent
effects of the control signals on the learned features. It uses these
models to define high-level control laws for finding and following
paths defined using constraints on the learned features. The agent
abstracts these control laws, which interact with the continuous
environment, to a finite set of actions that implement discrete state
transitions. At this point, the agent has abstracted the robot's
continuous world to a finite-state world and can use existing methods
to learn its structure. The learning agent's methods are evaluated
on several simulated robots with different sensorimotor systems and
environments.
%0 Journal Article
%1 Pierce:1997
%A Pierce, David
%A Kuipers, Benjamin J.
%D 1997
%J Artificial Intelligence
%K Map learning; Spatial semantic hierarchy; Abstract interfaces; Changes of representation; Feature learning; Action models; Cognitive maps
%P 169-227
%T Map learning with uninterpreted sensors and effectors
%V 92
%X This paper presents a set of methods by which a learning agent can
learn a sequence of increasingly abstract and powerful interfaces
to control a robot whose sensorimotor apparatus and environment are
initially unknown. The result of the learning is a rich hierarchical
model of the robot's world (its sensorimotor apparatus and environment).
The learning methods rely on generic properties of the robot's world
such as almost-everywhere smooth effects of motor control signals
on sensory features. At the lowest level of the hierarchy, the learning
agent analyzes the effects of its motor control signals in order
to define a new set of control signals, one for each of the robot's
degrees of freedom. It uses a generate-and-test approach to define
sensory features that capture important aspects of the environment.
It uses linear regression to learn models that characterize context-dependent
effects of the control signals on the learned features. It uses these
models to define high-level control laws for finding and following
paths defined using constraints on the learned features. The agent
abstracts these control laws, which interact with the continuous
environment, to a finite set of actions that implement discrete state
transitions. At this point, the agent has abstracted the robot's
continuous world to a finite-state world and can use existing methods
to learn its structure. The learning agent's methods are evaluated
on several simulated robots with different sensorimotor systems and
environments.
@article{Pierce:1997,
  abstract    = {This paper presents a set of methods by which a learning agent can
learn a sequence of increasingly abstract and powerful interfaces
to control a robot whose sensorimotor apparatus and environment are
initially unknown. The result of the learning is a rich hierarchical
model of the robot's world (its sensorimotor apparatus and environment).
The learning methods rely on generic properties of the robot's world
such as almost-everywhere smooth effects of motor control signals
on sensory features. At the lowest level of the hierarchy, the learning
agent analyzes the effects of its motor control signals in order
to define a new set of control signals, one for each of the robot's
degrees of freedom. It uses a generate-and-test approach to define
sensory features that capture important aspects of the environment.
It uses linear regression to learn models that characterize context-dependent
effects of the control signals on the learned features. It uses these
models to define high-level control laws for finding and following
paths defined using constraints on the learned features. The agent
abstracts these control laws, which interact with the continuous
environment, to a finite set of actions that implement discrete state
transitions. At this point, the agent has abstracted the robot's
continuous world to a finite-state world and can use existing methods
to learn its structure. The learning agent's methods are evaluated
on several simulated robots with different sensorimotor systems and
environments.},
  added-at    = {2009-06-26T15:25:19.000+0200},
  author      = {Pierce, David and Kuipers, Benjamin J.},
  biburl      = {https://www.bibsonomy.org/bibtex/27711a52ca843136e6d79691fc1927705/butz},
  description = {diverse cognitive systems bib},
  doi         = {10.1016/S0004-3702(96)00051-3},
  interhash   = {9ae4fc3075f6b78e4e2c47f1918f1995},
  intrahash   = {7711a52ca843136e6d79691fc1927705},
  journal     = {Artificial Intelligence},
  keywords    = {Map learning; Spatial semantic hierarchy; Abstract interfaces; Changes of representation; Feature learning; Action models; Cognitive maps},
  number      = {1-2},
  owner       = {butz},
  pages       = {169--227},
  timestamp   = {2009-06-26T15:25:50.000+0200},
  title       = {Map learning with uninterpreted sensors and effectors},
  volume      = {92},
  year        = {1997}
}