The success of machine learning algorithms generally depends on data
representation, and we hypothesize that this is because different
representations can entangle and hide more or less the different explanatory
factors of variation behind the data. Although specific domain knowledge can be
used to help design representations, learning with generic priors can also be
used, and the quest for AI is motivating the design of more powerful
representation-learning algorithms implementing such priors. This paper reviews
recent work in the area of unsupervised feature learning and joint training of
deep learning, covering advances in probabilistic models, auto-encoders,
manifold learning, and deep architectures. This motivates longer-term
unanswered questions about the appropriate objectives for learning good
representations, for computing representations (i.e., inference), and the
geometrical connections between representation learning, density estimation and
manifold learning.
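
For orientation, the abstract names auto-encoders among the representation learners the review covers; a standard formulation (the notation below is a common textbook sketch, not quoted from the paper) trains an encoder f and a decoder g to minimize reconstruction error on unlabeled data, with the encoder's output h serving as the learned representation:

% Illustrative auto-encoder objective; notation is an assumption, not the paper's.
\begin{align*}
  h       &= f_{\theta}(x) = s(Wx + b)       && \text{encoder: learned representation}\\
  \hat{x} &= g_{\theta'}(h) = s(W'h + b')    && \text{decoder: reconstruction}\\
  \mathcal{J}(\theta, \theta') &= \sum_{x \in \mathcal{D}} \lVert x - \hat{x} \rVert^{2} && \text{reconstruction error over unlabeled data } \mathcal{D}
\end{align*}
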
@misc{bengio2012representation,
  author = {Bengio, Yoshua and Courville, Aaron and Vincent, Pascal},
  biburl = {https://www.bibsonomy.org/bibtex/23456f601493e2c8b8ffb06dfa7ecc4fd/gromgull},
  keywords = {deep-learning feature-learning machine-learning},
  note = {cite arxiv:1206.5538},
  title = {Representation Learning: A Review and New Perspectives},
  url = {http://arxiv.org/abs/1206.5538},
  year = 2012
}
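
To use the entry, a minimal LaTeX sketch (assuming the record above is saved as refs.bib; the file name is an assumption):

\documentclass{article}
\begin{document}
Representation learning is surveyed by Bengio et al.~\cite{bengio2012representation}.
\bibliographystyle{plain}
\bibliography{refs}  % refs.bib holds the @misc entry above
\end{document}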