Nonlinear independent component analysis (ICA) provides an appealing
framework for unsupervised feature learning, but the models proposed so far are
not identifiable. Here, we first propose a new intuitive principle of
unsupervised deep learning from time series which uses the nonstationary
structure of the data. Our learning principle, time-contrastive learning (TCL),
finds a representation which allows optimal discrimination of time segments
(windows). Surprisingly, we show how TCL can be related to a nonlinear ICA
model, when ICA is redefined to include temporal nonstationarities. In
particular, we show that TCL combined with linear ICA estimates the nonlinear
ICA model up to point-wise transformations of the sources, and this solution is
unique --- thus providing the first identifiability result for nonlinear ICA
which is rigorous, constructive, as well as very general.
Description
[1605.06336] Unsupervised Feature Extraction by Time-Contrastive Learning and Nonlinear ICA
%0 Journal Article
%1 hyvarinen2016unsupervised
%A Hyvarinen, Aapo
%A Morioka, Hiroshi
%D 2016
%K causal-analysis invariance
%T Unsupervised Feature Extraction by Time-Contrastive Learning and
Nonlinear ICA
%U http://arxiv.org/abs/1605.06336
%X Nonlinear independent component analysis (ICA) provides an appealing
framework for unsupervised feature learning, but the models proposed so far are
not identifiable. Here, we first propose a new intuitive principle of
unsupervised deep learning from time series which uses the nonstationary
structure of the data. Our learning principle, time-contrastive learning (TCL),
finds a representation which allows optimal discrimination of time segments
(windows). Surprisingly, we show how TCL can be related to a nonlinear ICA
model, when ICA is redefined to include temporal nonstationarities. In
particular, we show that TCL combined with linear ICA estimates the nonlinear
ICA model up to point-wise transformations of the sources, and this solution is
unique --- thus providing the first identifiability result for nonlinear ICA
which is rigorous, constructive, as well as very general.
@misc{hyvarinen2016unsupervised,
  abstract      = {Nonlinear independent component analysis (ICA) provides an appealing
framework for unsupervised feature learning, but the models proposed so far are
not identifiable. Here, we first propose a new intuitive principle of
unsupervised deep learning from time series which uses the nonstationary
structure of the data. Our learning principle, time-contrastive learning (TCL),
finds a representation which allows optimal discrimination of time segments
(windows). Surprisingly, we show how TCL can be related to a nonlinear ICA
model, when ICA is redefined to include temporal nonstationarities. In
particular, we show that TCL combined with linear ICA estimates the nonlinear
ICA model up to point-wise transformations of the sources, and this solution is
unique --- thus providing the first identifiability result for nonlinear ICA
which is rigorous, constructive, as well as very general.},
  added-at      = {2019-08-06T11:40:35.000+0200},
  archiveprefix = {arXiv},
  author        = {Hyv{\"a}rinen, Aapo and Morioka, Hiroshi},
  biburl        = {https://www.bibsonomy.org/bibtex/2d1e2f90b0b6db2d5ec824c0ceee4dd5d/kirk86},
  description   = {[1605.06336] Unsupervised Feature Extraction by Time-Contrastive Learning and Nonlinear ICA},
  eprint        = {1605.06336},
  interhash     = {0bea30a81b3f877627b070802cec4afa},
  intrahash     = {d1e2f90b0b6db2d5ec824c0ceee4dd5d},
  keywords      = {causal-analysis invariance},
  note          = {cite arxiv:1605.06336},
  timestamp     = {2019-08-06T11:40:35.000+0200},
  title         = {Unsupervised Feature Extraction by Time-Contrastive Learning and
                   Nonlinear {ICA}},
  url           = {http://arxiv.org/abs/1605.06336},
  year          = {2016}
}