Advances in unsupervised learning enable reconstruction and generation of
samples from complex distributions, but this success is marred by the
inscrutability of the representations learned. We propose an
information-theoretic approach to characterizing disentanglement and dependence
in representation learning using multivariate mutual information, also called
total correlation. The principle of Total Cor-relation Ex-planation (CorEx) has
motivated successful unsupervised learning applications across a variety of
domains, but under some restrictive assumptions. Here we relax those
restrictions by introducing a flexible variational lower bound to CorEx.
Surprisingly, we find that this lower bound is equivalent to the evidence lower
bound (ELBO) of variational autoencoders (VAEs) under certain conditions. This
information-theoretic view of VAEs deepens our understanding of hierarchical
VAEs and motivates a new algorithm, AnchorVAE, which makes latent codes more
interpretable through information maximization and enables generation of richer
and more realistic samples.
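For readers unfamiliar with the quantities named above, the LaTeX sketch below
records the standard definitions; the symbols X = (X_1, ..., X_n) for the
observed variables and Z for the latent code are notational assumptions, since
the abstract itself fixes no notation.

% Total correlation (Watanabe, 1960): the KL divergence between the joint
% distribution and the product of its marginals, equivalently a difference
% of entropies. It is zero exactly when the X_i are independent.
\mathrm{TC}(X) \;=\; D_{\mathrm{KL}}\!\left( p(x) \,\middle\|\, \textstyle\prod_{i=1}^{n} p(x_i) \right) \;=\; \sum_{i=1}^{n} H(X_i) \;-\; H(X)

% CorEx seeks a latent code Z that "explains" the dependence in X by
% maximizing how much conditioning on Z reduces the total correlation:
\max_{p(z \mid x)} \;\; \mathrm{TC}(X) - \mathrm{TC}(X \mid Z)

% Evidence lower bound (ELBO) maximized by a VAE, to which the paper's
% relaxed CorEx bound is related under certain conditions:
\log p(x) \;\geq\; \mathbb{E}_{q(z \mid x)}\!\left[ \log p(x \mid z) \right] \;-\; D_{\mathrm{KL}}\!\left( q(z \mid x) \,\middle\|\, p(z) \right)

Driving TC(X | Z) toward zero means the latent code accounts for all of the
dependence among the observed variables, which is the sense in which CorEx
latent factors "explain" correlation.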
@misc{gao2018autoencoding,
author = {Gao, Shuyang and Brekelmans, Rob and Ver Steeg, Greg and Galstyan, Aram},
eprint = {1802.05822},
archiveprefix = {arXiv},
title = {Auto-Encoding Total Correlation Explanation},
url = {http://arxiv.org/abs/1802.05822},
year = 2018
}