Amortized inference allows latent-variable models trained via variational
learning to scale to large datasets. The quality of approximate inference is
determined by two factors: a) the capacity of the variational distribution to
match the true posterior and b) the ability of the recognition network to
produce good variational parameters for each datapoint. We examine approximate
inference in variational autoencoders in terms of these factors. We find that
divergence from the true posterior is often due to imperfect recognition
networks, rather than the limited complexity of the approximating distribution.
We show that this is due partly to the generator learning to accommodate the
choice of approximation. Furthermore, we show that the parameters used to
increase the expressiveness of the approximation play a role in generalizing
inference rather than simply improving the complexity of the approximation.
Description
[1801.03558] Inference Suboptimality in Variational Autoencoders
%0 Journal Article
%1 cremer2018inference
%A Cremer, Chris
%A Li, Xuechen
%A Duvenaud, David
%D 2018
%K amortised optimization readings stochastic variational
%T Inference Suboptimality in Variational Autoencoders
%U http://arxiv.org/abs/1801.03558
%X Amortized inference allows latent-variable models trained via variational
learning to scale to large datasets. The quality of approximate inference is
determined by two factors: a) the capacity of the variational distribution to
match the true posterior and b) the ability of the recognition network to
produce good variational parameters for each datapoint. We examine approximate
inference in variational autoencoders in terms of these factors. We find that
divergence from the true posterior is often due to imperfect recognition
networks, rather than the limited complexity of the approximating distribution.
We show that this is due partly to the generator learning to accommodate the
choice of approximation. Furthermore, we show that the parameters used to
increase the expressiveness of the approximation play a role in generalizing
inference rather than simply improving the complexity of the approximation.
@article{cremer2018inference,
  abstract      = {Amortized inference allows latent-variable models trained via variational
learning to scale to large datasets. The quality of approximate inference is
determined by two factors: a) the capacity of the variational distribution to
match the true posterior and b) the ability of the recognition network to
produce good variational parameters for each datapoint. We examine approximate
inference in variational autoencoders in terms of these factors. We find that
divergence from the true posterior is often due to imperfect recognition
networks, rather than the limited complexity of the approximating distribution.
We show that this is due partly to the generator learning to accommodate the
choice of approximation. Furthermore, we show that the parameters used to
increase the expressiveness of the approximation play a role in generalizing
inference rather than simply improving the complexity of the approximation.},
  added-at      = {2020-01-08T15:29:14.000+0100},
  archiveprefix = {arXiv},
  author        = {Cremer, Chris and Li, Xuechen and Duvenaud, David},
  biburl        = {https://www.bibsonomy.org/bibtex/2ae2491dded224c25dc3f9e701a517ce4/kirk86},
  description   = {[1801.03558] Inference Suboptimality in Variational Autoencoders},
  eprint        = {1801.03558},
  interhash     = {04dd11aea6f37dc53eb98a4756148e95},
  intrahash     = {ae2491dded224c25dc3f9e701a517ce4},
  keywords      = {amortised optimization readings stochastic variational},
  note          = {Presented at ICML 2018},
  timestamp     = {2020-01-08T15:29:14.000+0100},
  title         = {Inference Suboptimality in {Variational Autoencoders}},
  url           = {http://arxiv.org/abs/1801.03558},
  year          = {2018},
}