Auto-encoding generative adversarial networks (GANs) combine the standard GAN
algorithm, which discriminates between real and model-generated data, with a
reconstruction loss given by an auto-encoder. Such models aim to prevent mode
collapse in the learned generative model by ensuring that it is grounded in all
the available training data. In this paper, we develop a principle upon which
auto-encoders can be combined with generative adversarial networks by
exploiting the hierarchical structure of the generative model. The underlying
principle shows that variational inference can be used as a basic tool for
learning, but with the intractable likelihood replaced by a synthetic
likelihood, and the unknown posterior distribution replaced by an implicit
distribution; both synthetic likelihoods and implicit posterior distributions
can be learned using discriminators. This allows us to develop a natural fusion
of variational auto-encoders and generative adversarial networks, combining the
best of both these methods. We describe a unified objective for optimization,
discuss the constraints needed to guide learning, connect to the wide range of
existing work, and use a battery of tests to systematically and quantitatively
assess the performance of our method.
Description
[1706.04987] Variational Approaches for Auto-Encoding Generative Adversarial Networks
%0 Journal Article
%1 rosca2017variational
%A Rosca, Mihaela
%A Lakshminarayanan, Balaji
%A Warde-Farley, David
%A Mohamed, Shakir
%D 2017
%K ali gans machine-learning vae
%T Variational Approaches for Auto-Encoding Generative Adversarial Networks
%U http://arxiv.org/abs/1706.04987
%X Auto-encoding generative adversarial networks (GANs) combine the standard GAN
algorithm, which discriminates between real and model-generated data, with a
reconstruction loss given by an auto-encoder. Such models aim to prevent mode
collapse in the learned generative model by ensuring that it is grounded in all
the available training data. In this paper, we develop a principle upon which
auto-encoders can be combined with generative adversarial networks by
exploiting the hierarchical structure of the generative model. The underlying
principle shows that variational inference can be used as a basic tool for
learning, but with the intractable likelihood replaced by a synthetic
likelihood, and the unknown posterior distribution replaced by an implicit
distribution; both synthetic likelihoods and implicit posterior distributions
can be learned using discriminators. This allows us to develop a natural fusion
of variational auto-encoders and generative adversarial networks, combining the
best of both these methods. We describe a unified objective for optimization,
discuss the constraints needed to guide learning, connect to the wide range of
existing work, and use a battery of tests to systematically and quantitatively
assess the performance of our method.
@article{rosca2017variational,
  abstract      = {Auto-encoding generative adversarial networks (GANs) combine the standard GAN
algorithm, which discriminates between real and model-generated data, with a
reconstruction loss given by an auto-encoder. Such models aim to prevent mode
collapse in the learned generative model by ensuring that it is grounded in all
the available training data. In this paper, we develop a principle upon which
auto-encoders can be combined with generative adversarial networks by
exploiting the hierarchical structure of the generative model. The underlying
principle shows that variational inference can be used as a basic tool for
learning, but with the intractable likelihood replaced by a synthetic
likelihood, and the unknown posterior distribution replaced by an implicit
distribution; both synthetic likelihoods and implicit posterior distributions
can be learned using discriminators. This allows us to develop a natural fusion
of variational auto-encoders and generative adversarial networks, combining the
best of both these methods. We describe a unified objective for optimization,
discuss the constraints needed to guide learning, connect to the wide range of
existing work, and use a battery of tests to systematically and quantitatively
assess the performance of our method.},
  added-at      = {2017-06-30T21:36:01.000+0200},
  archiveprefix = {arXiv},
  author        = {Rosca, Mihaela and Lakshminarayanan, Balaji and Warde-Farley, David and Mohamed, Shakir},
  biburl        = {https://www.bibsonomy.org/bibtex/233a4507940345276cfcf9350bc01c4d1/hprop},
  description   = {[1706.04987] Variational Approaches for Auto-Encoding Generative Adversarial Networks},
  eprint        = {1706.04987},
  interhash     = {1d2ebde962b88491d5ef028d5e7aa011},
  intrahash     = {33a4507940345276cfcf9350bc01c4d1},
  keywords      = {ali gans machine-learning vae},
  note          = {cite arxiv:1706.04987},
  primaryclass  = {stat.ML},
  timestamp     = {2017-06-30T21:36:01.000+0200},
  title         = {Variational Approaches for Auto-Encoding Generative Adversarial Networks},
  url           = {http://arxiv.org/abs/1706.04987},
  year          = {2017}
}