The Gumbel-Max trick is the basis of many relaxed gradient estimators. These
estimators are easy to implement and low variance, but the goal of scaling them
comprehensively to large combinatorial distributions is still outstanding.
Working within the perturbation model framework, we introduce stochastic
softmax tricks, which generalize the Gumbel-Softmax trick to combinatorial
spaces. Our framework is a unified perspective on existing relaxed estimators
for perturbation models, and it contains many novel relaxations. We design
structured relaxations for subset selection, spanning trees, arborescences, and
others. When compared to less structured baselines, we find that stochastic
softmax tricks can be used to train latent variable models that perform better
and discover more latent structure.
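As a quick illustration of the tricks named in the abstract, the following is a minimal NumPy sketch (not taken from the paper) of the Gumbel-Max trick and its Gumbel-Softmax relaxation. The paper's stochastic softmax tricks generalize this idea by perturbing the utilities of a combinatorial optimization problem (e.g. over subsets, spanning trees, or arborescences) and relaxing its solution; the sketch below only covers the unstructured categorical case.

import numpy as np

def sample_gumbel(shape, rng):
    """Standard Gumbel(0, 1) noise via inverse CDF: -log(-log(U))."""
    u = rng.uniform(low=1e-12, high=1.0, size=shape)
    return -np.log(-np.log(u))

def gumbel_max_sample(logits, rng):
    """Exact categorical sample: argmax over Gumbel-perturbed logits."""
    return np.argmax(logits + sample_gumbel(logits.shape, rng))

def gumbel_softmax_sample(logits, tau, rng):
    """Relaxed sample: softmax of perturbed logits at temperature tau.
    As tau -> 0 this approaches the one-hot Gumbel-Max sample."""
    z = (logits + sample_gumbel(logits.shape, rng)) / tau
    z = z - z.max()              # subtract max for numerical stability
    e = np.exp(z)
    return e / e.sum()

rng = np.random.default_rng(0)
logits = np.array([1.0, 0.5, -0.5, 2.0])
print(gumbel_max_sample(logits, rng))           # hard (discrete) sample
print(gumbel_softmax_sample(logits, 0.5, rng))  # soft (relaxed) sample

Because the soft sample is a differentiable function of the logits and the noise, gradients can flow through it (the reparameterization trick), which is what makes relaxations of this kind low-variance gradient estimators for latent discrete variables.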
Description: [2006.08063] Gradient Estimation with Stochastic Softmax Tricks
%0 Journal Article
%1 paulus2020gradient
%A Paulus, Max B.
%A Choi, Dami
%A Tarlow, Daniel
%A Krause, Andreas
%A Maddison, Chris J.
%D 2020
%K combinatorics gradients optimization readings relaxation stochastic
%T Gradient Estimation with Stochastic Softmax Tricks
%U http://arxiv.org/abs/2006.08063
%X The Gumbel-Max trick is the basis of many relaxed gradient estimators. These
estimators are easy to implement and low variance, but the goal of scaling them
comprehensively to large combinatorial distributions is still outstanding.
Working within the perturbation model framework, we introduce stochastic
softmax tricks, which generalize the Gumbel-Softmax trick to combinatorial
spaces. Our framework is a unified perspective on existing relaxed estimators
for perturbation models, and it contains many novel relaxations. We design
structured relaxations for subset selection, spanning trees, arborescences, and
others. When compared to less structured baselines, we find that stochastic
softmax tricks can be used to train latent variable models that perform better
and discover more latent structure.
@article{paulus2020gradient,
abstract = {The Gumbel-Max trick is the basis of many relaxed gradient estimators. These
estimators are easy to implement and low variance, but the goal of scaling them
comprehensively to large combinatorial distributions is still outstanding.
Working within the perturbation model framework, we introduce stochastic
softmax tricks, which generalize the Gumbel-Softmax trick to combinatorial
spaces. Our framework is a unified perspective on existing relaxed estimators
for perturbation models, and it contains many novel relaxations. We design
structured relaxations for subset selection, spanning trees, arborescences, and
others. When compared to less structured baselines, we find that stochastic
softmax tricks can be used to train latent variable models that perform better
and discover more latent structure.},
added-at = {2020-07-16T13:14:57.000+0200},
author = {Paulus, Max B. and Choi, Dami and Tarlow, Daniel and Krause, Andreas and Maddison, Chris J.},
biburl = {https://www.bibsonomy.org/bibtex/2739b9b50421d8ec346391b986dfaac12/kirk86},
description = {[2006.08063] Gradient Estimation with Stochastic Softmax Tricks},
interhash = {21cc6202c8eba67208c40103f8240759},
intrahash = {739b9b50421d8ec346391b986dfaac12},
keywords = {combinatorics gradients optimization readings relaxation stochastic},
note = {cite arxiv:2006.08063},
timestamp = {2020-07-16T13:14:57.000+0200},
title = {Gradient Estimation with Stochastic Softmax Tricks},
url = {http://arxiv.org/abs/2006.08063},
year = 2020
}