Likelihood-free methods perform parameter inference in stochastic simulator
models where evaluating the likelihood is intractable but sampling synthetic
data is possible. One class of methods for this likelihood-free problem uses a
classifier to distinguish between pairs of parameter-observation samples
generated using the simulator and pairs sampled from some reference
distribution; the classifier thereby implicitly learns a density ratio
proportional to the likelihood. Another popular class of methods fits a
conditional distribution to
the parameter posterior directly, and a particular recent variant allows for
the use of flexible neural density estimators for this task. In this work, we
show that both of these approaches can be unified under a general contrastive
learning scheme, and clarify how they should be run and compared.
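
The general contrastive scheme can be made concrete with a short sketch. The
following is a minimal PyTorch illustration, not the authors' code: the
ScoreNet architecture, the toy Gaussian simulator, and all hyperparameters are
assumptions for the example. A network scores parameter-observation pairs and
is trained, via K-way classification, to identify which of K candidate
parameters actually generated each observation; the softmax over scores
implicitly estimates the density ratio described above.

import torch
import torch.nn as nn

class ScoreNet(nn.Module):
    # Scores a (theta, x) pair; higher means "theta plausibly generated x".
    def __init__(self, theta_dim, x_dim, hidden=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(theta_dim + x_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, 1),
        )

    def forward(self, theta, x):
        return self.net(torch.cat([theta, x], dim=-1)).squeeze(-1)

def contrastive_loss(score_net, theta, x, K=10):
    # K-way classification: which candidate parameter produced x?
    # theta: (B, theta_dim) parameters that actually generated x.
    # x:     (B, x_dim) corresponding simulator outputs.
    # The K-1 contrasting parameters are drawn by shuffling the batch,
    # a common stand-in for independent draws from the prior.
    B = theta.shape[0]
    candidates = [theta] + [theta[torch.randperm(B)] for _ in range(K - 1)]
    candidates = torch.stack(candidates, dim=1)       # (B, K, theta_dim)
    x_rep = x.unsqueeze(1).expand(-1, K, -1)          # (B, K, x_dim)
    logits = score_net(candidates, x_rep)             # (B, K)
    labels = torch.zeros(B, dtype=torch.long)         # true theta is index 0
    return nn.functional.cross_entropy(logits, labels)

# Toy usage with a hypothetical Gaussian simulator: x ~ N(theta, I).
net = ScoreNet(theta_dim=2, x_dim=2)
opt = torch.optim.Adam(net.parameters(), lr=1e-3)
for step in range(100):
    theta = torch.randn(128, 2)       # draws from a standard-normal prior
    x = theta + torch.randn(128, 2)   # simulate observations
    loss = contrastive_loss(net, theta, x)
    opt.zero_grad(); loss.backward(); opt.step()

In the idealized setting, with candidates drawn exchangeably from the prior,
the optimal softmax assigns candidate k a probability roughly proportional to
p(x|theta_k), so score differences recover log likelihood ratios up to a
constant; in the binary case (K = 2), the setup reduces to the classifier that
distinguishes simulator pairs from reference pairs.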
@article{durkan2020contrastive,
author = {Durkan, Conor and Murray, Iain and Papamakarios, George},
keywords = {approximate bayesian},
journal = {arXiv preprint arXiv:2002.03712},
title = {On Contrastive Learning for Likelihood-free Inference},
url = {http://arxiv.org/abs/2002.03712},
year = 2020
}