Neural Processes (NPs) (Garnelo et al 2018a;b) approach regression by
learning to map a context set of observed input-output pairs to a distribution
over regression functions. Each function models the distribution of the output
given an input, conditioned on the context. NPs have the benefit of fitting
observed data efficiently with linear complexity in the number of context
input-output pairs, and can learn a wide family of conditional distributions;
they learn predictive distributions conditioned on context sets of arbitrary
size. Nonetheless, we show that NPs suffer a fundamental drawback of
underfitting, giving inaccurate predictions at the inputs of the observed data
they condition on. We address this issue by incorporating attention into NPs,
allowing each input location to attend to the relevant context points for the
prediction. We show that this greatly improves the accuracy of predictions,
results in noticeably faster training, and expands the range of functions that
can be modelled.
%0 Journal Article
%1 kim2019attentive
%A Kim, Hyunjik
%A Mnih, Andriy
%A Schwarz, Jonathan
%A Garnelo, Marta
%A Eslami, Ali
%A Rosenbaum, Dan
%A Vinyals, Oriol
%A Teh, Yee Whye
%D 2019
%K bayesian deep-learning memory stochastic
%T Attentive Neural Processes
%U http://arxiv.org/abs/1901.05761
%X Neural Processes (NPs) (Garnelo et al 2018a;b) approach regression by
learning to map a context set of observed input-output pairs to a distribution
over regression functions. Each function models the distribution of the output
given an input, conditioned on the context. NPs have the benefit of fitting
observed data efficiently with linear complexity in the number of context
input-output pairs, and can learn a wide family of conditional distributions;
they learn predictive distributions conditioned on context sets of arbitrary
size. Nonetheless, we show that NPs suffer a fundamental drawback of
underfitting, giving inaccurate predictions at the inputs of the observed data
they condition on. We address this issue by incorporating attention into NPs,
allowing each input location to attend to the relevant context points for the
prediction. We show that this greatly improves the accuracy of predictions,
results in noticeably faster training, and expands the range of functions that
can be modelled.
@article{kim2019attentive,
  abstract      = {Neural Processes (NPs) (Garnelo et al 2018a;b) approach regression by
learning to map a context set of observed input-output pairs to a distribution
over regression functions. Each function models the distribution of the output
given an input, conditioned on the context. NPs have the benefit of fitting
observed data efficiently with linear complexity in the number of context
input-output pairs, and can learn a wide family of conditional distributions;
they learn predictive distributions conditioned on context sets of arbitrary
size. Nonetheless, we show that NPs suffer a fundamental drawback of
underfitting, giving inaccurate predictions at the inputs of the observed data
they condition on. We address this issue by incorporating attention into NPs,
allowing each input location to attend to the relevant context points for the
prediction. We show that this greatly improves the accuracy of predictions,
results in noticeably faster training, and expands the range of functions that
can be modelled.},
  added-at      = {2019-07-15T04:33:05.000+0200},
  author        = {Kim, Hyunjik and Mnih, Andriy and Schwarz, Jonathan and Garnelo, Marta and Eslami, Ali and Rosenbaum, Dan and Vinyals, Oriol and Teh, Yee Whye},
  biburl        = {https://www.bibsonomy.org/bibtex/22fe7673088214b9f87c86d1c0e205a3d/kirk86},
  description   = {[1901.05761] Attentive Neural Processes},
  interhash     = {8f9e46672d9fc1e64f27ab14058569c1},
  intrahash     = {2fe7673088214b9f87c86d1c0e205a3d},
  journal       = {arXiv preprint arXiv:1901.05761},
  eprint        = {1901.05761},
  archiveprefix = {arXiv},
  keywords      = {bayesian deep-learning memory stochastic},
  note          = {cite arxiv:1901.05761},
  timestamp     = {2019-07-15T04:33:05.000+0200},
  title         = {Attentive {Neural Processes}},
  url           = {http://arxiv.org/abs/1901.05761},
  year          = {2019},
}