In this work we explore a straightforward variational Bayes scheme for
Recurrent Neural Networks. Firstly, we show that a simple adaptation of
truncated backpropagation through time can yield good quality uncertainty
estimates and superior regularisation at only a small extra computational cost
during training, also reducing the amount of parameters by 80%. Secondly, we
demonstrate how a novel kind of posterior approximation yields further
improvements to the performance of Bayesian RNNs. We incorporate local gradient
information into the approximate posterior to sharpen it around the current
batch statistics. We show how this technique is not exclusive to recurrent
neural networks and can be applied more widely to train Bayesian neural
networks. We also empirically demonstrate how Bayesian RNNs are superior to
traditional RNNs on a language modelling benchmark and an image captioning
task, as well as showing how each of these methods improve our model over a
variety of other schemes for training them. We also introduce a new benchmark
for studying uncertainty for language models so future methods can be easily
compared.
%0 Generic
%1 fortunato2017bayesian
%A Fortunato, Meire
%A Blundell, Charles
%A Vinyals, Oriol
%D 2017
%K BNN to_read
%T Bayesian Recurrent Neural Networks
%U http://arxiv.org/abs/1704.02798
%X In this work we explore a straightforward variational Bayes scheme for
Recurrent Neural Networks. Firstly, we show that a simple adaptation of
truncated backpropagation through time can yield good quality uncertainty
estimates and superior regularisation at only a small extra computational cost
during training, also reducing the amount of parameters by 80%. Secondly, we
demonstrate how a novel kind of posterior approximation yields further
improvements to the performance of Bayesian RNNs. We incorporate local gradient
information into the approximate posterior to sharpen it around the current
batch statistics. We show how this technique is not exclusive to recurrent
neural networks and can be applied more widely to train Bayesian neural
networks. We also empirically demonstrate how Bayesian RNNs are superior to
traditional RNNs on a language modelling benchmark and an image captioning
task, as well as showing how each of these methods improve our model over a
variety of other schemes for training them. We also introduce a new benchmark
for studying uncertainty for language models so future methods can be easily
compared.
@misc{fortunato2017bayesian,
  abstract      = {In this work we explore a straightforward variational Bayes scheme for
                   Recurrent Neural Networks. Firstly, we show that a simple adaptation of
                   truncated backpropagation through time can yield good quality uncertainty
                   estimates and superior regularisation at only a small extra computational cost
                   during training, also reducing the amount of parameters by 80\%. Secondly, we
                   demonstrate how a novel kind of posterior approximation yields further
                   improvements to the performance of Bayesian RNNs. We incorporate local gradient
                   information into the approximate posterior to sharpen it around the current
                   batch statistics. We show how this technique is not exclusive to recurrent
                   neural networks and can be applied more widely to train Bayesian neural
                   networks. We also empirically demonstrate how Bayesian RNNs are superior to
                   traditional RNNs on a language modelling benchmark and an image captioning
                   task, as well as showing how each of these methods improve our model over a
                   variety of other schemes for training them. We also introduce a new benchmark
                   for studying uncertainty for language models so future methods can be easily
                   compared.},
  added-at      = {2018-03-22T08:35:21.000+0100},
  archiveprefix = {arXiv},
  author        = {Fortunato, Meire and Blundell, Charles and Vinyals, Oriol},
  biburl        = {https://www.bibsonomy.org/bibtex/27ce65e3c8ee5a228ea80a4316c95e4fd/jk_itwm},
  description   = {Bayesian Recurrent Neural Networks},
  eprint        = {1704.02798},
  interhash     = {4de5d5178584be4d698f8ecf207c43ea},
  intrahash     = {7ce65e3c8ee5a228ea80a4316c95e4fd},
  keywords      = {BNN to_read},
  note          = {cite arxiv:1704.02798},
  timestamp     = {2018-03-22T08:35:21.000+0100},
  title         = {{Bayesian} Recurrent Neural Networks},
  url           = {http://arxiv.org/abs/1704.02798},
  year          = {2017},
}