We introduce a new, efficient, principled and backpropagation-compatible
algorithm for learning a probability distribution on the weights of a neural
network, called Bayes by Backprop. It regularises the weights by minimising a
compression cost, known as the variational free energy or the expected lower
bound on the marginal likelihood. We show that this principled kind of
regularisation yields comparable performance to dropout on MNIST
classification. We then demonstrate how the learnt uncertainty in the weights
can be used to improve generalisation in non-linear regression problems, and
how this weight uncertainty can be used to drive the exploration-exploitation
trade-off in reinforcement learning.
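
To make the procedure the abstract describes concrete, here is a minimal sketch of Bayes by Backprop for a single Bayesian linear layer, written in Python with PyTorch. It is an illustration under simplifying assumptions, not the paper's implementation: the prior here is a single Gaussian N(0, prior_sigma^2) rather than the scale-mixture prior the paper uses, and the layer sizes, learning rate, and toy data are invented for the example.

```python
import torch
import torch.nn.functional as F

class BayesianLinear(torch.nn.Module):
    """One linear layer with a factorised Gaussian posterior over its weights."""
    def __init__(self, n_in, n_out, prior_sigma=1.0):
        super().__init__()
        # Variational posterior q(w | theta) = N(mu, sigma^2), with
        # sigma = softplus(rho) so the scale stays positive during training.
        self.mu = torch.nn.Parameter(torch.zeros(n_out, n_in))
        self.rho = torch.nn.Parameter(torch.full((n_out, n_in), -3.0))
        self.prior_sigma = prior_sigma  # assumed simple Gaussian prior, not the paper's mixture

    def forward(self, x):
        sigma = F.softplus(self.rho)
        # Reparameterisation trick: w = mu + sigma * eps with eps ~ N(0, I), so
        # gradients of the free energy reach mu and rho by ordinary backprop.
        w = self.mu + sigma * torch.randn_like(sigma)
        # Closed-form KL[q(w) || p(w)] -- the "compression cost" that
        # regularises the weights.
        kl = (torch.log(self.prior_sigma / sigma)
              + (sigma ** 2 + self.mu ** 2) / (2 * self.prior_sigma ** 2)
              - 0.5).sum()
        return x @ w.t(), kl

# Toy 1-D regression: minimise the variational free energy,
# i.e. KL complexity cost plus expected negative log-likelihood.
layer = BayesianLinear(1, 1)
opt = torch.optim.Adam(layer.parameters(), lr=1e-2)
x = torch.linspace(-1, 1, 64).unsqueeze(1)
y = 2 * x + 0.1 * torch.randn_like(x)
for _ in range(1000):
    pred, kl = layer(x)
    nll = 0.5 * ((pred - y) ** 2).sum()  # Gaussian likelihood up to a constant
    loss = kl + nll                      # variational free energy
    opt.zero_grad()
    loss.backward()
    opt.step()
```

Because the weights stay random at test time, averaging predictions over several samples of w yields the predictive uncertainty that the abstract exploits for regression and for exploration in reinforcement learning.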
@article{blundell2015weight,
author = {Blundell, Charles and Cornebise, Julien and Kavukcuoglu, Koray and Wierstra, Daan},
biburl = {https://www.bibsonomy.org/bibtex/2aea5857fcf936abb5b80af67750d3e5b/kirk86},
description = {[1505.05424] Weight Uncertainty in Neural Networks},
keywords = {uncertainty},
note = {cite arxiv:1505.05424. Comment: In Proceedings of the 32nd International Conference on Machine Learning (ICML 2015)},
title = {Weight Uncertainty in Neural Networks},
url = {http://arxiv.org/abs/1505.05424},
year = 2015
}