C. Blundell, J. Cornebise, K. Kavukcuoglu, and D. Wierstra. Proceedings of the 32nd International Conference on Machine Learning, volume 37 of Proceedings of Machine Learning Research, pages 1613--1622. Lille, France, PMLR, (07--09 Jul 2015)
Abstract
We introduce a new, efficient, principled and backpropagation-compatible algorithm for learning a probability distribution on the weights of a neural network, called Bayes by Backprop. It regularises the weights by minimising a compression cost, known as the variational free energy or the expected lower bound on the marginal likelihood. We show that this principled kind of regularisation yields comparable performance to dropout on MNIST classification. We then demonstrate how the learnt uncertainty in the weights can be used to improve generalisation in non-linear regression problems, and how this weight uncertainty can be used to drive the exploration-exploitation trade-off in reinforcement learning.
%0 Conference Paper
%1 pmlr-v37-blundell15
%A Blundell, Charles
%A Cornebise, Julien
%A Kavukcuoglu, Koray
%A Wierstra, Daan
%B Proceedings of the 32nd International Conference on Machine Learning
%C Lille, France
%D 2015
%E Bach, Francis
%E Blei, David
%I PMLR
%K bayesian deep-learning uncertainty
%P 1613--1622
%T Weight Uncertainty in Neural Network
%U http://proceedings.mlr.press/v37/blundell15.html
%V 37
%X We introduce a new, efficient, principled and backpropagation-compatible algorithm for learning a probability distribution on the weights of a neural network, called Bayes by Backprop. It regularises the weights by minimising a compression cost, known as the variational free energy or the expected lower bound on the marginal likelihood. We show that this principled kind of regularisation yields comparable performance to dropout on MNIST classification. We then demonstrate how the learnt uncertainty in the weights can be used to improve generalisation in non-linear regression problems, and how this weight uncertainty can be used to drive the exploration-exploitation trade-off in reinforcement learning.
@inproceedings{pmlr-v37-blundell15,
  abstract    = {We introduce a new, efficient, principled and backpropagation-compatible algorithm for learning a probability distribution on the weights of a neural network, called Bayes by Backprop. It regularises the weights by minimising a compression cost, known as the variational free energy or the expected lower bound on the marginal likelihood. We show that this principled kind of regularisation yields comparable performance to dropout on MNIST classification. We then demonstrate how the learnt uncertainty in the weights can be used to improve generalisation in non-linear regression problems, and how this weight uncertainty can be used to drive the exploration-exploitation trade-off in reinforcement learning.},
  added-at    = {2019-12-03T01:38:00.000+0100},
  address     = {Lille, France},
  author      = {Blundell, Charles and Cornebise, Julien and Kavukcuoglu, Koray and Wierstra, Daan},
  biburl      = {https://www.bibsonomy.org/bibtex/2c1d15ed614e3a68ed2e7de98783ae43a/kirk86},
  booktitle   = {Proceedings of the 32nd International Conference on Machine Learning},
  description = {Weight Uncertainty in Neural Network},
  editor      = {Bach, Francis and Blei, David},
  interhash   = {97992ed4107d2398e559f6ed9ca33eff},
  intrahash   = {c1d15ed614e3a68ed2e7de98783ae43a},
  keywords    = {bayesian deep-learning uncertainty},
  month       = jul,
  pages       = {1613--1622},
  pdf         = {http://proceedings.mlr.press/v37/blundell15.pdf},
  publisher   = {PMLR},
  series      = {Proceedings of Machine Learning Research},
  timestamp   = {2019-12-03T01:38:00.000+0100},
  title       = {Weight Uncertainty in Neural Network},
  url         = {http://proceedings.mlr.press/v37/blundell15.html},
  volume      = {37},
  year        = {2015}
}