With the widespread success of deep neural networks in science and
technology, it is becoming increasingly important to quantify the uncertainty
of the predictions produced by deep learning. In this paper, we introduce a new
method that attaches an explicit uncertainty statement to the probabilities of
classification using deep neural networks. Precisely, we view that the
classification probabilities are sampled from an unknown distribution, and we
propose to learn this distribution through the Dirichlet mixture that is
flexible enough for approximating any continuous distribution on the simplex.
We then construct credible intervals from the learned distribution to assess
the uncertainty of the classification probabilities. Our approach is easy to
implement, computationally efficient, and can be coupled with any deep neural
network architecture. Our method leverages the crucial observation that, in
many classification applications such as medical diagnosis, more than one class
label is available for each observational unit. We demonstrate the usefulness
of our approach through simulations and a real data example.
%0 Journal Article
%1 wu2019quantifying
%A Wu, Qingyang
%A Li, He
%A Su, Weijie
%A Li, Lexin
%A Yu, Zhou
%D 2019
%K robustness uncertainty
%T Quantifying Intrinsic Uncertainty in Classification via Deep Dirichlet
Mixture Networks
%U http://arxiv.org/abs/1906.04450
%X With the widespread success of deep neural networks in science and
technology, it is becoming increasingly important to quantify the uncertainty
of the predictions produced by deep learning. In this paper, we introduce a new
method that attaches an explicit uncertainty statement to the probabilities of
classification using deep neural networks. Precisely, we view that the
classification probabilities are sampled from an unknown distribution, and we
propose to learn this distribution through the Dirichlet mixture that is
flexible enough for approximating any continuous distribution on the simplex.
We then construct credible intervals from the learned distribution to assess
the uncertainty of the classification probabilities. Our approach is easy to
implement, computationally efficient, and can be coupled with any deep neural
network architecture. Our method leverages the crucial observation that, in
many classification applications such as medical diagnosis, more than one class
label is available for each observational unit. We demonstrate the usefulness
of our approach through simulations and a real data example.
This is an arXiv preprint (no journal of record), so the entry uses @misc with
the standard eprint fields instead of @article, which would warn about a
missing required "journal" field. BibSonomy bookkeeping fields (biburl,
interhash, intrahash, added-at, timestamp) are preserved unchanged.
@misc{wu2019quantifying,
  abstract      = {With the widespread success of deep neural networks in science and
technology, it is becoming increasingly important to quantify the uncertainty
of the predictions produced by deep learning. In this paper, we introduce a new
method that attaches an explicit uncertainty statement to the probabilities of
classification using deep neural networks. Precisely, we view that the
classification probabilities are sampled from an unknown distribution, and we
propose to learn this distribution through the Dirichlet mixture that is
flexible enough for approximating any continuous distribution on the simplex.
We then construct credible intervals from the learned distribution to assess
the uncertainty of the classification probabilities. Our approach is easy to
implement, computationally efficient, and can be coupled with any deep neural
network architecture. Our method leverages the crucial observation that, in
many classification applications such as medical diagnosis, more than one class
labels are available for each observational unit. We demonstrate the usefulness
of our approach through simulations and a real data example.},
  added-at      = {2019-06-19T15:37:35.000+0200},
  archiveprefix = {arXiv},
  author        = {Wu, Qingyang and Li, He and Su, Weijie and Li, Lexin and Yu, Zhou},
  biburl        = {https://www.bibsonomy.org/bibtex/2b6428bfb2b55857a0b290c4685cb5934/kirk86},
  eprint        = {1906.04450},
  interhash     = {14a3a8562e3690f0ecdb62e1e1e7c551},
  intrahash     = {b6428bfb2b55857a0b290c4685cb5934},
  keywords      = {robustness uncertainty},
  note          = {cite arxiv:1906.04450},
  timestamp     = {2019-06-19T15:37:35.000+0200},
  title         = {Quantifying Intrinsic Uncertainty in Classification via Deep {Dirichlet}
Mixture Networks},
  url           = {http://arxiv.org/abs/1906.04450},
  year          = {2019}
}