R. Martin. Proceedings of the Eleventh International Symposium on Imprecise Probabilities: Theories and Applications, volume 103 of Proceedings of Machine Learning Research, pages 295--303. Thagaste, Ghent, Belgium, PMLR, (03--06 Jul 2019)
Abstract
Inference on parameters within a given model is familiar, as is ranking different models for the purpose of selection. Less familiar, however, is the quantification of uncertainty about the models themselves. A Bayesian approach provides a posterior distribution for the model but it comes with no validity guarantees, and, therefore, is only suited for ranking and selection. In this paper, I will present an alternative way to view this model uncertainty problem, through the lens of a valid inferential model based on random sets and non-additive beliefs. Specifically, I will show that valid uncertainty quantification about a model is attainable within this framework in general, and highlight the benefits in a classical signal detection problem.
%0 Conference Paper
%1 pmlr-v103-martin19b
%A Martin, Ryan
%B Proceedings of the Eleventh International Symposium on Imprecise Probabilities: Theories and Applications
%C Thagaste, Ghent, Belgium
%D 2019
%E De Bock, Jasper
%E de Campos, Cassio P.
%E de Cooman, Gert
%E Quaeghebeur, Erik
%E Wheeler, Gregory
%I PMLR
%K readings uncertainty
%P 295--303
%T On Valid Uncertainty Quantification About a Model
%U http://proceedings.mlr.press/v103/martin19b.html
%V 103
%X Inference on parameters within a given model is familiar, as is ranking different models for the purpose of selection. Less familiar, however, is the quantification of uncertainty about the models themselves. A Bayesian approach provides a posterior distribution for the model but it comes with no validity guarantees, and, therefore, is only suited for ranking and selection. In this paper, I will present an alternative way to view this model uncertainty problem, through the lens of a valid inferential model based on random sets and non-additive beliefs. Specifically, I will show that valid uncertainty quantification about a model is attainable within this framework in general, and highlight the benefits in a classical signal detection problem.
@inproceedings{pmlr-v103-martin19b,
  abstract    = {Inference on parameters within a given model is familiar, as is ranking different models for the purpose of selection. Less familiar, however, is the quantification of uncertainty about the models themselves. A Bayesian approach provides a posterior distribution for the model but it comes with no validity guarantees, and, therefore, is only suited for ranking and selection. In this paper, I will present an alternative way to view this model uncertainty problem, through the lens of a valid inferential model based on random sets and non-additive beliefs. Specifically, I will show that valid uncertainty quantification about a model is attainable within this framework in general, and highlight the benefits in a classical signal detection problem.},
  added-at    = {2020-01-15T19:11:24.000+0100},
  address     = {Thagaste, Ghent, Belgium},
  author      = {Martin, Ryan},
  biburl      = {https://www.bibsonomy.org/bibtex/251118964d1a14da8c4865c30f2c8e62b/kirk86},
  booktitle   = {Proceedings of the Eleventh International Symposium on Imprecise Probabilities: Theories and Applications},
  description = {On Valid Uncertainty Quantification About a Model},
  editor      = {De Bock, Jasper and {de Campos}, Cassio P. and {de Cooman}, Gert and Quaeghebeur, Erik and Wheeler, Gregory},
  eventdate   = {2019-07-03/2019-07-06},
  interhash   = {4c2178168eb28cea1b6a3c449ea833df},
  intrahash   = {51118964d1a14da8c4865c30f2c8e62b},
  keywords    = {readings uncertainty},
  month       = jul,
  pages       = {295--303},
  pdf         = {http://proceedings.mlr.press/v103/martin19b/martin19b.pdf},
  publisher   = {PMLR},
  series      = {Proceedings of Machine Learning Research},
  timestamp   = {2020-01-15T19:11:24.000+0100},
  title       = {On Valid Uncertainty Quantification About a Model},
  url         = {http://proceedings.mlr.press/v103/martin19b.html},
  volume      = {103},
  year        = {2019},
}