How can end users efficiently influence the predictions that machine learning systems make on their behalf? This paper presents Explanatory Debugging, an approach in which the system explains to users how it made each of its predictions, and the user then explains any necessary corrections back to the learning system. We present the principles underlying this approach and a prototype instantiating it. An empirical evaluation shows that Explanatory Debugging increased participants' understanding of the learning system by 52\% and allowed participants to correct its mistakes up to twice as efficiently as participants using a traditional learning system.
%0 Conference Paper
%1 citeulike:13566022
%A Kulesza, Todd
%A Burnett, Margaret
%A Wong, Weng-Keen
%A Stumpf, Simone
%B Proceedings of the 20th International Conference on Intelligent User Interfaces
%C New York, NY, USA
%D 2015
%I ACM
%K interactive-machine-learning personalization
%P 126--137
%R 10.1145/2678025.2701399
%T Principles of Explanatory Debugging to Personalize Interactive Machine Learning
%U http://dx.doi.org/10.1145/2678025.2701399
%X How can end users efficiently influence the predictions that machine learning systems make on their behalf? This paper presents Explanatory Debugging, an approach in which the system explains to users how it made each of its predictions, and the user then explains any necessary corrections back to the learning system. We present the principles underlying this approach and a prototype instantiating it. An empirical evaluation shows that Explanatory Debugging increased participants' understanding of the learning system by 52\% and allowed participants to correct its mistakes up to twice as efficiently as participants using a traditional learning system.
%@ 978-1-4503-3306-1
@inproceedings{citeulike:13566022,
  abstract             = {How can end users efficiently influence the predictions that machine learning systems make on their behalf? This paper presents Explanatory Debugging, an approach in which the system explains to users how it made each of its predictions, and the user then explains any necessary corrections back to the learning system. We present the principles underlying this approach and a prototype instantiating it. An empirical evaluation shows that Explanatory Debugging increased participants' understanding of the learning system by 52\% and allowed participants to correct its mistakes up to twice as efficiently as participants using a traditional learning system.},
  added-at             = {2018-03-19T12:24:51.000+0100},
  address              = {New York, NY, USA},
  author               = {Kulesza, Todd and Burnett, Margaret and Wong, Weng-Keen and Stumpf, Simone},
  biburl               = {https://www.bibsonomy.org/bibtex/2dc61ba82140cc311eacca8f9919a7a31/aho},
  booktitle            = {Proceedings of the 20th International Conference on Intelligent User Interfaces},
  citeulike-article-id = {13566022},
  citeulike-linkout-0  = {http://portal.acm.org/citation.cfm?id=2701399},
  citeulike-linkout-1  = {http://dx.doi.org/10.1145/2678025.2701399},
  doi                  = {10.1145/2678025.2701399},
  interhash            = {f4022cc2e75f0a7b726e570cb0f72dc1},
  intrahash            = {dc61ba82140cc311eacca8f9919a7a31},
  isbn                 = {978-1-4503-3306-1},
  keywords             = {interactive-machine-learning personalization},
  location             = {Atlanta, Georgia, USA},
  pages                = {126--137},
  posted-at            = {2015-03-30 20:38:57},
  priority             = {2},
  publisher            = {ACM},
  series               = {IUI '15},
  title                = {Principles of {Explanatory Debugging} to Personalize Interactive Machine Learning},
  url                  = {https://doi.org/10.1145/2678025.2701399},
  year                 = {2015},
}