Increasingly, algorithms are used to make important decisions across society. However, these algorithms are usually poorly understood, which can reduce transparency and evoke negative emotions. In this research, we seek to learn design principles for explanation interfaces that communicate how decision-making algorithms work, in order to help organizations explain their decisions to stakeholders, or to support users' "right to explanation". We conducted an online experiment where 199 participants used different explanation interfaces to understand an algorithm for making university admissions decisions. We measured users' objective and self-reported understanding of the algorithm. Our results show that both interactive explanations and "white-box" explanations (i.e. that show the inner workings of an algorithm) can improve users' comprehension. Although the interactive approach is more effective at improving comprehension, it comes with a trade-off of taking more time. Surprisingly, we also find that users' trust in algorithmic decisions is not affected by the explanation interface or their level of comprehension of the algorithm.
Description
Explaining Decision-Making Algorithms through UI | Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems
%0 Conference Paper
%1 Cheng_2019
%A Cheng, Hao-Fei
%A Wang, Ruotong
%A Zhang, Zheng
%A O'Connell, Fiona
%A Gray, Terrance
%A Harper, F. Maxwell
%A Zhu, Haiyi
%B Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems - CHI '19
%D 2019
%I ACM Press
%K XAI explanation human-centered-AI
%R 10.1145/3290605.3300789
%T Explaining Decision-Making Algorithms through UI: Strategies to Help Non-Expert Stakeholders
%U https://doi.org/10.1145/3290605.3300789
%X Increasingly, algorithms are used to make important decisions across society. However, these algorithms are usually poorly understood, which can reduce transparency and evoke negative emotions. In this research, we seek to learn design principles for explanation interfaces that communicate how decision-making algorithms work, in order to help organizations explain their decisions to stakeholders, or to support users' "right to explanation". We conducted an online experiment where 199 participants used different explanation interfaces to understand an algorithm for making university admissions decisions. We measured users' objective and self-reported understanding of the algorithm. Our results show that both interactive explanations and "white-box" explanations (i.e. that show the inner workings of an algorithm) can improve users' comprehension. Although the interactive approach is more effective at improving comprehension, it comes with a trade-off of taking more time. Surprisingly, we also find that users' trust in algorithmic decisions is not affected by the explanation interface or their level of comprehension of the algorithm.
@inproceedings{Cheng_2019,
  abstract    = {Increasingly, algorithms are used to make important decisions across society. However, these algorithms are usually poorly understood, which can reduce transparency and evoke negative emotions. In this research, we seek to learn design principles for explanation interfaces that communicate how decision-making algorithms work, in order to help organizations explain their decisions to stakeholders, or to support users' "right to explanation". We conducted an online experiment where 199 participants used different explanation interfaces to understand an algorithm for making university admissions decisions. We measured users' objective and self-reported understanding of the algorithm. Our results show that both interactive explanations and "white-box" explanations (i.e. that show the inner workings of an algorithm) can improve users' comprehension. Although the interactive approach is more effective at improving comprehension, it comes with a trade-off of taking more time. Surprisingly, we also find that users' trust in algorithmic decisions is not affected by the explanation interface or their level of comprehension of the algorithm.},
  added-at    = {2020-12-07T21:02:37.000+0100},
  author      = {Cheng, Hao-Fei and Wang, Ruotong and Zhang, Zheng and O'Connell, Fiona and Gray, Terrance and Harper, F. Maxwell and Zhu, Haiyi},
  biburl      = {https://www.bibsonomy.org/bibtex/2693e5fd54388a3a414a309d66ba272dc/brusilovsky},
  booktitle   = {Proceedings of the 2019 {CHI} Conference on Human Factors in Computing Systems - {CHI} '19},
  description = {Explaining Decision-Making Algorithms through UI | Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems},
  doi         = {10.1145/3290605.3300789},
  interhash   = {ec80700c0f1c4cf44faffe2bf2ef3ad3},
  intrahash   = {693e5fd54388a3a414a309d66ba272dc},
  keywords    = {XAI explanation human-centered-AI},
  publisher   = {{ACM} Press},
  timestamp   = {2020-12-07T21:03:30.000+0100},
  title       = {Explaining Decision-Making Algorithms through {UI}: Strategies to Help Non-Expert Stakeholders},
  year        = {2019},
}