Machine learning (ML)-based conversational systems represent a value enabler for human-machine interaction. Simultaneously, the opacity, complexity, and humanness accompanied by such systems introduce their own issues, including trust misalignment. While trust is viewed as a prerequisite for effective system use, few studies have considered calibrating for appropriate trust, and empirically testing the relationship between trust and related behavior. Moreover, the desired implications of transparency-enhancing design cues are ambiguous. My research aims to explore the impact of system performance on trust, the dichotomy between trust and behavior, and how transparency might help attenuate the effects caused by low system performance in the specific context of decision-making tasks assisted by ML-based conversational systems.
%0 Conference Paper
%1 ls_leimeister
%A Schmitt, Anuschka
%B AAAI/ACM Conference on AI, Ethics, and Society (AIES)
%C Oxford, United Kingdom
%D 2022
%K HCI conversational_systems itegpub trust trustworthiness
%R 10.1145/3514094.3539525
%T Examining Trust in Conversational Systems: Conceptual and Empirical Findings on User Trust, Related Behavior, and System Trustworthiness
%U https://pubs.wi-kassel.de/wp-content/uploads/2022/11/JML_898.pdf
%X Machine learning (ML)-based conversational systems represent a value enabler for human-machine interaction. Simultaneously, the opacity, complexity, and humanness accompanied by such systems introduce their own issues, including trust misalignment. While trust is viewed as a prerequisite for effective system use, few studies have considered calibrating for appropriate trust, and empirically testing the relationship between trust and related behavior. Moreover, the desired implications of transparency-enhancing design cues are ambiguous. My research aims to explore the impact of system performance on trust, the dichotomy between trust and behavior, and how transparency might help attenuate the effects caused by low system performance in the specific context of decision-making tasks assisted by ML-based conversational systems.
%@ 978-1-4503-9247-1
@inproceedings{ls_leimeister,
  abstract   = {Machine learning (ML)-based conversational systems represent a value enabler for human-machine interaction. Simultaneously, the opacity, complexity, and humanness accompanied by such systems introduce their own issues, including trust misalignment. While trust is viewed as a prerequisite for effective system use, few studies have considered calibrating for appropriate trust, and empirically testing the relationship between trust and related behavior. Moreover, the desired implications of transparency-enhancing design cues are ambiguous. My research aims to explore the impact of system performance on trust, the dichotomy between trust and behavior, and how transparency might help attenuate the effects caused by low system performance in the specific context of decision-making tasks assisted by ML-based conversational systems.},
  added-at   = {2022-11-24T15:44:52.000+0100},
  address    = {Oxford, United Kingdom},
  author     = {Schmitt, Anuschka},
  biburl     = {https://www.bibsonomy.org/bibtex/275a89417704587f681c86b6b6ea72344/ls_leimeister},
  booktitle  = {{AAAI}/{ACM} Conference on {AI}, Ethics, and Society ({AIES})},
  doi        = {10.1145/3514094.3539525},
  eventdate  = {2022-08-01/2022-08-03},
  eventtitle = {{AAAI}/{ACM} Conference on {AI}, Ethics, and Society ({AIES})},
  interhash  = {975b23fc187df78fce79a285c4d0e1ba},
  intrahash  = {75a89417704587f681c86b6b6ea72344},
  isbn       = {978-1-4503-9247-1},
  keywords   = {HCI conversational_systems itegpub trust trustworthiness},
  language   = {English},
  publisher  = {Association for Computing Machinery},
  timestamp  = {2022-11-24T16:26:38.000+0100},
  title      = {Examining Trust in Conversational Systems: Conceptual and Empirical Findings on User Trust, Related Behavior, and System Trustworthiness},
  url        = {https://pubs.wi-kassel.de/wp-content/uploads/2022/11/JML_898.pdf},
  venue      = {Oxford, United Kingdom},
  year       = {2022},
}