Evaluating interactive question answering (QA) systems with real users can be challenging because traditional evaluation measures based on the relevance of items returned are difficult to employ since relevance judgments can be unstable in multi-user evaluations. The work reported in this paper evaluates, in distinguishing among a set of interactive QA systems, the effectiveness of three questionnaires: a Cognitive Workload Questionnaire (NASA TLX), and Task and System Questionnaires customized to a specific interactive QA application. These Questionnaires were evaluated with four systems, seven analysts, and eight scenarios during a 2-week workshop. Overall, results demonstrate that all three Questionnaires are effective at distinguishing among systems, with the Task Questionnaire being the most sensitive. Results also provide initial support for the validity and reliability of the Questionnaires.
%0 Journal Article
%1 KellyKantorEtAl09nle
%A Kelly, Diane
%A Kantor, Paul B.
%A Morse, Emile L.
%A Scholtz, Jean
%A Sun, Ying
%D 2009
%J Natural Language Engineering
%K v1205 paper ai language processing interaction user interface answer test
%N 1
%P 119-141
%R 10.1017/S1351324908004932
%T Questionnaires for Eliciting Evaluation Data from Users of Interactive Question Answering Systems
%V 15
%X Evaluating interactive question answering (QA) systems with real users can be challenging because traditional evaluation measures based on the relevance of items returned are difficult to employ since relevance judgments can be unstable in multi-user evaluations. The work reported in this paper evaluates, in distinguishing among a set of interactive QA systems, the effectiveness of three questionnaires: a Cognitive Workload Questionnaire (NASA TLX), and Task and System Questionnaires customized to a specific interactive QA application. These Questionnaires were evaluated with four systems, seven analysts, and eight scenarios during a 2-week workshop. Overall, results demonstrate that all three Questionnaires are effective at distinguishing among systems, with the Task Questionnaire being the most sensitive. Results also provide initial support for the validity and reliability of the Questionnaires.
@article{KellyKantorEtAl09nle,
  abstract  = {Evaluating interactive question answering (QA) systems with real users can be challenging because traditional evaluation measures based on the relevance of items returned are difficult to employ since relevance judgments can be unstable in multi-user evaluations. The work reported in this paper evaluates, in distinguishing among a set of interactive QA systems, the effectiveness of three questionnaires: a Cognitive Workload Questionnaire (NASA TLX), and Task and System Questionnaires customized to a specific interactive QA application. These Questionnaires were evaluated with four systems, seven analysts, and eight scenarios during a 2-week workshop. Overall, results demonstrate that all three Questionnaires are effective at distinguishing among systems, with the Task Questionnaire being the most sensitive. Results also provide initial support for the validity and reliability of the Questionnaires.},
  added-at  = {2012-05-30T10:48:57.000+0200},
  author    = {Kelly, Diane and Kantor, Paul B. and Morse, Emile L. and Scholtz, Jean and Sun, Ying},
  biburl    = {https://www.bibsonomy.org/bibtex/252333ff9de4631c0daec9b980bd48718/flint63},
  doi       = {10.1017/S1351324908004932},
  file      = {Cambridge University Press Site:2009/KellyKantorEtAl09nle.pdf:PDF},
  groups    = {public},
  interhash = {67d890a3e5eadb431555ef9d2806eb71},
  intrahash = {52333ff9de4631c0daec9b980bd48718},
  issn      = {1351-3249},
  journal   = {Natural Language Engineering},
  keywords  = {v1205 paper ai language processing interaction user interface answer test},
  number    = 1,
  pages     = {119--141},
  timestamp = {2018-04-16T11:45:37.000+0200},
  title     = {Questionnaires for Eliciting Evaluation Data from Users of Interactive Question Answering Systems},
  username  = {flint63},
  volume    = 15,
  year      = 2009
}