In this paper, we report on Qaviar, an experimental automated evaluation system for question answering applications. The goal of our research was to find an automatically calculated measure that correlates well with human judges' assessment of answer correctness in the context of question answering tasks. Qaviar judges a response by computing recall against the stemmed content words in the human-generated answer key. It counts an answer correct if its recall exceeds a given threshold. We determined that the answer correctness predicted by Qaviar agreed with the human judges 93% to 95% of the time. Forty-one question-answering systems were ranked by both Qaviar and human assessors, and these rankings correlated with a Kendall's Tau measure of 0.920, compared to a correlation of 0.956 between human assessors on the same data.
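The judging procedure the abstract describes is simple enough to sketch in a few lines. The following is a minimal illustration only, assuming a Porter stemmer (via NLTK), a small placeholder stopword list, and a placeholder threshold of 0.5; the paper's actual tokenization, stopword list, and threshold are not specified here.

    # Sketch of Qaviar-style judging: recall over stemmed content words.
    import re
    from nltk.stem import PorterStemmer

    # Illustrative stopword list only; not the one used in the paper.
    STOPWORDS = {"the", "a", "an", "of", "in", "on", "to", "is", "was", "and"}
    stemmer = PorterStemmer()

    def content_stems(text):
        """Lowercase, tokenize, drop stopwords, and stem what remains."""
        tokens = re.findall(r"[a-z0-9]+", text.lower())
        return {stemmer.stem(t) for t in tokens if t not in STOPWORDS}

    def judge(response, answer_key, threshold=0.5):
        """Count the response correct if its recall of the answer key's
        stemmed content words meets the threshold (0.5 is a placeholder)."""
        key = content_stems(answer_key)
        if not key:
            return False
        recall = len(key & content_stems(response)) / len(key)
        return recall >= threshold

    # Example: the response covers 2 of the key's 4 content stems
    # ("armstrong", "moon"), so recall is 0.5 and it passes.
    print(judge("Neil Armstrong walked on the moon in 1969",
                "Armstrong was the first man on the moon"))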
@inproceedings{Breck:2000,
abstract = {In this paper, we report on Qaviar, an experimental automated evaluation system for question answering applications. The goal of our research was to find an automatically calculated measure that correlates well with human judges' assessment of answer correctness in the context of question answering tasks. Qaviar judges a response by computing recall against the stemmed content words in the human-generated answer key. It counts an answer correct if its recall exceeds a given threshold. We determined that the answer correctness predicted by Qaviar agreed with the human judges 93\% to 95\% of the time. Forty-one question-answering systems were ranked by both Qaviar and human assessors, and these rankings correlated with a Kendall's Tau measure of 0.920, compared to a correlation of 0.956 between human assessors on the same data.},
author = {Breck, Eric J. and Burger, John D. and Ferro, Lisa and Hirschman, Lynette and House, David and Light, Marc and Mani, Inderjeet},
booktitle = {Proc. LREC 2000},
keywords = {evaluation question_answering},
title = {How to Evaluate Your Question Answering System Every Day \ldots and Still Get Real Work Done},
url = {http://arXiv.org/abs/cs/0004008},
year = 2000
}