A range of methods for measuring the effectiveness of information retrieval systems has been proposed. These are typically intended to provide a quantitative single-value summary of a document ranking relative to a query. However, many of these measures have failings. For example, recall is not well founded as a measure of satisfaction, since the user of an actual system cannot judge recall. Average precision is derived from recall, and suffers from the same problem. In addition, average precision lacks key stability properties that are needed for robust experiments. In this article, we introduce a new effectiveness metric, rank-biased precision, that avoids these problems. Rank-biased precision is derived from a simple model of user behavior, is robust if answer rankings are extended to greater depths, and allows accurate quantification of experimental uncertainty, even when only partial relevance judgments are available.
Description
Rank-biased precision for measurement of retrieval effectiveness
%0 Journal Article
%1 Moffat:2008:RPM:1416950.1416952
%A Moffat, Alistair
%A Zobel, Justin
%C New York, NY, USA
%D 2008
%I ACM
%J ACM Trans. Inf. Syst.
%K informationretrieval recall relevance
%P 2:1--2:27
%R 10.1145/1416950.1416952
%T Rank-biased precision for measurement of retrieval effectiveness
%U http://doi.acm.org/10.1145/1416950.1416952
%V 27
%X A range of methods for measuring the effectiveness of information retrieval systems has been proposed. These are typically intended to provide a quantitative single-value summary of a document ranking relative to a query. However, many of these measures have failings. For example, recall is not well founded as a measure of satisfaction, since the user of an actual system cannot judge recall. Average precision is derived from recall, and suffers from the same problem. In addition, average precision lacks key stability properties that are needed for robust experiments. In this article, we introduce a new effectiveness metric, rank-biased precision, that avoids these problems. Rank-biased precision is derived from a simple model of user behavior, is robust if answer rankings are extended to greater depths, and allows accurate quantification of experimental uncertainty, even when only partial relevance judgments are available.
@article{Moffat:2008:RPM:1416950.1416952,
  abstract    = {A range of methods for measuring the effectiveness of information retrieval systems has been proposed. These are typically intended to provide a quantitative single-value summary of a document ranking relative to a query. However, many of these measures have failings. For example, recall is not well founded as a measure of satisfaction, since the user of an actual system cannot judge recall. Average precision is derived from recall, and suffers from the same problem. In addition, average precision lacks key stability properties that are needed for robust experiments. In this article, we introduce a new effectiveness metric, \emph{rank-biased precision}, that avoids these problems. Rank-biased precision is derived from a simple model of user behavior, is robust if answer rankings are extended to greater depths, and allows accurate quantification of experimental uncertainty, even when only partial relevance judgments are available.},
  acmid       = {1416952},
  added-at    = {2011-05-09T16:24:19.000+0200},
  address     = {New York, NY, USA},
  articleno   = {2},
  author      = {Moffat, Alistair and Zobel, Justin},
  biburl      = {https://www.bibsonomy.org/bibtex/20f6b721b9faaded81d96cda2cda86001/cdevries},
  description = {Rank-biased precision for measurement of retrieval effectiveness},
  doi         = {10.1145/1416950.1416952},
  interhash   = {d0a16a11876da59cf1274f9d2a9bd877},
  intrahash   = {0f6b721b9faaded81d96cda2cda86001},
  issn        = {1046-8188},
  journal     = {ACM Trans. Inf. Syst.},
  keywords    = {informationretrieval recall relevance},
  month       = dec,
  number      = {1},
  numpages    = {27},
  pages       = {2:1--2:27},
  publisher   = {ACM},
  timestamp   = {2011-05-09T16:24:19.000+0200},
  title       = {Rank-biased precision for measurement of retrieval effectiveness},
  url         = {https://doi.org/10.1145/1416950.1416952},
  volume      = {27},
  year        = {2008},
}