Large-scale image retrieval on the Web relies on the availability of short snippets of text associated with the image. This user-generated content is a primary source of information about the content and context of an image. While traditional information retrieval models focus on finding the most relevant document without consideration for diversity, image search requires results that are both diverse and relevant. This is problematic for images because they are represented very sparsely by text, and as with all user-generated content the text for a given image can be extremely noisy.
%0 Conference Paper
%1 zwol_2008_diversifying
%A van Zwol, Roelof
%A Murdock, Vanessa
%A Pueyo, Lluis G.
%A Ramirez, Georgina
%B MIR '08: Proceedings of the 1st ACM international conference on Multimedia information retrieval
%C New York, NY, USA
%D 2008
%I ACM
%K master_thesis
%P 67--74
%R 10.1145/1460096.1460109
%T Diversifying image search with user generated content
%U http://dx.doi.org/10.1145/1460096.1460109
%X Large-scale image retrieval on the Web relies on the availability of short snippets of text associated with the image. This user-generated content is a primary source of information about the content and context of an image. While traditional information retrieval models focus on finding the most relevant document without consideration for diversity, image search requires results that are both diverse and relevant. This is problematic for images because they are represented very sparsely by text, and as with all user-generated content the text for a given image can be extremely noisy.
%ISBN 978-1-60558-312-9
@inproceedings{zwol_2008_diversifying,
  author       = {van Zwol, Roelof and Murdock, Vanessa and Pueyo, Lluis G. and Ramirez, Georgina},
  title        = {Diversifying Image Search with User Generated Content},
  booktitle    = {MIR '08: Proceedings of the 1st {ACM} International Conference on Multimedia Information Retrieval},
  year         = {2008},
  pages        = {67--74},
  publisher    = {ACM},
  address      = {New York, NY, USA},
  location     = {Vancouver, British Columbia, Canada},
  doi          = {10.1145/1460096.1460109},
  url          = {https://doi.org/10.1145/1460096.1460109},
  isbn         = {978-1-60558-312-9},
  abstract     = {Large-scale image retrieval on the Web relies on the availability of short snippets of text associated with the image. This user-generated content is a primary source of information about the content and context of an image. While traditional information retrieval models focus on finding the most relevant document without consideration for diversity, image search requires results that are both diverse and relevant. This is problematic for images because they are represented very sparsely by text, and as with all user-generated content the text for a given image can be extremely noisy.},
  keywords     = {master_thesis},
  biburl       = {https://www.bibsonomy.org/bibtex/2947c2ab5d0e88aa40f978a6bf38eb422/michi},
  interhash    = {b94c28543580a7c8f2d5df0ea6c89b2e},
  intrahash    = {947c2ab5d0e88aa40f978a6bf38eb422},
  added-at     = {2010-03-03T19:42:15.000+0100},
  timestamp    = {2010-03-03T19:42:15.000+0100},
  internal-note = {Cleaned from BibSonomy/CiteULike auto-export: removed dead citeulike-* linkouts, posted-at, priority; DOI resolver updated from dx.doi.org to doi.org.}
}