Topic models could have a huge impact on improving the ways users find and discover content in digital libraries and search interfaces through their ability to automatically learn and apply subject tags to each and every item in a collection, and their ability to dynamically create virtual collections on the fly. However, much remains to be done to tap this potential, and empirically evaluate the true value of a given topic model to humans. In this work, we sketch out some sub-tasks that we suggest pave the way towards this goal, and present methods for assessing the coherence and interpretability of topics learned by topic models. Our large-scale user study includes over 70 human subjects evaluating and scoring almost 500 topics learned from collections from a wide range of genres and domains. We show how a scoring model -- based on pointwise mutual information of word pairs using Wikipedia, Google and MEDLINE as external data sources -- performs well at predicting human scores. This automated scoring of topics is an important first step to integrating topic modeling into digital libraries.
%0 Conference Paper
%1 NewmanD2010
%A Newman, David
%A Noh, Youn
%A Talley, Edmund
%A Karimi, Sarvnaz
%A Baldwin, Timothy
%B Proceedings of the 10th Annual Joint Conference on Digital Libraries
%C New York, NY, USA
%D 2010
%I ACM
%K LDA
%P 215--224
%R 10.1145/1816123.1816156
%T Evaluating Topic Models for Digital Libraries
%U http://doi.acm.org/10.1145/1816123.1816156
%X Topic models could have a huge impact on improving the ways users find and discover content in digital libraries and search interfaces through their ability to automatically learn and apply subject tags to each and every item in a collection, and their ability to dynamically create virtual collections on the fly. However, much remains to be done to tap this potential, and empirically evaluate the true value of a given topic model to humans. In this work, we sketch out some sub-tasks that we suggest pave the way towards this goal, and present methods for assessing the coherence and interpretability of topics learned by topic models. Our large-scale user study includes over 70 human subjects evaluating and scoring almost 500 topics learned from collections from a wide range of genres and domains. We show how a scoring model -- based on pointwise mutual information of word pairs using Wikipedia, Google and MEDLINE as external data sources -- performs well at predicting human scores. This automated scoring of topics is an important first step to integrating topic modeling into digital libraries.
%@ 978-1-4503-0085-8
@inproceedings{NewmanD2010,
  abstract    = {Topic models could have a huge impact on improving the ways users find and discover content in digital libraries and search interfaces through their ability to automatically learn and apply subject tags to each and every item in a collection, and their ability to dynamically create virtual collections on the fly. However, much remains to be done to tap this potential, and empirically evaluate the true value of a given topic model to humans. In this work, we sketch out some sub-tasks that we suggest pave the way towards this goal, and present methods for assessing the coherence and interpretability of topics learned by topic models. Our large-scale user study includes over 70 human subjects evaluating and scoring almost 500 topics learned from collections from a wide range of genres and domains. We show how a scoring model -- based on pointwise mutual information of word pairs using Wikipedia, Google and MEDLINE as external data sources -- performs well at predicting human scores. This automated scoring of topics is an important first step to integrating topic modeling into digital libraries.},
  acmid       = {1816156},
  added-at    = {2014-02-14T15:43:30.000+0100},
  address     = {New York, NY, USA},
  author      = {Newman, David and Noh, Youn and Talley, Edmund and Karimi, Sarvnaz and Baldwin, Timothy},
  biburl      = {https://www.bibsonomy.org/bibtex/2167a7f010a3c2aa5d565ed3173239892/lopusz_kdd},
  booktitle   = {Proceedings of the 10th Annual Joint Conference on Digital Libraries},
  description = {Evaluating topic models for digital libraries},
  doi         = {10.1145/1816123.1816156},
  interhash   = {c8fb21309d377df8a629857bd4c45403},
  intrahash   = {167a7f010a3c2aa5d565ed3173239892},
  isbn        = {978-1-4503-0085-8},
  keywords    = {LDA},
  location    = {Gold Coast, Queensland, Australia},
  numpages    = {10},
  pages       = {215--224},
  publisher   = {ACM},
  series      = {JCDL '10},
  timestamp   = {2014-02-14T15:48:17.000+0100},
  title       = {Evaluating Topic Models for Digital Libraries},
  url         = {http://doi.acm.org/10.1145/1816123.1816156},
  year        = {2010},
}