Even though considerable attention has been given to the polarity of words (positive and
negative) and the creation of large polarity lexicons, research in emotion analysis has had to rely
on limited and small emotion lexicons. In this paper we show how the combined strength and
wisdom of the crowds can be used to generate a large, high-quality, word–emotion and word–polarity
association lexicon quickly and inexpensively. We enumerate the challenges in emotion annotation
in a crowdsourcing scenario and propose solutions to address them. Most notably, in addition to
questions about emotions associated with terms, we show how the inclusion of a word choice question
can discourage malicious data entry, help identify instances where the annotator may not be familiar
with the target term (allowing us to reject such annotations), and help obtain annotations at sense
level (rather than at word level). We conducted experiments on how to formulate the emotion-annotation questions, and show that asking if a term is associated with an emotion leads to markedly
higher inter-annotator agreement than that obtained by asking if a term evokes an emotion
%0 Journal Article
%1 Mohammad13
%A Mohammad, Saif M.
%A Turney, Peter D.
%J Computational Intelligence
%D 2013
%K annotation-bias crowdsourcing inter-rater-agreement
%N 3
%P 436--465
%T Crowdsourcing a Word-Emotion Association Lexicon
%V 29
%X Even though considerable attention has been given to the polarity of words (positive and
negative) and the creation of large polarity lexicons, research in emotion analysis has had to rely
on limited and small emotion lexicons. In this paper we show how the combined strength and
wisdom of the crowds can be used to generate a large, high-quality, word–emotion and word–polarity
association lexicon quickly and inexpensively. We enumerate the challenges in emotion annotation
in a crowdsourcing scenario and propose solutions to address them. Most notably, in addition to
questions about emotions associated with terms, we show how the inclusion of a word choice question
can discourage malicious data entry, help identify instances where the annotator may not be familiar
with the target term (allowing us to reject such annotations), and help obtain annotations at sense
level (rather than at word level). We conducted experiments on how to formulate the emotion-annotation questions, and show that asking if a term is associated with an emotion leads to markedly
higher inter-annotator agreement than that obtained by asking if a term evokes an emotion
@article{Mohammad13,
  abstract  = {Even though considerable attention has been given to the polarity of words (positive and
negative) and the creation of large polarity lexicons, research in emotion analysis has had to rely
on limited and small emotion lexicons. In this paper we show how the combined strength and
wisdom of the crowds can be used to generate a large, high-quality, word–emotion and word–polarity
association lexicon quickly and inexpensively. We enumerate the challenges in emotion annotation
in a crowdsourcing scenario and propose solutions to address them. Most notably, in addition to
questions about emotions associated with terms, we show how the inclusion of a word choice question
can discourage malicious data entry, help identify instances where the annotator may not be familiar
with the target term (allowing us to reject such annotations), and help obtain annotations at sense
level (rather than at word level). We conducted experiments on how to formulate the emotion-annotation questions, and show that asking if a term is associated with an emotion leads to markedly
higher inter-annotator agreement than that obtained by asking if a term evokes an emotion},
  added-at  = {2020-11-25T19:43:06.000+0100},
  author    = {Mohammad, Saif M. and Turney, Peter D.},
  biburl    = {https://www.bibsonomy.org/bibtex/21d9ecf4308949499413221073472e1b8/marisaripoll},
  interhash = {90e51246141645bd45e366cc36714b85},
  intrahash = {1d9ecf4308949499413221073472e1b8},
  journal   = {Computational Intelligence},
  keywords  = {annotation-bias crowdsourcing inter-rater-agreement},
  number    = {3},
  pages     = {436--465},
  timestamp = {2020-11-25T19:56:30.000+0100},
  title     = {Crowdsourcing a Word-Emotion Association Lexicon},
  volume    = {29},
  year      = {2013},
}