To optimize the workflow on commercial crowdsourcing platforms like Amazon Mechanical Turk or Microworkers, it is important to understand how users choose their tasks. Current work usually explores the underlying processes by employing user studies based on surveys with a limited set of participants. In contrast, we formulate hypotheses based on the different findings in these studies and, instead of verifying them based on user feedback, we compare them directly on data from a commercial crowdsourcing platform. For evaluation, we use a Bayesian approach called HypTrails which allows us to give a relative ranking of the corresponding hypotheses. The hypotheses considered are, for example, based on task categories, monetary incentives or semantic similarity of task descriptions. We find that, in our scenario, hypotheses based on employers as well as the task descriptions work best. Overall, we objectively compare different factors influencing users when choosing their tasks. Our approach enables crowdsourcing companies to better understand their users in order to optimize their platforms, e.g., by incorporating the gained knowledge about these factors into task recommendation systems.
%0 Conference Paper
%1 becker2015microtrails
%A Becker, Martin
%A Borchert, Kathrin
%A Hirth, Matthias
%A Mewes, Hauke
%A Hotho, Andreas
%A Tran-Gia, Phuoc
%B Proceedings of the 15th International Conference on Knowledge Technologies and Data-driven Business
%C New York, NY, USA
%D 2015
%I ACM
%K imported
%P 10:1--10:8
%R 10.1145/2809563.2809608
%T MicroTrails: Comparing Hypotheses About Task Selection on a Crowdsourcing Platform
%U http://doi.acm.org/10.1145/2809563.2809608
%X To optimize the workflow on commercial crowdsourcing platforms like Amazon Mechanical Turk or Microworkers, it is important to understand how users choose their tasks. Current work usually explores the underlying processes by employing user studies based on surveys with a limited set of participants. In contrast, we formulate hypotheses based on the different findings in these studies and, instead of verifying them based on user feedback, we compare them directly on data from a commercial crowdsourcing platform. For evaluation, we use a Bayesian approach called HypTrails which allows us to give a relative ranking of the corresponding hypotheses. The hypotheses considered are, for example, based on task categories, monetary incentives or semantic similarity of task descriptions. We find that, in our scenario, hypotheses based on employers as well as the task descriptions work best. Overall, we objectively compare different factors influencing users when choosing their tasks. Our approach enables crowdsourcing companies to better understand their users in order to optimize their platforms, e.g., by incorporating the gained knowledge about these factors into task recommendation systems.
%@ 978-1-4503-3721-2
@inproceedings{becker2015microtrails,
  abstract  = {To optimize the workflow on commercial crowdsourcing platforms like Amazon Mechanical Turk or Microworkers, it is important to understand how users choose their tasks. Current work usually explores the underlying processes by employing user studies based on surveys with a limited set of participants. In contrast, we formulate hypotheses based on the different findings in these studies and, instead of verifying them based on user feedback, we compare them directly on data from a commercial crowdsourcing platform. For evaluation, we use a Bayesian approach called HypTrails which allows us to give a relative ranking of the corresponding hypotheses. The hypotheses considered are, for example, based on task categories, monetary incentives or semantic similarity of task descriptions. We find that, in our scenario, hypotheses based on employers as well as the task descriptions work best. Overall, we objectively compare different factors influencing users when choosing their tasks. Our approach enables crowdsourcing companies to better understand their users in order to optimize their platforms, e.g., by incorporating the gained knowledge about these factors into task recommendation systems.},
  acmid     = {2809608},
  added-at  = {2016-11-28T10:15:50.000+0100},
  address   = {New York, NY, USA},
  articleno = {10},
  author    = {Becker, Martin and Borchert, Kathrin and Hirth, Matthias and Mewes, Hauke and Hotho, Andreas and Tran-Gia, Phuoc},
  biburl    = {https://www.bibsonomy.org/bibtex/2476c98451e60ba174f94e6cd39718a0d/kde-alumni},
  booktitle = {Proceedings of the 15th International Conference on Knowledge Technologies and Data-driven Business},
  doi       = {10.1145/2809563.2809608},
  interhash = {597cca8bce7e0de88f265cba672e88eb},
  intrahash = {476c98451e60ba174f94e6cd39718a0d},
  isbn      = {978-1-4503-3721-2},
  keywords  = {imported},
  location  = {Graz, Austria},
  numpages  = {8},
  pages     = {10:1--10:8},
  publisher = {ACM},
  series    = {i-KNOW '15},
  timestamp = {2016-11-28T10:15:50.000+0100},
  title     = {{MicroTrails}: Comparing Hypotheses About Task Selection on a Crowdsourcing Platform},
  url       = {https://doi.org/10.1145/2809563.2809608},
  year      = 2015
}