We introduce a weakly supervised approach for inferring the property of
abstractness of words and expressions in the complete absence of labeled data.
Exploiting only minimal linguistic clues and the contextual usage of a concept
as manifested in textual data, we train sufficiently powerful classifiers,
obtaining high correlation with human labels. The results imply the
applicability of this approach to additional properties of concepts, additional
languages, and resource-scarce scenarios.
Description
[1809.01285] Learning Concept Abstractness Using Weak Supervision
%0 Generic
%1 rabinovich2018learning
%A Rabinovich, Ella
%A Sznajder, Benjamin
%A Spector, Artem
%A Shnayderman, Ilya
%A Aharonov, Ranit
%A Konopnicki, David
%A Slonim, Noam
%D 2018
%K bert_performance dataset instituteclustering supervision weak
%T Learning Concept Abstractness Using Weak Supervision
%U http://arxiv.org/abs/1809.01285
%X We introduce a weakly supervised approach for inferring the property of
abstractness of words and expressions in the complete absence of labeled data.
Exploiting only minimal linguistic clues and the contextual usage of a concept
as manifested in textual data, we train sufficiently powerful classifiers,
obtaining high correlation with human labels. The results imply the
applicability of this approach to additional properties of concepts, additional
languages, and resource-scarce scenarios.
@misc{rabinovich2018learning,
  abstract = {We introduce a weakly supervised approach for inferring the property of
abstractness of words and expressions in the complete absence of labeled data.
Exploiting only minimal linguistic clues and the contextual usage of a concept
as manifested in textual data, we train sufficiently powerful classifiers,
obtaining high correlation with human labels. The results imply the
applicability of this approach to additional properties of concepts, additional
languages, and resource-scarce scenarios.},
  added-at = {2021-01-20T02:03:46.000+0100},
  archiveprefix = {arXiv},
  author = {Rabinovich, Ella and Sznajder, Benjamin and Spector, Artem and Shnayderman, Ilya and Aharonov, Ranit and Konopnicki, David and Slonim, Noam},
  biburl = {https://www.bibsonomy.org/bibtex/21693f211cef32a458344f898834e8545/parismic},
  description = {[1809.01285] Learning Concept Abstractness Using Weak Supervision},
  eprint = {1809.01285},
  interhash = {9c22600a2ace4de5f54b0900f3ac0b08},
  intrahash = {1693f211cef32a458344f898834e8545},
  keywords = {bert_performance dataset instituteclustering supervision weak},
  note = {6 pages, EMNLP 2018},
  timestamp = {2021-01-20T02:03:46.000+0100},
  title = {Learning Concept Abstractness Using Weak Supervision},
  url = {http://arxiv.org/abs/1809.01285},
  year = {2018}
}