The recently introduced continuous Skip-gram model is an efficient method for learning high-quality distributed vector representations that capture a large number of precise syntactic and semantic word relationships. In this paper we present several extensions that improve both the quality of the vectors and the training speed. By subsampling of the frequent words we obtain significant speedup and also learn more regular word representations. We also describe a simple alternative to the hierarchical softmax called negative sampling.
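The two extensions named in the abstract have compact forms in the paper: a training word w is discarded with probability 1 - sqrt(t / f(w)), where f(w) is its corpus frequency and t is a small threshold (around 1e-5), and negative sampling replaces the full softmax with the objective log sigma(v'_O . v_I) + sum over k noise words of log sigma(-v'_i . v_I), the noise words being drawn from the unigram distribution raised to the 3/4 power. The NumPy sketch below only illustrates those two formulas; the function names, dimensionality, and k are chosen here for clarity and are not taken from the authors' code.

import numpy as np

def subsample_keep_prob(word_freq, t=1e-5):
    # Paper's subsampling rule: discard w with probability 1 - sqrt(t / f(w)),
    # so the probability of keeping it is sqrt(t / f(w)), capped at 1.
    return min(1.0, float(np.sqrt(t / word_freq)))

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def negative_sampling_objective(v_in, v_out, v_neg):
    # Objective for one (input word, context word) pair:
    #   log sigma(v_out . v_in) + sum_i log sigma(-v_neg_i . v_in)
    # v_in:  (d,) input vector of the center word
    # v_out: (d,) output vector of the observed context word
    # v_neg: (k, d) output vectors of k sampled noise words
    positive = np.log(sigmoid(v_out @ v_in))
    negatives = np.sum(np.log(sigmoid(-(v_neg @ v_in))))
    return positive + negatives

# Toy usage with illustrative sizes (d = 50, k = 5) and random vectors.
rng = np.random.default_rng(0)
d, k = 50, 5
v_in = 0.1 * rng.normal(size=d)
v_out = 0.1 * rng.normal(size=d)
v_neg = 0.1 * rng.normal(size=(k, d))
print(subsample_keep_prob(1e-3))   # frequent word (f = 1e-3): kept ~10% of the time
print(negative_sampling_objective(v_in, v_out, v_neg))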
%0 Conference Paper
%1 mikolov_distributed_2013
%A Mikolov, Tomas
%A Sutskever, Ilya
%A Chen, Kai
%A Corrado, Greg S
%A Dean, Jeff
%B Advances in neural information processing systems
%D 2013
%K Embedding_Algorithm Skip-Gram Word_Embeddings
%P 9
%T Distributed Representations of Words and Phrases and their Compositionality
%X The recently introduced continuous Skip-gram model is an efficient method for learning high-quality distributed vector representations that capture a large number of precise syntactic and semantic word relationships. In this paper we present several extensions that improve both the quality of the vectors and the training speed. By subsampling of the frequent words we obtain significant speedup and also learn more regular word representations. We also describe a simple alternative to the hierarchical softmax called negative sampling.
@inproceedings{mikolov_distributed_2013,
abstract = {The recently introduced continuous Skip-gram model is an efficient method for learning high-quality distributed vector representations that capture a large number of precise syntactic and semantic word relationships. In this paper we present several extensions that improve both the quality of the vectors and the training speed. By subsampling of the frequent words we obtain significant speedup and also learn more regular word representations. We also describe a simple alternative to the hierarchical softmax called negative sampling.},
author = {Mikolov, Tomas and Sutskever, Ilya and Chen, Kai and Corrado, Greg S and Dean, Jeff},
biburl = {https://www.bibsonomy.org/bibtex/2f7c9e6309e7ae230abab897e3046c573/tschumacher},
booktitle = {Advances in neural information processing systems},
keywords = {Embedding_Algorithm Skip-Gram Word_Embeddings},
language = {en},
pages = 9,
title = {Distributed {Representations} of {Words} and {Phrases} and their {Compositionality}},
year = 2013
}