The word2vec software of Tomas Mikolov and colleagues
(https://code.google.com/p/word2vec/) has gained a lot of traction lately, and
provides state-of-the-art word embeddings. The learning models behind the
software are described in two research papers. We found the description of the
models in these papers to be somewhat cryptic and hard to follow. While the
motivations and presentation may be obvious to the neural-networks
language-modeling crowd, we had to struggle quite a bit to figure out the
rationale behind the equations.
This note is an attempt to explain equation (4) (negative sampling) in
"Distributed Representations of Words and Phrases and their Compositionality"
by Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado and Jeffrey Dean.
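For readers without the paper at hand, the equation in question (our transcription of equation (4) from Mikolov et al., 2013, in their notation, where $v_{w_I}$ is the vector of the input word, $v'_{w}$ the "output" vector of a word $w$, $k$ the number of negative samples, and $P_n(w)$ the noise distribution) is:

\[
\log \sigma\!\left({v'_{w_O}}^{\top} v_{w_I}\right)
  \;+\; \sum_{i=1}^{k} \mathbb{E}_{w_i \sim P_n(w)}
  \left[ \log \sigma\!\left(-{v'_{w_i}}^{\top} v_{w_I}\right) \right]
\]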
@misc{goldberg2014word2vec,
author = {Goldberg, Yoav and Levy, Omer},
keywords = {negative sampling word2vec},
note = {arXiv:1402.3722},
title = {word2vec Explained: deriving Mikolov et al.'s negative-sampling
word-embedding method},
url = {http://arxiv.org/abs/1402.3722},
year = 2014
}