Continuous word representations, trained on large unlabeled corpora, are
useful for many natural language processing tasks. Popular models that learn
such representations ignore the morphology of words by assigning a distinct
vector to each word. This is a limitation, especially for languages with large
vocabularies and many rare words. In this paper, we propose a new approach
based on the skipgram model, where each word is represented as a bag of
character $n$-grams. A vector representation is associated with each character
$n$-gram, and words are represented as the sum of these representations. Our
method is fast, which allows models to be trained on large corpora quickly,
and it lets us compute word representations for words that did not appear in
the training data. We evaluate our word representations on nine different
languages, on both word similarity and analogy tasks. Comparing against
recently proposed morphological word representations, we show that our vectors
achieve state-of-the-art performance on these tasks.
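The subword scheme the abstract describes is straightforward to sketch. The snippet below is a minimal illustration, not the authors' reference implementation: following the paper's convention, each word is padded with the boundary markers `<` and `>`, its character $n$-grams are extracted (the paper uses lengths 3 to 6 and also keeps the whole padded word as one extra sequence), and the word vector is the sum of one vector per $n$-gram. The vector table here is randomly initialized purely for demonstration; in fastText these vectors are learned with the skipgram objective.

```python
import numpy as np

def char_ngrams(word, n_min=3, n_max=6):
    """Character n-grams of a word, padded with the boundary markers
    '<' and '>', plus the padded word itself (the paper's convention)."""
    padded = f"<{word}>"
    grams = {padded[i:i + n]
             for n in range(n_min, n_max + 1)
             for i in range(len(padded) - n + 1)}
    grams.add(padded)  # keep the whole word as one extra sequence
    return grams

# Hypothetical n-gram vector table: fastText would learn these with the
# skipgram objective; here they are random, for illustration only.
dim = 5
rng = np.random.default_rng(0)
table = {}

def ngram_vector(gram):
    if gram not in table:
        table[gram] = rng.standard_normal(dim)
    return table[gram]

def word_vector(word):
    """A word vector is the sum of its n-gram vectors, so even a word
    unseen during training still gets a representation."""
    return sum(ngram_vector(g) for g in char_ngrams(word))

print(sorted(char_ngrams("where", 3, 3)))
# ['<wh', '<where>', 'ere', 'her', 're>', 'whe']
print(word_vector("unseenword").shape)  # (5,)
```

Because vectors attach to character $n$-grams rather than to whole words, `word_vector` produces output for out-of-vocabulary words as well, which is the property the abstract highlights for languages with many rare words.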
@misc{bojanowski2016enriching,
author = {Bojanowski, Piotr and Grave, Edouard and Joulin, Armand and Mikolov, Tomas},
keywords = {fasttext mlnlp word2vec wordembeddings},
note = {arXiv:1607.04606. Accepted to TACL; the first two authors contributed equally},
title = {Enriching Word Vectors with Subword Information},
url = {http://arxiv.org/abs/1607.04606},
year = 2016
}