We propose an extension to neural network language models that adapts their
predictions to the recent history. Our model is a simplified version of
memory-augmented networks: it stores past hidden activations as memory and
accesses them through a dot product with the current hidden activation. This
mechanism is very efficient and scales to very large memory sizes. We also draw
a link between the use of external memory in neural networks and the cache
models used with count-based language models. We demonstrate on several
language modeling datasets that our approach performs significantly better than
recent memory-augmented networks.
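
The abstract describes the cache as a set of stored (hidden state, next word)
pairs, scored against the current hidden state by a dot product and mixed with
the model's own prediction. The plain-NumPy sketch below illustrates that
mechanism under stated assumptions: the softmax-style normalization with a
temperature theta and the linear interpolation weight lam follow the general
description in the paper, while the function names and the toy usage at the end
are illustrative, not the authors' implementation.

    # Minimal sketch of a continuous-cache language model component.
    # Assumption: past hidden states and the word that followed each of them
    # have already been collected while running the base language model.
    import numpy as np

    def cache_distribution(current_h, past_hs, past_next_words, vocab_size, theta=1.0):
        """Probability over the vocabulary induced by the cache.

        current_h:       hidden state at the current time step, shape (d,)
        past_hs:         stored hidden states h_1..h_{t-1}, shape (t-1, d)
        past_next_words: word id that followed each stored state, shape (t-1,)
        """
        # Dot product between the current hidden state and every stored activation.
        scores = past_hs @ current_h                       # shape (t-1,)
        # Softmax-style weights; subtracting the max only stabilizes the exponent.
        weights = np.exp(theta * (scores - scores.max()))
        weights /= weights.sum()
        # Each stored state votes for the word that followed it in the history.
        p_cache = np.zeros(vocab_size)
        np.add.at(p_cache, past_next_words, weights)
        return p_cache

    def interpolate(p_model, p_cache, lam=0.2):
        """Linear interpolation of the base model and cache distributions."""
        return (1.0 - lam) * p_model + lam * p_cache

    # Toy usage with random data (dimensions chosen arbitrarily for illustration).
    rng = np.random.default_rng(0)
    d, t, V = 8, 5, 20
    h_t = rng.standard_normal(d)
    memory = rng.standard_normal((t, d))
    next_words = rng.integers(0, V, size=t)
    p_model = np.full(V, 1.0 / V)
    p = interpolate(p_model, cache_distribution(h_t, memory, next_words, V))
    assert np.isclose(p.sum(), 1.0)

Because the cache only requires dot products against stored activations and a
scatter-add over the vocabulary, its cost grows linearly with the memory size,
which is consistent with the abstract's claim that the mechanism scales to very
large memories.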
Description
Improving Neural Language Models with a Continuous Cache
@misc{grave2016improving,
added-at = {2017-01-26T11:12:17.000+0100},
author = {Grave, Edouard and Joulin, Armand and Usunier, Nicolas},
biburl = {https://www.bibsonomy.org/bibtex/2f07862c11df184420bdd7333ea2f6aea/dallmann},
description = {Improving Neural Language Models with a Continuous Cache},
interhash = {eff93c7b98edbd5bde69d4b23b036431},
intrahash = {f07862c11df184420bdd7333ea2f6aea},
keywords = {deep_learning model recurrent_neural_network},
note = {cite arxiv:1612.04426. Comment: Submitted to ICLR 2017},
timestamp = {2017-01-26T11:12:17.000+0100},
title = {Improving Neural Language Models with a Continuous Cache},
url = {http://arxiv.org/abs/1612.04426},
year = 2016
}