This paper introduces a structured memory which can be easily integrated into
a neural network. The memory is very large by design and therefore
significantly increases the capacity of the architecture, by up to a billion
parameters with a negligible computational overhead. Its design and access
pattern are based on product keys, which enable fast and exact nearest-neighbor
search. The ability to increase the number of parameters while keeping the same
computational budget lets the overall system strike a better trade-off between
prediction accuracy and computation efficiency both at training and test time.
This memory layer allows us to tackle very large-scale language modeling tasks.
In our experiments we consider a dataset with up to 30 billion words, and we
plug our memory layer into a state-of-the-art transformer-based architecture. In
particular, we found that a memory-augmented model with only 12 layers
outperforms a baseline transformer model with 24 layers, while being twice as
fast at inference time. We release our code for reproducibility purposes.
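The fast and exact nearest-neighbor search the abstract refers to comes from splitting the query into two halves, scoring each half against a small set of sub-keys, and recovering the exact top-k over the full Cartesian product of keys from the top-k candidates on each side. The following is a minimal PyTorch sketch of that lookup under assumed shapes; the names (product_key_lookup, sub_keys1, sub_keys2, values) are illustrative and are not the authors' released API.

# Minimal sketch of a product-key memory lookup; assumed names and shapes,
# not the authors' released implementation.
import torch

def product_key_lookup(query, sub_keys1, sub_keys2, values, topk=4):
    """Return a sparse weighted sum of memory values selected for `query`.

    query:     (d,)      -- split into two halves of size d/2
    sub_keys1: (n, d/2)  -- first set of sub-keys
    sub_keys2: (n, d/2)  -- second set of sub-keys
    values:    (n*n, dv) -- one value slot per (i, j) product key
    Only 2*n sub-keys are scored, yet all n*n keys are searched exactly.
    """
    d = query.shape[0]
    q1, q2 = query[: d // 2], query[d // 2:]

    # Score each half of the query against its own sub-key set.
    scores1 = sub_keys1 @ q1                      # (n,)
    scores2 = sub_keys2 @ q2                      # (n,)

    # Keep the top-k sub-keys on each side.
    s1, i1 = scores1.topk(topk)
    s2, i2 = scores2.topk(topk)

    # A product key (i, j) scores s1[i] + s2[j], so the exact overall top-k
    # must lie inside this topk x topk grid of candidates.
    cand_scores = s1[:, None] + s2[None, :]                       # (topk, topk)
    cand_index = i1[:, None] * sub_keys2.shape[0] + i2[None, :]

    best_scores, pos = cand_scores.flatten().topk(topk)
    best_index = cand_index.flatten()[pos]        # indices into `values`

    # Sparse read: softmax-weighted sum of the selected memory slots.
    weights = torch.softmax(best_scores, dim=0)
    return weights @ values[best_index]

# Usage: 128 x 128 = 16,384 memory slots addressed through only 256 sub-keys.
n, d, dv = 128, 32, 64
out = product_key_lookup(
    torch.randn(d),
    torch.randn(n, d // 2),
    torch.randn(n, d // 2),
    torch.randn(n * n, dv),
)
print(out.shape)  # torch.Size([64])

Scoring 2*n sub-keys instead of n*n full keys is what keeps the lookup cost negligible even when the value table holds hundreds of millions of parameters, which is how the capacity can grow while the computational budget stays fixed.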
Description
[1907.05242] Large Memory Layers with Product Keys
%0 Journal Article
%1 lample2019large
%A Lample, Guillaume
%A Sablayrolles, Alexandre
%A Ranzato, Marc'Aurelio
%A Denoyer, Ludovic
%A Jégou, Hervé
%D 2019
%K complexity computation deep-learning
%T Large Memory Layers with Product Keys
%U http://arxiv.org/abs/1907.05242
%X This paper introduces a structured memory which can be easily integrated into
a neural network. The memory is very large by design and therefore
significantly increases the capacity of the architecture, by up to a billion
parameters with a negligible computational overhead. Its design and access
pattern is based on product keys, which enable fast and exact nearest neighbor
search. The ability to increase the number of parameters while keeping the same
computational budget lets the overall system strike a better trade-off between
prediction accuracy and computation efficiency both at training and test time.
This memory layer allows us to tackle very large scale language modeling tasks.
In our experiments we consider a dataset with up to 30 billion words, and we
plug our memory layer in a state-of-the-art transformer-based architecture. In
particular, we found that a memory augmented model with only 12 layers
outperforms a baseline transformer model with 24 layers, while being twice
faster at inference time. We release our code for reproducibility purposes.
@article{lample2019large,
abstract = {This paper introduces a structured memory which can be easily integrated into
a neural network. The memory is very large by design and therefore
significantly increases the capacity of the architecture, by up to a billion
parameters with a negligible computational overhead. Its design and access
pattern is based on product keys, which enable fast and exact nearest neighbor
search. The ability to increase the number of parameters while keeping the same
computational budget lets the overall system strike a better trade-off between
prediction accuracy and computation efficiency both at training and test time.
This memory layer allows us to tackle very large scale language modeling tasks.
In our experiments we consider a dataset with up to 30 billion words, and we
plug our memory layer in a state-of-the-art transformer-based architecture. In
particular, we found that a memory augmented model with only 12 layers
outperforms a baseline transformer model with 24 layers, while being twice
faster at inference time. We release our code for reproducibility purposes.},
added-at = {2019-09-04T16:08:28.000+0200},
author = {Lample, Guillaume and Sablayrolles, Alexandre and Ranzato, Marc'Aurelio and Denoyer, Ludovic and Jégou, Hervé},
biburl = {https://www.bibsonomy.org/bibtex/2e71a49917d2584da2a21814aaa8ff88a/kirk86},
description = {[1907.05242] Large Memory Layers with Product Keys},
interhash = {79a01f3b9cabea168cbbb92d37089d22},
intrahash = {e71a49917d2584da2a21814aaa8ff88a},
keywords = {complexity computation deep-learning},
note = {cite arxiv:1907.05242},
timestamp = {2019-09-04T16:08:28.000+0200},
title = {Large Memory Layers with Product Keys},
url = {http://arxiv.org/abs/1907.05242},
year = 2019
}