We introduce a neural network with a recurrent attention model over a possibly
large external memory. The architecture is a form of Memory Network [23]
but unlike the model in that work, it is trained end-to-end, and hence requires
significantly less supervision during training, making it more generally applicable
in realistic settings. It can also be seen as an extension of RNNsearch [2] to the
case where multiple computational steps (hops) are performed per output symbol.
The flexibility of the model allows us to apply it to tasks as diverse as (synthetic)
question answering [22] and language modeling. For the former our approach
is competitive with Memory Networks, but with less supervision. For the latter,
on the Penn TreeBank and Text8 datasets our approach demonstrates comparable
performance to RNNs and LSTMs. In both cases we show that the key concept
of multiple computational hops yields improved results.
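
To make the multi-hop read mechanism concrete, the following is a minimal numpy sketch of the forward pass the abstract describes: a query embedding attends over embedded memories, the attention-weighted output is added to the controller state, and the loop repeats for several hops before a final softmax. The names (memn2n_forward, A, B, C, W) follow the paper's notation, but the bag-of-words inputs, the embeddings shared across hops, and the omission of the inter-hop linear mapping are simplifying assumptions of this sketch, not the authors' released implementation.

import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

def memn2n_forward(story, question, A, B, C, W, hops=3):
    # story:    (n_sentences, vocab) bag-of-words rows, one per memory sentence
    # question: (vocab,) bag-of-words query
    # A, C:     (vocab, d) input/output memory embeddings
    # B:        (vocab, d) question embedding
    # W:        (d, vocab) final answer projection
    u = question @ B           # internal state u = Bq
    m = story @ A              # input memory vectors m_i
    c = story @ C              # output memory vectors c_i
    for _ in range(hops):      # multiple computational hops per output symbol
        p = softmax(m @ u)     # attention over memories: p_i = softmax(u . m_i)
        o = p @ c              # response vector o = sum_i p_i c_i
        u = u + o              # hop update (RNN-like tying; linear map H omitted)
    return softmax(u @ W)      # predicted answer distribution

Called with random toy data, e.g. memn2n_forward(rng.random((5, 50)), rng.random(50), A, B, C, W) for vocab size 50 and embedding dimension d, it returns a (vocab,)-shaped probability vector over candidate answer words; end-to-end training would backpropagate a cross-entropy loss through all hops, which is what removes the need for the per-hop supervision used by the original Memory Networks.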
@inproceedings{Sukhbaatar2015EndToEndMN,
author = {Sukhbaatar, Sainbayar and Szlam, Arthur and Weston, Jason and Fergus, Rob},
booktitle = {NIPS},
keywords = {deep_learning memory neural_networks},
title = {End-To-End Memory Networks},
year = 2015
}