We describe a new class of learning models called memory networks. Memory
networks reason with inference components combined with a long-term memory
component; they learn how to use these jointly. The long-term memory can be
read and written to, with the goal of using it for prediction. We investigate
these models in the context of question answering (QA) where the long-term
memory effectively acts as a (dynamic) knowledge base, and the output is a
textual response. We evaluate them on a large-scale QA task, and a smaller, but
more complex, toy task generated from a simulated world. In the latter, we show
the reasoning power of such models by chaining multiple supporting sentences to
answer questions that require understanding the intension of verbs.
%0 Journal Article
%1 weston2014memory
%A Weston, Jason
%A Chopra, Sumit
%A Bordes, Antoine
%D 2014
%K deep-learning memory
%T Memory Networks
%U http://arxiv.org/abs/1410.3916
%X We describe a new class of learning models called memory networks. Memory
networks reason with inference components combined with a long-term memory
component; they learn how to use these jointly. The long-term memory can be
read and written to, with the goal of using it for prediction. We investigate
these models in the context of question answering (QA) where the long-term
memory effectively acts as a (dynamic) knowledge base, and the output is a
textual response. We evaluate them on a large-scale QA task, and a smaller, but
more complex, toy task generated from a simulated world. In the latter, we show
the reasoning power of such models by chaining multiple supporting sentences to
answer questions that require understanding the intension of verbs.
% arXiv preprint (no journal of record), so @misc + eprint fields rather than
% @article with a missing required `journal` field. Citation key and all
% original field values are unchanged; BibSonomy housekeeping fields
% (biburl/interhash/intrahash/added-at/timestamp) are kept for provenance
% and are silently ignored by BibTeX styles.
@misc{weston2014memory,
  abstract      = {We describe a new class of learning models called memory networks. Memory
networks reason with inference components combined with a long-term memory
component; they learn how to use these jointly. The long-term memory can be
read and written to, with the goal of using it for prediction. We investigate
these models in the context of question answering (QA) where the long-term
memory effectively acts as a (dynamic) knowledge base, and the output is a
textual response. We evaluate them on a large-scale QA task, and a smaller, but
more complex, toy task generated from a simulated world. In the latter, we show
the reasoning power of such models by chaining multiple supporting sentences to
answer questions that require understanding the intension of verbs.},
  added-at      = {2019-03-21T13:37:52.000+0100},
  author        = {Weston, Jason and Chopra, Sumit and Bordes, Antoine},
  biburl        = {https://www.bibsonomy.org/bibtex/2f3d51ba55acd2f7b50c18830c1f1fbe6/kirk86},
  description   = {[1410.3916] Memory Networks},
  eprint        = {1410.3916},
  archiveprefix = {arXiv},
  interhash     = {accbef7c6a665a063f7d6dc8f187de88},
  intrahash     = {f3d51ba55acd2f7b50c18830c1f1fbe6},
  keywords      = {deep-learning memory},
  note          = {cite arxiv:1410.3916},
  timestamp     = {2019-03-21T13:37:52.000+0100},
  title         = {Memory Networks},
  url           = {http://arxiv.org/abs/1410.3916},
  year          = {2014}
}