Transformers have been successfully applied to sequential, auto-regressive
tasks despite being feedforward networks. Unlike recurrent neural networks,
Transformers use attention to capture temporal relations while processing input
tokens in parallel. While this parallelization makes them computationally
efficient, it restricts the model from fully exploiting the sequential nature
of the input. The representation at a given layer can only access
representations from lower layers, rather than the higher level representations
already available. In this work, we propose the Feedback Transformer
architecture that exposes all previous representations to all future
representations, meaning the lowest representation of the current timestep is
formed from the highest-level abstract representation of the past. We
demonstrate on a variety of benchmarks in language modeling, machine
translation, and reinforcement learning that the increased representation
capacity can create small, shallow models with much stronger performance than
comparable Transformers.
Description
Addressing Some Limitations of Transformers with Feedback Memory
%0 Generic
%1 fan2020addressing
%A Fan, Angela
%A Lavril, Thibaut
%A Grave, Edouard
%A Joulin, Armand
%A Sukhbaatar, Sainbayar
%D 2020
%K rnn transformers
%T Addressing Some Limitations of Transformers with Feedback Memory
%U http://arxiv.org/abs/2002.09402
%X Transformers have been successfully applied to sequential, auto-regressive
tasks despite being feedforward networks. Unlike recurrent neural networks,
Transformers use attention to capture temporal relations while processing input
tokens in parallel. While this parallelization makes them computationally
efficient, it restricts the model from fully exploiting the sequential nature
of the input. The representation at a given layer can only access
representations from lower layers, rather than the higher level representations
already available. In this work, we propose the Feedback Transformer
architecture that exposes all previous representations to all future
representations, meaning the lowest representation of the current timestep is
formed from the highest-level abstract representation of the past. We
demonstrate on a variety of benchmarks in language modeling, machine
translation, and reinforcement learning that the increased representation
capacity can create small, shallow models with much stronger performance than
comparable Transformers.
@misc{fan2020addressing,
  abstract      = {Transformers have been successfully applied to sequential, auto-regressive
tasks despite being feedforward networks. Unlike recurrent neural networks,
Transformers use attention to capture temporal relations while processing input
tokens in parallel. While this parallelization makes them computationally
efficient, it restricts the model from fully exploiting the sequential nature
of the input. The representation at a given layer can only access
representations from lower layers, rather than the higher level representations
already available. In this work, we propose the Feedback Transformer
architecture that exposes all previous representations to all future
representations, meaning the lowest representation of the current timestep is
formed from the highest-level abstract representation of the past. We
demonstrate on a variety of benchmarks in language modeling, machine
translation, and reinforcement learning that the increased representation
capacity can create small, shallow models with much stronger performance than
comparable Transformers.},
  added-at      = {2023-07-07T17:59:56.000+0200},
  author        = {Fan, Angela and Lavril, Thibaut and Grave, Edouard and Joulin, Armand and Sukhbaatar, Sainbayar},
  biburl        = {https://www.bibsonomy.org/bibtex/2edbf79553b96a7915085611d01122276/wanderinglogic},
  description   = {Addressing Some Limitations of Transformers with Feedback Memory},
  archiveprefix = {arXiv},
  eprint        = {2002.09402},
  interhash     = {227ddca987d4df51ad40a4b2699d17f6},
  intrahash     = {edbf79553b96a7915085611d01122276},
  keywords      = {rnn transformers},
  timestamp     = {2023-07-07T17:59:56.000+0200},
  title         = {Addressing Some Limitations of {Transformers} with {Feedback Memory}},
  url           = {http://arxiv.org/abs/2002.09402},
  year          = {2020},
}