We explore options for using Transformer networks in a neural transducer for
end-to-end speech recognition. Transformer networks use self-attention for
sequence modeling and come with advantages in parallel computation and in
capturing context. We propose 1) using VGGNet with causal convolution to
incorporate positional information and reduce the frame rate for efficient
inference, and 2) using truncated self-attention to enable streaming for the
Transformer and reduce computational complexity. All experiments are conducted
on the public LibriSpeech corpus. The proposed Transformer-Transducer
outperforms neural transducers with LSTM/BLSTM networks and achieves word error
rates of 6.37% on the test-clean set and 15.30% on the test-other set, while
remaining streamable, compact with 45.7M parameters for the entire system, and
computationally efficient with a complexity of O(T), where T is the input
sequence length.
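As a rough sketch of the first proposal, the following PyTorch snippet shows a small VGG-style frontend in which each convolution is padded only on the left of the time axis, so no output frame depends on future input (causal), and two stride-2 convolutions reduce the frame rate by roughly 4x. The channel counts, kernel sizes, and the 4x reduction factor are illustrative assumptions rather than the paper's exact configuration.

import torch
import torch.nn as nn
import torch.nn.functional as F

class CausalConvFrontend(nn.Module):
    # VGG-style frontend: causal 3x3 convolutions with stride-2 time reduction.
    def __init__(self, in_channels=1, hidden_channels=32):
        super().__init__()
        # Convolutions also act as an implicit (relative) positional encoding,
        # one plausible reading of "incorporate positional information".
        self.conv1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=3, stride=(2, 2))
        self.conv2 = nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, stride=(2, 2))

    @staticmethod
    def _causal_pad(x):
        # x: (batch, channels, time, freq). Pad kernel_size - 1 = 2 frames on
        # the left of the time axis only, and symmetrically in frequency, so
        # every output frame is a function of current and past frames alone.
        return F.pad(x, (1, 1, 2, 0))  # (freq_left, freq_right, time_left, time_right)

    def forward(self, x):
        x = F.relu(self.conv1(self._causal_pad(x)))
        x = F.relu(self.conv2(self._causal_pad(x)))
        return x  # time axis downsampled ~4x before the Transformer layers

# Example: 100 frames of 80-dim log-mel features -> ~25 frames.
out = CausalConvFrontend()(torch.randn(8, 1, 100, 80))
print(out.shape)  # torch.Size([8, 32, 25, 20])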
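The second proposal, truncated self-attention, limits each frame's attention to a fixed window of left and right context, so the per-frame cost is constant, the total cost grows as O(T) instead of O(T^2), and the finite right context bounds the look-ahead needed for streaming. A minimal mask-based sketch follows; the window sizes are assumptions, and a real streaming implementation would compute only the windowed scores rather than masking a full T x T matrix as done here for clarity.

import torch
import torch.nn.functional as F

def truncated_self_attention(x, left=16, right=4):
    # x: (time, dim). Single head, with the input reused as query, key, and
    # value for brevity; a full layer would add learned projections.
    T, d = x.shape
    scores = (x @ x.transpose(0, 1)) / d ** 0.5        # (T, T) scaled dot products
    offset = torch.arange(T)[None, :] - torch.arange(T)[:, None]  # key pos - query pos
    outside = (offset < -left) | (offset > right)      # beyond the truncation window
    scores = scores.masked_fill(outside, float('-inf'))
    weights = F.softmax(scores, dim=-1)                # each row spans <= left+right+1 keys
    return weights @ x                                 # (time, dim) context vectors

# Example: 100 frames, each attending to at most 16 past and 4 future frames.
y = truncated_self_attention(torch.randn(100, 64))
print(y.shape)  # torch.Size([100, 64])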
Description
Transformer-Transducer: End-to-End Speech Recognition with Self-Attention
@misc{yeh2019transformertransducer,
author = {Yeh, Ching-Feng and Mahadeokar, Jay and Kalgaonkar, Kaustubh and Wang, Yongqiang and Le, Duc and Jain, Mahaveer and Schubert, Kjell and Fuegen, Christian and Seltzer, Michael L.},
keywords = {seq2seq transformers},
note = {cite arxiv:1910.12977},
title = {Transformer-Transducer: End-to-End Speech Recognition with Self-Attention},
url = {http://arxiv.org/abs/1910.12977},
year = 2019
}