This paper introduces WaveNet, a deep neural network for generating raw audio
waveforms. The model is fully probabilistic and autoregressive, with the
predictive distribution for each audio sample conditioned on all previous ones;
nonetheless we show that it can be efficiently trained on data with tens of
thousands of samples per second of audio. When applied to text-to-speech, it
yields state-of-the-art performance, with human listeners rating it as
significantly more natural sounding than the best parametric and concatenative
systems for both English and Mandarin. A single WaveNet can capture the
characteristics of many different speakers with equal fidelity, and can switch
between them by conditioning on the speaker identity. When trained to model
music, we find that it generates novel and often highly realistic musical
fragments. We also show that it can be employed as a discriminative model,
returning promising results for phoneme recognition.
Description
[1609.03499] WaveNet: A Generative Model for Raw Audio
%0 Generic
%1 oord2016wavenet
%A van den Oord, Aaron
%A Dieleman, Sander
%A Zen, Heiga
%A Simonyan, Karen
%A Vinyals, Oriol
%A Graves, Alex
%A Kalchbrenner, Nal
%A Senior, Andrew
%A Kavukcuoglu, Koray
%D 2016
%K cnn deeplearning deepmind wavenet
%T WaveNet: A Generative Model for Raw Audio
%U http://arxiv.org/abs/1609.03499
%X This paper introduces WaveNet, a deep neural network for generating raw audio
waveforms. The model is fully probabilistic and autoregressive, with the
predictive distribution for each audio sample conditioned on all previous ones;
nonetheless we show that it can be efficiently trained on data with tens of
thousands of samples per second of audio. When applied to text-to-speech, it
yields state-of-the-art performance, with human listeners rating it as
significantly more natural sounding than the best parametric and concatenative
systems for both English and Mandarin. A single WaveNet can capture the
characteristics of many different speakers with equal fidelity, and can switch
between them by conditioning on the speaker identity. When trained to model
music, we find that it generates novel and often highly realistic musical
fragments. We also show that it can be employed as a discriminative model,
returning promising results for phoneme recognition.
@misc{oord2016wavenet,
  abstract      = {This paper introduces WaveNet, a deep neural network for generating raw audio
waveforms. The model is fully probabilistic and autoregressive, with the
predictive distribution for each audio sample conditioned on all previous ones;
nonetheless we show that it can be efficiently trained on data with tens of
thousands of samples per second of audio. When applied to text-to-speech, it
yields state-of-the-art performance, with human listeners rating it as
significantly more natural sounding than the best parametric and concatenative
systems for both English and Mandarin. A single WaveNet can capture the
characteristics of many different speakers with equal fidelity, and can switch
between them by conditioning on the speaker identity. When trained to model
music, we find that it generates novel and often highly realistic musical
fragments. We also show that it can be employed as a discriminative model,
returning promising results for phoneme recognition.},
  added-at      = {2019-04-11T19:39:40.000+0200},
  archiveprefix = {arXiv},
  author        = {van den Oord, Aaron and Dieleman, Sander and Zen, Heiga and Simonyan, Karen and Vinyals, Oriol and Graves, Alex and Kalchbrenner, Nal and Senior, Andrew and Kavukcuoglu, Koray},
  biburl        = {https://www.bibsonomy.org/bibtex/27383210cc60af486d2385cd730d4a620/vsathish},
  description   = {[1609.03499] WaveNet: A Generative Model for Raw Audio},
  eprint        = {1609.03499},
  interhash     = {b9e02f1ffc1411c7752c89f39a13ef9f},
  intrahash     = {7383210cc60af486d2385cd730d4a620},
  keywords      = {cnn deeplearning deepmind wavenet},
  note          = {cite arxiv:1609.03499},
  timestamp     = {2019-04-11T19:39:40.000+0200},
  title         = {{WaveNet}: A Generative Model for Raw Audio},
  url           = {http://arxiv.org/abs/1609.03499},
  year          = {2016},
}