Previous works (Donahue et al., 2018; Engel et al., 2019) have found
that generating coherent raw audio waveforms with GANs is challenging. In this
paper, we show that it is possible to train GANs reliably to generate high
quality coherent waveforms by introducing a set of architectural changes and
simple training techniques. Subjective evaluation metric (Mean Opinion Score,
or MOS) shows the effectiveness of the proposed approach for high quality
mel-spectrogram inversion. To establish the generality of the proposed
techniques, we show qualitative results of our model in speech synthesis, music
domain translation and unconditional music synthesis. We evaluate the various
components of the model through ablation studies and suggest a set of
guidelines to design general purpose discriminators and generators for
conditional sequence synthesis tasks. Our model is non-autoregressive, fully
convolutional, with significantly fewer parameters than competing models and
generalizes to unseen speakers for mel-spectrogram inversion. Our pytorch
implementation runs at more than 100x faster than realtime on GTX 1080Ti GPU
and more than 2x faster than real-time on CPU, without any hardware specific
optimization tricks. Blog post with samples and accompanying code coming soon.
%0 Generic
%1 kumar2019melgan
%A Kumar, Kundan
%A Kumar, Rithesh
%A de Boissiere, Thibault
%A Gestin, Lucas
%A Teoh, Wei Zhen
%A Sotelo, Jose
%A de Brebisson, Alexandre
%A Bengio, Yoshua
%A Courville, Aaron
%D 2019
%K GAN
%T MelGAN: Generative Adversarial Networks for Conditional Waveform
Synthesis
%U http://arxiv.org/abs/1910.06711
%X Previous works (Donahue et al., 2018; Engel et al., 2019) have found
that generating coherent raw audio waveforms with GANs is challenging. In this
paper, we show that it is possible to train GANs reliably to generate high
quality coherent waveforms by introducing a set of architectural changes and
simple training techniques. Subjective evaluation metric (Mean Opinion Score,
or MOS) shows the effectiveness of the proposed approach for high quality
mel-spectrogram inversion. To establish the generality of the proposed
techniques, we show qualitative results of our model in speech synthesis, music
domain translation and unconditional music synthesis. We evaluate the various
components of the model through ablation studies and suggest a set of
guidelines to design general purpose discriminators and generators for
conditional sequence synthesis tasks. Our model is non-autoregressive, fully
convolutional, with significantly fewer parameters than competing models and
generalizes to unseen speakers for mel-spectrogram inversion. Our pytorch
implementation runs at more than 100x faster than realtime on GTX 1080Ti GPU
and more than 2x faster than real-time on CPU, without any hardware specific
optimization tricks. Blog post with samples and accompanying code coming soon.
@misc{kumar2019melgan,
  abstract      = {Previous works \citep{donahue2018adversarial, engel2019gansynth} have found
that generating coherent raw audio waveforms with GANs is challenging. In this
paper, we show that it is possible to train GANs reliably to generate high
quality coherent waveforms by introducing a set of architectural changes and
simple training techniques. Subjective evaluation metric (Mean Opinion Score,
or MOS) shows the effectiveness of the proposed approach for high quality
mel-spectrogram inversion. To establish the generality of the proposed
techniques, we show qualitative results of our model in speech synthesis, music
domain translation and unconditional music synthesis. We evaluate the various
components of the model through ablation studies and suggest a set of
guidelines to design general purpose discriminators and generators for
conditional sequence synthesis tasks. Our model is non-autoregressive, fully
convolutional, with significantly fewer parameters than competing models and
generalizes to unseen speakers for mel-spectrogram inversion. Our pytorch
implementation runs at more than 100x faster than realtime on GTX 1080Ti GPU
and more than 2x faster than real-time on CPU, without any hardware specific
optimization tricks. Blog post with samples and accompanying code coming soon.},
  added-at      = {2019-10-22T11:23:53.000+0200},
  archiveprefix = {arXiv},
  author        = {Kumar, Kundan and Kumar, Rithesh and de Boissiere, Thibault and Gestin, Lucas and Teoh, Wei Zhen and Sotelo, Jose and de Brebisson, Alexandre and Bengio, Yoshua and Courville, Aaron},
  biburl        = {https://www.bibsonomy.org/bibtex/250da23023fd5b84d4972cc4cf8c02c63/topel},
  eprint        = {1910.06711},
  interhash     = {eb65abe230965da29604cb3c996b4028},
  intrahash     = {50da23023fd5b84d4972cc4cf8c02c63},
  keywords      = {GAN},
  note          = {cite arxiv:1910.06711},
  timestamp     = {2019-10-22T11:23:53.000+0200},
  title         = {{MelGAN}: Generative Adversarial Networks for Conditional Waveform
Synthesis},
  url           = {http://arxiv.org/abs/1910.06711},
  year          = 2019
}