Segmenting audio into homogeneous sections such as music and speech helps us
understand the content of audio. It is useful as a pre-processing step to
index, store, and modify audio recordings, radio broadcasts and TV programmes.
Deep learning models for segmentation are generally trained on copyrighted
material, which cannot be shared. Annotating these datasets is time-consuming
and expensive and therefore, it significantly slows down research progress. In
this study, we present a novel procedure that artificially synthesises data
that resembles radio signals. We replicate the workflow of a radio DJ in mixing
audio and investigate parameters like fade curves and audio ducking. We trained
a Convolutional Recurrent Neural Network (CRNN) on this synthesised data and
outperformed state-of-the-art algorithms for music-speech detection. This paper
demonstrates the data synthesis procedure as a highly effective technique to
generate large datasets to train deep neural networks for audio segmentation.
Description
[2102.09959] Artificially Synthesising Data for Audio Classification and Segmentation to Improve Speech and Music Detection in Radio Broadcast
%0 Generic
%1 venkatesh2021artificially
%A Venkatesh, Satvik
%A Moffat, David
%A Kirke, Alexis
%A Shakeri, Gözel
%A Brewster, Stephen
%A Fachner, Jörg
%A Odell-Miller, Helen
%A Street, Alex
%A Farina, Nicolas
%A Banerjee, Sube
%A Miranda, Eduardo Reck
%D 2021
%K audio mir plk segmentation
%T Artificially Synthesising Data for Audio Classification and Segmentation
to Improve Speech and Music Detection in Radio Broadcast
%U http://arxiv.org/abs/2102.09959
%X Segmenting audio into homogeneous sections such as music and speech helps us
understand the content of audio. It is useful as a pre-processing step to
index, store, and modify audio recordings, radio broadcasts and TV programmes.
Deep learning models for segmentation are generally trained on copyrighted
material, which cannot be shared. Annotating these datasets is time-consuming
and expensive and therefore, it significantly slows down research progress. In
this study, we present a novel procedure that artificially synthesises data
that resembles radio signals. We replicate the workflow of a radio DJ in mixing
audio and investigate parameters like fade curves and audio ducking. We trained
a Convolutional Recurrent Neural Network (CRNN) on this synthesised data and
outperformed state-of-the-art algorithms for music-speech detection. This paper
demonstrates the data synthesis procedure as a highly effective technique to
generate large datasets to train deep neural networks for audio segmentation.
@misc{venkatesh2021artificially,
  abstract     = {Segmenting audio into homogeneous sections such as music and speech helps us
understand the content of audio. It is useful as a pre-processing step to
index, store, and modify audio recordings, radio broadcasts and TV programmes.
Deep learning models for segmentation are generally trained on copyrighted
material, which cannot be shared. Annotating these datasets is time-consuming
and expensive and therefore, it significantly slows down research progress. In
this study, we present a novel procedure that artificially synthesises data
that resembles radio signals. We replicate the workflow of a radio DJ in mixing
audio and investigate parameters like fade curves and audio ducking. We trained
a Convolutional Recurrent Neural Network (CRNN) on this synthesised data and
outperformed state-of-the-art algorithms for music-speech detection. This paper
demonstrates the data synthesis procedure as a highly effective technique to
generate large datasets to train deep neural networks for audio segmentation.},
  added-at     = {2022-02-15T12:20:45.000+0100},
  author       = {Venkatesh, Satvik and Moffat, David and Kirke, Alexis and Shakeri, G{\"o}zel and Brewster, Stephen and Fachner, J{\"o}rg and Odell-Miller, Helen and Street, Alex and Farina, Nicolas and Banerjee, Sube and Miranda, Eduardo Reck},
  biburl       = {https://www.bibsonomy.org/bibtex/26ce8fd0970ce7e183fe0eae3d54d9664/simonha94},
  description  = {[2102.09959] Artificially Synthesising Data for Audio Classification and Segmentation to Improve Speech and Music Detection in Radio Broadcast},
  doi          = {10.48550/arXiv.2102.09959},
  eprint       = {2102.09959},
  eprinttype   = {arXiv},
  eprintclass  = {eess.AS},
  interhash    = {5ecbc2c8aa27bdeb15a41a9a93fbbf57},
  intrahash    = {6ce8fd0970ce7e183fe0eae3d54d9664},
  keywords     = {audio mir plk segmentation},
  note         = {5 pages, 3 figures. Accepted to ICASSP 2021},
  timestamp    = {2022-02-15T12:20:45.000+0100},
  title        = {Artificially Synthesising Data for Audio Classification and Segmentation to Improve Speech and Music Detection in Radio Broadcast},
  url          = {http://arxiv.org/abs/2102.09959},
  year         = {2021}
}