@inproceedings{5011751bf0744c98937e181c89f2c6f0,
  title     = {Active Learning for Efficient Audio Annotation and Classification with a Large Amount of Unlabeled Data},
  abstract  = {There are many sound classification problems that have target classes which are rare or unique to the context of the problem. For these problems, existing data sets are not sufficient and we must create new problem-specific datasets to train classification models. However, annotating a new dataset for every new problem is costly. Active learning could potentially reduce this annotation cost, but it has been understudied in the context of audio annotation. In this work, we investigate active learning to reduce the annotation cost of a sound classification dataset unique to a particular problem. We evaluate three certainty-based active learning query strategies and propose a new strategy: alternating confidence sampling. Using this strategy, we demonstrate reduced annotation costs when actively training models with both experts and non-experts, and we perform a qualitative analysis on 20k unlabeled recordings to show our approach results in a model that generalizes well to unseen data.},
  keywords  = {active learning, audio annotations, machine listening, sound classification},
  author    = {Wang, Yu and {Mendez Mendez}, Ana Elisa and Cartwright, Mark and Bello, Juan Pablo},
  note      = {Publisher Copyright: {\textcopyright} 2019 IEEE.; 44th IEEE International Conference on Acoustics, Speech, and Signal Processing, ICASSP 2019 ; Conference date: 12-05-2019 Through 17-05-2019},
  year      = {2019},
  month     = may,
  doi       = {10.1109/ICASSP.2019.8683063},
  language  = {English (US)},
  series    = {ICASSP, IEEE International Conference on Acoustics, Speech and Signal Processing - Proceedings},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {880--884},
  booktitle = {2019 IEEE International Conference on Acoustics, Speech, and Signal Processing, ICASSP 2019 - Proceedings},
  address   = {United States},
}