J. Barron. (2017). arXiv:1701.03077. Comment: CVPR 2019.
Abstract
We present a generalization of the Cauchy/Lorentzian, Geman-McClure,
Welsch/Leclerc, generalized Charbonnier, Charbonnier/pseudo-Huber/L1-L2, and L2
loss functions. By introducing robustness as a continuous parameter, our loss
function allows algorithms built around robust loss minimization to be
generalized, which improves performance on basic vision tasks such as
registration and clustering. Interpreting our loss as the negative log of a
univariate density yields a general probability distribution that includes
normal and Cauchy distributions as special cases. This probabilistic
interpretation enables the training of neural networks in which the robustness
of the loss automatically adapts itself during training, which improves
performance on learning-based tasks such as generative image synthesis and
unsupervised monocular depth estimation, without requiring any manual parameter
tuning.
Description
[1701.03077] A General and Adaptive Robust Loss Function
%0 Journal Article
%1 barron2017general
%A Barron, Jonathan T.
%D 2017
%K objectives readings robustness
%T A General and Adaptive Robust Loss Function
%U http://arxiv.org/abs/1701.03077
%X We present a generalization of the Cauchy/Lorentzian, Geman-McClure,
Welsch/Leclerc, generalized Charbonnier, Charbonnier/pseudo-Huber/L1-L2, and L2
loss functions. By introducing robustness as a continuous parameter, our loss
function allows algorithms built around robust loss minimization to be
generalized, which improves performance on basic vision tasks such as
registration and clustering. Interpreting our loss as the negative log of a
univariate density yields a general probability distribution that includes
normal and Cauchy distributions as special cases. This probabilistic
interpretation enables the training of neural networks in which the robustness
of the loss automatically adapts itself during training, which improves
performance on learning-based tasks such as generative image synthesis and
unsupervised monocular depth estimation, without requiring any manual parameter
tuning.
@article{barron2017general,
  abstract      = {We present a generalization of the Cauchy/Lorentzian, Geman-McClure,
Welsch/Leclerc, generalized Charbonnier, Charbonnier/pseudo-Huber/L1-L2, and L2
loss functions. By introducing robustness as a continuous parameter, our loss
function allows algorithms built around robust loss minimization to be
generalized, which improves performance on basic vision tasks such as
registration and clustering. Interpreting our loss as the negative log of a
univariate density yields a general probability distribution that includes
normal and Cauchy distributions as special cases. This probabilistic
interpretation enables the training of neural networks in which the robustness
of the loss automatically adapts itself during training, which improves
performance on learning-based tasks such as generative image synthesis and
unsupervised monocular depth estimation, without requiring any manual parameter
tuning.},
  added-at      = {2020-01-03T23:01:30.000+0100},
  archiveprefix = {arXiv},
  author        = {Barron, Jonathan T.},
  biburl        = {https://www.bibsonomy.org/bibtex/2b06d36bff21783192160f288ce73dbeb/kirk86},
  description   = {[1701.03077] A General and Adaptive Robust Loss Function},
  eprint        = {1701.03077},
  interhash     = {c51f251ca0eee34f7e09677d0766fa0a},
  intrahash     = {b06d36bff21783192160f288ce73dbeb},
  keywords      = {objectives readings robustness},
  note          = {Comment: CVPR 2019},
  timestamp     = {2020-01-03T23:01:30.000+0100},
  title         = {A General and Adaptive Robust Loss Function},
  url           = {http://arxiv.org/abs/1701.03077},
  year          = {2017}
}