Recent advances in Convolutional Neural Networks (CNN) have achieved
remarkable results in localizing objects in images. In these networks, the
training procedure usually requires providing bounding boxes or the maximum
number of expected objects. In this paper, we address the task of estimating
object locations without annotated bounding boxes, which are typically
hand-drawn and time consuming to label. We propose a loss function that can be
used in any Fully Convolutional Network (FCN) to estimate object locations.
This loss function is a modification of the Average Hausdorff Distance between
two unordered sets of points. The proposed method does not require one to
"guess" the maximum number of objects in the image, and has no notion of
bounding boxes, region proposals, or sliding windows. We evaluate our method
with three datasets designed to locate people's heads, pupil centers and plant
centers. We report an average precision and recall of 94\% for the three
datasets, and an average location error of 6 pixels in 256x256 images.
%0 Generic
%1 citeulike:14617221
%A Ribera, Javier
%A Güera, David
%A Chen, Yuhao
%A Delp, Edward J.
%D 2018
%K counting grounding keypoints loss
%T Weighted Hausdorff Distance: A Loss Function For Object Localization
%U http://arxiv.org/abs/1806.07564
%X Recent advances in Convolutional Neural Networks (CNN) have achieved
remarkable results in localizing objects in images. In these networks, the
training procedure usually requires providing bounding boxes or the maximum
number of expected objects. In this paper, we address the task of estimating
object locations without annotated bounding boxes, which are typically
hand-drawn and time consuming to label. We propose a loss function that can be
used in any Fully Convolutional Network (FCN) to estimate object locations.
This loss function is a modification of the Average Hausdorff Distance between
two unordered sets of points. The proposed method does not require one to
"guess" the maximum number of objects in the image, and has no notion of
bounding boxes, region proposals, or sliding windows. We evaluate our method
with three datasets designed to locate people's heads, pupil centers and plant
centers. We report an average precision and recall of 94\% for the three
datasets, and an average location error of 6 pixels in 256x256 images.
@misc{citeulike:14617221,
  abstract             = {Recent advances in Convolutional Neural Networks (CNN) have achieved
                          remarkable results in localizing objects in images. In these networks, the
                          training procedure usually requires providing bounding boxes or the maximum
                          number of expected objects. In this paper, we address the task of estimating
                          object locations without annotated bounding boxes, which are typically
                          hand-drawn and time consuming to label. We propose a loss function that can be
                          used in any Fully Convolutional Network (FCN) to estimate object locations.
                          This loss function is a modification of the Average Hausdorff Distance between
                          two unordered sets of points. The proposed method does not require one to
                          ``guess'' the maximum number of objects in the image, and has no notion of
                          bounding boxes, region proposals, or sliding windows. We evaluate our method
                          with three datasets designed to locate people's heads, pupil centers and plant
                          centers. We report an average precision and recall of 94\% for the three
                          datasets, and an average location error of 6 pixels in 256x256 images.},
  added-at             = {2019-02-27T22:23:29.000+0100},
  archiveprefix        = {arXiv},
  author               = {Ribera, Javier and G{\"u}era, David and Chen, Yuhao and Delp, Edward J.},
  biburl               = {https://www.bibsonomy.org/bibtex/2bc35ba1b9cce37a2f6f511dce017f6f5/nmatsuk},
  citeulike-article-id = {14617221},
  citeulike-linkout-0  = {http://arxiv.org/abs/1806.07564},
  citeulike-linkout-1  = {http://arxiv.org/pdf/1806.07564},
  day                  = 20,
  eprint               = {1806.07564},
  interhash            = {613945b1d500b326de5159d302fa4c75},
  intrahash            = {bc35ba1b9cce37a2f6f511dce017f6f5},
  internal-note        = {Authors filled in from the arXiv record (entry previously had placeholder "xxx"); verify against arXiv:1806.07564.},
  keywords             = {counting grounding keypoints loss},
  month                = jun,
  posted-at            = {2018-07-23 08:54:31},
  primaryclass         = {cs.CV},
  priority             = {4},
  timestamp            = {2019-02-27T22:23:29.000+0100},
  title                = {Weighted {Hausdorff} Distance: A Loss Function For Object Localization},
  url                  = {http://arxiv.org/abs/1806.07564},
  year                 = 2018
}