We propose a novel image sampling method for differentiable image
transformation in deep neural networks. The sampling schemes currently used in
deep learning, such as Spatial Transformer Networks, rely on bilinear
interpolation, which performs poorly under severe scale changes, and more
importantly, results in poor gradient propagation. This is due to their strict
reliance on direct neighbors. Instead, we propose to generate random auxiliary
samples in the vicinity of each pixel in the sampled image, and create a linear
approximation using their intensity values. We then use this approximation as a
differentiable formula for the transformed image. However, we observe that
these auxiliary samples may collapse to a single pixel under severe image
transformations, and propose to address it by adding constraints to the
distance between the center pixel and the auxiliary samples. We demonstrate
that our approach produces more representative gradients with a wider basin of
convergence for image alignment, which leads to considerable performance
improvements when training networks for image registration and classification
tasks, particularly under large downsampling.
%0 Generic
%1 citeulike:14689642
%A Jiang, Wei
%A Sun, Weiwei
%A Tagliasacchi, Andrea
%A Trulls, Eduard
%A Yi, Kwang Moo
%D 2019
%K arch attention keypoints transform
%T Linearized Multi-Sampling for Differentiable Image Transformation
%U http://arxiv.org/abs/1901.07124
%X We propose a novel image sampling method for differentiable image
transformation in deep neural networks. The sampling schemes currently used in
deep learning, such as Spatial Transformer Networks, rely on bilinear
interpolation, which performs poorly under severe scale changes, and more
importantly, results in poor gradient propagation. This is due to their strict
reliance on direct neighbors. Instead, we propose to generate random auxiliary
samples in the vicinity of each pixel in the sampled image, and create a linear
approximation using their intensity values. We then use this approximation as a
differentiable formula for the transformed image. However, we observe that
these auxiliary samples may collapse to a single pixel under severe image
transformations, and propose to address it by adding constraints to the
distance between the center pixel and the auxiliary samples. We demonstrate
that our approach produces more representative gradients with a wider basin of
convergence for image alignment, which leads to considerable performance
improvements when training networks for image registration and classification
tasks, particularly under large downsampling.
@misc{citeulike:14689642,
  author        = {Jiang, Wei and Sun, Weiwei and Tagliasacchi, Andrea and Trulls, Eduard and Yi, Kwang Moo},
  title         = {Linearized Multi-Sampling for Differentiable Image Transformation},
  year          = {2019},
  month         = jan,
  day           = 22,
  archiveprefix = {arXiv},
  eprint        = {1901.07124},
  primaryclass  = {cs.CV},
  url           = {http://arxiv.org/abs/1901.07124},
  abstract      = {We propose a novel image sampling method for differentiable image
transformation in deep neural networks. The sampling schemes currently used in
deep learning, such as Spatial Transformer Networks, rely on bilinear
interpolation, which performs poorly under severe scale changes, and more
importantly, results in poor gradient propagation. This is due to their strict
reliance on direct neighbors. Instead, we propose to generate random auxiliary
samples in the vicinity of each pixel in the sampled image, and create a linear
approximation using their intensity values. We then use this approximation as a
differentiable formula for the transformed image. However, we observe that
these auxiliary samples may collapse to a single pixel under severe image
transformations, and propose to address it by adding constraints to the
distance between the center pixel and the auxiliary samples. We demonstrate
that our approach produces more representative gradients with a wider basin of
convergence for image alignment, which leads to considerable performance
improvements when training networks for image registration and classification
tasks, particularly under large downsampling.},
  keywords      = {arch attention keypoints transform},
  added-at      = {2019-02-27T22:23:29.000+0100},
  timestamp     = {2019-02-27T22:23:29.000+0100},
  posted-at     = {2019-02-13 16:12:12},
  priority      = {4},
  biburl        = {https://www.bibsonomy.org/bibtex/2924512f95f34b0fe64e047c48fc9896b/nmatsuk},
  interhash     = {09c54e9d0c47f0fd5eb5897eede1465c},
  intrahash     = {924512f95f34b0fe64e047c48fc9896b},
  citeulike-article-id = {14689642},
  internal-note = {Author list filled in from the arXiv:1901.07124 record (original export had placeholder "xxx") -- verify against the paper. Removed doubled braces on title/abstract (they defeat style casing) and the citeulike-linkout-* fields, which duplicated the url field.},
}