We propose and analyze an adaptive step-size variant of the Davis-Yin three
operator splitting. This method can solve optimization problems composed by a
sum of a smooth term for which we have access to its gradient and an arbitrary
number of potentially non-smooth terms for which we have access to their
proximal operator. The proposed method sets the step-size based on local
information of the objective (hence allowing for larger step-sizes), only
requires two extra function evaluations per iteration and does not depend on
any step-size hyperparameter besides an initial estimate. We provide an
iteration complexity analysis that matches the best known results for the
non-adaptive variant: sublinear convergence for general convex functions and
linear convergence under strong convexity of the smooth term and smoothness of
one of the proximal terms. Finally, an empirical comparison with related
methods on 6 different problems illustrates the computational advantage of the
proposed method.
%0 Generic
%1 pedregosa2018adaptive
%A Pedregosa, Fabian
%A Gidel, Gauthier
%D 2018
%K fused-lasso optimization structured_sparsity total_variation trend_filter
%T Adaptive Three Operator Splitting.
%U http://arxiv.org/abs/1804.02339
%X We propose and analyze an adaptive step-size variant of the Davis-Yin three
operator splitting. This method can solve optimization problems composed by a
sum of a smooth term for which we have access to its gradient and an arbitrary
number of potentially non-smooth terms for which we have access to their
proximal operator. The proposed method sets the step-size based on local
information of the objective (hence allowing for larger step-sizes), only
requires two extra function evaluations per iteration and does not depend on
any step-size hyperparameter besides an initial estimate. We provide an
iteration complexity analysis that matches the best known results for the
non-adaptive variant: sublinear convergence for general convex functions and
linear convergence under strong convexity of the smooth term and smoothness of
one of the proximal terms. Finally, an empirical comparison with related
methods on 6 different problems illustrates the computational advantage of the
proposed method.
@misc{pedregosa2018adaptive,
  abstract      = {We propose and analyze an adaptive step-size variant of the Davis-Yin three
operator splitting. This method can solve optimization problems composed by a
sum of a smooth term for which we have access to its gradient and an arbitrary
number of potentially non-smooth terms for which we have access to their
proximal operator. The proposed method sets the step-size based on local
information of the objective --hence allowing for larger step-sizes--, only
requires two extra function evaluations per iteration and does not depend on
any step-size hyperparameter besides an initial estimate. We provide an
iteration complexity analysis that matches the best known results for the
non-adaptive variant: sublinear convergence for general convex functions and
linear convergence under strong convexity of the smooth term and smoothness of
one of the proximal terms. Finally, an empirical comparison with related
methods on 6 different problems illustrates the computational advantage of the
proposed method.},
  added-at      = {2018-12-07T10:30:33.000+0100},
  archiveprefix = {arXiv},
  author        = {Pedregosa, Fabian and Gidel, Gauthier},
  biburl        = {https://www.bibsonomy.org/bibtex/257273e19af372158ebfefcde762035b1/jpvaldes},
  description   = {Adaptive Three Operator Splitting},
  eprint        = {1804.02339},
  interhash     = {245723100ea24704a2c3ad2708dd09ff},
  intrahash     = {57273e19af372158ebfefcde762035b1},
  keywords      = {fused-lasso optimization structured_sparsity total_variation trend_filter},
  timestamp     = {2018-12-07T10:30:33.000+0100},
  title         = {Adaptive Three Operator Splitting},
  url           = {http://arxiv.org/abs/1804.02339},
  year          = {2018}
}