The success of adversarial formulations in machine learning has brought
renewed motivation for smooth games. In this work, we focus on the class of
stochastic Hamiltonian methods and provide the first convergence guarantees for
certain classes of stochastic smooth games. We propose a novel unbiased
estimator for the stochastic Hamiltonian gradient descent (SHGD) and highlight
its benefits. Using tools from the optimization literature we show that SHGD
converges linearly to the neighbourhood of a stationary point. To guarantee
convergence to the exact solution, we analyze SHGD with a decreasing step-size
and we also present the first stochastic variance reduced Hamiltonian method.
Our results provide the first global non-asymptotic last-iterate convergence
guarantees for the class of stochastic unconstrained bilinear games and for the
more general class of stochastic games that satisfy a "sufficiently bilinear"
condition, notably including some non-convex non-concave problems. We
supplement our analysis with experiments on stochastic bilinear and
sufficiently bilinear games, where our theory is shown to be tight, and on
simple adversarial machine learning formulations.
Description
[2007.04202] Stochastic Hamiltonian Gradient Methods for Smooth Games
%0 Journal Article
%1 loizou2020stochastic
%A Loizou, Nicolas
%A Berard, Hugo
%A Jolicoeur-Martineau, Alexia
%A Vincent, Pascal
%A Lacoste-Julien, Simon
%A Mitliagkas, Ioannis
%D 2020
%K game-theory optimization stochastic
%T Stochastic Hamiltonian Gradient Methods for Smooth Games
%U http://arxiv.org/abs/2007.04202
%X The success of adversarial formulations in machine learning has brought
renewed motivation for smooth games. In this work, we focus on the class of
stochastic Hamiltonian methods and provide the first convergence guarantees for
certain classes of stochastic smooth games. We propose a novel unbiased
estimator for the stochastic Hamiltonian gradient descent (SHGD) and highlight
its benefits. Using tools from the optimization literature we show that SHGD
converges linearly to the neighbourhood of a stationary point. To guarantee
convergence to the exact solution, we analyze SHGD with a decreasing step-size
and we also present the first stochastic variance reduced Hamiltonian method.
Our results provide the first global non-asymptotic last-iterate convergence
guarantees for the class of stochastic unconstrained bilinear games and for the
more general class of stochastic games that satisfy a "sufficiently bilinear"
condition, notably including some non-convex non-concave problems. We
supplement our analysis with experiments on stochastic bilinear and
sufficiently bilinear games, where our theory is shown to be tight, and on
simple adversarial machine learning formulations.
@inproceedings{loizou2020stochastic,
  abstract      = {The success of adversarial formulations in machine learning has brought
renewed motivation for smooth games. In this work, we focus on the class of
stochastic Hamiltonian methods and provide the first convergence guarantees for
certain classes of stochastic smooth games. We propose a novel unbiased
estimator for the stochastic Hamiltonian gradient descent (SHGD) and highlight
its benefits. Using tools from the optimization literature we show that SHGD
converges linearly to the neighbourhood of a stationary point. To guarantee
convergence to the exact solution, we analyze SHGD with a decreasing step-size
and we also present the first stochastic variance reduced Hamiltonian method.
Our results provide the first global non-asymptotic last-iterate convergence
guarantees for the class of stochastic unconstrained bilinear games and for the
more general class of stochastic games that satisfy a ``sufficiently bilinear''
condition, notably including some non-convex non-concave problems. We
supplement our analysis with experiments on stochastic bilinear and
sufficiently bilinear games, where our theory is shown to be tight, and on
simple adversarial machine learning formulations.},
  added-at      = {2020-07-10T12:15:42.000+0200},
  author        = {Loizou, Nicolas and Berard, Hugo and Jolicoeur-Martineau, Alexia and Vincent, Pascal and Lacoste-Julien, Simon and Mitliagkas, Ioannis},
  biburl        = {https://www.bibsonomy.org/bibtex/2554bb66efaf4964187b593ba3519eaf4/kirk86},
  booktitle     = {Proceedings of the 37th International Conference on Machine Learning ({ICML})},
  description   = {[2007.04202] Stochastic Hamiltonian Gradient Methods for Smooth Games},
  eprint        = {2007.04202},
  archiveprefix = {arXiv},
  interhash     = {43ea4d009f01a9caaac1533c6393a8c6},
  intrahash     = {554bb66efaf4964187b593ba3519eaf4},
  keywords      = {game-theory optimization stochastic},
  timestamp     = {2020-07-10T12:15:42.000+0200},
  title         = {Stochastic {Hamiltonian} Gradient Methods for Smooth Games},
  url           = {http://arxiv.org/abs/2007.04202},
  year          = {2020},
}