In this paper, we provide a rigorous proof of convergence of the Adaptive
Moment Estimate (Adam) algorithm for a wide class of optimization objectives.
Despite the popularity and efficiency of the Adam algorithm in training deep
neural networks, its theoretical properties are not yet fully understood, and
existing convergence proofs require unrealistically strong assumptions, such as
globally bounded gradients, to show convergence to stationary points. In
this paper, we show that Adam provably converges to $\epsilon$-stationary
points with $\mathcal{O}(\epsilon^{-4})$ gradient complexity under far more
realistic conditions. The key to our analysis is a new proof of boundedness of
gradients along the optimization trajectory of Adam, under a generalized
smoothness assumption according to which the local smoothness (i.e., Hessian
norm when it exists) is bounded by a sub-quadratic function of the gradient
norm. Moreover, we propose a variance-reduced version of Adam with an
accelerated gradient complexity of $\mathcal{O}(\epsilon^{-3})$.
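
As a minimal formal sketch of the two notions used in the abstract (the constants $L_0, L_1$ and the exponent $\rho$ below are illustrative placeholders, not necessarily the paper's exact notation): a point $x$ is $\epsilon$-stationary when $\|\nabla f(x)\| \le \epsilon$, and a generalized smoothness condition of the kind described bounds the local smoothness by a sub-quadratic function of the gradient norm, for example
$$\|\nabla^2 f(x)\| \;\le\; L_0 + L_1 \|\nabla f(x)\|^{\rho}, \qquad 0 \le \rho < 2,$$
which recovers standard $L$-smoothness when $L_1 = 0$ and the familiar $(L_0, L_1)$-smoothness condition when $\rho = 1$.
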
@misc{li2023convergence,
author = {Li, Haochuan and Rakhlin, Alexander and Jadbabaie, Ali},
keywords = {optimization},
note = {arXiv:2304.13972. Comment: 33 pages},
title = {Convergence of Adam under Relaxed Assumptions},
url = {http://arxiv.org/abs/2304.13972},
year = 2023
}