This paper shows that a perturbed form of gradient descent converges to a
second-order stationary point in a number of iterations which depends only
poly-logarithmically on dimension (i.e., it is almost "dimension-free"). The
convergence rate of this procedure matches the well-known convergence rate of
gradient descent to first-order stationary points, up to log factors. When all
saddle points are non-degenerate, all second-order stationary points are local
minima, and our result thus shows that perturbed gradient descent can escape
saddle points almost for free. Our results can be directly applied to many
machine learning applications, including deep learning. As a particular
concrete example of such an application, we show that our results can be used
directly to establish sharp global convergence rates for matrix factorization.
Our results rely on a novel characterization of the geometry around saddle
points, which may be of independent interest to the non-convex optimization
community.
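
For intuition, here is a minimal sketch of the perturbed-gradient-descent idea in Python. It follows the high-level recipe the abstract describes (ordinary gradient steps, plus a small random perturbation whenever the gradient is small and no noise was injected recently), but the parameter names and default values below (eta, g_thres, r, t_thres) are illustrative placeholders, not the theoretically tuned constants from the paper.

import numpy as np

def perturbed_gradient_descent(grad, x0, eta=0.01, g_thres=1e-3,
                               r=1e-2, t_thres=10, max_iter=10_000,
                               seed=0):
    """Sketch of perturbed gradient descent (after Jin et al., 2017).

    Takes plain gradient steps, and adds a perturbation drawn uniformly
    from a ball of radius r whenever the gradient is small and no
    perturbation was added in the last t_thres iterations. Constants
    here are illustrative; the paper tunes them from the smoothness
    parameters of the objective.
    """
    rng = np.random.default_rng(seed)
    x = np.asarray(x0, dtype=float).copy()
    t_noise = -t_thres - 1  # iteration index of the most recent perturbation
    for t in range(max_iter):
        if np.linalg.norm(grad(x)) <= g_thres and t - t_noise > t_thres:
            # Near a first-order stationary point: inject noise so the
            # iterate can fall off a strict saddle along a direction of
            # negative curvature.
            u = rng.standard_normal(x.shape)
            u *= rng.uniform() ** (1.0 / x.size) / np.linalg.norm(u)
            x = x + r * u
            t_noise = t
        x = x - eta * grad(x)
    return x

# Toy usage: f(x) = x[0]**2 - x[1]**2 + x[1]**4 has a strict saddle at the
# origin. Started exactly there, plain gradient descent never moves; the
# perturbation lets the iterate slide down to a local minimum near
# (0, +-1/sqrt(2)).
grad_f = lambda x: np.array([2 * x[0], -2 * x[1] + 4 * x[1] ** 3])
x_star = perturbed_gradient_descent(grad_f, x0=np.zeros(2))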
@misc{jin2017escape,
author = {Jin, Chi and Ge, Rong and Netrapalli, Praneeth and Kakade, Sham M. and Jordan, Michael I.},
keywords = {optimization},
note = {arXiv:1703.00887},
title = {How to Escape Saddle Points Efficiently},
url = {http://arxiv.org/abs/1703.00887},
year = 2017
}