Despite its success in a wide range of applications, characterizing the
generalization properties of stochastic gradient descent (SGD) in non-convex
deep learning problems is still an important challenge. While modeling the
trajectories of SGD via stochastic differential equations (SDE) under
heavy-tailed gradient noise has recently shed light over several peculiar
characteristics of SGD, a rigorous treatment of the generalization properties
of such SDEs in a learning theoretical framework is still missing. Aiming to
bridge this gap, in this paper, we prove generalization bounds for SGD under
the assumption that its trajectories can be well-approximated by a Feller
process, which defines a rich class of Markov processes that include several
recent SDE representations (both Brownian or heavy-tailed) as its special case.
We show that the generalization error can be controlled by the Hausdorff
dimension of the trajectories, which is intimately linked to the tail behavior
of the driving process. Our results imply that heavier-tailed processes should
achieve better generalization; hence, the tail-index of the process can be used
as a notion of "capacity metric". We support our theory with experiments on
deep neural networks illustrating that the proposed capacity metric accurately
estimates the generalization error, and it does not necessarily grow with the
number of parameters unlike the existing capacity metrics in the literature.
Description
[2006.09313] Hausdorff Dimension, Stochastic Differential Equations, and Generalization in Neural Networks
%0 Journal Article
%1 simsekli2020hausdorff
%A Şimşekli, Umut
%A Sener, Ozan
%A Deligiannidis, George
%A Erdogdu, Murat A.
%D 2020
%K deep-learning differential-equations generalization readings stochastic
%T Hausdorff Dimension, Stochastic Differential Equations, and
Generalization in Neural Networks
%U http://arxiv.org/abs/2006.09313
%X Despite its success in a wide range of applications, characterizing the
generalization properties of stochastic gradient descent (SGD) in non-convex
deep learning problems is still an important challenge. While modeling the
trajectories of SGD via stochastic differential equations (SDE) under
heavy-tailed gradient noise has recently shed light over several peculiar
characteristics of SGD, a rigorous treatment of the generalization properties
of such SDEs in a learning theoretical framework is still missing. Aiming to
bridge this gap, in this paper, we prove generalization bounds for SGD under
the assumption that its trajectories can be well-approximated by a Feller
process, which defines a rich class of Markov processes that include several
recent SDE representations (both Brownian or heavy-tailed) as its special case.
We show that the generalization error can be controlled by the Hausdorff
dimension of the trajectories, which is intimately linked to the tail behavior
of the driving process. Our results imply that heavier-tailed processes should
achieve better generalization; hence, the tail-index of the process can be used
as a notion of "capacity metric". We support our theory with experiments on
deep neural networks illustrating that the proposed capacity metric accurately
estimates the generalization error, and it does not necessarily grow with the
number of parameters unlike the existing capacity metrics in the literature.
@misc{simsekli2020hausdorff,
  abstract      = {Despite its success in a wide range of applications, characterizing the
generalization properties of stochastic gradient descent (SGD) in non-convex
deep learning problems is still an important challenge. While modeling the
trajectories of SGD via stochastic differential equations (SDE) under
heavy-tailed gradient noise has recently shed light over several peculiar
characteristics of SGD, a rigorous treatment of the generalization properties
of such SDEs in a learning theoretical framework is still missing. Aiming to
bridge this gap, in this paper, we prove generalization bounds for SGD under
the assumption that its trajectories can be well-approximated by a Feller
process, which defines a rich class of Markov processes that include several
recent SDE representations (both Brownian or heavy-tailed) as its special case.
We show that the generalization error can be controlled by the Hausdorff
dimension of the trajectories, which is intimately linked to the tail behavior
of the driving process. Our results imply that heavier-tailed processes should
achieve better generalization; hence, the tail-index of the process can be used
as a notion of ``capacity metric''. We support our theory with experiments on
deep neural networks illustrating that the proposed capacity metric accurately
estimates the generalization error, and it does not necessarily grow with the
number of parameters unlike the existing capacity metrics in the literature.},
  added-at      = {2020-06-18T20:10:39.000+0200},
  author        = {{\c{S}}im{\c{s}}ekli, Umut and Sener, Ozan and Deligiannidis, George and Erdogdu, Murat A.},
  biburl        = {https://www.bibsonomy.org/bibtex/2b55f3914abc9c2b1089632c0e1d91a8b/kirk86},
  description   = {[2006.09313] Hausdorff Dimension, Stochastic Differential Equations, and Generalization in Neural Networks},
  archiveprefix = {arXiv},
  eprint        = {2006.09313},
  interhash     = {9a39c438024bdf547a335c0f4d4e57cd},
  intrahash     = {b55f3914abc9c2b1089632c0e1d91a8b},
  keywords      = {deep-learning differential-equations generalization readings stochastic},
  note          = {arXiv preprint, 34 pages},
  timestamp     = {2020-06-18T20:10:39.000+0200},
  title         = {{Hausdorff} Dimension, Stochastic Differential Equations, and
Generalization in Neural Networks},
  url           = {https://arxiv.org/abs/2006.09313},
  year          = {2020}
}