The success of deep neural networks hinges on our ability to accurately and
efficiently optimize high-dimensional, non-convex functions. In this paper, we
empirically investigate the loss functions of state-of-the-art networks, and
how commonly-used stochastic gradient descent variants optimize these loss
functions. To do this, we visualize the loss function by projecting them down
to low-dimensional spaces chosen based on the convergence points of different
optimization algorithms. Our observations suggest that optimization algorithms
encounter and choose different descent directions at many saddle points to find
different final weights. Based on consistency we observe across re-runs of the
same stochastic optimization algorithm, we hypothesize that each optimization
algorithm makes characteristic choices at these saddle points.
Description
[1612.04010] An empirical analysis of the optimization of deep network loss surfaces
%0 Journal Article
%1 im2016empirical
%A Im, Daniel Jiwoong
%A Tao, Michael
%A Branson, Kristin
%D 2016
%K generalization optimization readings
%T An empirical analysis of the optimization of deep network loss surfaces
%U http://arxiv.org/abs/1612.04010
%X The success of deep neural networks hinges on our ability to accurately and
efficiently optimize high-dimensional, non-convex functions. In this paper, we
empirically investigate the loss functions of state-of-the-art networks, and
how commonly-used stochastic gradient descent variants optimize these loss
functions. To do this, we visualize the loss function by projecting them down
to low-dimensional spaces chosen based on the convergence points of different
optimization algorithms. Our observations suggest that optimization algorithms
encounter and choose different descent directions at many saddle points to find
different final weights. Based on consistency we observe across re-runs of the
same stochastic optimization algorithm, we hypothesize that each optimization
algorithm makes characteristic choices at these saddle points.
@article{im2016empirical,
  abstract      = {The success of deep neural networks hinges on our ability to accurately and
efficiently optimize high-dimensional, non-convex functions. In this paper, we
empirically investigate the loss functions of state-of-the-art networks, and
how commonly-used stochastic gradient descent variants optimize these loss
functions. To do this, we visualize the loss function by projecting them down
to low-dimensional spaces chosen based on the convergence points of different
optimization algorithms. Our observations suggest that optimization algorithms
encounter and choose different descent directions at many saddle points to find
different final weights. Based on consistency we observe across re-runs of the
same stochastic optimization algorithm, we hypothesize that each optimization
algorithm makes characteristic choices at these saddle points.},
  added-at      = {2019-12-03T21:41:06.000+0100},
  archiveprefix = {arXiv},
  author        = {Im, Daniel Jiwoong and Tao, Michael and Branson, Kristin},
  biburl        = {https://www.bibsonomy.org/bibtex/2d77442f6a2ff30eba86bad2c7e690d63/kirk86},
  description   = {[1612.04010] An empirical analysis of the optimization of deep network loss surfaces},
  eprint        = {1612.04010},
  interhash     = {fe04301a7913504f982e0043924014b7},
  intrahash     = {d77442f6a2ff30eba86bad2c7e690d63},
  journal       = {CoRR},
  keywords      = {generalization optimization readings},
  timestamp     = {2019-12-03T21:41:06.000+0100},
  title         = {An empirical analysis of the optimization of deep network loss surfaces},
  url           = {http://arxiv.org/abs/1612.04010},
  volume        = {abs/1612.04010},
  year          = {2016},
}