We consider neural networks with a single hidden layer and non-decreasing
homogeneous activation functions like the rectified linear units. By letting
the number of hidden units grow unbounded and using classical non-Euclidean
regularization tools on the output weights, we provide a detailed theoretical
analysis of their generalization performance, with a study of both the
approximation and the estimation errors. We show in particular that they are
adaptive to unknown underlying linear structures, such as the dependence on the
projection of the input variables onto a low-dimensional subspace. Moreover,
when using sparsity-inducing norms on the input weights, we show that
high-dimensional non-linear variable selection may be achieved, without any
strong assumption regarding the data and with a total number of variables
potentially exponential in the number of observations. In addition, we provide
a simple geometric interpretation to the non-convex problem of addition of a
new unit, which is the core potentially hard computational element in the
framework of learning from continuously many basis functions. We provide simple
conditions for convex relaxations to achieve the same generalization error
bounds, even when constant-factor approximations cannot be found (e.g.,
because it is NP-hard such as for the zero-homogeneous activation function). We
were not able to find strong enough convex relaxations and leave open the
existence or non-existence of polynomial-time algorithms.
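For context, the infinite-width formulation the abstract alludes to can be written as an l1-type problem over a continuum of units. The following is our paraphrase, not the paper's exact statement (sigma, mu, and lambda are our notation, and bias terms are ignored for simplicity):

\[
\min_{\mu}\; \frac{1}{n} \sum_{i=1}^{n} \ell\Big( y_i,\; \int_{\mathbb{S}^{d-1}} \sigma(w^\top x_i)\, d\mu(w) \Big) \;+\; \lambda\, \|\mu\|_{\mathrm{TV}},
\]

where sigma is the positively homogeneous activation (e.g., the ReLU), mu is a signed measure over input weights w on the unit sphere, and the total-variation norm \|\mu\|_{\mathrm{TV}} plays the role of the l1 norm on output weights. A finite network corresponds to a discrete measure, and "adding a new unit" means adding one atom to mu, which is the potentially hard computational step the abstract describes.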
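A minimal runnable sketch of the resulting incremental scheme, in Python with NumPy. Everything below is an assumption for illustration: the hard unit-addition subproblem is only approximated by random restarts, and the names fit, add_unit, lasso_ista and all constants are ours, not the paper's.

import numpy as np

def relu(z):
    return np.maximum(z, 0.0)

def add_unit(X, residual, n_candidates=200, rng=None):
    # Heuristic stand-in for the hard subproblem: among random input weights
    # on the unit sphere, pick the ReLU feature most correlated with the residual.
    rng = rng or np.random.default_rng(0)
    W = rng.standard_normal((n_candidates, X.shape[1]))
    W /= np.linalg.norm(W, axis=1, keepdims=True)  # homogeneity: scale moves to the output weight
    scores = relu(X @ W.T).T @ residual
    return W[np.argmax(np.abs(scores))]

def lasso_ista(H, y, lam, n_iter=500):
    # Proximal gradient (ISTA) for min_c 0.5/n * ||y - H c||^2 + lam * ||c||_1,
    # i.e., the l1-type regularization on output weights mentioned in the abstract.
    n, k = H.shape
    c = np.zeros(k)
    L = max(np.linalg.norm(H, 2) ** 2 / n, 1e-12)  # Lipschitz constant of the gradient
    for _ in range(n_iter):
        g = c - H.T @ (H @ c - y) / (n * L)                     # gradient step
        c = np.sign(g) * np.maximum(np.abs(g) - lam / L, 0.0)   # soft-thresholding
    return c

def fit(X, y, n_units=10, lam=0.01, rng=None):
    # Grow the hidden layer one unit at a time, refitting the l1-penalized
    # output weights after each addition.
    rng = rng or np.random.default_rng(0)
    W, residual = np.empty((0, X.shape[1])), y.copy()
    for _ in range(n_units):
        W = np.vstack([W, add_unit(X, residual, rng=rng)])
        H = relu(X @ W.T)
        c = lasso_ista(H, y, lam)
        residual = y - H @ c
    return W, c

# Example: a target depending on a single direction of a 10-dimensional input,
# the kind of hidden linear structure the paper's adaptivity results cover.
rng = np.random.default_rng(0)
X = rng.standard_normal((500, 10))
y = relu(X[:, 0] - 0.3)
W, c = fit(X, y)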
Description
[1412.8690] Breaking the Curse of Dimensionality with Convex Neural Networks
%0 Journal Article
%1 bach2014breaking
%A Bach, Francis
%D 2014
%K convex deep-learning optimization readings
%T Breaking the Curse of Dimensionality with Convex Neural Networks
%U http://arxiv.org/abs/1412.8690
@article{bach2014breaking,
added-at = {2020-02-08T18:03:07.000+0100},
author = {Bach, Francis},
biburl = {https://www.bibsonomy.org/bibtex/264280c6f456f8b1e2bce87cb669dac64/kirk86},
description = {[1412.8690] Breaking the Curse of Dimensionality with Convex Neural Networks},
interhash = {d329472cf46a8d7b19b979c6f2b4aafc},
intrahash = {64280c6f456f8b1e2bce87cb669dac64},
keywords = {convex deep-learning optimization readings},
note = {cite arxiv:1412.8690},
timestamp = {2020-02-08T18:03:07.000+0100},
title = {Breaking the Curse of Dimensionality with Convex Neural Networks},
url = {http://arxiv.org/abs/1412.8690},
year = 2014
}