Many modern machine learning models are trained to achieve zero or near-zero
training error in order to obtain near-optimal (but non-zero) test error. This
phenomenon of strong generalization performance for "overfitted" / interpolated
classifiers appears to be ubiquitous in high-dimensional data, having been
observed in deep networks, kernel machines, boosting and random forests. Their
performance is consistently robust even when the data contain large amounts of
label noise.
Very little theory is available to explain these observations. The vast
majority of theoretical analyses of generalization allows for interpolation
only when there is little or no label noise. This paper takes a step toward a
theoretical foundation for interpolated classifiers by analyzing local
interpolating schemes, including geometric simplicial interpolation algorithm
and singularly weighted $k$-nearest neighbor schemes. Consistency or
near-consistency is proved for these schemes in classification and regression
problems. Moreover, the nearest neighbor schemes exhibit optimal rates under
some standard statistical assumptions.
Finally, this paper suggests a way to explain the phenomenon of adversarial
examples, which are seemingly ubiquitous in modern machine learning, and also
discusses some connections to kernel machines and random forests in the
interpolated regime.
%0 Journal Article
%1 belkin2018overfitting
%A Belkin, Mikhail
%A Hsu, Daniel
%A Mitra, Partha
%D 2018
%K bounds generalization learning
%T Overfitting or perfect fitting? Risk bounds for classification and
regression rules that interpolate
%U http://arxiv.org/abs/1806.05161
%X Many modern machine learning models are trained to achieve zero or near-zero
training error in order to obtain near-optimal (but non-zero) test error. This
phenomenon of strong generalization performance for "overfitted" / interpolated
classifiers appears to be ubiquitous in high-dimensional data, having been
observed in deep networks, kernel machines, boosting and random forests. Their
performance is consistently robust even when the data contain large amounts of
label noise.
Very little theory is available to explain these observations. The vast
majority of theoretical analyses of generalization allows for interpolation
only when there is little or no label noise. This paper takes a step toward a
theoretical foundation for interpolated classifiers by analyzing local
interpolating schemes, including geometric simplicial interpolation algorithm
and singularly weighted $k$-nearest neighbor schemes. Consistency or
near-consistency is proved for these schemes in classification and regression
problems. Moreover, the nearest neighbor schemes exhibit optimal rates under
some standard statistical assumptions.
Finally, this paper suggests a way to explain the phenomenon of adversarial
examples, which are seemingly ubiquitous in modern machine learning, and also
discusses some connections to kernel machines and random forests in the
interpolated regime.
@article{belkin2018overfitting,
  abstract      = {Many modern machine learning models are trained to achieve zero or near-zero
training error in order to obtain near-optimal (but non-zero) test error. This
phenomenon of strong generalization performance for "overfitted" / interpolated
classifiers appears to be ubiquitous in high-dimensional data, having been
observed in deep networks, kernel machines, boosting and random forests. Their
performance is consistently robust even when the data contain large amounts of
label noise.
Very little theory is available to explain these observations. The vast
majority of theoretical analyses of generalization allows for interpolation
only when there is little or no label noise. This paper takes a step toward a
theoretical foundation for interpolated classifiers by analyzing local
interpolating schemes, including geometric simplicial interpolation algorithm
and singularly weighted $k$-nearest neighbor schemes. Consistency or
near-consistency is proved for these schemes in classification and regression
problems. Moreover, the nearest neighbor schemes exhibit optimal rates under
some standard statistical assumptions.
Finally, this paper suggests a way to explain the phenomenon of adversarial
examples, which are seemingly ubiquitous in modern machine learning, and also
discusses some connections to kernel machines and random forests in the
interpolated regime.},
  added-at      = {2019-02-28T20:57:53.000+0100},
  archiveprefix = {arXiv},
  author        = {Belkin, Mikhail and Hsu, Daniel and Mitra, Partha},
  biburl        = {https://www.bibsonomy.org/bibtex/250fb4df19699105a60badbad7855e982/kirk86},
  description   = {1806.05161.pdf},
  eprint        = {1806.05161},
  interhash     = {2170d1a28bce83d60947f9a52a9d7f1c},
  intrahash     = {50fb4df19699105a60badbad7855e982},
  keywords      = {bounds generalization learning},
  note          = {cite arxiv:1806.05161},
  timestamp     = {2019-02-28T20:57:53.000+0100},
  title         = {Overfitting or perfect fitting? {Risk} bounds for classification and
regression rules that interpolate},
  url           = {http://arxiv.org/abs/1806.05161},
  year          = {2018}
}