This paper introduces a new learning paradigm, called Learning Using Statistical Invariants (LUSI), which is different from the classical one. In a classical paradigm, the learning machine constructs a classification rule that minimizes the probability of expected error; it is data-driven model of learning. In the LUSI paradigm, in order to construct the desired classification function, a learning machine computes statistical invariants that are specific for the problem, and then minimizes the expected error in a way that preserves these invariants; it is thus both data- and invariant-driven learning. From a mathematical point of view, methods of the classical paradigm employ mechanisms of strong convergence of approximations to the desired function, whereas methods of the new paradigm employ both strong and weak convergence mechanisms. This can significantly increase the rate of convergence.
Description
Rethinking statistical learning theory: learning using statistical invariants | SpringerLink
%0 Journal Article
%1 Vapnik2019
%A Vapnik, Vladimir
%A Izmailov, Rauf
%D 2019
%J Machine Learning
%K deep-learning foundations generalization invariance machine-learning readings theory
%N 3
%P 381--423
%R 10.1007/s10994-018-5742-0
%T Rethinking statistical learning theory: learning using statistical invariants
%U https://doi.org/10.1007/s10994-018-5742-0
%V 108
%X This paper introduces a new learning paradigm, called Learning Using Statistical Invariants (LUSI), which is different from the classical one. In a classical paradigm, the learning machine constructs a classification rule that minimizes the probability of expected error; it is data-driven model of learning. In the LUSI paradigm, in order to construct the desired classification function, a learning machine computes statistical invariants that are specific for the problem, and then minimizes the expected error in a way that preserves these invariants; it is thus both data- and invariant-driven learning. From a mathematical point of view, methods of the classical paradigm employ mechanisms of strong convergence of approximations to the desired function, whereas methods of the new paradigm employ both strong and weak convergence mechanisms. This can significantly increase the rate of convergence.
@article{Vapnik2019,
  author      = {Vapnik, Vladimir and Izmailov, Rauf},
  title       = {Rethinking Statistical Learning Theory: Learning Using Statistical Invariants},
  journal     = {Machine Learning},
  year        = {2019},
  month       = mar,
  day         = {1},
  volume      = {108},
  number      = {3},
  pages       = {381--423},
  issn        = {1573-0565},
  doi         = {10.1007/s10994-018-5742-0},
  url         = {https://doi.org/10.1007/s10994-018-5742-0},
  abstract    = {This paper introduces a new learning paradigm, called Learning Using Statistical Invariants (LUSI), which is different from the classical one. In a classical paradigm, the learning machine constructs a classification rule that minimizes the probability of expected error; it is data-driven model of learning. In the LUSI paradigm, in order to construct the desired classification function, a learning machine computes statistical invariants that are specific for the problem, and then minimizes the expected error in a way that preserves these invariants; it is thus both data- and invariant-driven learning. From a mathematical point of view, methods of the classical paradigm employ mechanisms of strong convergence of approximations to the desired function, whereas methods of the new paradigm employ both strong and weak convergence mechanisms. This can significantly increase the rate of convergence.},
  keywords    = {deep-learning foundations generalization invariance machine-learning readings theory},
  added-at    = {2019-10-20T16:47:15.000+0200},
  timestamp   = {2019-10-20T16:47:15.000+0200},
  biburl      = {https://www.bibsonomy.org/bibtex/227679bb4bc75045610bb2cdaa2fe6964/kirk86},
  description = {Rethinking statistical learning theory: learning using statistical invariants | SpringerLink},
  interhash   = {bdcb57d13a0daa6d07ac7049c1321c08},
  intrahash   = {27679bb4bc75045610bb2cdaa2fe6964},
}