PAC-Bayes bounds have been proposed to get risk estimates based on a training sample. In this paper the PAC-Bayes approach is combined with stability of the hypothesis learned by a Hilbert space valued algorithm. The PAC-Bayes setting is used with a Gaussian prior centered at the expected output. Thus a novelty of our paper is using priors defined in terms of the data-generating distribution. Our main result estimates the risk of the randomized algorithm in terms of the hypothesis stability coefficients. We also provide a new bound for the SVM classifier, which is compared to other known bounds experimentally. Ours appears to be the first uniform hypothesis stability-based bound that evaluates to non-trivial values.
%0 Conference Paper
%1 RSz18
%A Rivasplata, O.
%A Szepesvári, Cs.
%A Shawe-Taylor, J.
%A Parrado-Hernandez, E.
%A Sun, S.
%B NIPS
%D 2018
%K PAC-Bayes, bounds, generalization, learning, stability, theory, uniform
%T PAC-Bayes bounds for stable algorithms with instance-dependent priors
%X PAC-Bayes bounds have been proposed to get risk estimates based on a training sample. In this paper the PAC-Bayes approach is combined with stability of the hypothesis learned by a Hilbert space valued algorithm. The PAC-Bayes setting is used with a Gaussian prior centered at the expected output. Thus a novelty of our paper is using priors defined in terms of the data-generating distribution. Our main result estimates the risk of the randomized algorithm in terms of the hypothesis stability coefficients. We also provide a new bound for the SVM classifier, which is compared to other known bounds experimentally. Ours appears to be the first uniform hypothesis stability-based bound that evaluates to non-trivial values.
% NOTE(review): removed bdsk-url-1 — it pointed to a different paper
% (hanawal17a, PMLR v54). Month corrected from September to dec
% (NeurIPS 2018 ran 3--8 December). Keywords de-duplicated and
% comma-separated; author names expanded from initials; title brace
% now protects the whole word {PAC-Bayes} rather than {PAC-B}ayes.
@inproceedings{RSz18,
  abstract      = {PAC-Bayes bounds have been proposed to get risk estimates based on a training sample. In this paper the PAC-Bayes approach is combined with stability of the hypothesis learned by a Hilbert space valued algorithm. The PAC-Bayes setting is used with a Gaussian prior centered at the expected output. Thus a novelty of our paper is using priors defined in terms of the data-generating distribution. Our main result estimates the risk of the randomized algorithm in terms of the hypothesis stability coefficients. We also provide a new bound for the SVM classifier, which is compared to other known bounds experimentally. Ours appears to be the first uniform hypothesis stability-based bound that evaluates to non-trivial values.},
  added-at      = {2020-03-17T03:03:01.000+0100},
  author        = {Rivasplata, Omar and Szepesv{\'a}ri, Csaba and Shawe-Taylor, John and Parrado-Hernandez, Emilio and Sun, Shiliang},
  biburl        = {https://www.bibsonomy.org/bibtex/2a5b4d83c2b6363da66f010842d30da5b/csaba},
  booktitle     = {Advances in Neural Information Processing Systems ({NeurIPS})},
  date-added    = {2018-11-02 00:02:30 +0000},
  date-modified = {2018-11-02 00:17:27 +0000},
  interhash     = {c7a42db5823bf508bbe36e499b7de49b},
  intrahash     = {a5b4d83c2b6363da66f010842d30da5b},
  keywords      = {PAC-Bayes, bounds, generalization, learning, stability, theory, uniform},
  month         = dec,
  pdf           = {papers/NIPS2018-PACBayes.pdf},
  timestamp     = {2020-03-17T03:03:01.000+0100},
  title         = {{PAC-Bayes} bounds for stable algorithms with instance-dependent priors},
  year          = {2018}
}