We give an algorithmically efficient version of the learner-to-compression
scheme conversion in Moran and Yehudayoff (2016). In extending this technique
to real-valued hypotheses, we also obtain an efficient regression-to-bounded
sample compression converter. To our knowledge, this is the first general
compressed regression result (regardless of efficiency or boundedness)
guaranteeing uniform approximate reconstruction. Along the way, we develop a
generic procedure for constructing weak real-valued learners out of abstract
regressors; this may be of independent interest. In particular, this result
sheds new light on an open question of H. Simon (1997). We show applications to
two regression problems: learning Lipschitz and bounded-variation functions.
Description
[1805.08254] Sample Compression for Real-Valued Learners
%0 Journal Article
%1 hanneke2018sample
%A Hanneke, Steve
%A Kontorovich, Aryeh
%A Sadigurschi, Menachem
%D 2018
%K complexity compression sampling
%T Sample Compression for Real-Valued Learners
%U http://arxiv.org/abs/1805.08254
%X We give an algorithmically efficient version of the learner-to-compression
scheme conversion in Moran and Yehudayoff (2016). In extending this technique
to real-valued hypotheses, we also obtain an efficient regression-to-bounded
sample compression converter. To our knowledge, this is the first general
compressed regression result (regardless of efficiency or boundedness)
guaranteeing uniform approximate reconstruction. Along the way, we develop a
generic procedure for constructing weak real-valued learners out of abstract
regressors; this may be of independent interest. In particular, this result
sheds new light on an open question of H. Simon (1997). We show applications to
two regression problems: learning Lipschitz and bounded-variation functions.
@article{hanneke2018sample,
  abstract      = {We give an algorithmically efficient version of the learner-to-compression
scheme conversion in Moran and Yehudayoff (2016). In extending this technique
to real-valued hypotheses, we also obtain an efficient regression-to-bounded
sample compression converter. To our knowledge, this is the first general
compressed regression result (regardless of efficiency or boundedness)
guaranteeing uniform approximate reconstruction. Along the way, we develop a
generic procedure for constructing weak real-valued learners out of abstract
regressors; this may be of independent interest. In particular, this result
sheds new light on an open question of H. Simon (1997). We show applications to
two regression problems: learning Lipschitz and bounded-variation functions.},
  added-at      = {2019-06-17T01:20:54.000+0200},
  archiveprefix = {arXiv},
  author        = {Hanneke, Steve and Kontorovich, Aryeh and Sadigurschi, Menachem},
  biburl        = {https://www.bibsonomy.org/bibtex/2e1e682f004665e35c858849586110731/kirk86},
  description   = {[1805.08254] Sample Compression for Real-Valued Learners},
  eprint        = {1805.08254},
  interhash     = {7733e1fd617fc9020b85fa120b7ac964},
  intrahash     = {e1e682f004665e35c858849586110731},
  keywords      = {complexity compression sampling},
  timestamp     = {2019-06-17T01:20:54.000+0200},
  title         = {Sample Compression for Real-Valued Learners},
  url           = {http://arxiv.org/abs/1805.08254},
  year          = {2018},
}