Inspired by co-training, many multi-view semi-supervised kernel methods implement the following idea: find a function in each of multiple Reproducing Kernel Hilbert Spaces (RKHSs) such that (a) the chosen functions make similar predictions on unlabeled examples, and (b) the average prediction given by the chosen functions performs well on labeled examples. In this paper, we construct a single RKHS with a data-dependent "co-regularization" norm that reduces these approaches to standard supervised learning. The reproducing kernel for this RKHS can be explicitly derived and plugged into any kernel method, greatly extending the theoretical and algorithmic scope of co-regularization. In particular, with this development, the Rademacher complexity bound for co-regularization given in (Rosenberg & Bartlett, 2007) follows easily from well-known results. Furthermore, more refined bounds given by localized Rademacher complexity can also be easily applied. We propose a co-regularization based algorithmic alternative to manifold regularization (Belkin et al., 2006; Sindhwani et al., 2005a) that leads to major empirical improvements on semi-supervised tasks. Unlike the recently proposed transductive approach of (Yu et al., 2008), our RKHS formulation is truly semi-supervised and naturally extends to unseen test data.
%0 Conference Paper
%1 Sindhwani:2008:RML:1390156.1390279
%A Sindhwani, Vikas
%A Rosenberg, David S.
%B Proceedings of the 25th International Conference on Machine Learning
%C New York, NY, USA
%D 2008
%I ACM
%K co co-regularization introduction joint multi optimization problem training view
%P 976--983
%R 10.1145/1390156.1390279
%T An RKHS for Multi-view Learning and Manifold Co-regularization
%U http://doi.acm.org/10.1145/1390156.1390279
%X Inspired by co-training, many multi-view semi-supervised kernel methods implement the following idea: find a function in each of multiple Reproducing Kernel Hilbert Spaces (RKHSs) such that (a) the chosen functions make similar predictions on unlabeled examples, and (b) the average prediction given by the chosen functions performs well on labeled examples. In this paper, we construct a single RKHS with a data-dependent "co-regularization" norm that reduces these approaches to standard supervised learning. The reproducing kernel for this RKHS can be explicitly derived and plugged into any kernel method, greatly extending the theoretical and algorithmic scope of co-regularization. In particular, with this development, the Rademacher complexity bound for co-regularization given in (Rosenberg & Bartlett, 2007) follows easily from well-known results. Furthermore, more refined bounds given by localized Rademacher complexity can also be easily applied. We propose a co-regularization based algorithmic alternative to manifold regularization (Belkin et al., 2006; Sindhwani et al., 2005a) that leads to major empirical improvements on semi-supervised tasks. Unlike the recently proposed transductive approach of (Yu et al., 2008), our RKHS formulation is truly semi-supervised and naturally extends to unseen test data.
%@ 978-1-60558-205-4
@inproceedings{Sindhwani:2008:RML:1390156.1390279,
  abstract  = {Inspired by co-training, many multi-view semi-supervised kernel methods implement the following idea: find a function in each of multiple Reproducing Kernel Hilbert Spaces (RKHSs) such that (a) the chosen functions make similar predictions on unlabeled examples, and (b) the average prediction given by the chosen functions performs well on labeled examples. In this paper, we construct a single RKHS with a data-dependent "co-regularization" norm that reduces these approaches to standard supervised learning. The reproducing kernel for this RKHS can be explicitly derived and plugged into any kernel method, greatly extending the theoretical and algorithmic scope of co-regularization. In particular, with this development, the Rademacher complexity bound for co-regularization given in (Rosenberg \& Bartlett, 2007) follows easily from well-known results. Furthermore, more refined bounds given by localized Rademacher complexity can also be easily applied. We propose a co-regularization based algorithmic alternative to manifold regularization (Belkin et al., 2006; Sindhwani et al., 2005a) that leads to major empirical improvements on semi-supervised tasks. Unlike the recently proposed transductive approach of (Yu et al., 2008), our RKHS formulation is truly semi-supervised and naturally extends to unseen test data.},
  acmid     = {1390279},
  added-at  = {2014-02-05T18:15:51.000+0100},
  address   = {New York, NY, USA},
  author    = {Sindhwani, Vikas and Rosenberg, David S.},
  biburl    = {https://www.bibsonomy.org/bibtex/20567d132afc4e4c35d76e3447a342674/jil},
  booktitle = {Proceedings of the 25th International Conference on Machine Learning},
  doi       = {10.1145/1390156.1390279},
  interhash = {3c71b7800bbdd40a49075ab93bc3a668},
  intrahash = {0567d132afc4e4c35d76e3447a342674},
  isbn      = {978-1-60558-205-4},
  keywords  = {co co-regularization introduction joint multi optimization problem training view},
  location  = {Helsinki, Finland},
  numpages  = {8},
  pages     = {976--983},
  publisher = {ACM},
  series    = {ICML '08},
  timestamp = {2014-02-05T18:15:51.000+0100},
  title     = {An {RKHS} for Multi-view Learning and Manifold Co-regularization},
  url       = {https://doi.org/10.1145/1390156.1390279},
  year      = {2008},
}