We present novel, computationally efficient, and differentially private
algorithms for two fundamental high-dimensional learning problems: learning a
multivariate Gaussian in $R^d$ and learning a product distribution in
$\{0,1\}^{d}$ in total variation distance. The sample complexity of our
algorithms nearly matches the sample complexity of the optimal non-private
learners for these tasks in a wide range of parameters. Thus, our results show
that privacy comes essentially for free for these problems, providing a
counterpoint to the many negative results showing that privacy is often costly
in high dimensions. Our algorithms introduce a novel technical approach to
reducing the sensitivity of the estimation procedure that we call recursive
private preconditioning, which may find additional applications.
%0 Journal Article
%1 kamath2018privately
%A Kamath, Gautam
%A Li, Jerry
%A Singhal, Vikrant
%A Ullman, Jonathan
%D 2018
%K privacy probability stats
%T Privately Learning High-Dimensional Distributions
%U http://arxiv.org/abs/1805.00216
%X We present novel, computationally efficient, and differentially private
algorithms for two fundamental high-dimensional learning problems: learning a
multivariate Gaussian in $R^d$ and learning a product distribution in
$\{0,1\}^{d}$ in total variation distance. The sample complexity of our
algorithms nearly matches the sample complexity of the optimal non-private
learners for these tasks in a wide range of parameters. Thus, our results show
that privacy comes essentially for free for these problems, providing a
counterpoint to the many negative results showing that privacy is often costly
in high dimensions. Our algorithms introduce a novel technical approach to
reducing the sensitivity of the estimation procedure that we call recursive
private preconditioning, which may find additional applications.
@article{kamath2018privately,
  abstract    = {We present novel, computationally efficient, and differentially private
algorithms for two fundamental high-dimensional learning problems: learning a
multivariate Gaussian in $R^d$ and learning a product distribution in
$\{0,1\}^{d}$ in total variation distance. The sample complexity of our
algorithms nearly matches the sample complexity of the optimal non-private
learners for these tasks in a wide range of parameters. Thus, our results show
that privacy comes essentially for free for these problems, providing a
counterpoint to the many negative results showing that privacy is often costly
in high dimensions. Our algorithms introduce a novel technical approach to
reducing the sensitivity of the estimation procedure that we call recursive
private preconditioning, which may find additional applications.},
  added-at    = {2019-03-09T01:59:37.000+0100},
  author      = {Kamath, Gautam and Li, Jerry and Singhal, Vikrant and Ullman, Jonathan},
  biburl      = {https://www.bibsonomy.org/bibtex/2a1f3940a6d68129bea6ff230cffa60d4/kirk86},
  description = {[1805.00216] Privately Learning High-Dimensional Distributions},
  eprint      = {1805.00216},
  eprinttype  = {arXiv},
  interhash   = {b99c7d967842dbba8fce3f9cf1d56507},
  intrahash   = {a1f3940a6d68129bea6ff230cffa60d4},
  keywords    = {privacy probability stats},
  note        = {cite arxiv:1805.00216},
  timestamp   = {2019-07-19T20:56:07.000+0200},
  title       = {Privately Learning High-Dimensional Distributions},
  url         = {http://arxiv.org/abs/1805.00216},
  year        = {2018}
}