We consider the problem of learning Markov Random Fields (including the
prototypical example, the Ising model) under the constraint of differential
privacy. Our learning goals include both structure learning, where we try to
estimate the underlying graph structure of the model, as well as the harder
goal of parameter learning, in which we additionally estimate the parameter on
each edge. We provide algorithms and lower bounds for both problems under a
variety of privacy constraints -- namely pure, concentrated, and approximate
differential privacy. While non-privately, both learning goals enjoy roughly
the same complexity, we show that this is not the case under differential
privacy. In particular, only structure learning under approximate differential
privacy maintains the non-private logarithmic dependence on the dimensionality
of the data, while a change in either the learning goal or the privacy notion
would necessitate a polynomial dependence. As a result, we show that the
privacy constraint imposes a strong separation between these two learning
problems in the high-dimensional data regime.
Description
[2002.09463] Privately Learning Markov Random Fields
%0 Journal Article
%1 zhang2020privately
%A Zhang, Huanyu
%A Kamath, Gautam
%A Kulkarni, Janardhan
%A Wu, Zhiwei Steven
%D 2020
%K differential-privacy markov-processes readings
%T Privately Learning Markov Random Fields
%U http://arxiv.org/abs/2002.09463
%X We consider the problem of learning Markov Random Fields (including the
prototypical example, the Ising model) under the constraint of differential
privacy. Our learning goals include both structure learning, where we try to
estimate the underlying graph structure of the model, as well as the harder
goal of parameter learning, in which we additionally estimate the parameter on
each edge. We provide algorithms and lower bounds for both problems under a
variety of privacy constraints -- namely pure, concentrated, and approximate
differential privacy. While non-privately, both learning goals enjoy roughly
the same complexity, we show that this is not the case under differential
privacy. In particular, only structure learning under approximate differential
privacy maintains the non-private logarithmic dependence on the dimensionality
of the data, while a change in either the learning goal or the privacy notion
would necessitate a polynomial dependence. As a result, we show that the
privacy constraint imposes a strong separation between these two learning
problems in the high-dimensional data regime.
@misc{zhang2020privately,
  abstract      = {We consider the problem of learning Markov Random Fields (including the
prototypical example, the Ising model) under the constraint of differential
privacy. Our learning goals include both structure learning, where we try to
estimate the underlying graph structure of the model, as well as the harder
goal of parameter learning, in which we additionally estimate the parameter on
each edge. We provide algorithms and lower bounds for both problems under a
variety of privacy constraints -- namely pure, concentrated, and approximate
differential privacy. While non-privately, both learning goals enjoy roughly
the same complexity, we show that this is not the case under differential
privacy. In particular, only structure learning under approximate differential
privacy maintains the non-private logarithmic dependence on the dimensionality
of the data, while a change in either the learning goal or the privacy notion
would necessitate a polynomial dependence. As a result, we show that the
privacy constraint imposes a strong separation between these two learning
problems in the high-dimensional data regime.},
  added-at      = {2020-02-24T23:31:31.000+0100},
  archiveprefix = {arXiv},
  author        = {Zhang, Huanyu and Kamath, Gautam and Kulkarni, Janardhan and Wu, Zhiwei Steven},
  biburl        = {https://www.bibsonomy.org/bibtex/2a070b4c44976e8fd1b6a939c6372b266/kirk86},
  description   = {[2002.09463] Privately Learning Markov Random Fields},
  eprint        = {2002.09463},
  howpublished  = {arXiv preprint arXiv:2002.09463},
  interhash     = {0414de42f6d04412749dcd8b9329c552},
  intrahash     = {a070b4c44976e8fd1b6a939c6372b266},
  keywords      = {differential-privacy markov-processes readings},
  timestamp     = {2020-02-24T23:31:31.000+0100},
  title         = {Privately Learning {Markov} Random Fields},
  url           = {http://arxiv.org/abs/2002.09463},
  year          = {2020},
}