Using a low-dimensional parametrization of signals is a generic and powerful
way to enhance performance in signal processing and statistical inference. A
very popular and widely explored type of dimensionality reduction is sparsity;
another type is generative modelling of signal distributions. Generative models
based on neural networks, such as GANs or variational auto-encoders, are
particularly performant and are gaining in applicability. In this paper we
study spiked matrix models, where a low-rank matrix is observed through a noisy
channel. The variant of this problem with a sparse structure of the spikes has
attracted broad attention in the literature. Here, we replace the sparsity
assumption with generative modelling and investigate the consequences for
statistical and
algorithmic properties. We analyze the Bayes-optimal performance under specific
generative models for the spike. In contrast with the sparse case, we do not
observe a region of parameters where the statistically optimal performance is
superior to the best known algorithmic performance. We show that in the
analyzed cases
the approximate message passing algorithm is able to reach optimal performance.
We also design enhanced spectral algorithms and analyze their performance and
thresholds using random matrix theory, showing their superiority to classical
principal component analysis. We complement our theoretical results
by illustrating the performance of the spectral algorithms when the spikes come
from real datasets.
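
To make the setup concrete, below is a minimal numerical sketch, not the paper's
exact experimental pipeline: it builds a rank-one spiked Wigner matrix whose spike
comes from a single-layer generative model and estimates the spike with classical
PCA, i.e. the leading eigenvector. The ReLU generator, the dimensions and the
signal-to-noise ratio used here are illustrative assumptions.

# Minimal sketch: spiked Wigner matrix with a generative prior, estimated by PCA.
# The single-layer ReLU generator, the SNR and the sizes are illustrative
# assumptions, not the exact setup of the paper.
import numpy as np

rng = np.random.default_rng(0)
n, k, snr = 2000, 200, 4.0          # signal dim, latent dim, signal-to-noise ratio

# Spike drawn from a one-layer generative model: x = relu(W z), z ~ N(0, I_k)
W = rng.standard_normal((n, k)) / np.sqrt(k)
z = rng.standard_normal(k)
x = np.maximum(W @ z, 0.0)
x *= np.sqrt(n) / np.linalg.norm(x)  # normalize so that ||x||^2 = n

# Standard spiked Wigner observation: Y = sqrt(snr/n) * x x^T + symmetric noise
noise = rng.standard_normal((n, n))
Y = np.sqrt(snr / n) * np.outer(x, x) + (noise + noise.T) / np.sqrt(2 * n)

# Classical PCA estimate: leading eigenvector of Y, scored by its overlap with x
eigvals, eigvecs = np.linalg.eigh(Y)
v = eigvecs[:, -1]
overlap = abs(v @ x) / (np.linalg.norm(v) * np.linalg.norm(x))
print(f"normalized overlap between PCA estimate and spike: {overlap:.3f}")

Above the spectral threshold the leading eigenvector correlates with the planted
spike; the enhanced spectral algorithms and the approximate message passing
algorithm studied in the paper are designed to exploit the generative structure
and improve on this plain PCA baseline.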
Description
[1905.12385] The spiked matrix model with generative priors
%0 Journal Article
%1 aubin2019spiked
%A Aubin, Benjamin
%A Loureiro, Bruno
%A Maillard, Antoine
%A Krzakala, Florent
%A Zdeborová, Lenka
%D 2019
%K deep-learning generative-models machine-learning mathematics probability sparsity stats theory
%T The spiked matrix model with generative priors
%U http://arxiv.org/abs/1905.12385
@article{aubin2019spiked,
added-at = {2019-05-31T17:17:59.000+0200},
author = {Aubin, Benjamin and Loureiro, Bruno and Maillard, Antoine and Krzakala, Florent and Zdeborová, Lenka},
biburl = {https://www.bibsonomy.org/bibtex/2207e658337a0594f6a79e4e1f1ac6e87/kirk86},
description = {[1905.12385] The spiked matrix model with generative priors},
interhash = {eaca3653150a4f5f77e947801f673e94},
intrahash = {207e658337a0594f6a79e4e1f1ac6e87},
keywords = {deep-learning generative-models machine-learning mathematics probability sparsity stats theory},
note = {cite arxiv:1905.12385; Comment: 12 + 56 pages, 8 figures, v2: lighter jpeg figures},
timestamp = {2019-05-31T17:17:59.000+0200},
title = {The spiked matrix model with generative priors},
url = {http://arxiv.org/abs/1905.12385},
year = 2019
}