Inference in matrix-variate Gaussian models has major applications for multi-output
prediction and joint learning of row and column covariances from matrix-variate
data. Here, we discuss an approach for efficient inference in such models
that explicitly account for iid observation noise. Computational tractability can be
retained by exploiting the Kronecker product between row and column covariance
matrices. Using this framework, we show how to generalize the Graphical Lasso
in order to learn a sparse inverse covariance between features while accounting for
a low-rank confounding covariance between samples. We show practical utility on
applications to biology, where we model covariances with more than 100,000
dimensions. We find greater accuracy in recovering biological network structures
and are able to better reconstruct the confounders.
%0 Conference Paper
%1 stegle2011efficient
%A Stegle, Oliver
%A Lippert, Christoph
%A Mooij, Joris M
%A Lawrence, Neil D
%A Borgwardt, Karsten M
%B Advances in neural information processing systems
%D 2011
%K GWAS Gaussian_processes Kronecker_product covariance_matrix linear_algebra methods statistics
%P 630--638
%T Efficient inference in matrix-variate Gaussian models with IID observation noise
%U http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2011_0443.pdf
%X Inference in matrix-variate Gaussian models has major applications for multi-output
prediction and joint learning of row and column covariances from matrix-variate
data. Here, we discuss an approach for efficient inference in such models
that explicitly account for iid observation noise. Computational tractability can be
retained by exploiting the Kronecker product between row and column covariance
matrices. Using this framework, we show how to generalize the Graphical Lasso
in order to learn a sparse inverse covariance between features while accounting for
a low-rank confounding covariance between samples. We show practical utility on
applications to biology, where we model covariances with more than 100,000
dimensions. We find greater accuracy in recovering biological network structures
and are able to better reconstruct the confounders.
@inproceedings{stegle2011efficient,
  abstract  = {Inference in matrix-variate Gaussian models has major applications for multi-output prediction and joint learning of row and column covariances from matrix-variate data. Here, we discuss an approach for efficient inference in such models that explicitly account for iid observation noise. Computational tractability can be retained by exploiting the Kronecker product between row and column covariance matrices. Using this framework, we show how to generalize the Graphical Lasso in order to learn a sparse inverse covariance between features while accounting for a low-rank confounding covariance between samples. We show practical utility on applications to biology, where we model covariances with more than 100,000 dimensions. We find greater accuracy in recovering biological network structures and are able to better reconstruct the confounders.},
  added-at  = {2016-04-20T01:06:29.000+0200},
  author    = {Stegle, Oliver and Lippert, Christoph and Mooij, Joris M. and Lawrence, Neil D. and Borgwardt, Karsten M.},
  biburl    = {https://www.bibsonomy.org/bibtex/2eaff4bbeb2730074e3117c3a3aad9bd2/peter.ralph},
  booktitle = {Advances in Neural Information Processing Systems},
  interhash = {ecd6be5f7baf9fd37513afe4d42b4ca9},
  intrahash = {eaff4bbeb2730074e3117c3a3aad9bd2},
  keywords  = {GWAS Gaussian_processes Kronecker_product covariance_matrix linear_algebra methods statistics},
  pages     = {630--638},
  timestamp = {2016-04-20T07:37:55.000+0200},
  title     = {Efficient inference in matrix-variate {Gaussian} models with {IID} observation noise},
  url       = {http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2011_0443.pdf},
  year      = {2011}
}