In this work, we study the sparse non-negative matrix factorization (Sparse
NMF or S-NMF) problem. NMF and S-NMF are popular machine learning tools which
decompose a given non-negative dataset into a dictionary and an activation
matrix, where both are constrained to be non-negative. We review how common
concave sparsity measures from the compressed sensing literature can be
extended to the S-NMF problem. Furthermore, we show that these sparsity
measures have a Bayesian interpretation and each one corresponds to a specific
prior on the activations. We present a comprehensive Sparse Bayesian Learning
(SBL) framework for modeling non-negative data and provide details for Type I
and Type II inference procedures. We show that efficient multiplicative update
rules can be employed to solve the S-NMF problem for the penalty functions
discussed and present experimental results validating our assertions.
%0 Generic
%1 fedorov2016unified
%A Fedorov, Igor
%A Nalci, Alican
%A Giri, Ritwik
%A Rao, Bhaskar D.
%A Nguyen, Truong Q.
%A Garudadri, H.
%D 2016
%K NNMF acreuser bayesian
%T A Unified Bayesian Framework for Sparse Non-negative Matrix
Factorization
%U http://arxiv.org/abs/1604.02181
%X In this work, we study the sparse non-negative matrix factorization (Sparse
NMF or S-NMF) problem. NMF and S-NMF are popular machine learning tools which
decompose a given non-negative dataset into a dictionary and an activation
matrix, where both are constrained to be non-negative. We review how common
concave sparsity measures from the compressed sensing literature can be
extended to the S-NMF problem. Furthermore, we show that these sparsity
measures have a Bayesian interpretation and each one corresponds to a specific
prior on the activations. We present a comprehensive Sparse Bayesian Learning
(SBL) framework for modeling non-negative data and provide details for Type I
and Type II inference procedures. We show that efficient multiplicative update
rules can be employed to solve the S-NMF problem for the penalty functions
discussed and present experimental results validating our assertions.
@misc{fedorov2016unified,
  abstract      = {In this work, we study the sparse non-negative matrix factorization (Sparse
NMF or S-NMF) problem. NMF and S-NMF are popular machine learning tools which
decompose a given non-negative dataset into a dictionary and an activation
matrix, where both are constrained to be non-negative. We review how common
concave sparsity measures from the compressed sensing literature can be
extended to the S-NMF problem. Furthermore, we show that these sparsity
measures have a Bayesian interpretation and each one corresponds to a specific
prior on the activations. We present a comprehensive Sparse Bayesian Learning
(SBL) framework for modeling non-negative data and provide details for Type I
and Type II inference procedures. We show that efficient multiplicative update
rules can be employed to solve the S-NMF problem for the penalty functions
discussed and present experimental results validating our assertions.},
  added-at      = {2016-04-11T06:33:50.000+0200},
  author        = {Fedorov, Igor and Nalci, Alican and Giri, Ritwik and Rao, Bhaskar D. and Nguyen, Truong Q. and Garudadri, H.},
  biburl        = {https://www.bibsonomy.org/bibtex/2a301fa1cfd9c2f0d8c2b757f2dc48bf8/pixor},
  description   = {1604.02181v1.pdf},
  eprint        = {1604.02181},
  archiveprefix = {arXiv},
  interhash     = {8e0c112c7d1fb420c7f9975463da3381},
  intrahash     = {a301fa1cfd9c2f0d8c2b757f2dc48bf8},
  keywords      = {NNMF acreuser bayesian},
  note          = {Submitted to IEEE Transactions on Signal Processing},
  timestamp     = {2016-04-11T06:33:50.000+0200},
  title         = {A Unified {Bayesian} Framework for Sparse Non-negative Matrix Factorization},
  url           = {http://arxiv.org/abs/1604.02181},
  year          = {2016},
}