Hierarchical linear and generalized linear models can be fit using Gibbs samplers and Metropolis algorithms; these models, however, often have many parameters, and convergence of the seemingly most natural Gibbs and Metropolis algorithms can sometimes be slow. We examine solutions that involve reparameterization and over-parameterization. We begin with parameter expansion using working parameters, a strategy developed for the EM algorithm. This strategy can lead to algorithms that are much less susceptible to becoming stuck near zero values of the variance parameters than are more standard algorithms. Second, we consider a simple rotation of the regression coefficients based on an estimate of their posterior covariance matrix. This leads to a Gibbs algorithm based on updating the transformed parameters one at a time or a Metropolis algorithm with vector jumps; either of these algorithms can perform much better (in terms of total CPU time) than the two standard algorithms: one-at-a-time updating of untransformed parameters or vector updating using a linear regression at each step. We present an innovative evaluation of the algorithms in terms of how quickly they can get away from remote areas of parameter space, along with some more standard evaluation of computation and convergence speeds. We illustrate our methods with examples from our applied work. Our ultimate goal is to develop a fast and reliable method for fitting a hierarchical linear model as easily as one can now fit a nonhierarchical model, and to increase understanding of Gibbs samplers for hierarchical models in general.
%0 Journal Article
%1 gelman_using_2008
%A Gelman, Andrew
%A van Dyk, David A.
%A Huang, Zaiying
%A Boscardin, John W.
%D 2008
%J Journal of Computational and Graphical Statistics
%K \_tablet
%N 1
%P 95--122
%R 10.1198/106186008X287337
%T Using redundant parameterizations to fit hierarchical models
%U https://doi.org/10.1198/106186008X287337
%V 17
%X Hierarchical linear and generalized linear models can be fit using Gibbs samplers and Metropolis algorithms; these models, however, often have many parameters, and convergence of the seemingly most natural Gibbs and Metropolis algorithms can sometimes be slow. We examine solutions that involve reparameterization and over-parameterization. We begin with parameter expansion using working parameters, a strategy developed for the EM algorithm. This strategy can lead to algorithms that are much less susceptible to becoming stuck near zero values of the variance parameters than are more standard algorithms. Second, we consider a simple rotation of the regression coefficients based on an estimate of their posterior covariance matrix. This leads to a Gibbs algorithm based on updating the transformed parameters one at a time or a Metropolis algorithm with vector jumps; either of these algorithms can perform much better (in terms of total CPU time) than the two standard algorithms: one-at-a-time updating of untransformed parameters or vector updating using a linear regression at each step. We present an innovative evaluation of the algorithms in terms of how quickly they can get away from remote areas of parameter space, along with some more standard evaluation of computation and convergence speeds. We illustrate our methods with examples from our applied work. Our ultimate goal is to develop a fast and reliable method for fitting a hierarchical linear model as easily as one can now fit a nonhierarchical model, and to increase understanding of Gibbs samplers for hierarchical models in general.
% Review pass: normalised author initials, Title Case title, canonical doi.org
% resolver, consistent braced values, aligned fields, conventional field order.
% BibSonomy bookkeeping fields (added-at, biburl, inter/intrahash, timestamp)
% retained — they are ignored by BibTeX styles but carry provenance.
@article{gelman_using_2008,
  author    = {Gelman, Andrew and van Dyk, David A. and Huang, Zaiying and Boscardin, John W.},
  title     = {Using Redundant Parameterizations to Fit Hierarchical Models},
  journal   = {Journal of Computational and Graphical Statistics},
  year      = {2008},
  volume    = {17},
  number    = {1},
  pages     = {95--122},
  issn      = {1061-8600},
  doi       = {10.1198/106186008X287337},
  url       = {https://doi.org/10.1198/106186008X287337},
  urldate   = {2014-08-03},
  abstract  = {Hierarchical linear and generalized linear models can be fit using Gibbs samplers and Metropolis algorithms; these models, however, often have many parameters, and convergence of the seemingly most natural Gibbs and Metropolis algorithms can sometimes be slow. We examine solutions that involve reparameterization and over-parameterization. We begin with parameter expansion using working parameters, a strategy developed for the EM algorithm. This strategy can lead to algorithms that are much less susceptible to becoming stuck near zero values of the variance parameters than are more standard algorithms. Second, we consider a simple rotation of the regression coefficients based on an estimate of their posterior covariance matrix. This leads to a Gibbs algorithm based on updating the transformed parameters one at a time or a Metropolis algorithm with vector jumps; either of these algorithms can perform much better (in terms of total CPU time) than the two standard algorithms: one-at-a-time updating of untransformed parameters or vector updating using a linear regression at each step. We present an innovative evaluation of the algorithms in terms of how quickly they can get away from remote areas of parameter space, along with some more standard evaluation of computation and convergence speeds. We illustrate our methods with examples from our applied work. Our ultimate goal is to develop a fast and reliable method for fitting a hierarchical linear model as easily as one can now fit a nonhierarchical model, and to increase understanding of Gibbs samplers for hierarchical models in general.},
  keywords  = {\_tablet},
  added-at  = {2017-01-09T13:57:26.000+0100},
  timestamp = {2017-01-09T14:01:11.000+0100},
  biburl    = {https://www.bibsonomy.org/bibtex/21f04d386b061d543bc839b1dedece9f8/yourwelcome},
  interhash = {f14de91fe3418861657a06cfdd61a465},
  intrahash = {1f04d386b061d543bc839b1dedece9f8},
}