Averaged n-Dependence Estimators (AnDE) is an approach to probabilistic classification learning that learns by extrapolation from marginal
to full-multivariate probability distributions. It utilizes a single parameter that transforms the approach between a low-variance high-bias learner
(Naive Bayes) and a high-variance low-bias learner with Bayes optimal
asymptotic error. It extends the underlying strategy of Averaged One-Dependence Estimators (AODE), which relaxes the Naive Bayes independence assumption while retaining many of Naive Bayes' desirable computational and theoretical properties. AnDE further relaxes the independence assumption by generalizing AODE to higher levels of dependence.
Extensive experimental evaluation shows that the bias-variance trade-off
for Averaged 2-Dependence Estimators results in strong predictive accuracy over a wide range of data sets. It has training time linear with
respect to the number of examples, supports incremental learning, directly handles missing values, and is robust in the face of noise. Beyond
the practical utility of its lower-dimensional variants, AnDE is of interest
in that it demonstrates that it is possible to create low-bias high-variance
generative learners and suggests strategies for developing even more powerful classifiers.
%0 Journal Article
%1 WebbEtAl12
%A Webb, G.I.
%A Boughton, J.
%A Zheng, F.
%A Ting, K.M.
%A Salem, H.
%C Netherlands
%D 2012
%I Springer
%J Machine Learning
%K Conditional Estimation, AODE Probability
%N 2
%P 233-272
%T Learning by extrapolation from marginal to full-multivariate probability distributions: Decreasingly naive Bayesian classification
%U http://dx.doi.org/10.1007/s10994-011-5263-6
%V 86
%X Averaged n-Dependence Estimators (AnDE) is an approach to probabilistic classification learning that learns by extrapolation from marginal
to full-multivariate probability distributions. It utilizes a single parameter that transforms the approach between a low-variance high-bias learner
(Naive Bayes) and a high-variance low-bias learner with Bayes optimal
asymptotic error. It extends the underlying strategy of Averaged One-Dependence Estimators (AODE), which relaxes the Naive Bayes independence assumption while retaining many of Naive Bayes' desirable computational and theoretical properties. AnDE further relaxes the independence assumption by generalizing AODE to higher levels of dependence.
Extensive experimental evaluation shows that the bias-variance trade-off
for Averaged 2-Dependence Estimators results in strong predictive accuracy over a wide range of data sets. It has training time linear with
respect to the number of examples, supports incremental learning, directly handles missing values, and is robust in the face of noise. Beyond
the practical utility of its lower-dimensional variants, AnDE is of interest
in that it demonstrates that it is possible to create low-bias high-variance
generative learners and suggests strategies for developing even more powerful classifiers.
@article{WebbEtAl12,
  abstract   = {Averaged n-Dependence Estimators (AnDE) is an approach to probabilistic classification learning that learns by extrapolation from marginal
to full-multivariate probability distributions. It utilizes a single parameter that transforms the approach between a low-variance high-bias learner
(Naive Bayes) and a high-variance low-bias learner with Bayes optimal
asymptotic error. It extends the underlying strategy of Averaged One-Dependence Estimators (AODE), which relaxes the Naive Bayes independence assumption while retaining many of Naive Bayes' desirable computational and theoretical properties. AnDE further relaxes the independence assumption by generalizing AODE to higher levels of dependence.
Extensive experimental evaluation shows that the bias-variance trade-off
for Averaged 2-Dependence Estimators results in strong predictive accuracy over a wide range of data sets. It has training time linear with
respect to the number of examples, supports incremental learning, directly handles missing values, and is robust in the face of noise. Beyond
the practical utility of its lower-dimensional variants, AnDE is of interest
in that it demonstrates that it is possible to create low-bias high-variance
generative learners and suggests strategies for developing even more powerful classifiers.},
  added-at   = {2016-03-20T05:42:04.000+0100},
  address    = {Netherlands},
  author     = {Webb, G. I. and Boughton, J. and Zheng, F. and Ting, K. M. and Salem, H.},
  biburl     = {https://www.bibsonomy.org/bibtex/2551ec377c12b96098b5f67a0f0f3c761/giwebb},
  doi        = {10.1007/s10994-011-5263-6},
  interhash  = {92da3662a6933a45b0b3cbda6c62c349},
  intrahash  = {551ec377c12b96098b5f67a0f0f3c761},
  issn       = {0885-6125},
  journal    = {Machine Learning},
  keywords   = {Conditional Estimation, AODE Probability},
  number     = {2},
  pages      = {233--272},
  publisher  = {Springer},
  timestamp  = {2016-03-20T05:42:04.000+0100},
  title      = {Learning by extrapolation from marginal to full-multivariate probability distributions: Decreasingly naive {Bayesian} classification},
  url        = {https://doi.org/10.1007/s10994-011-5263-6},
  urltext    = {Link to paper via SpringerLink},
  volume     = {86},
  year       = {2012}
}