Inference in probabilistic (or: Bayesian) networks is a problem with many applications in AI, for example in decision support systems. One way of solving it is using the method of conditioning, which requires a loop cutset of the network. Finding an optimal loop cutset for a given network is NP-hard, but building on work by Bodlaender we show it does have a kernel of cubic size. A kernelization is typically used as a first step in a 'fixed parameter tractable' algorithm, but can also be used as an ordinary preprocessing heuristic by choosing a safe value for the parameter. We have implemented this kernelization algorithm and experimentally demonstrate that it is both efficient and effective: it handles networks with thousands of vertices in a couple of seconds and, even when used as just a regular preprocessing heuristic, significantly speeds up the subsequent calculation of an optimal loop cutset.
%0 Journal Article
%1 vandijk2007kernelization
%A van Dijk, T. C.
%D 2007
%E de Jong, Edwin
%E Dastani, Mehdi
%K myown
%T Kernelization for Loop Cutset
%X Inference in probabilistic (or: Bayesian) networks is a problem with many applications in AI, for example in decision support systems. One way of solving it is using the method of conditioning, which requires a loop cutset of the network. Finding an optimal loop cutset for a given network is NP-hard, but building on work by Bodlaender we show it does have a kernel of cubic size. A kernelization is typically used as a first step in a 'fixed parameter tractable' algorithm, but can also be used as an ordinary preprocessing heuristic by choosing a safe value for the parameter. We have implemented this kernelization algorithm and experimentally demonstrate that it is both efficient and effective: it handles networks with thousands of vertices in a couple of seconds and, even when used as just a regular preprocessing heuristic, significantly speeds up the subsequent calculation of an optimal loop cutset.
@article{vandijk2007kernelization,
  abstract      = {Inference in probabilistic (or: Bayesian) networks is a problem with many applications in AI, for example in decision support systems. One way of solving it is using the method of conditioning, which requires a loop cutset of the network. Finding an optimal loop cutset for a given network is NP-hard, but building on work by Bodlaender we show it does have a kernel of cubic size. A kernelization is typically used as a first step in a 'fixed parameter tractable' algorithm, but can also be used as an ordinary preprocessing heuristic by choosing a safe value for the parameter. We have implemented this kernelization algorithm and experimentally demonstrate that it is both efficient and effective: it handles networks with thousands of vertices in a couple of seconds and, even when used as just a regular preprocessing heuristic, significantly speeds up the subsequent calculation of an optimal loop cutset.},
  added-at      = {2014-10-14T13:43:41.000+0200},
  author        = {van Dijk, T. C.},
  biburl        = {https://www.bibsonomy.org/bibtex/2ef406b0afd487e023f0ea905469c9ab4/thomasd},
  editor        = {de Jong, Edwin and Dastani, Mehdi},
  interhash     = {0c203b7852132aedce3ab559d6eda025},
  internal-note = {review: @article lacks the required journal field and carries an editor field -- presumably a proceedings/volume paper; verify the venue and consider @inproceedings with booktitle. Author given-name stored as initials; expand if the full name is known.},
  intrahash     = {ef406b0afd487e023f0ea905469c9ab4},
  keywords      = {myown},
  timestamp     = {2014-10-16T15:04:35.000+0200},
  title         = {Kernelization for Loop Cutset},
  year          = 2007
}