On-line learning in domains where the target concept depends on some
hidden context poses serious problems. A changing context can induce
changes in the target concepts, producing what is known as concept
drift. We describe a family of learning algorithms that flexibly
react to concept drift and can take advantage of situations where
contexts reappear. The general approach underlying all these algorithms
consists of (1) keeping only a window of currently trusted examples
and hypotheses; (2) storing concept descriptions and reusing them
when a previous context re-appears; and (3) controlling both of these
functions by a heuristic that constantly monitors the system's behavior.
The paper reports on experiments that test the systems' performance
under various conditions such as different levels of noise and different
extent and rate of concept drift.
%0 Journal Article
%1 Widmer:1996
%A Widmer, Gerhard
%A Kubat, Miroslav
%D 1996
%J Machine Learning
%K - Incremental concept context dependence drift forgetting learning on-line
%P 69-101
%T Learning in the presence of concept drift and hidden contexts
%V 23
%X On-line learning in domains where the target concept depends on some
hidden context poses serious problems. A changing context can induce
changes in the target concepts, producing what is known as concept
drift. We describe a family of learning algorithms that flexibly
react to concept drift and can take advantage of situations where
contexts reappear. The general approach underlying all these algorithms
consists of (1) keeping only a window of currently trusted examples
and hypotheses; (2) storing concept descriptions and reusing them
when a previous context re-appears; and (3) controlling both of these
functions by a heuristic that constantly monitors the system's behavior.
The paper reports on experiments that test the systems' performance
under various conditions such as different levels of noise and different
extent and rate of concept drift.
@article{Widmer:1996,
  abstract    = {On-line learning in domains where the target concept depends on some
hidden context poses serious problems. A changing context can induce
changes in the target concepts, producing what is known as concept
drift. We describe a family of learning algorithms that flexibly
react to concept drift and can take advantage of situations where
contexts reappear. The general approach underlying all these algorithms
consists of (1) keeping only a window of currently trusted examples
and hypotheses; (2) storing concept descriptions and reusing them
when a previous context re-appears; and (3) controlling both of these
functions by a heuristic that constantly monitors the system's behavior.
The paper reports on experiments that test the systems' performance
under various conditions such as different levels of noise and different
extent and rate of concept drift.},
  added-at    = {2009-06-26T15:25:19.000+0200},
  author      = {Widmer, Gerhard and Kubat, Miroslav},
  biburl      = {https://www.bibsonomy.org/bibtex/2d6446ff97c8e18931273eab9f1e7324c/butz},
  description = {diverse cognitive systems bib},
  interhash   = {4f514ea3af5257af987c0f9618fdf0a4},
  intrahash   = {d6446ff97c8e18931273eab9f1e7324c},
  journal     = {Machine Learning},
  keywords    = {Incremental concept context dependence drift forgetting learning on-line},
  owner       = {butz},
  pages       = {69--101},
  timestamp   = {2009-06-26T15:26:01.000+0200},
  title       = {Learning in the presence of concept drift and hidden contexts},
  volume      = 23,
  year        = 1996
}