Identification of events from visual cues is in general an arduous task because of complex motion, cluttered backgrounds, occlusions, and geometric and photometric variations of the physical objects. This is even more challenging in case of detection of a logical chain of events, i.e., of a sequence of events called a workflow, and in case of the presence of multiple workflows of events in the environment, able to interact one with the other and affect one the outcome of the other. The recent research advances in computer vision and pattern recognition society have stimulated the development of a series of innovative algorithms, tools and methods for salient object detection and tracking in still images/video streams. These techniques are framed with appropriate descriptors (usually with invariance properties) such as the Scale-Invariant Feature Transform (SIFT) or the Speeded Up Robust Features (SURF), or the MPEG-7 visual descriptors. All these research methods can be considered as initial steps towards the ultimate goal for behavior/event understanding. However, automatic comprehension of someone’s behavior within a scene or even automatic supervision of workflows (e.g., industrial processes) is a complex research field of great attention but with limited results so far.
%0 Journal Article
%1 x.03_14mtaa
%A Doulamis, Anastasios
%A Doulamis, Nikolaos
%A van Gool, Luc
%A Nixon, Mark
%D 2014
%J Multimedia Tools and Applications
%K v1205 springer paper ai video image analysis action recognition zzz.vitra
%N 2
%P 247-251
%R 10.1007/s11042-013-1726-z
%T Guest Editorial: Event-based Video Analysis/Retrieval
%U http://link.springer.com/journal/11042/69/2/
%V 69
%X Identification of events from visual cues is in general an arduous task because of complex motion, cluttered backgrounds, occlusions, and geometric and photometric variations of the physical objects. This is even more challenging in case of detection of a logical chain of events, i.e., of a sequence of events called a workflow, and in case of the presence of multiple workflows of events in the environment, able to interact one with the other and affect one the outcome of the other. The recent research advances in computer vision and pattern recognition society have stimulated the development of a series of innovative algorithms, tools and methods for salient object detection and tracking in still images/video streams. These techniques are framed with appropriate descriptors (usually with invariance properties) such as the Scale-Invariant Feature Transform (SIFT) or the Speeded Up Robust Features (SURF), or the MPEG-7 visual descriptors. All these research methods can be considered as initial steps towards the ultimate goal for behavior/event understanding. However, automatic comprehension of someone’s behavior within a scene or even automatic supervision of workflows (e.g., industrial processes) is a complex research field of great attention but with limited results so far.
@article{x.03_14mtaa,
  abstract  = {Identification of events from visual cues is in general an arduous task because of complex motion, cluttered backgrounds, occlusions, and geometric and photometric variations of the physical objects. This is even more challenging in case of detection of a logical chain of events, i.e., of a sequence of events called a workflow, and in case of the presence of multiple workflows of events in the environment, able to interact one with the other and affect one the outcome of the other. The recent research advances in computer vision and pattern recognition society have stimulated the development of a series of innovative algorithms, tools and methods for salient object detection and tracking in still images/video streams. These techniques are framed with appropriate descriptors (usually with invariance properties) such as the Scale-Invariant Feature Transform (SIFT) or the Speeded Up Robust Features (SURF), or the MPEG-7 visual descriptors. All these research methods can be considered as initial steps towards the ultimate goal for behavior/event understanding. However, automatic comprehension of someone's behavior within a scene or even automatic supervision of workflows (e.g., industrial processes) is a complex research field of great attention but with limited results so far.},
  added-at  = {2014-03-29T10:50:41.000+0100},
  author    = {Doulamis, Anastasios and Doulamis, Nikolaos and van Gool, Luc and Nixon, Mark},
  biburl    = {https://www.bibsonomy.org/bibtex/2e9197c78986d97932566aabfc1708a7c/flint63},
  doi       = {10.1007/s11042-013-1726-z},
  groups    = {public},
  interhash = {2fa6e49d22c106cee44ca31a998d6d53},
  intrahash = {e9197c78986d97932566aabfc1708a7c},
  issn      = {1380-7501},
  journal   = {Multimedia Tools and Applications},
  keywords  = {v1205 springer paper ai video image analysis action recognition zzz.vitra},
  month     = mar,
  number    = 2,
  pages     = {247--251},
  timestamp = {2014-03-29T10:50:41.000+0100},
  title     = {Guest Editorial: Event-based Video Analysis/Retrieval},
  url       = {http://link.springer.com/journal/11042/69/2/},
  username  = {flint63},
  volume    = 69,
  year      = 2014
}