This paper presents a driver simulator, which takes into account the information about the user's state of mind (level of attention, fatigue state, stress state). The user's state of mind analysis is based on video data and biological signals. Facial movements such as eyes blinking, yawning, head rotations, etc., are detected on video data: they are used in order to evaluate the fatigue and the attention level of the driver. The user's electrocardiogram and galvanic skin response are recorded and analyzed in order to evaluate the stress level of the driver. A driver simulator software is modified so that the system is able to appropriately react to these critical situations of fatigue and stress: some audio and visual messages are sent to the driver, wheel vibrations are generated and the driver is supposed to react to the alert messages. A multi-threaded system is proposed to support multi-messages sent by the different modalities. Strategies for data fusion and fission are also provided. Some of these components are integrated within the first prototype of OpenInterface: the multimodal similar platform.
%0 Journal Article
%1 BenoitBonnaudEtAl09puc
%A Benoit, Alexandre
%A Bonnaud, Laurent
%A Caplier, Alice
%A Ngo, Phillipe
%A Lawson, Lionel
%A Trevisan, Daniela G.
%A Levacic, Vjekoslav
%A Mancas, Céline
%A Chanel, Guillaume
%D 2009
%J Personal and Ubiquitous Computing
%K v1205 springer paper embedded ai adaptive multimodal user interface video sensor signal processing emotion recognition automotive
%N 1
%P 33-41
%R 10.1007/s00779-007-0173-0
%T Multimodal Focus Attention and Stress Detection and Feedback in an Augmented Driver Simulator
%V 13
%X This paper presents a driver simulator, which takes into account the information about the user's state of mind (level of attention, fatigue state, stress state). The user's state of mind analysis is based on video data and biological signals. Facial movements such as eyes blinking, yawning, head rotations, etc., are detected on video data: they are used in order to evaluate the fatigue and the attention level of the driver. The user's electrocardiogram and galvanic skin response are recorded and analyzed in order to evaluate the stress level of the driver. A driver simulator software is modified so that the system is able to appropriately react to these critical situations of fatigue and stress: some audio and visual messages are sent to the driver, wheel vibrations are generated and the driver is supposed to react to the alert messages. A multi-threaded system is proposed to support multi-messages sent by the different modalities. Strategies for data fusion and fission are also provided. Some of these components are integrated within the first prototype of OpenInterface: the multimodal similar platform.
@article{BenoitBonnaudEtAl09puc,
  abstract  = {This paper presents a driver simulator, which takes into account the information about the user's state of mind (level of attention, fatigue state, stress state). The user's state of mind analysis is based on video data and biological signals. Facial movements such as eyes blinking, yawning, head rotations, etc., are detected on video data: they are used in order to evaluate the fatigue and the attention level of the driver. The user's electrocardiogram and galvanic skin response are recorded and analyzed in order to evaluate the stress level of the driver. A driver simulator software is modified so that the system is able to appropriately react to these critical situations of fatigue and stress: some audio and visual messages are sent to the driver, wheel vibrations are generated and the driver is supposed to react to the alert messages. A multi-threaded system is proposed to support multi-messages sent by the different modalities. Strategies for data fusion and fission are also provided. Some of these components are integrated within the first prototype of OpenInterface: the multimodal similar platform.},
  added-at  = {2012-05-30T10:42:56.000+0200},
  author    = {Benoit, Alexandre and Bonnaud, Laurent and Caplier, Alice and Ngo, Phillipe and Lawson, Lionel and Trevisan, Daniela G. and Levacic, Vjekoslav and Mancas, C{\'e}line and Chanel, Guillaume},
  biburl    = {https://www.bibsonomy.org/bibtex/20e207479265ada1393dd8a4129fa5906/flint63},
  doi       = {10.1007/s00779-007-0173-0},
  file      = {SpringerLink:2009/BenoitBonnaudEtAl09puc.pdf:PDF},
  groups    = {public},
  interhash = {e449f0785d0fc3214a4ad667bdbeef02},
  intrahash = {0e207479265ada1393dd8a4129fa5906},
  issn      = {1617-4909},
  journal   = {Personal and Ubiquitous Computing},
  keywords  = {v1205 springer paper embedded ai adaptive multimodal user interface video sensor signal processing emotion recognition automotive},
  number    = 1,
  pages     = {33--41},
  timestamp = {2018-04-16T12:36:48.000+0200},
  title     = {Multimodal Focus Attention and Stress Detection and Feedback in an Augmented Driver Simulator},
  username  = {flint63},
  volume    = 13,
  year      = 2009
}