This paper describes a system that enables collaboration in a hybrid team consisting of a robot, physically present humans, and remote humans, where the latter are connected via Virtual Reality. This setup spans the whole continuum between Physical and Virtual Reality, including Augmented Reality. The work presented herein describes how such a scattered, hybrid team can interact and cooperate in a virtual representation of a factory, using eye-, head-, hand- and gesture-tracking as multimodal control and communication input.
%0 Conference Paper
%1 MoniriEspinosaEtAl16vrst
%A Moniri, Mohammad Mehdi
%A Espinosa Valcarcel, Fabio Andres
%A Merkel, Dieter
%A Schuffert, Winfried
%A Schwartz, Tim
%B Proceedings of the 22nd ACM Conference on Virtual Reality Software and Technology (VRST '16), Munich, Germany
%C New York
%D 2016
%I ACM
%K 01801 acm paper dfki embedded ai mobile factory user interface multimodal team interaction robot 3d graphics zzz.mmi
%P 335--336
%R 10.1145/2993369.2996318
%T Hybrid Team Interaction in the Mixed Reality Continuum
%X This paper describes a system, which enables collaboration in a hybrid team consisting of a robot, physically present humans and remote humans, where the latter are connected via Virtual Reality. This setup spans the whole continuum between Physical and Virtual Reality, including Augmented Reality. The work presented herein, describes how such a scattered, hybrid team can interact and cooperate in a virtual representation of a factory, using eye-, head-, hand- and gesture-tracking as multimodal control and communication input.
%@ 978-1-4503-4491-3
@inproceedings{MoniriEspinosaEtAl16vrst,
  abstract  = {This paper describes a system, which enables collaboration in a hybrid team consisting of a robot, physically present humans and remote humans, where the latter are connected via Virtual Reality. This setup spans the whole continuum between Physical and Virtual Reality, including Augmented Reality. The work presented herein, describes how such a scattered, hybrid team can interact and cooperate in a virtual representation of a factory, using eye-, head-, hand- and gesture-tracking as multimodal control and communication input.},
  added-at  = {2018-03-12T10:44:03.000+0100},
  address   = {New York},
  author    = {Moniri, Mohammad Mehdi and Espinosa Valcarcel, Fabio Andres and Merkel, Dieter and Schuffert, Winfried and Schwartz, Tim},
  biburl    = {https://www.bibsonomy.org/bibtex/2a5e1588ae155aec379d75506a43a4155/flint63},
  booktitle = {Proceedings of the 22nd {ACM} Conference on Virtual Reality Software and Technology ({VRST} '16), Munich, Germany},
  doi       = {10.1145/2993369.2996318},
  file      = {ACM Digital Library:2016/MoniriEspinosaEtAl16vrst.pdf:PDF},
  groups    = {public},
  interhash = {8f6386f146cdbb353f0b626ebdf0eecc},
  intrahash = {a5e1588ae155aec379d75506a43a4155},
  isbn      = {978-1-4503-4491-3},
  keywords  = {01801 acm paper dfki embedded ai mobile factory user interface multimodal team interaction robot 3d graphics zzz.mmi},
  pages     = {335--336},
  publisher = {ACM},
  timestamp = {2018-04-16T12:07:01.000+0200},
  title     = {Hybrid Team Interaction in the {Mixed Reality} Continuum},
  username  = {flint63},
  year      = {2016},
}