M. Johnston. Proceedings of the 11th International Conference on Multimodal Interfaces and the 6th Workshop on Machine Learning for Multimodal Interfaces (ICMI-MLMI '09), Cambridge, MA, USA, pages 47-54. (2009)
DOI: 10.1145/1647314.1647325
Abstract
Multimodal interfaces combining natural modalities such as speech and touch with dynamic graphical user interfaces can make it easier and more effective for users to interact with applications and services on mobile devices. However, building these interfaces remains a complex and highly specialized task. The W3C EMMA standard provides a representation language for inputs to multimodal systems, facilitating plug-and-play of system components and rapid prototyping of interactive multimodal systems. We illustrate the capabilities of the EMMA standard through examination of its use in a series of mobile multimodal applications for the iPhone.
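For readers unfamiliar with the standard, the sketch below gives a rough idea of the kind of markup the abstract refers to. It is not taken from the paper: the local-search payload (query/category/location), the token strings, and the confidence scores are made-up illustrations, while the element and attribute names (emma:emma, emma:one-of, emma:interpretation, emma:confidence, emma:tokens, emma:medium, emma:mode) come from the W3C EMMA 1.0 specification. The Python snippet parses such a document and picks the highest-confidence interpretation, roughly what a multimodal client might do with a recognizer result.

# Illustrative sketch (not from the paper): handling a minimal EMMA 1.0
# document of the kind a speech recognizer might emit for a mobile search app.
import xml.etree.ElementTree as ET

EMMA_NS = "http://www.w3.org/2003/04/emma"  # W3C EMMA 1.0 namespace

# Hypothetical recognizer output: two competing interpretations of one
# spoken utterance, each carrying a confidence score and the recognized tokens.
emma_doc = f"""
<emma:emma version="1.0" xmlns:emma="{EMMA_NS}">
  <emma:one-of id="r1" emma:medium="acoustic" emma:mode="voice">
    <emma:interpretation id="int1" emma:confidence="0.82"
                         emma:tokens="pizza near cambridge">
      <query><category>pizza</category><location>cambridge</location></query>
    </emma:interpretation>
    <emma:interpretation id="int2" emma:confidence="0.41"
                         emma:tokens="pizza near camden">
      <query><category>pizza</category><location>camden</location></query>
    </emma:interpretation>
  </emma:one-of>
</emma:emma>
"""

root = ET.fromstring(emma_doc)
ns = {"emma": EMMA_NS}
# Select the interpretation with the highest emma:confidence value.
best = max(root.iterfind(".//emma:interpretation", ns),
           key=lambda e: float(e.get(f"{{{EMMA_NS}}}confidence")))
print(best.get("id"), best.get(f"{{{EMMA_NS}}}tokens"))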
%0 Conference Paper
%1 johnston2009building
%A Johnston, Michael
%B Proceedings of the 11th International Conference on Multimodal Interfaces and the 6th Workshop on Machine Learning for Multimodal Interfaces (ICMI-MLMI '09), Cambridge, MA, USA
%D 2009
%K dialog interaction mobile multimodal user
%P 47-54
%R 10.1145/1647314.1647325
%T Building Multimodal Applications with EMMA
%X Multimodal interfaces combining natural modalities such as speech and touch with dynamic graphical user interfaces can make it easier and more effective for users to interact with applications and services on mobile devices. However, building these interfaces remains a complex and highly specialized task. The W3C EMMA standard provides a representation language for inputs to multimodal systems, facilitating plug-and-play of system components and rapid prototyping of interactive multimodal systems. We illustrate the capabilities of the EMMA standard through examination of its use in a series of mobile multimodal applications for the iPhone.
@inproceedings{johnston2009building,
abstract = {Multimodal interfaces combining natural modalities such as speech and touch with dynamic graphical user interfaces can make it easier and more effective for users to interact with applications and services on mobile devices. However, building these interfaces remains a complex and highly specialized task. The W3C EMMA standard provides a representation language for inputs to multimodal systems, facilitating plug-and-play of system components and rapid prototyping of interactive multimodal systems. We illustrate the capabilities of the EMMA standard through examination of its use in a series of mobile multimodal applications for the iPhone.},
author = {Johnston, Michael},
booktitle = {Proceedings of the 11th International Conference on Multimodal Interfaces and the 6th Workshop on Machine Learning for Multimodal Interfaces (ICMI-MLMI '09), Cambridge, MA, USA},
doi = {10.1145/1647314.1647325},
keywords = {dialog interaction mobile multimodal user},
pages = {47--54},
title = {Building Multimodal Applications with {EMMA}},
year = 2009
}