Mobile interfaces need to allow the user and system to adapt their choice of communication modes according to user preferences, the task at hand, and the physical and social environment. We describe a multimodal application architecture which combines finite-state multimodal language processing, a speech-act based multimodal dialogue manager, dynamic multimodal output generation, and user-tailored text planning to enable rapid prototyping of multimodal interfaces with flexible input and adaptive output. Our testbed application MATCH (Multimodal Access To City Help) provides a mobile multimodal speech-pen interface to restaurant and subway information for New York City.
%0 Conference Paper
%1 JohnstonBangaloreEtAl02ACL
%A Johnston, Michael
%A Bangalore, Srinivas
%A Vasireddy, Gunaranjan
%A Stent, Amanda
%A Ehlen, Patrick
%A Walker, Marilyn
%A Whittaker, Steve
%A Maloor, Preetam
%B Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, Philadelphia, PA, USA
%D 2002
%K v1205 acl paper ai multimodal dialog user interface interaction architecture zzz.th.c4
%P 376-383
%R 10.3115/1073083.1073146
%T MATCH: An Architecture for Multimodal Dialogue Systems
%U http://www.aclweb.org/anthology/P02-1048
%X Mobile interfaces need to allow the user and system to adapt their choice of communication modes according to user preferences, the task at hand, and the physical and social environment. We describe a multimodal application architecture which combines finite-state multimodal language processing, a speech-act based multimodal dialogue manager, dynamic multimodal output generation, and user-tailored text planning to enable rapid prototyping of multimodal interfaces with flexible input and adaptive output. Our testbed application MATCH (Multimodal Access To City Help) provides a mobile multimodal speech-pen interface to restaurant and subway information for New York City.
@inproceedings{JohnstonBangaloreEtAl02ACL,
  abstract      = {Mobile interfaces need to allow the user and system to adapt their choice of communication modes according to user preferences, the task at hand, and the physical and social environment. We describe a multimodal application architecture which combines finite-state multimodal language processing, a speech-act based multimodal dialogue manager, dynamic multimodal output generation, and user-tailored text planning to enable rapid prototyping of multimodal interfaces with flexible input and adaptive output. Our testbed application MATCH (Multimodal Access To City Help) provides a mobile multimodal speech-pen interface to restaurant and subway information for New York City.},
  added-at      = {2012-05-30T10:48:40.000+0200},
  author        = {Johnston, Michael and Bangalore, Srinivas and Vasireddy, Gunaranjan and Stent, Amanda and Ehlen, Patrick and Walker, Marilyn and Whittaker, Steve and Maloor, Preetam},
  biburl        = {https://www.bibsonomy.org/bibtex/2e69506991eae06784d30d90f08060d4e/flint63},
  booktitle     = {Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, Philadelphia, PA, USA},
  doi           = {10.3115/1073083.1073146},
  file          = {ACL Anthology:2000-04/JohnstonBangaloreEtAl02ACL.pdf:PDF},
  groups        = {public},
  interhash     = {a2fd745e71f4cec3e6b9424c95938b79},
  intrahash     = {e69506991eae06784d30d90f08060d4e},
  keywords      = {v1205 acl paper ai multimodal dialog user interface interaction architecture zzz.th.c4},
  pages         = {376--383},
  timestamp     = {2018-04-16T11:48:44.000+0200},
  title         = {{MATCH}: An Architecture for Multimodal Dialogue Systems},
  url           = {http://www.aclweb.org/anthology/P02-1048},
  username      = {flint63},
  year          = {2002}
}