We propose to bring our novel rich media interface called MediaDiver demonstrating our new interaction techniques for viewing and annotating multiple view video. The demonstration allows attendees to experience novel moving target selection methods (called Hold and Chase), new multi-view selection techniques, automated quality of view analysis to switch viewpoints to follow targets, integrated annotation methods for viewing or authoring meta-content and advanced context sensitive transport and timeline functions. As users have become increasingly sophisticated when managing navigation and viewing of hyper-documents, they transfer their expectations to new media. Our proposal is a demonstration of the technology required to meet these expectations for video. Thus users will be able to directly click on objects in the video to link to more information or other video, easily change camera views and mark-up the video with their own content. The applications of this technology stretch from home video management to broadcast quality media production, which may be consumed on both desktop and mobile platforms.
%0 Conference Paper
%1 Miller:2011:MVA:1979742.1979711
%A Miller, Gregor
%A Fels, Sidney
%A Al Hajri, Abir
%A Ilich, Michael
%A Foley-Fisher, Zoltan
%A Fernandez, Manuel
%A Jang, Daesik
%B CHI '11 Extended Abstracts on Human Factors in Computing Systems
%C New York, NY, USA
%D 2011
%I ACM
%K MediaDiver, annotate, video, view
%P 1141--1146
%R 10.1145/1979742.1979711
%T MediaDiver: Viewing and Annotating Multi-view Video
%U http://doi.acm.org/10.1145/1979742.1979711
%X We propose to bring our novel rich media interface called MediaDiver demonstrating our new interaction techniques for viewing and annotating multiple view video. The demonstration allows attendees to experience novel moving target selection methods (called Hold and Chase), new multi-view selection techniques, automated quality of view analysis to switch viewpoints to follow targets, integrated annotation methods for viewing or authoring meta-content and advanced context sensitive transport and timeline functions. As users have become increasingly sophisticated when managing navigation and viewing of hyper-documents, they transfer their expectations to new media. Our proposal is a demonstration of the technology required to meet these expectations for video. Thus users will be able to directly click on objects in the video to link to more information or other video, easily change camera views and mark-up the video with their own content. The applications of this technology stretch from home video management to broadcast quality media production, which may be consumed on both desktop and mobile platforms.
%@ 978-1-4503-0268-5
@inproceedings{Miller:2011:MVA:1979742.1979711,
  abstract   = {We propose to bring our novel rich media interface called MediaDiver demonstrating our new interaction techniques for viewing and annotating multiple view video. The demonstration allows attendees to experience novel moving target selection methods (called Hold and Chase), new multi-view selection techniques, automated quality of view analysis to switch viewpoints to follow targets, integrated annotation methods for viewing or authoring meta-content and advanced context sensitive transport and timeline functions. As users have become increasingly sophisticated when managing navigation and viewing of hyper-documents, they transfer their expectations to new media. Our proposal is a demonstration of the technology required to meet these expectations for video. Thus users will be able to directly click on objects in the video to link to more information or other video, easily change camera views and mark-up the video with their own content. The applications of this technology stretch from home video management to broadcast quality media production, which may be consumed on both desktop and mobile platforms.},
  acmid      = {1979711},
  added-at   = {2013-12-07T22:10:23.000+0100},
  address    = {New York, NY, USA},
  author     = {Miller, Gregor and Fels, Sidney and Al Hajri, Abir and Ilich, Michael and Foley-Fisher, Zoltan and Fernandez, Manuel and Jang, Daesik},
  biburl     = {https://www.bibsonomy.org/bibtex/2c11831c423955f1e9ebb7081b52a87be/cathytomato},
  booktitle  = {{CHI} '11 Extended Abstracts on Human Factors in Computing Systems},
  doi        = {10.1145/1979742.1979711},
  interhash  = {bcb1fd5b9ee0e1d83086441512869fad},
  intrahash  = {c11831c423955f1e9ebb7081b52a87be},
  isbn       = {978-1-4503-0268-5},
  keywords   = {MediaDiver, annotate, video, view},
  location   = {Vancouver, BC, Canada},
  numpages   = {6},
  pages      = {1141--1146},
  publisher  = {ACM},
  series     = {{CHI EA} '11},
  timestamp  = {2013-12-07T22:10:23.000+0100},
  title      = {{MediaDiver}: Viewing and Annotating Multi-view Video},
  url        = {https://doi.org/10.1145/1979742.1979711},
  year       = {2011},
}