Multimodal object recognition is still an emerging field. Thus, publicly
available datasets are still rare and of small size. This dataset was developed
to help fill this void and presents multimodal data for 63 objects with some
visual and haptic ambiguity. The dataset contains visual, kinesthetic and
tactile (audio/vibrations) data. To completely solve sensory ambiguity, sensory
integration/fusion would be required. This report describes the creation and
structure of the dataset. The first section explains the underlying approach
used to capture the visual and haptic properties of the objects. The second
section describes the technical aspects (experimental setup) needed for the
collection of the data. The third section introduces the objects, while the
final section describes the structure and content of the dataset.
%0 Generic
%1 bonner2021dataset
%A Bonner, Lasse Emil R.
%A Buhl, Daniel Daugaard
%A Kristensen, Kristian
%A Navarro-Guerrero, Nicolás
%D 2021
%K myown
%R 10.6084/m9.figshare.14222486
%T AU Dataset for Visuo-Haptic Object Recognition for Robots
%U http://arxiv.org/abs/2112.13761
%X Multimodal object recognition is still an emerging field. Thus, publicly
available datasets are still rare and of small size. This dataset was developed
to help fill this void and presents multimodal data for 63 objects with some
visual and haptic ambiguity. The dataset contains visual, kinesthetic and
tactile (audio/vibrations) data. To completely solve sensory ambiguity, sensory
integration/fusion would be required. This report describes the creation and
structure of the dataset. The first section explains the underlying approach
used to capture the visual and haptic properties of the objects. The second
section describes the technical aspects (experimental setup) needed for the
collection of the data. The third section introduces the objects, while the
final section describes the structure and content of the dataset.
@comment{arXiv preprint describing a visuo-haptic dataset; identifier moved from
  the free-text note field into proper eprint/archiveprefix fields, the AU
  acronym brace-protected against style recasing, and the accented author name
  written as a BibTeX special character for correct sorting under classic BibTeX.}
@misc{bonner2021dataset,
  abstract      = {Multimodal object recognition is still an emerging field. Thus, publicly
                   available datasets are still rare and of small size. This dataset was developed
                   to help fill this void and presents multimodal data for 63 objects with some
                   visual and haptic ambiguity. The dataset contains visual, kinesthetic and
                   tactile (audio/vibrations) data. To completely solve sensory ambiguity, sensory
                   integration/fusion would be required. This report describes the creation and
                   structure of the dataset. The first section explains the underlying approach
                   used to capture the visual and haptic properties of the objects. The second
                   section describes the technical aspects (experimental setup) needed for the
                   collection of the data. The third section introduces the objects, while the
                   final section describes the structure and content of the dataset.},
  added-at      = {2023-02-09T12:33:30.000+0100},
  author        = {Bonner, Lasse Emil R. and Buhl, Daniel Daugaard and Kristensen, Kristian and Navarro-Guerrero, Nicol{\'a}s},
  biburl        = {https://www.bibsonomy.org/bibtex/235f82745a6e67350547387df39d73bd2/nng},
  doi           = {10.6084/m9.figshare.14222486},
  archiveprefix = {arXiv},
  eprint        = {2112.13761},
  interhash     = {df131d20e4b65c3ad83a1d9430f69650},
  intrahash     = {35f82745a6e67350547387df39d73bd2},
  keywords      = {myown},
  timestamp     = {2023-02-09T12:33:30.000+0100},
  title         = {{AU} Dataset for Visuo-Haptic Object Recognition for Robots},
  url           = {http://arxiv.org/abs/2112.13761},
  year          = {2021},
}