The 3D deep learning community has seen significant strides in pointcloud
processing over the last few years. However, the datasets on which deep models
have been trained have largely remained the same. Most datasets comprise clean,
clutter-free pointclouds canonicalized for pose. Models trained on these
datasets fail in uninterpretable and unintuitive ways when presented with data
that contains transformations "unseen" at train time. While data augmentation
enables models to be robust to "previously seen" input transformations, 1) we
show that this does not work for unseen transformations during inference, and
2) data augmentation makes it difficult to analyze a model's inherent
robustness to transformations. To this end, we create a publicly available
dataset for robustness analysis of point cloud classification models
(independent of data augmentation) to input transformations, called
RobustPointSet. Our experiments indicate that despite all the progress in the
point cloud classification, there is no single architecture that consistently
performs better -- several fail drastically -- when evaluated on transformed
test sets. We also find that robustness to unseen transformations cannot be
brought about merely by extensive data augmentation. RobustPointSet can be
accessed through https://github.com/AutodeskAILab/RobustPointSet.
Description
[2011.11572] RobustPointSet: A Dataset for Benchmarking Robustness of Point Cloud Classifiers
%0 Generic
%1 taghanaki2020robustpointset
%A Taghanaki, Saeid Asgari
%A Luo, Jieliang
%A Zhang, Ran
%A Wang, Ye
%A Jayaraman, Pradeep Kumar
%A Jatavallabhula, Krishna Murthy
%D 2020
%K 3D classification dataset point-cloud
%T RobustPointSet: A Dataset for Benchmarking Robustness of Point Cloud
Classifiers
%U http://arxiv.org/abs/2011.11572
%X The 3D deep learning community has seen significant strides in pointcloud
processing over the last few years. However, the datasets on which deep models
have been trained have largely remained the same. Most datasets comprise clean,
clutter-free pointclouds canonicalized for pose. Models trained on these
datasets fail in uninterpretable and unintuitive ways when presented with data
that contains transformations "unseen" at train time. While data augmentation
enables models to be robust to "previously seen" input transformations, 1) we
show that this does not work for unseen transformations during inference, and
2) data augmentation makes it difficult to analyze a model's inherent
robustness to transformations. To this end, we create a publicly available
dataset for robustness analysis of point cloud classification models
(independent of data augmentation) to input transformations, called
RobustPointSet. Our experiments indicate that despite all the progress in the
point cloud classification, there is no single architecture that consistently
performs better -- several fail drastically -- when evaluated on transformed
test sets. We also find that robustness to unseen transformations cannot be
brought about merely by extensive data augmentation. RobustPointSet can be
accessed through https://github.com/AutodeskAILab/RobustPointSet.
@misc{taghanaki2020robustpointset,
  abstract      = {The 3D deep learning community has seen significant strides in pointcloud
processing over the last few years. However, the datasets on which deep models
have been trained have largely remained the same. Most datasets comprise clean,
clutter-free pointclouds canonicalized for pose. Models trained on these
datasets fail in uninterpretible and unintuitive ways when presented with data
that contains transformations "unseen" at train time. While data augmentation
enables models to be robust to "previously seen" input transformations, 1) we
show that this does not work for unseen transformations during inference, and
2) data augmentation makes it difficult to analyze a model's inherent
robustness to transformations. To this end, we create a publicly available
dataset for robustness analysis of point cloud classification models
(independent of data augmentation) to input transformations, called
RobustPointSet. Our experiments indicate that despite all the progress in the
point cloud classification, there is no single architecture that consistently
performs better -- several fail drastically -- when evaluated on transformed
test sets. We also find that robustness to unseen transformations cannot be
brought about merely by extensive data augmentation. RobustPointSet can be
accessed through https://github.com/AutodeskAILab/RobustPointSet.},
  added-at      = {2021-05-06T22:21:03.000+0200},
  archiveprefix = {arXiv},
  author        = {Taghanaki, Saeid Asgari and Luo, Jieliang and Zhang, Ran and Wang, Ye and Jayaraman, Pradeep Kumar and Jatavallabhula, Krishna Murthy},
  biburl        = {https://www.bibsonomy.org/bibtex/228c047493f6e097f5a44c6983d5739a4/analyst},
  description   = {[2011.11572] RobustPointSet: A Dataset for Benchmarking Robustness of Point Cloud Classifiers},
  eprint        = {2011.11572},
  interhash     = {cc87156776d38d64f2905bcb3de9effe},
  intrahash     = {28c047493f6e097f5a44c6983d5739a4},
  keywords      = {3D classification dataset point-cloud},
  note          = {Published at the Robust and Reliable Machine Learning in the Real World Workshop, ICLR 2021},
  timestamp     = {2021-05-06T22:21:03.000+0200},
  title         = {{RobustPointSet}: A Dataset for Benchmarking Robustness of Point Cloud Classifiers},
  url           = {http://arxiv.org/abs/2011.11572},
  year          = {2020},
}