Three-dimensional geometric data offer an excellent domain for studying
representation learning and generative modeling. In this paper, we look at
geometric data represented as point clouds. We introduce a deep autoencoder
(AE) network with state-of-the-art reconstruction quality and generalization
ability. The learned representations outperform existing methods for 3D
recognition tasks and enable basic shape editing via simple algebraic
manipulations, such as semantic part editing, shape analogies and shape
interpolation. We perform a thorough study of different generative models
including: GANs operating on the raw point clouds, significantly improved GANs
trained in the fixed latent space of our AEs and Gaussian mixture models (GMM).
For our quantitative evaluation we propose measures of sample fidelity and
diversity based on matchings between sets of point clouds. Interestingly, our
careful evaluation of generalization, fidelity and diversity reveals that GMMs
trained in the latent space of our AEs produce the best results.
%0 Generic
%1 achlioptas2017learning
%A Achlioptas, Panos
%A Diamanti, Olga
%A Mitliagkas, Ioannis
%A Guibas, Leonidas
%D 2017
%K GAN point_cloud
%T Learning Representations and Generative Models for 3D Point Clouds
%U http://arxiv.org/abs/1707.02392
%X Three-dimensional geometric data offer an excellent domain for studying
representation learning and generative modeling. In this paper, we look at
geometric data represented as point clouds. We introduce a deep autoencoder
(AE) network with state-of-the-art reconstruction quality and generalization
ability. The learned representations outperform existing methods for 3D
recognition tasks and enable basic shape editing via simple algebraic
manipulations, such as semantic part editing, shape analogies and shape
interpolation. We perform a thorough study of different generative models
including: GANs operating on the raw point clouds, significantly improved GANs
trained in the fixed latent space of our AEs and Gaussian mixture models (GMM).
For our quantitative evaluation we propose measures of sample fidelity and
diversity based on matchings between sets of point clouds. Interestingly, our
careful evaluation of generalization, fidelity and diversity reveals that GMMs
trained in the latent space of our AEs produce the best results.
@misc{achlioptas2017learning,
  abstract      = {Three-dimensional geometric data offer an excellent domain for studying
representation learning and generative modeling. In this paper, we look at
geometric data represented as point clouds. We introduce a deep autoencoder
(AE) network with state-of-the-art reconstruction quality and generalization
ability. The learned representations outperform existing methods for 3D
recognition tasks and enable basic shape editing via simple algebraic
manipulations, such as semantic part editing, shape analogies and shape
interpolation. We perform a thorough study of different generative models
including: GANs operating on the raw point clouds, significantly improved GANs
trained in the fixed latent space of our AEs and Gaussian mixture models (GMM).
For our quantitative evaluation we propose measures of sample fidelity and
diversity based on matchings between sets of point clouds. Interestingly, our
careful evaluation of generalization, fidelity and diversity reveals that GMMs
trained in the latent space of our AEs produce the best results.},
  added-at      = {2018-02-10T15:18:42.000+0100},
  archiveprefix = {arXiv},
  author        = {Achlioptas, Panos and Diamanti, Olga and Mitliagkas, Ioannis and Guibas, Leonidas},
  biburl        = {https://www.bibsonomy.org/bibtex/21bebf25afcd6bff423dd2a3c84b53102/jk_itwm},
  description   = {1707.02392.pdf},
  eprint        = {1707.02392},
  interhash     = {ca25745159fba69b3912a7fedf349c6f},
  intrahash     = {1bebf25afcd6bff423dd2a3c84b53102},
  keywords      = {GAN point_cloud},
  note          = {cite arxiv:1707.02392},
  timestamp     = {2018-02-10T15:18:42.000+0100},
  title         = {Learning Representations and Generative Models for {3D} Point Clouds},
  url           = {http://arxiv.org/abs/1707.02392},
  year          = {2017},
}