The recovery of 3D shape and pose solely from 2D landmarks stemming from a
large ensemble of images can be viewed as a non-rigid structure from motion
(NRSfM) problem. To date, however, the application of NRSfM to problems in the
wild has been problematic. Classical NRSfM approaches do not scale to large
numbers of images and can only handle certain types of 3D structure (e.g.
low-rank). A recent breakthrough in this problem has allowed for the
reconstruction of a substantially broader set of 3D structures, dramatically
expanding the approach's importance to many problems in computer vision.
However, the approach is still limited in that (i) it cannot handle
missing/occluded points, and (ii) it is applicable only to weak-perspective
camera models. In this paper, we present Deep NRSfM++, an approach to allow
NRSfM to be truly applicable in the wild by offering up innovative solutions to
the above two issues. Furthermore, we demonstrate state-of-the-art performance
across numerous benchmarks, even against recent methods based on deep neural
networks.
Description
Deep NRSfM++: Towards 3D Reconstruction in the Wild
%0 Generic
%1 wang2020nrsfm
%A Wang, Chaoyang
%A Lin, Chen-Hsuan
%A Lucey, Simon
%D 2020
%K 3D_reconstruction
%T Deep NRSfM++: Towards 3D Reconstruction in the Wild
%U http://arxiv.org/abs/2001.10090
%X The recovery of 3D shape and pose solely from 2D landmarks stemming from a
large ensemble of images can be viewed as a non-rigid structure from motion
(NRSfM) problem. To date, however, the application of NRSfM to problems in the
wild has been problematic. Classical NRSfM approaches do not scale to large
numbers of images and can only handle certain types of 3D structure (e.g.
low-rank). A recent breakthrough in this problem has allowed for the
reconstruction of a substantially broader set of 3D structures, dramatically
expanding the approach's importance to many problems in computer vision.
However, the approach is still limited in that (i) it cannot handle
missing/occluded points, and (ii) it is applicable only to weak-perspective
camera models. In this paper, we present Deep NRSfM++, an approach to allow
NRSfM to be truly applicable in the wild by offering up innovative solutions to
the above two issues. Furthermore, we demonstrate state-of-the-art performance
across numerous benchmarks, even against recent methods based on deep neural
networks.
@misc{wang2020nrsfm,
  abstract      = {The recovery of 3D shape and pose solely from 2D landmarks stemming from a
large ensemble of images can be viewed as a non-rigid structure from motion
(NRSfM) problem. To date, however, the application of NRSfM to problems in the
wild has been problematic. Classical NRSfM approaches do not scale to large
numbers of images and can only handle certain types of 3D structure (e.g.
low-rank). A recent breakthrough in this problem has allowed for the
reconstruction of a substantially broader set of 3D structures, dramatically
expanding the approach's importance to many problems in computer vision.
However, the approach is still limited in that (i) it cannot handle
missing/occluded points, and (ii) it is applicable only to weak-perspective
camera models. In this paper, we present Deep NRSfM++, an approach to allow
NRSfM to be truly applicable in the wild by offering up innovative solutions to
the above two issues. Furthermore, we demonstrate state-of-the-art performance
across numerous benchmarks, even against recent methods based on deep neural
networks.},
  added-at      = {2020-11-25T11:00:46.000+0100},
  archiveprefix = {arXiv},
  author        = {Wang, Chaoyang and Lin, Chen-Hsuan and Lucey, Simon},
  biburl        = {https://www.bibsonomy.org/bibtex/2bcdf23cd5faa4962f5f9260251ced581/shuncheng.wu},
  description   = {Deep NRSfM++: Towards 3D Reconstruction in the Wild},
  eprint        = {2001.10090},
  interhash     = {cad739fe1a18e8936ebb7c452d813886},
  intrahash     = {bcdf23cd5faa4962f5f9260251ced581},
  keywords      = {3D_reconstruction},
  timestamp     = {2020-11-25T11:00:46.000+0100},
  title         = {Deep {NRSfM++}: Towards {3D} Reconstruction in the Wild},
  url           = {http://arxiv.org/abs/2001.10090},
  year          = 2020
}