In this work we integrate ideas from surface-based modeling with neural
synthesis: we propose a combination of surface-based pose estimation and deep
generative models that allows us to perform accurate pose transfer, i.e.
synthesize a new image of a person based on a single image of that person and
the image of a pose donor. We use a dense pose estimation system that maps
pixels from both images to a common surface-based coordinate system, allowing
the two images to be brought in correspondence with each other. We inpaint and
refine the source image intensities in the surface coordinate system, prior to
warping them onto the target pose. These predictions are fused with those of a
convolutional predictive module through a neural synthesis module allowing for
training the whole pipeline jointly end-to-end, optimizing a combination of
adversarial and perceptual losses. We show that dense pose estimation is a
substantially more powerful conditioning input than landmark-, or mask-based
alternatives, and report systematic improvements over state of the art
generators on DeepFashion and MVC datasets.
%0 Generic
%1 citeulike:14646272
%A Neverova, Natalia
%A Güler, Rıza Alp
%A Kokkinos, Iasonas
%D 2018
%K arch gan style\_transfer
%T Dense Pose Transfer
%U http://arxiv.org/abs/1809.01995
%X In this work we integrate ideas from surface-based modeling with neural
synthesis: we propose a combination of surface-based pose estimation and deep
generative models that allows us to perform accurate pose transfer, i.e.
synthesize a new image of a person based on a single image of that person and
the image of a pose donor. We use a dense pose estimation system that maps
pixels from both images to a common surface-based coordinate system, allowing
the two images to be brought in correspondence with each other. We inpaint and
refine the source image intensities in the surface coordinate system, prior to
warping them onto the target pose. These predictions are fused with those of a
convolutional predictive module through a neural synthesis module allowing for
training the whole pipeline jointly end-to-end, optimizing a combination of
adversarial and perceptual losses. We show that dense pose estimation is a
substantially more powerful conditioning input than landmark-, or mask-based
alternatives, and report systematic improvements over state of the art
generators on DeepFashion and MVC datasets.
@misc{citeulike:14646272,
  abstract               = {In this work we integrate ideas from surface-based modeling with neural
synthesis: we propose a combination of surface-based pose estimation and deep
generative models that allows us to perform accurate pose transfer, i.e.
synthesize a new image of a person based on a single image of that person and
the image of a pose donor. We use a dense pose estimation system that maps
pixels from both images to a common surface-based coordinate system, allowing
the two images to be brought in correspondence with each other. We inpaint and
refine the source image intensities in the surface coordinate system, prior to
warping them onto the target pose. These predictions are fused with those of a
convolutional predictive module through a neural synthesis module allowing for
training the whole pipeline jointly end-to-end, optimizing a combination of
adversarial and perceptual losses. We show that dense pose estimation is a
substantially more powerful conditioning input than landmark-, or mask-based
alternatives, and report systematic improvements over state of the art
generators on DeepFashion and MVC datasets.},
  added-at               = {2019-02-27T22:23:29.000+0100},
  archiveprefix          = {arXiv},
  author                 = {Neverova, Natalia and G{\"u}ler, R{\i}za Alp and Kokkinos, Iasonas},
  biburl                 = {https://www.bibsonomy.org/bibtex/29eb805ad3c9d24b7e39c5c741205381a/nmatsuk},
  citeulike-article-id   = {14646272},
  citeulike-linkout-0    = {http://arxiv.org/abs/1809.01995},
  citeulike-linkout-1    = {http://arxiv.org/pdf/1809.01995},
  day                    = 6,
  doi                    = {10.48550/arXiv.1809.01995},
  eprint                 = {1809.01995},
  interhash              = {9281c1536ecb1af7eded72e4d4ea7d57},
  intrahash              = {9eb805ad3c9d24b7e39c5c741205381a},
  keywords               = {arch gan style\_transfer},
  month                  = sep,
  posted-at              = {2018-10-16 16:22:01},
  primaryclass           = {cs.CV},
  priority               = {3},
  timestamp              = {2019-02-27T22:23:29.000+0100},
  title                  = {Dense Pose Transfer},
  url                    = {http://arxiv.org/abs/1809.01995},
  year                   = 2018
}