We present a novel and unified deep learning framework which is capable of
learning domain-invariant representation from data across multiple domains.
Realized by adversarial training with additional ability to exploit
domain-specific information, the proposed network is able to perform continuous
cross-domain image translation and manipulation, and produces desirable output
images accordingly. In addition, the resulting feature representation exhibits
superior performance of unsupervised domain adaptation, which also verifies the
effectiveness of the proposed model in learning disentangled features for
describing cross-domain data.
%0 Generic
%1 citeulike:14645797
%A Liu, Alexander H.
%A Liu, Yen-Cheng
%A Yeh, Yu-Ying
%A Wang, Yu-Chiang Frank
%D 2018
%K domain\_adapt gan style\_transfer
%T A Unified Feature Disentangler for Multi-Domain Image Translation and Manipulation
%U http://arxiv.org/abs/1809.01361
%X We present a novel and unified deep learning framework which is capable of
learning domain-invariant representation from data across multiple domains.
Realized by adversarial training with additional ability to exploit
domain-specific information, the proposed network is able to perform continuous
cross-domain image translation and manipulation, and produces desirable output
images accordingly. In addition, the resulting feature representation exhibits
superior performance of unsupervised domain adaptation, which also verifies the
effectiveness of the proposed model in learning disentangled features for
describing cross-domain data.
% NOTE(review): author list recovered from the arXiv listing for 1809.01361
% (export had placeholder "xxx") -- please confirm against the paper.
% Title/abstract double-bracing removed so bibliography styles can apply
% their own casing; citation key and all exporter fields kept unchanged.
@misc{citeulike:14645797,
  author               = {Liu, Alexander H. and Liu, Yen-Cheng and Yeh, Yu-Ying and Wang, Yu-Chiang Frank},
  title                = {A Unified Feature Disentangler for Multi-Domain Image Translation and Manipulation},
  year                 = 2018,
  month                = sep,
  day                  = 8,
  eprint               = {1809.01361},
  archiveprefix        = {arXiv},
  primaryclass         = {cs.CV},
  url                  = {http://arxiv.org/abs/1809.01361},
  abstract             = {We present a novel and unified deep learning framework which is capable of
learning domain-invariant representation from data across multiple domains.
Realized by adversarial training with additional ability to exploit
domain-specific information, the proposed network is able to perform continuous
cross-domain image translation and manipulation, and produces desirable output
images accordingly. In addition, the resulting feature representation exhibits
superior performance of unsupervised domain adaptation, which also verifies the
effectiveness of the proposed model in learning disentangled features for
describing cross-domain data.},
  keywords             = {domain\_adapt gan style\_transfer},
  added-at             = {2019-02-27T22:23:29.000+0100},
  timestamp            = {2019-02-27T22:23:29.000+0100},
  posted-at            = {2018-10-13 15:23:38},
  priority             = {4},
  biburl               = {https://www.bibsonomy.org/bibtex/251f90b6ec870601650115ab4f65690ca/nmatsuk},
  interhash            = {23a61185168285e76a55614b9a716774},
  intrahash            = {51f90b6ec870601650115ab4f65690ca},
  citeulike-article-id = {14645797},
  citeulike-linkout-0  = {http://arxiv.org/abs/1809.01361},
  citeulike-linkout-1  = {http://arxiv.org/pdf/1809.01361},
}