In this paper, we propose a method to learn a joint multimodal embedding
space. We compare the effect of various constraints using paired text and video
data. Additionally, we propose a method to improve the joint embedding space
using an adversarial formulation with unpaired text and video data. In addition
to testing on publicly available datasets, we introduce a new, large-scale
text/video dataset. We experimentally confirm that learning such a shared
embedding space benefits three difficult tasks: (i) zero-shot activity
classification, (ii) unsupervised activity discovery, and (iii) unseen activity
captioning.
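
Since only the abstract is given here, the following is a minimal sketch of the general recipe it describes: two encoders mapped into one shared text/video space, a ranking-style loss on paired data, and a GAN-style discriminator that aligns the two distributions on unpaired data. This is an illustration under assumed details, not the authors' implementation; the feature dimensions (1024-d video, 300-d text), encoder widths, triplet margin, and 0.1 loss weight are all hypothetical.

# Sketch only: a shared embedding trained with a paired ranking loss
# plus an adversarial alignment term for unpaired batches.
import torch
import torch.nn as nn
import torch.nn.functional as F

EMBED_DIM = 256
# MLP encoders mapping pre-extracted features into the shared space.
video_enc = nn.Sequential(nn.Linear(1024, 512), nn.ReLU(), nn.Linear(512, EMBED_DIM))
text_enc = nn.Sequential(nn.Linear(300, 512), nn.ReLU(), nn.Linear(512, EMBED_DIM))
# Discriminator tries to tell video embeddings from text embeddings.
disc = nn.Sequential(nn.Linear(EMBED_DIM, 128), nn.ReLU(), nn.Linear(128, 1))

def paired_ranking_loss(v, t, margin=0.2):
    # Cosine similarity between every video/text pair in the batch;
    # matched pairs sit on the diagonal.
    v, t = F.normalize(v, dim=1), F.normalize(t, dim=1)
    sim = v @ t.t()
    pos = sim.diag().unsqueeze(1)
    # Push each mismatched similarity at least `margin` below its match.
    off_diag = 1.0 - torch.eye(sim.size(0), device=sim.device)
    return (F.relu(margin + sim - pos) * off_diag).sum() / off_diag.sum()

def discriminator_loss(v_emb, t_emb):
    # Train the discriminator to label video embeddings 1 and text 0;
    # detach() so this loss updates only the discriminator.
    v_logit, t_logit = disc(v_emb.detach()), disc(t_emb.detach())
    return (F.binary_cross_entropy_with_logits(v_logit, torch.ones_like(v_logit))
            + F.binary_cross_entropy_with_logits(t_logit, torch.zeros_like(t_logit)))

def alignment_loss(v_emb, t_emb):
    # Train the encoders to fool the discriminator, pulling the unpaired
    # video and text embedding distributions toward each other.
    v_logit, t_logit = disc(v_emb), disc(t_emb)
    return (F.binary_cross_entropy_with_logits(v_logit, torch.zeros_like(v_logit))
            + F.binary_cross_entropy_with_logits(t_logit, torch.ones_like(t_logit)))

# One illustrative step: a paired batch drives the ranking loss, while
# unpaired batches drive the adversarial terms.
v_pair, t_pair = torch.randn(8, 1024), torch.randn(8, 300)
v_only, t_only = torch.randn(8, 1024), torch.randn(8, 300)
enc_loss = (paired_ranking_loss(video_enc(v_pair), text_enc(t_pair))
            + 0.1 * alignment_loss(video_enc(v_only), text_enc(t_only)))
d_loss = discriminator_loss(video_enc(v_only), text_enc(t_only))

In practice the encoders and the discriminator would be updated by separate optimizers, alternating the two losses as in standard adversarial training.
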
@misc{citeulike:14617222,
archiveprefix = {arXiv},
author = {xxx},
day = 21,
eprint = {1806.08251},
keywords = {loss metric semisup zero\_shot},
month = jun,
title = {{Learning Shared Multimodal Embeddings with Unpaired Data}},
url = {http://arxiv.org/abs/1806.08251},
year = 2018
}