We introduce SalGAN, a deep convolutional neural network for visual saliency
prediction trained with adversarial examples. The first stage of the network
consists of a generator model whose weights are learned by back-propagation
computed from a binary cross entropy (BCE) loss over downsampled versions of
the saliency maps. The resulting prediction is processed by a discriminator
network trained to solve a binary classification task between the saliency maps
generated by the generative stage and the ground truth ones. Our experiments
show how adversarial training allows reaching state-of-the-art performance
across different metrics when combined with a widely-used loss function like
BCE.
Description
SalGAN: Visual Saliency Prediction with Generative Adversarial Networks
cite arXiv:1701.01081. Comment: Our results can be reproduced with the source code and trained models available at https://imatge-upc.github.io/saliency-salgan-2017/
%0 Journal Article
%1 pan2017salgan
%A Pan, Junting
%A Ferrer, Cristian Canton
%A McGuinness, Kevin
%A O'Connor, Noel E.
%A Torres, Jordi
%A Sayrol, Elisa
%A Giro-i Nieto, Xavier
%D 2017
%K deep-learning
%T SalGAN: Visual Saliency Prediction with Generative Adversarial Networks
%U http://arxiv.org/abs/1701.01081
%X We introduce SalGAN, a deep convolutional neural network for visual saliency
prediction trained with adversarial examples. The first stage of the network
consists of a generator model whose weights are learned by back-propagation
computed from a binary cross entropy (BCE) loss over downsampled versions of
the saliency maps. The resulting prediction is processed by a discriminator
network trained to solve a binary classification task between the saliency maps
generated by the generative stage and the ground truth ones. Our experiments
show how adversarial training allows reaching state-of-the-art performance
across different metrics when combined with a widely-used loss function like
BCE.
@article{pan2017salgan,
  abstract      = {We introduce SalGAN, a deep convolutional neural network for visual saliency
prediction trained with adversarial examples. The first stage of the network
consists of a generator model whose weights are learned by back-propagation
computed from a binary cross entropy (BCE) loss over downsampled versions of
the saliency maps. The resulting prediction is processed by a discriminator
network trained to solve a binary classification task between the saliency maps
generated by the generative stage and the ground truth ones. Our experiments
show how adversarial training allows reaching state-of-the-art performance
across different metrics when combined with a widely-used loss function like
BCE.},
  added-at      = {2017-06-07T13:58:45.000+0200},
  archiveprefix = {arXiv},
  author        = {Pan, Junting and Ferrer, Cristian Canton and McGuinness, Kevin and O'Connor, Noel E. and Torres, Jordi and Sayrol, Elisa and Gir{\'o}-i Nieto, Xavier},
  biburl        = {https://www.bibsonomy.org/bibtex/27cd93d22a0428b01558425281f6a4e9e/axel.vogler},
  description   = {SalGAN: Visual Saliency Prediction with Generative Adversarial Networks},
  eprint        = {1701.01081},
  interhash     = {fb1c19b51416e121f676656cbb9a95d5},
  intrahash     = {7cd93d22a0428b01558425281f6a4e9e},
  keywords      = {deep-learning},
  note          = {arXiv preprint. Our results can be reproduced with the source code and trained models available at https://imatge-upc.github.io/saliency-salgan-2017/},
  primaryclass  = {cs.CV},
  timestamp     = {2017-06-07T13:58:45.000+0200},
  title         = {{SalGAN}: Visual Saliency Prediction with Generative Adversarial Networks},
  url           = {http://arxiv.org/abs/1701.01081},
  year          = {2017}
}