This paper introduces a visual sentiment concept classification method based
on deep convolutional neural networks (CNNs). The visual sentiment concepts are
adjective noun pairs (ANPs) automatically discovered from the tags of web
photos, and can be utilized as effective statistical cues for detecting
emotions depicted in the images. Nearly one million Flickr images tagged with
these ANPs are downloaded to train the classifiers of the concepts. We adopt
the popular model of deep convolutional neural networks which recently shows
great performance improvement on classifying large-scale web-based image
dataset such as ImageNet. Our deep CNNs model is trained based on Caffe, a
newly developed deep learning framework. To deal with the biased training data
which only contains images with strong sentiment and to prevent overfitting, we
initialize the model with the model weights trained from ImageNet. Performance
evaluation shows the newly trained deep CNNs model SentiBank 2.0 (or called
DeepSentiBank) is significantly improved in both annotation accuracy and
retrieval performance, compared to its predecessors which mainly use binary SVM
classification models.
Description
DeepSentiBank: Visual Sentiment Concept Classification with Deep
Convolutional Neural Networks
%0 Generic
%1 chen2014deepsentibank
%A Chen, Tao
%A Borth, Damian
%A Darrell, Trevor
%A Chang, Shih-Fu
%D 2014
%K classification deep learning nn sentiment toread
%T DeepSentiBank: Visual Sentiment Concept Classification with Deep
Convolutional Neural Networks
%U http://arxiv.org/abs/1410.8586
%X This paper introduces a visual sentiment concept classification method based
on deep convolutional neural networks (CNNs). The visual sentiment concepts are
adjective noun pairs (ANPs) automatically discovered from the tags of web
photos, and can be utilized as effective statistical cues for detecting
emotions depicted in the images. Nearly one million Flickr images tagged with
these ANPs are downloaded to train the classifiers of the concepts. We adopt
the popular model of deep convolutional neural networks which recently shows
great performance improvement on classifying large-scale web-based image
dataset such as ImageNet. Our deep CNNs model is trained based on Caffe, a
newly developed deep learning framework. To deal with the biased training data
which only contains images with strong sentiment and to prevent overfitting, we
initialize the model with the model weights trained from ImageNet. Performance
evaluation shows the newly trained deep CNNs model SentiBank 2.0 (or called
DeepSentiBank) is significantly improved in both annotation accuracy and
retrieval performance, compared to its predecessors which mainly use binary SVM
classification models.
@misc{chen2014deepsentibank,
  abstract      = {This paper introduces a visual sentiment concept classification method based
on deep convolutional neural networks (CNNs). The visual sentiment concepts are
adjective noun pairs (ANPs) automatically discovered from the tags of web
photos, and can be utilized as effective statistical cues for detecting
emotions depicted in the images. Nearly one million Flickr images tagged with
these ANPs are downloaded to train the classifiers of the concepts. We adopt
the popular model of deep convolutional neural networks which recently shows
great performance improvement on classifying large-scale web-based image
dataset such as ImageNet. Our deep CNNs model is trained based on Caffe, a
newly developed deep learning framework. To deal with the biased training data
which only contains images with strong sentiment and to prevent overfitting, we
initialize the model with the model weights trained from ImageNet. Performance
evaluation shows the newly trained deep CNNs model SentiBank 2.0 (or called
DeepSentiBank) is significantly improved in both annotation accuracy and
retrieval performance, compared to its predecessors which mainly use binary SVM
classification models.},
  added-at      = {2016-07-13T17:13:13.000+0200},
  archiveprefix = {arXiv},
  author        = {Chen, Tao and Borth, Damian and Darrell, Trevor and Chang, Shih-Fu},
  biburl        = {https://www.bibsonomy.org/bibtex/2fdf8b6a336939178f6a7495993653718/hotho},
  description   = {DeepSentiBank: Visual Sentiment Concept Classification with Deep
Convolutional Neural Networks},
  eprint        = {1410.8586},
  interhash     = {f589429f82ac362c00d1dbf677d1ff26},
  intrahash     = {fdf8b6a336939178f6a7495993653718},
  keywords      = {classification deep learning nn sentiment toread},
  note          = {7 pages, 4 figures},
  timestamp     = {2016-07-13T17:13:13.000+0200},
  title         = {{DeepSentiBank}: Visual Sentiment Concept Classification with Deep
Convolutional Neural Networks},
  url           = {http://arxiv.org/abs/1410.8586},
  year          = {2014},
}