We present a method to create universal, robust, targeted adversarial image
patches in the real world. The patches are universal because they can be used
to attack any scene, robust because they work under a wide variety of
transformations, and targeted because they can cause a classifier to output any
target class. These adversarial patches can be printed, added to any scene,
photographed, and presented to image classifiers; even when the patches are
small, they cause the classifiers to ignore the other items in the scene and
report a chosen target class.
Code to reproduce the results from the paper is available at
https://github.com/tensorflow/cleverhans/tree/master/examples/adversarial_patch
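
The paper finds such a patch by optimizing its pixels to maximize the classifier's probability of the target class in expectation over random placements of the patch on training images, which is what makes the result both universal and robust to transformations. Below is a minimal sketch of that training loop in TensorFlow 2; the pretrained ResNet50 stand-in for the attacked classifier, the apply_patch helper, the fixed patch scale, and all hyperparameters are illustrative assumptions, not the configuration used in the linked code (which additionally samples random scales and rotations).

import tensorflow as tf

IMG_SIZE, PATCH_SIZE = 224, 64
TARGET_CLASS = 859  # ImageNet index for "toaster", the paper's demo target

# Frozen pretrained classifier standing in for the attacked model.
model = tf.keras.applications.ResNet50(weights="imagenet")
model.trainable = False

# The patch is the only trainable quantity; its pixels live in [0, 1].
patch = tf.Variable(tf.random.uniform((PATCH_SIZE, PATCH_SIZE, 3)))
optimizer = tf.keras.optimizers.Adam(learning_rate=0.05)

def apply_patch(image):
    """Paste the patch at a random location via a mask-based overwrite."""
    y = tf.random.uniform((), 0, IMG_SIZE - PATCH_SIZE, dtype=tf.int32)
    x = tf.random.uniform((), 0, IMG_SIZE - PATCH_SIZE, dtype=tf.int32)
    padded = tf.image.pad_to_bounding_box(patch, y, x, IMG_SIZE, IMG_SIZE)
    mask = tf.image.pad_to_bounding_box(
        tf.ones((PATCH_SIZE, PATCH_SIZE, 3)), y, x, IMG_SIZE, IMG_SIZE)
    return image * (1.0 - mask) + padded

@tf.function
def train_step(images):  # `images` in [0, 1], shape (B, 224, 224, 3)
    labels = tf.fill((tf.shape(images)[0],), TARGET_CLASS)
    with tf.GradientTape() as tape:
        patched = tf.map_fn(apply_patch, images)  # fresh placement per image
        probs = model(
            tf.keras.applications.resnet50.preprocess_input(patched * 255.0))
        # Minimizing cross-entropy against TARGET_CLASS maximizes the chance
        # the classifier reports the chosen target, whatever the scene.
        loss = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
    grads = tape.gradient(loss, [patch])
    optimizer.apply_gradients(zip(grads, [patch]))
    patch.assign(tf.clip_by_value(patch, 0.0, 1.0))  # keep pixels valid
    return loss

At attack time the learned patch is printed, placed in a scene, and photographed; sampling random transformations during training is what lets it survive that physical pipeline.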
@article{brown2017adversarial,
  author   = {Brown, Tom B. and Mané, Dandelion and Roy, Aurko and Abadi, Martín and Gilmer, Justin},
  title    = {Adversarial Patch},
  year     = {2017},
  note     = {cite arxiv:1712.09665},
  url      = {http://arxiv.org/abs/1712.09665},
  keywords = {adversarial}
}