In our recent work (Bubeck, Price, Razenshteyn, arXiv:1805.10204) we argued
that adversarial examples in machine learning might be due to an inherent
computational hardness of the problem. More precisely, we constructed a binary
classification task for which (i) a robust classifier exists; yet no
non-trivial accuracy can be obtained with an efficient algorithm in (ii) the
statistical query model. In the present paper we significantly strengthen both
(i) and (ii): we now construct a task which admits (i') a maximally robust
classifier (that is it can tolerate perturbations of size comparable to the
size of the examples themselves); and moreover we prove computational hardness
of learning this task under (ii') a standard cryptographic assumption.
Description
[1811.06418] Adversarial Examples from Cryptographic Pseudo-Random Generators
%0 Journal Article
%1 bubeck2018adversarial
%A Bubeck, Sébastien
%A Lee, Yin Tat
%A Price, Eric
%A Razenshteyn, Ilya
%D 2018
%K adversarial computation cryptographic
%T Adversarial Examples from Cryptographic Pseudo-Random Generators
%U http://arxiv.org/abs/1811.06418
%X In our recent work (Bubeck, Price, Razenshteyn, arXiv:1805.10204) we argued
that adversarial examples in machine learning might be due to an inherent
computational hardness of the problem. More precisely, we constructed a binary
classification task for which (i) a robust classifier exists; yet no
non-trivial accuracy can be obtained with an efficient algorithm in (ii) the
statistical query model. In the present paper we significantly strengthen both
(i) and (ii): we now construct a task which admits (i') a maximally robust
classifier (that is it can tolerate perturbations of size comparable to the
size of the examples themselves); and moreover we prove computational hardness
of learning this task under (ii') a standard cryptographic assumption.
@article{bubeck2018adversarial,
  abstract      = {In our recent work (Bubeck, Price, Razenshteyn, arXiv:1805.10204) we argued
that adversarial examples in machine learning might be due to an inherent
computational hardness of the problem. More precisely, we constructed a binary
classification task for which (i) a robust classifier exists; yet no
non-trivial accuracy can be obtained with an efficient algorithm in (ii) the
statistical query model. In the present paper we significantly strengthen both
(i) and (ii): we now construct a task which admits (i') a maximally robust
classifier (that is it can tolerate perturbations of size comparable to the
size of the examples themselves); and moreover we prove computational hardness
of learning this task under (ii') a standard cryptographic assumption.},
  added-at      = {2019-02-27T23:02:09.000+0100},
  archiveprefix = {arXiv},
  author        = {Bubeck, Sébastien and Lee, Yin Tat and Price, Eric and Razenshteyn, Ilya},
  biburl        = {https://www.bibsonomy.org/bibtex/2500508d8878f96755283984a58ffc8f0/kirk86},
  description   = {[1811.06418] Adversarial Examples from Cryptographic Pseudo-Random Generators},
  eprint        = {1811.06418},
  interhash     = {73750e8c1ebe79966f4191a259a506b5},
  intrahash     = {500508d8878f96755283984a58ffc8f0},
  keywords      = {adversarial computation cryptographic},
  note          = {arXiv:1811.06418. Comment: 4 pages, no figures},
  timestamp     = {2019-02-27T23:02:09.000+0100},
  title         = {Adversarial Examples from Cryptographic Pseudo-Random Generators},
  url           = {http://arxiv.org/abs/1811.06418},
  year          = {2018},
}