Many recent works have shown that adversarial examples that fool classifiers
can be found by minimally perturbing a normal input. Recent theoretical
results, starting with Gilmer et al. (2018b), show that if the inputs are drawn
from a concentrated metric probability space, then adversarial examples with
small perturbation are inevitable. A concentrated space has the property that
any subset with $\Omega(1)$ (e.g., 1/100) measure, according to the imposed
distribution, has small distance to almost all (e.g., 99/100) of the points in
the space. It is not clear, however, whether these theoretical results apply to
actual distributions such as images. This paper presents a method for
empirically measuring and bounding the concentration of a concrete dataset,
with estimates proven to converge to the actual concentration. We use it to
empirically estimate the intrinsic robustness to $\ell_\infty$ and $\ell_2$
perturbations of several image classification benchmarks. Code for our
experiments is available at
https://github.com/xiaozhanguva/Measure-Concentration.
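
To make the concentration property concrete, below is a minimal Monte Carlo sketch in Python (illustrative only, not the paper's measurement method) for a standard Gaussian in $R^d$ under $\ell_2$ perturbations. By the Gaussian isoperimetric inequality, halfspaces are the least-expanding sets of a given measure, so a halfspace is the best-case error region; even then, a budget of about 5% of a typical point's norm inflates a region of measure 1/100 to cover roughly 99% of the distribution. All parameter values and names below are illustrative assumptions.

# Illustrative sketch (not the paper's algorithm): concentration of a
# standard Gaussian in R^d under l2 perturbations.
import numpy as np
from scipy.stats import norm

d = 10_000     # dimension; a typical sample has l2 norm ~ sqrt(d) = 100
n = 1_000_000  # Monte Carlo samples
eps = 4.66     # l2 budget, ~5% of a typical point's norm

rng = np.random.default_rng(0)

# Error region A = {x : x_1 <= t}, a halfspace of measure ~1/100.
# Its l2 eps-expansion is the shifted halfspace {x : x_1 <= t + eps},
# so only the first coordinate needs to be sampled.
x1 = rng.standard_normal(n)
t = norm.ppf(0.01)                            # ~ -2.326

print("mu(A)     =", np.mean(x1 <= t))        # ~ 0.01
print("mu(A_eps) =", np.mean(x1 <= t + eps))  # ~ 0.99
print("eps / sqrt(d) =", eps / np.sqrt(d))    # ~ 0.047

The same qualitative effect is what makes small adversarial perturbations unavoidable on concentrated spaces: any classifier's error region of non-negligible measure plays the role of A.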
@article{mahloujifar2019empirically,
abstract = {Many recent works have shown that adversarial examples that fool classifiers
can be found by minimally perturbing a normal input. Recent theoretical
results, starting with Gilmer et al. (2018b), show that if the inputs are drawn
from a concentrated metric probability space, then adversarial examples with
small perturbation are inevitable. A concentrated space has the property that
any subset with $\Omega(1)$ (e.g., 1/100) measure, according to the imposed
distribution, has small distance to almost all (e.g., 99/100) of the points in
the space. It is not clear, however, whether these theoretical results apply to
actual distributions such as images. This paper presents a method for
empirically measuring and bounding the concentration of a concrete dataset
which is proven to converge to the actual concentration. We use it to
empirically estimate the intrinsic robustness to $\ell_\infty$ and $\ell_2$
perturbations of several image classification benchmarks. Code for our
experiments is available at
https://github.com/xiaozhanguva/Measure-Concentration.},
added-at = {2020-07-10T13:02:36.000+0200},
author = {Mahloujifar, Saeed and Zhang, Xiao and Mahmoody, Mohammad and Evans, David},
biburl = {https://www.bibsonomy.org/bibtex/2e98b89249019ea63810b63c29f70a98b/kirk86},
description = {[1905.12202] Empirically Measuring Concentration: Fundamental Limits on Intrinsic Robustness},
interhash = {29e27bf4a716dffdcba97b62e04fc6f3},
intrahash = {e98b89249019ea63810b63c29f70a98b},
keywords = {learning readings robustness},
note = {cite arxiv:1905.12202. Comment: 17 pages, 3 figures, 5 tables; NeurIPS final version},
timestamp = {2020-07-10T13:02:36.000+0200},
title = {Empirically Measuring Concentration: Fundamental Limits on Intrinsic Robustness},
url = {http://arxiv.org/abs/1905.12202},
year = 2019
}