Verification of neural networks enables us to gauge their robustness against
adversarial attacks. Verification algorithms fall into two categories: exact
verifiers, which run in exponential time, and relaxed verifiers, which are
efficient but incomplete. In this paper, we unify all existing LP-relaxed
verifiers, to the best of our knowledge, under a general convex relaxation
framework. This framework accommodates neural networks with diverse
architectures and nonlinearities and covers both primal and dual views of
robustness verification. We further prove strong duality between the primal
and dual problems under very mild conditions. We then perform large-scale
experiments, amounting to more than 22 CPU-years, to obtain the exact solution
to the convex-relaxed problem that is optimal within our framework for ReLU
networks. We find that this exact solution does not significantly narrow the
gap between PGD attacks and existing relaxed verifiers across various networks
trained normally or robustly on the MNIST and CIFAR datasets. Our results
suggest an inherent barrier to tight verification for the large class of
methods captured by our framework. We discuss possible causes of this barrier
and potential future directions for bypassing it.
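
For context on what "optimal within our framework" means for ReLU networks:
the tightest per-neuron relaxation available to such LP-based verifiers is the
well-known "triangle" relaxation. The sketch below uses illustrative notation
(pre-activation z with bounds l <= z <= u, where l < 0 < u), not the paper's
own:

% Convex-hull ("triangle") relaxation of the ReLU y = max(0, z)
% for an unstable neuron with pre-activation bounds l <= z <= u, l < 0 < u.
% These three linear inequalities describe the convex hull of the ReLU graph:
\begin{align*}
  y &\ge 0, \\
  y &\ge z, \\
  y &\le \frac{u\,(z - l)}{u - l}.
\end{align*}

Replacing each ReLU equation with these three linear inequalities turns
verification into a linear program; solving that LP exactly, as the paper's
experiments do, yields (up to the quality of the intermediate bounds) the
tightest certificate a layer-wise method in this framework can produce.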
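
The gap discussed in the abstract is measured against an empirical upper bound
from projected gradient descent (PGD). Below is a minimal, hypothetical
l-infinity PGD sketch in PyTorch; the names (model, eps, alpha, steps) are
assumptions for illustration, not the authors' code:

import torch
import torch.nn.functional as F

def pgd_linf(model, x, y, eps=0.1, alpha=0.01, steps=40):
    # delta is the adversarial perturbation, kept inside the eps-ball.
    delta = torch.zeros_like(x, requires_grad=True)
    for _ in range(steps):
        loss = F.cross_entropy(model(x + delta), y)
        loss.backward()
        with torch.no_grad():
            delta += alpha * delta.grad.sign()  # ascent step on the loss
            delta.clamp_(-eps, eps)             # project onto the l-inf ball
        delta.grad.zero_()
    # (A real attack would also clamp x + delta to the valid input range.)
    return (x + delta).detach()

If PGD finds an adversarial example at radius eps while the relaxed verifier
cannot certify robustness at that radius, the instance falls into the gap the
paper quantifies.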
@article{salman2019convex,
author = {Salman, Hadi and Yang, Greg and Zhang, Huan and Hsieh, Cho-Jui and Zhang, Pengchuan},
keywords = {bounds convex relaxation},
note = {arXiv:1902.08722},
title = {A Convex Relaxation Barrier to Tight Robustness Verification of Neural Networks},
url = {http://arxiv.org/abs/1902.08722},
year = 2019
}