Adversarial training augments the training set with perturbations to improve
the robust error (over worst-case perturbations), but it often leads to an
increase in the standard error (on unperturbed test inputs). Previous
explanations for this tradeoff rely on the assumption that no predictor in the
hypothesis class has low standard and robust error. In this work, we precisely
characterize the effect of augmentation on the standard error in linear
regression when the optimal linear predictor has zero standard and robust
error. In particular, we show that the standard error could increase even when
the augmented perturbations have noiseless observations from the optimal linear
predictor. We then prove that the recently proposed robust self-training (RST)
estimator improves robust error without sacrificing standard error for
noiseless linear regression. Empirically, for neural networks, we find that RST
with different adversarial training methods improves both standard and robust
error for random and adversarial rotations and adversarial $\ell_\infty$
perturbations in CIFAR-10.
Description
[2002.10716] Understanding and Mitigating the Tradeoff Between Robustness and Accuracy
%0 Journal Article
%1 raghunathan2020understanding
%A Raghunathan, Aditi
%A Xie, Sang Michael
%A Yang, Fanny
%A Duchi, John
%A Liang, Percy
%D 2020
%K readings robustness
%T Understanding and Mitigating the Tradeoff Between Robustness and
Accuracy
%U http://arxiv.org/abs/2002.10716
%X Adversarial training augments the training set with perturbations to improve
the robust error (over worst-case perturbations), but it often leads to an
increase in the standard error (on unperturbed test inputs). Previous
explanations for this tradeoff rely on the assumption that no predictor in the
hypothesis class has low standard and robust error. In this work, we precisely
characterize the effect of augmentation on the standard error in linear
regression when the optimal linear predictor has zero standard and robust
error. In particular, we show that the standard error could increase even when
the augmented perturbations have noiseless observations from the optimal linear
predictor. We then prove that the recently proposed robust self-training (RST)
estimator improves robust error without sacrificing standard error for
noiseless linear regression. Empirically, for neural networks, we find that RST
with different adversarial training methods improves both standard and robust
error for random and adversarial rotations and adversarial $\ell_\infty$
perturbations in CIFAR-10.
@article{raghunathan2020understanding,
  abstract      = {Adversarial training augments the training set with perturbations to improve
the robust error (over worst-case perturbations), but it often leads to an
increase in the standard error (on unperturbed test inputs). Previous
explanations for this tradeoff rely on the assumption that no predictor in the
hypothesis class has low standard and robust error. In this work, we precisely
characterize the effect of augmentation on the standard error in linear
regression when the optimal linear predictor has zero standard and robust
error. In particular, we show that the standard error could increase even when
the augmented perturbations have noiseless observations from the optimal linear
predictor. We then prove that the recently proposed robust self-training (RST)
estimator improves robust error without sacrificing standard error for
noiseless linear regression. Empirically, for neural networks, we find that RST
with different adversarial training methods improves both standard and robust
error for random and adversarial rotations and adversarial $\ell_\infty$
perturbations in CIFAR-10.},
  added-at      = {2020-02-28T01:48:47.000+0100},
  archiveprefix = {arXiv},
  author        = {Raghunathan, Aditi and Xie, Sang Michael and Yang, Fanny and Duchi, John and Liang, Percy},
  biburl        = {https://www.bibsonomy.org/bibtex/29a350df31e01ff63dbd4358ea920db52/kirk86},
  description   = {[2002.10716] Understanding and Mitigating the Tradeoff Between Robustness and Accuracy},
  eprint        = {2002.10716},
  interhash     = {7ef842b0537a7e654f193f5487605f1a},
  intrahash     = {9a350df31e01ff63dbd4358ea920db52},
  journal       = {arXiv preprint arXiv:2002.10716},
  keywords      = {readings robustness},
  note          = {cite arxiv:2002.10716},
  timestamp     = {2020-02-28T01:48:47.000+0100},
  title         = {Understanding and Mitigating the Tradeoff Between Robustness and
Accuracy},
  url           = {http://arxiv.org/abs/2002.10716},
  year          = {2020}
}