Recently, Mahloujifar and Mahmoody (TCC'17) studied attacks against learning
algorithms using a special case of Valiant's malicious noise, called
$p$-tampering, in which the adversary gets to change any training example with
independent probability $p$ but is limited to only choose malicious examples
with correct labels. They obtained $p$-tampering attacks that increase the
error probability in the so-called targeted poisoning model in which the
adversary's goal is to increase the loss of the trained hypothesis over a
particular test example. At the heart of their attack was an efficient
algorithm to bias the expected value of any bounded real-output function
through $p$-tampering.
In this work, we present new biasing attacks for increasing the expected
value of bounded real-valued functions. Our improved biasing attacks directly
imply improved $p$-tampering attacks against learners in the targeted poisoning
model. As a bonus, our attacks come with considerably simpler analysis. We also
study the possibility of PAC learning under $p$-tampering attacks in the
non-targeted (aka indiscriminate) setting where the adversary's goal is to
increase the risk of the generated hypothesis (for a random test example). We
show that PAC learning is possible under $p$-tampering poisoning attacks
essentially whenever it is possible in the realizable setting without the
attacks. We further show that PAC learning under "correct-label" adversarial
noise is not possible in general, if the adversary could choose the (still
limited to only $p$ fraction of) tampered examples that she substitutes with
adversarially chosen ones. Our formal model for such "bounded-budget" tampering
attackers is inspired by the notions of (strong) adaptive corruption in secure
multi-party computation.
%0 Generic
%1 mahloujifar2017learning
%A Mahloujifar, Saeed
%A Diochnos, Dimitrios I.
%A Mahmoody, Mohammad
%D 2017
%K adversarial learning noise readings robustness
%T Learning under $p$-Tampering Attacks
%U http://arxiv.org/abs/1711.03707
%X Recently, Mahloujifar and Mahmoody (TCC'17) studied attacks against learning
algorithms using a special case of Valiant's malicious noise, called
$p$-tampering, in which the adversary gets to change any training example with
independent probability $p$ but is limited to only choose malicious examples
with correct labels. They obtained $p$-tampering attacks that increase the
error probability in the so-called targeted poisoning model in which the
adversary's goal is to increase the loss of the trained hypothesis over a
particular test example. At the heart of their attack was an efficient
algorithm to bias the expected value of any bounded real-output function
through $p$-tampering.
In this work, we present new biasing attacks for increasing the expected
value of bounded real-valued functions. Our improved biasing attacks directly
imply improved $p$-tampering attacks against learners in the targeted poisoning
model. As a bonus, our attacks come with considerably simpler analysis. We also
study the possibility of PAC learning under $p$-tampering attacks in the
non-targeted (aka indiscriminate) setting where the adversary's goal is to
increase the risk of the generated hypothesis (for a random test example). We
show that PAC learning is possible under $p$-tampering poisoning attacks
essentially whenever it is possible in the realizable setting without the
attacks. We further show that PAC learning under "correct-label" adversarial
noise is not possible in general, if the adversary could choose the (still
limited to only $p$ fraction of) tampered examples that she substitutes with
adversarially chosen ones. Our formal model for such "bounded-budget" tampering
attackers is inspired by the notions of (strong) adaptive corruption in secure
multi-party computation.
@misc{mahloujifar2017learning,
  abstract      = {Recently, Mahloujifar and Mahmoody (TCC'17) studied attacks against learning
algorithms using a special case of Valiant's malicious noise, called
$p$-tampering, in which the adversary gets to change any training example with
independent probability $p$ but is limited to only choose malicious examples
with correct labels. They obtained $p$-tampering attacks that increase the
error probability in the so-called targeted poisoning model in which the
adversary's goal is to increase the loss of the trained hypothesis over a
particular test example. At the heart of their attack was an efficient
algorithm to bias the expected value of any bounded real-output function
through $p$-tampering.
In this work, we present new biasing attacks for increasing the expected
value of bounded real-valued functions. Our improved biasing attacks directly
imply improved $p$-tampering attacks against learners in the targeted poisoning
model. As a bonus, our attacks come with considerably simpler analysis. We also
study the possibility of PAC learning under $p$-tampering attacks in the
non-targeted (aka indiscriminate) setting where the adversary's goal is to
increase the risk of the generated hypothesis (for a random test example). We
show that PAC learning is possible under $p$-tampering poisoning attacks
essentially whenever it is possible in the realizable setting without the
attacks. We further show that PAC learning under ``correct-label'' adversarial
noise is not possible in general, if the adversary could choose the (still
limited to only $p$ fraction of) tampered examples that she substitutes with
adversarially chosen ones. Our formal model for such ``bounded-budget'' tampering
attackers is inspired by the notions of (strong) adaptive corruption in secure
multi-party computation.},
  added-at      = {2020-07-10T14:32:58.000+0200},
  archiveprefix = {arXiv},
  author        = {Mahloujifar, Saeed and Diochnos, Dimitrios I. and Mahmoody, Mohammad},
  biburl        = {https://www.bibsonomy.org/bibtex/281e3ad35e5bc80a3d6a1868b02a7a96e/kirk86},
  description   = {[1711.03707] Learning under $p$-Tampering Attacks},
  eprint        = {1711.03707},
  interhash     = {7ceab8e91bd01642da18ec25d089cbbc},
  intrahash     = {81e3ad35e5bc80a3d6a1868b02a7a96e},
  keywords      = {adversarial learning noise readings robustness},
  timestamp     = {2020-07-10T14:32:58.000+0200},
  title         = {Learning under {$p$-Tampering} Attacks},
  url           = {http://arxiv.org/abs/1711.03707},
  year          = {2017}
}