M. Mirman, T. Gehr, and M. Vechev. (2020). arXiv:2004.14756. Comment: Prior version submitted to ICLR 2020.
Abstract
Generative neural networks can be used to specify continuous transformations
between images via latent-space interpolation. However, certifying that all
images captured by the resulting path in the image manifold satisfy a given
property can be very challenging. This is because this set is highly
non-convex, thwarting existing scalable robustness analysis methods, which are
often based on convex relaxations. We present ApproxLine, a scalable
certification method that successfully verifies non-trivial specifications
involving generative models and classifiers. ApproxLine can provide both sound
deterministic and probabilistic guarantees, by capturing either infinite
non-convex sets of neural network activation vectors or distributions over such
sets. We show that ApproxLine is practically useful and can verify interesting
interpolations in the network's latent space.
Description
[2004.14756] Robustness Certification of Generative Models
%0 Generic
%1 mirman2020robustness
%A Mirman, Matthew
%A Gehr, Timon
%A Vechev, Martin
%D 2020
%K generative-models iclr2020 robustness
%T Robustness Certification of Generative Models
%U http://arxiv.org/abs/2004.14756
%X Generative neural networks can be used to specify continuous transformations
between images via latent-space interpolation. However, certifying that all
images captured by the resulting path in the image manifold satisfy a given
property can be very challenging. This is because this set is highly
non-convex, thwarting existing scalable robustness analysis methods, which are
often based on convex relaxations. We present ApproxLine, a scalable
certification method that successfully verifies non-trivial specifications
involving generative models and classifiers. ApproxLine can provide both sound
deterministic and probabilistic guarantees, by capturing either infinite
non-convex sets of neural network activation vectors or distributions over such
sets. We show that ApproxLine is practically useful and can verify interesting
interpolations in the network's latent space.
@misc{mirman2020robustness,
  abstract      = {Generative neural networks can be used to specify continuous transformations
between images via latent-space interpolation. However, certifying that all
images captured by the resulting path in the image manifold satisfy a given
property can be very challenging. This is because this set is highly
non-convex, thwarting existing scalable robustness analysis methods, which are
often based on convex relaxations. We present ApproxLine, a scalable
certification method that successfully verifies non-trivial specifications
involving generative models and classifiers. ApproxLine can provide both sound
deterministic and probabilistic guarantees, by capturing either infinite
non-convex sets of neural network activation vectors or distributions over such
sets. We show that ApproxLine is practically useful and can verify interesting
interpolations in the network's latent space.},
  added-at      = {2020-05-03T04:51:41.000+0200},
  archiveprefix = {arXiv},
  author        = {Mirman, Matthew and Gehr, Timon and Vechev, Martin},
  biburl        = {https://www.bibsonomy.org/bibtex/29aeb4cac70c6699e19900568ba0587c5/kirk86},
  description   = {[2004.14756] Robustness Certification of Generative Models},
  eprint        = {2004.14756},
  interhash     = {3e53f1b96b6eab57c5392f4d9ca18d74},
  intrahash     = {9aeb4cac70c6699e19900568ba0587c5},
  keywords      = {generative-models iclr2020 robustness},
  note          = {Prior version submitted to ICLR 2020},
  timestamp     = {2020-05-03T04:51:41.000+0200},
  title         = {Robustness Certification of Generative Models},
  url           = {http://arxiv.org/abs/2004.14756},
  year          = {2020},
}