Deep Neural Networks (DNNs) outshine alternative function approximators in
many settings thanks to their modularity in composing any desired
differentiable operator. The formed parametrized functional is then tuned to
solve a task at hand from simple gradient descent. This modularity comes at the
cost of making strict enforcement of constraints on DNNs, e.g. from a priori
knowledge of the task, or from desired physical properties, an open challenge.
In this paper we propose the first provable affine constraint enforcement
method for DNNs that requires minimal changes to a given DNN's forward-pass,
that is computationally friendly, and that leaves the optimization of the DNN's
parameters to be unconstrained, i.e., standard gradient-based methods can be
employed. Our method does not require any sampling and provably ensures that
the DNN fulfills the affine constraint on a given input space's region at any
point during training, and testing. We coin this method POLICE, standing for
Provably Optimal LInear Constraint Enforcement.
Description
POLICE: Provably Optimal Linear Constraint Enforcement for Deep Neural Networks
%0 Generic
%1 balestriero2022police
%A Balestriero, Randall
%A LeCun, Yann
%D 2022
%K cognition
%T POLICE: Provably Optimal Linear Constraint Enforcement for Deep Neural
Networks
%U http://arxiv.org/abs/2211.01340
%X Deep Neural Networks (DNNs) outshine alternative function approximators in
many settings thanks to their modularity in composing any desired
differentiable operator. The formed parametrized functional is then tuned to
solve a task at hand from simple gradient descent. This modularity comes at the
cost of making strict enforcement of constraints on DNNs, e.g. from a priori
knowledge of the task, or from desired physical properties, an open challenge.
In this paper we propose the first provable affine constraint enforcement
method for DNNs that requires minimal changes to a given DNN's forward-pass,
that is computationally friendly, and that leaves the optimization of the DNN's
parameters to be unconstrained, i.e., standard gradient-based methods can be
employed. Our method does not require any sampling and provably ensures that
the DNN fulfills the affine constraint on a given input space's region at any
point during training, and testing. We coin this method POLICE, standing for
Provably Optimal LInear Constraint Enforcement.
@misc{balestriero2022police,
  abstract      = {Deep Neural Networks (DNNs) outshine alternative function approximators in
many settings thanks to their modularity in composing any desired
differentiable operator. The formed parametrized functional is then tuned to
solve a task at hand from simple gradient descent. This modularity comes at the
cost of making strict enforcement of constraints on DNNs, e.g. from a priori
knowledge of the task, or from desired physical properties, an open challenge.
In this paper we propose the first provable affine constraint enforcement
method for DNNs that requires minimal changes into a given DNN's forward-pass,
that is computationally friendly, and that leaves the optimization of the DNN's
parameter to be unconstrained i.e. standard gradient-based method can be
employed. Our method does not require any sampling and provably ensures that
the DNN fulfills the affine constraint on a given input space's region at any
point during training, and testing. We coin this method POLICE, standing for
Provably Optimal LInear Constraint Enforcement.},
  added-at      = {2022-11-08T18:40:40.000+0100},
  archiveprefix = {arXiv},
  author        = {Balestriero, Randall and LeCun, Yann},
  biburl        = {https://www.bibsonomy.org/bibtex/24380466b48937f2530b697b3c0741b7e/szhang104},
  description   = {POLICE: Provably Optimal Linear Constraint Enforcement for Deep Neural Networks},
  doi           = {10.48550/arXiv.2211.01340},
  eprint        = {2211.01340},
  interhash     = {5914a57257e98e56ec95b6535ac092f7},
  intrahash     = {4380466b48937f2530b697b3c0741b7e},
  keywords      = {cognition},
  note          = {cite arxiv:2211.01340},
  primaryclass  = {cs.LG},
  timestamp     = {2022-11-08T18:40:40.000+0100},
  title         = {{POLICE}: Provably Optimal {LInear} Constraint Enforcement for Deep Neural Networks},
  url           = {http://arxiv.org/abs/2211.01340},
  year          = {2022}
}