Out-of-distribution (OoD) detection is a natural downstream task for deep
generative models, due to their ability to learn the input probability
distribution. There are mainly two classes of approaches for OoD detection
using deep generative models, viz., based on likelihood measure and the
reconstruction loss. However, both approaches are unable to carry out OoD
detection effectively, especially when the OoD samples have smaller variance
than the training samples. For instance, both flow-based and VAE models assign
higher likelihood to images from SVHN when trained on CIFAR-10 images. We use a
recently proposed generative model known as neural rendering model (NRM) and
derive metrics for OoD. We show that NRM unifies both approaches since it
provides a likelihood estimate and also carries out reconstruction in each
layer of the neural network. Among various measures, we found the joint
likelihood of latent variables to be the most effective one for OoD detection.
Our results show that when trained on CIFAR-10, lower likelihood (of latent
variables) is assigned to SVHN images. Additionally, we show that this metric
is consistent across other OoD datasets. To the best of our knowledge, this is
the first work to show consistently lower likelihood for OoD data with smaller
variance with deep generative models.
Description
[1907.04572v1] Out-of-Distribution Detection Using Neural Rendering Generative Models
%0 Journal Article
%1 huang2019outofdistribution
%A Huang, Yujia
%A Dai, Sihui
%A Nguyen, Tan
%A Baraniuk, Richard G.
%A Anandkumar, Anima
%D 2019
%K anomaly-detection generative-models uncertainty
%T Out-of-Distribution Detection Using Neural Rendering Generative Models
%U http://arxiv.org/abs/1907.04572
%X Out-of-distribution (OoD) detection is a natural downstream task for deep
generative models, due to their ability to learn the input probability
distribution. There are mainly two classes of approaches for OoD detection
using deep generative models, viz., based on likelihood measure and the
reconstruction loss. However, both approaches are unable to carry out OoD
detection effectively, especially when the OoD samples have smaller variance
than the training samples. For instance, both flow-based and VAE models assign
higher likelihood to images from SVHN when trained on CIFAR-10 images. We use a
recently proposed generative model known as neural rendering model (NRM) and
derive metrics for OoD. We show that NRM unifies both approaches since it
provides a likelihood estimate and also carries out reconstruction in each
layer of the neural network. Among various measures, we found the joint
likelihood of latent variables to be the most effective one for OoD detection.
Our results show that when trained on CIFAR-10, lower likelihood (of latent
variables) is assigned to SVHN images. Additionally, we show that this metric
is consistent across other OoD datasets. To the best of our knowledge, this is
the first work to show consistently lower likelihood for OoD data with smaller
variance with deep generative models.
@article{huang2019outofdistribution,
  abstract      = {Out-of-distribution (OoD) detection is a natural downstream task for deep
                   generative models, due to their ability to learn the input probability
                   distribution. There are mainly two classes of approaches for OoD detection
                   using deep generative models, viz., based on likelihood measure and the
                   reconstruction loss. However, both approaches are unable to carry out OoD
                   detection effectively, especially when the OoD samples have smaller variance
                   than the training samples. For instance, both flow based and VAE models assign
                   higher likelihood to images from SVHN when trained on CIFAR-10 images. We use a
                   recently proposed generative model known as neural rendering model (NRM) and
                   derive metrics for OoD. We show that NRM unifies both approaches since it
                   provides a likelihood estimate and also carries out reconstruction in each
                   layer of the neural network. Among various measures, we found the joint
                   likelihood of latent variables to be the most effective one for OoD detection.
                   Our results show that when trained on CIFAR-10, lower likelihood (of latent
                   variables) is assigned to SVHN images. Additionally, we show that this metric
                   is consistent across other OoD datasets. To the best of our knowledge, this is
                   the first work to show consistently lower likelihood for OoD data with smaller
                   variance with deep generative models.},
  added-at      = {2019-07-15T03:49:03.000+0200},
  archiveprefix = {arXiv},
  author        = {Huang, Yujia and Dai, Sihui and Nguyen, Tan and Baraniuk, Richard G. and Anandkumar, Anima},
  biburl        = {https://www.bibsonomy.org/bibtex/2829865dbe645c38fe4299e6e7f5f28e6/kirk86},
  description   = {[1907.04572v1] Out-of-Distribution Detection Using Neural Rendering Generative Models},
  eprint        = {1907.04572},
  interhash     = {f3f2d60320ae73ee28437894635c2c9a},
  intrahash     = {829865dbe645c38fe4299e6e7f5f28e6},
  keywords      = {anomaly-detection generative-models uncertainty},
  note          = {cite arxiv:1907.04572},
  timestamp     = {2019-07-15T03:49:03.000+0200},
  title         = {Out-of-Distribution Detection Using Neural Rendering Generative Models},
  url           = {http://arxiv.org/abs/1907.04572},
  year          = {2019},
}