We propose a general framework called Network Dissection for quantifying the
interpretability of latent representations of CNNs by evaluating the alignment
between individual hidden units and a set of semantic concepts. Given any CNN
model, the proposed method draws on a broad data set of visual concepts to
score the semantics of hidden units at each intermediate convolutional layer.
The units with semantics are given labels across a range of objects, parts,
scenes, textures, materials, and colors. We use the proposed method to test the
hypothesis that interpretability of units is equivalent to random linear
combinations of units, then we apply our method to compare the latent
representations of various networks when trained to solve different supervised
and self-supervised training tasks. We further analyze the effect of training
iterations, compare networks trained with different initializations, examine
the impact of network depth and width, and measure the effect of dropout and
batch normalization on the interpretability of deep visual representations. We
demonstrate that the proposed method can shed light on characteristics of CNN
models and training methods that go beyond measurements of their discriminative
power.
Description
[1704.05796] Network Dissection: Quantifying Interpretability of Deep Visual Representations
%0 Journal Article
%1 bau2017network
%A Bau, David
%A Zhou, Bolei
%A Khosla, Aditya
%A Oliva, Aude
%A Torralba, Antonio
%D 2017
%K interpretability
%T Network Dissection: Quantifying Interpretability of Deep Visual
Representations
%U http://arxiv.org/abs/1704.05796
%X We propose a general framework called Network Dissection for quantifying the
interpretability of latent representations of CNNs by evaluating the alignment
between individual hidden units and a set of semantic concepts. Given any CNN
model, the proposed method draws on a broad data set of visual concepts to
score the semantics of hidden units at each intermediate convolutional layer.
The units with semantics are given labels across a range of objects, parts,
scenes, textures, materials, and colors. We use the proposed method to test the
hypothesis that interpretability of units is equivalent to random linear
combinations of units, then we apply our method to compare the latent
representations of various networks when trained to solve different supervised
and self-supervised training tasks. We further analyze the effect of training
iterations, compare networks trained with different initializations, examine
the impact of network depth and width, and measure the effect of dropout and
batch normalization on the interpretability of deep visual representations. We
demonstrate that the proposed method can shed light on characteristics of CNN
models and training methods that go beyond measurements of their discriminative
power.
@article{bau2017network,
  abstract      = {We propose a general framework called Network Dissection for quantifying the
interpretability of latent representations of CNNs by evaluating the alignment
between individual hidden units and a set of semantic concepts. Given any CNN
model, the proposed method draws on a broad data set of visual concepts to
score the semantics of hidden units at each intermediate convolutional layer.
The units with semantics are given labels across a range of objects, parts,
scenes, textures, materials, and colors. We use the proposed method to test the
hypothesis that interpretability of units is equivalent to random linear
combinations of units, then we apply our method to compare the latent
representations of various networks when trained to solve different supervised
and self-supervised training tasks. We further analyze the effect of training
iterations, compare networks trained with different initializations, examine
the impact of network depth and width, and measure the effect of dropout and
batch normalization on the interpretability of deep visual representations. We
demonstrate that the proposed method can shed light on characteristics of CNN
models and training methods that go beyond measurements of their discriminative
power.},
  added-at      = {2020-03-13T18:59:45.000+0100},
  archiveprefix = {arXiv},
  author        = {Bau, David and Zhou, Bolei and Khosla, Aditya and Oliva, Aude and Torralba, Antonio},
  biburl        = {https://www.bibsonomy.org/bibtex/25cc69a01a57cb47d0d08f87892585c3a/kirk86},
  description   = {[1704.05796] Network Dissection: Quantifying Interpretability of Deep Visual Representations},
  eprint        = {1704.05796},
  interhash     = {6810f83e7aec3e11727b545f1d0872df},
  intrahash     = {5cc69a01a57cb47d0d08f87892585c3a},
  keywords      = {interpretability},
  note          = {First two authors contributed equally. Oral presentation at {CVPR} 2017},
  timestamp     = {2020-03-13T18:59:45.000+0100},
  title         = {{Network Dissection}: Quantifying Interpretability of Deep Visual Representations},
  url           = {http://arxiv.org/abs/1704.05796},
  year          = {2017},
}