It is widely believed that the success of deep convolutional networks is
based on progressively discarding uninformative variability about the input
with respect to the problem at hand. This is supported empirically by the
difficulty of recovering images from their hidden representations, in most
commonly used network architectures. In this paper we show via a one-to-one
mapping that this loss of information is not a necessary condition to learn
representations that generalize well on complicated problems, such as ImageNet.
Via a cascade of homeomorphic layers, we build the i-RevNet, a network that can
be fully inverted up to the final projection onto the classes, i.e. no
information is discarded. Building an invertible architecture is difficult, for
one, because the local inversion is ill-conditioned; we overcome this by
providing an explicit inverse. An analysis of the i-RevNet's learned representations
suggests an alternative explanation for the success of deep networks by a
progressive contraction and linear separation with depth. To shed light on the
nature of the model learned by the i-RevNet we reconstruct linear
interpolations between natural image representations.
%0 Generic
%1 jacobsen2018irevnet
%A Jacobsen, Jörn-Henrik
%A Smeulders, Arnold
%A Oyallon, Edouard
%D 2018
%K Invertible
%T i-RevNet: Deep Invertible Networks
%U http://arxiv.org/abs/1802.07088
%X It is widely believed that the success of deep convolutional networks is
based on progressively discarding uninformative variability about the input
with respect to the problem at hand. This is supported empirically by the
difficulty of recovering images from their hidden representations, in most
commonly used network architectures. In this paper we show via a one-to-one
mapping that this loss of information is not a necessary condition to learn
representations that generalize well on complicated problems, such as ImageNet.
Via a cascade of homeomorphic layers, we build the i-RevNet, a network that can
be fully inverted up to the final projection onto the classes, i.e. no
information is discarded. Building an invertible architecture is difficult, for
one, because the local inversion is ill-conditioned; we overcome this by
providing an explicit inverse. An analysis of the i-RevNet's learned representations
suggests an alternative explanation for the success of deep networks by a
progressive contraction and linear separation with depth. To shed light on the
nature of the model learned by the i-RevNet we reconstruct linear
interpolations between natural image representations.
@misc{jacobsen2018irevnet,
  abstract      = {It is widely believed that the success of deep convolutional networks is
based on progressively discarding uninformative variability about the input
with respect to the problem at hand. This is supported empirically by the
difficulty of recovering images from their hidden representations, in most
commonly used network architectures. In this paper we show via a one-to-one
mapping that this loss of information is not a necessary condition to learn
representations that generalize well on complicated problems, such as ImageNet.
Via a cascade of homeomorphic layers, we build the i-RevNet, a network that can
be fully inverted up to the final projection onto the classes, i.e. no
information is discarded. Building an invertible architecture is difficult, for
one, because the local inversion is ill-conditioned, we overcome this by
providing an explicit inverse. An analysis of i-RevNets learned representations
suggests an alternative explanation for the success of deep networks by a
progressive contraction and linear separation with depth. To shed light on the
nature of the model learned by the i-RevNet we reconstruct linear
interpolations between natural image representations.},
  added-at      = {2019-05-09T13:49:59.000+0200},
  archiveprefix = {arXiv},
  author        = {Jacobsen, J{\"o}rn-Henrik and Smeulders, Arnold and Oyallon, Edouard},
  biburl        = {https://www.bibsonomy.org/bibtex/284ffa84e42fae4b61994a848d1a8197b/straybird321},
  description   = {[1802.07088] i-RevNet: Deep Invertible Networks},
  eprint        = {1802.07088},
  interhash     = {1e0f4455a743d9b949096507165b3c35},
  intrahash     = {84ffa84e42fae4b61994a848d1a8197b},
  keywords      = {Invertible},
  timestamp     = {2019-05-09T13:49:59.000+0200},
  title         = {{i-RevNet}: Deep Invertible Networks},
  url           = {http://arxiv.org/abs/1802.07088},
  year          = {2018},
}