Residual networks (ResNet) and weight normalization play an important role in
various deep learning applications. However, parameter initialization
strategies have not been studied previously for weight normalized networks and,
in practice, initialization methods designed for un-normalized networks are
used as a proxy. Similarly, initialization for ResNets has also been studied
only for un-normalized networks, often under simplified settings that ignore the
shortcut connection. To address these issues, we propose a novel parameter
initialization strategy that avoids explosion or vanishing of information across
layers for weight normalized networks with and without residual connections.
The proposed strategy is based on a theoretical analysis using mean field
approximation. We run over 2,500 experiments and evaluate our proposal on image
datasets showing that the proposed initialization outperforms existing
initialization methods in terms of generalization performance, robustness to
hyper-parameter values, and variance across seeds, especially when networks get
deeper, in which case existing methods fail to even start training. Finally, we
show that combining our initialization with learning rate warmup reduces the
performance gap between weight normalized and batch normalized networks.
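The abstract does not give the initialization formulas themselves, so the following is only a minimal sketch of the setting it describes: the weight-normalized parameterization w = g * v / ||v|| (Salimans & Kingma, 2016) together with the He-style "proxy" initialization that, per the abstract, is borrowed in practice from un-normalized networks (v drawn from a scaled normal, gain g fixed to 1). The code is an illustrative assumption, not the paper's proposed scheme; names such as weight_norm_linear are hypothetical.

import math
import torch

def weight_norm_linear(x, v, g):
    # Weight normalization: w = g * v / ||v||, with the norm taken per output unit (row of v).
    w = g.unsqueeze(1) * v / v.norm(dim=1, keepdim=True)
    return x @ w.t()

fan_in, fan_out = 256, 256
# Proxy initialization commonly used in practice (designed for un-normalized nets):
# He-style normal init on the direction v, gain g fixed to 1.
v = torch.randn(fan_out, fan_in) * math.sqrt(2.0 / fan_in)
g = torch.ones(fan_out)

x = torch.randn(32, fan_in)
h = torch.relu(weight_norm_linear(x, v, g))
print(h.var().item())  # the activation scale an initialization scheme tries to keep stable across layers

The question the paper addresses is how to set the scales of v and g (and of the residual branches, when shortcut connections are present) so that this activation scale neither explodes nor vanishes with depth; the mean field analysis mentioned above is the tool used to derive such a choice.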
@article{arpit2019initialize,
author = {Arpit, Devansh and Campos, Victor and Bengio, Yoshua},
keywords = {deep-learning machine-learning},
note = {cite arxiv:1906.02341},
title = {How to Initialize your Network? Robust Initialization for WeightNorm \& ResNets},
url = {http://arxiv.org/abs/1906.02341},
year = 2019
}