Convolutional neural networks (CNNs) have shown great capability in solving
various artificial intelligence tasks. However, increasing model sizes have
raised challenges in employing them in resource-limited applications. In this
work, we propose to compress deep models by using channel-wise convolutions,
which replace dense connections among feature maps with sparse ones in CNNs.
Based on this novel operation, we build light-weight CNNs known as ChannelNets.
ChannelNets use three instances of channel-wise convolutions, namely group
channel-wise convolutions, depth-wise separable channel-wise convolutions, and
the convolutional classification layer. Compared to prior CNNs designed for
mobile devices, ChannelNets achieve a significant reduction in the number of
parameters and computational cost without loss in accuracy. Notably, our work
represents the first attempt to compress the fully-connected classification
layer, which usually accounts for about 25% of total parameters in compact
CNNs. Experimental results on the ImageNet dataset demonstrate that
ChannelNets achieve consistently better performance compared to prior methods.
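
As a rough illustration of the core operation described above, the sketch
below implements a channel-wise convolution in PyTorch by treating the
channel axis as an extra spatial dimension. This is a minimal sketch under
assumed shapes, not the authors' released code; the class name and kernel
size are illustrative.

# Minimal sketch of a channel-wise convolution (assumed PyTorch
# implementation, not the authors' code). A 1x1 convolution densely
# connects channels and costs C_in * C_out weights; here one small
# filter slides over the channel axis instead, so each output channel
# sees only a window of neighboring input channels.
import torch
import torch.nn as nn

class ChannelwiseConv(nn.Module):
    def __init__(self, kernel_size=7):
        super().__init__()
        assert kernel_size % 2 == 1, "odd size keeps the channel count fixed"
        # Channels become the depth axis of a 3D convolution; the single
        # shared filter costs only `kernel_size` parameters in total.
        self.conv = nn.Conv3d(1, 1, kernel_size=(kernel_size, 1, 1),
                              padding=(kernel_size // 2, 0, 0), bias=False)

    def forward(self, x):              # x: (N, C, H, W)
        y = self.conv(x.unsqueeze(1))  # dummy dim -> (N, 1, C, H, W)
        return y.squeeze(1)            # back to (N, C, H, W)

x = torch.randn(2, 64, 28, 28)
print(ChannelwiseConv()(x).shape)      # torch.Size([2, 64, 28, 28])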
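The claim about compressing the fully-connected classification layer can be
sketched the same way: a single unpadded convolution over the final feature
map can stand in for global average pooling followed by a fully-connected
layer. The shapes below (a 7x7 feature map with 1024 channels and 1000
ImageNet classes) are assumptions typical of compact backbones, not values
stated on this page.

# Hedged sketch of a convolutional classification layer. With no
# padding, a kernel of depth (m - n + 1) over the channel axis maps
# m input channels to exactly n output scores, one per class.
import torch
import torch.nn as nn

m, n, d = 1024, 1000, 7                # channels, classes, spatial size (assumed)
conv_cls = nn.Conv3d(1, 1, kernel_size=(m - n + 1, d, d), bias=False)

x = torch.randn(8, m, d, d)            # a batch of final feature maps
logits = conv_cls(x.unsqueeze(1)).reshape(8, n)
print(logits.shape)                    # torch.Size([8, 1000])

# The conv needs (m - n + 1) * d * d = 1225 weights, versus m * n
# (roughly a million) for an m-to-n fully-connected layer, which is
# how the roughly 25% parameter share cited above gets compressed.
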
@misc{citeulike:14645786,
archiveprefix = {arXiv},
author = {xxx},
day = 5,
eprint = {1809.01330},
keywords = {arch backbone classification mobilenet},
month = sep,
title = {{ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions}},
url = {http://arxiv.org/abs/1809.01330},
year = 2018
}