Since the emergence of Deep Neural Networks (DNNs) as a prominent technique
in the field of computer vision, the ImageNet classification challenge has
played a major role in advancing the state-of-the-art. While accuracy figures
have steadily increased, the resource utilisation of winning models has not
been properly taken into account. In this work, we present a comprehensive
analysis of important metrics in practical applications: accuracy, memory
footprint, parameters, operations count, inference time and power consumption.
Key findings are: (1) power consumption is independent of batch size and
architecture; (2) accuracy and inference time are in a hyperbolic relationship;
(3) energy constraint is an upper bound on the maximum achievable accuracy and
model complexity; (4) the number of operations is a reliable estimate of the
inference time. We believe our analysis provides a compelling set of
information that helps design and engineer efficient DNNs.
Description
An Analysis of Deep Neural Network Models for Practical Applications
%0 Generic
%1 canziani2016analysis
%A Canziani, Alfredo
%A Paszke, Adam
%A Culurciello, Eugenio
%D 2016
%K computer deep learning models vision
%T An Analysis of Deep Neural Network Models for Practical Applications
%U http://arxiv.org/abs/1605.07678
%X Since the emergence of Deep Neural Networks (DNNs) as a prominent technique
in the field of computer vision, the ImageNet classification challenge has
played a major role in advancing the state-of-the-art. While accuracy figures
have steadily increased, the resource utilisation of winning models has not
been properly taken into account. In this work, we present a comprehensive
analysis of important metrics in practical applications: accuracy, memory
footprint, parameters, operations count, inference time and power consumption.
Key findings are: (1) power consumption is independent of batch size and
architecture; (2) accuracy and inference time are in a hyperbolic relationship;
(3) energy constraint is an upper bound on the maximum achievable accuracy and
model complexity; (4) the number of operations is a reliable estimate of the
inference time. We believe our analysis provides a compelling set of
information that helps design and engineer efficient DNNs.
@misc{canziani2016analysis,
  abstract      = {Since the emergence of Deep Neural Networks (DNNs) as a prominent technique
in the field of computer vision, the ImageNet classification challenge has
played a major role in advancing the state-of-the-art. While accuracy figures
have steadily increased, the resource utilisation of winning models has not
been properly taken into account. In this work, we present a comprehensive
analysis of important metrics in practical applications: accuracy, memory
footprint, parameters, operations count, inference time and power consumption.
Key findings are: (1) power consumption is independent of batch size and
architecture; (2) accuracy and inference time are in a hyperbolic relationship;
(3) energy constraint is an upper bound on the maximum achievable accuracy and
model complexity; (4) the number of operations is a reliable estimate of the
inference time. We believe our analysis provides a compelling set of
information that helps design and engineer efficient DNNs.},
  added-at      = {2021-12-05T15:38:17.000+0100},
  author        = {Canziani, Alfredo and Paszke, Adam and Culurciello, Eugenio},
  biburl        = {https://www.bibsonomy.org/bibtex/2d0e02c3192ecec48e4bf29424a7dcc00/sdo},
  description   = {An Analysis of Deep Neural Network Models for Practical Applications},
  eprint        = {1605.07678},
  archiveprefix = {arXiv},
  primaryclass  = {cs.CV},
  interhash     = {b0e2dc2bcbc54c16fc480bab9eb7568e},
  intrahash     = {d0e02c3192ecec48e4bf29424a7dcc00},
  keywords      = {computer deep learning models vision},
  timestamp     = {2021-12-05T15:38:17.000+0100},
  title         = {An Analysis of Deep Neural Network Models for Practical Applications},
  url           = {http://arxiv.org/abs/1605.07678},
  year          = {2016},
}