Overlays have shown significant promise for field-programmable gate arrays
(FPGAs), as they allow for fast development cycles and remove many of the
challenges of the traditional FPGA hardware design flow. However, this
flexibility often comes with a significant performance penalty, which has
limited the adoption of overlays in practical applications. In this paper, we
tailor an overlay to a specific application domain and show how it retains
full programmability without paying the performance overhead traditionally
associated with overlays. Specifically, we introduce an overlay targeted at
deep neural network inference whose control and reprogramming logic,
implemented as a lightweight very long instruction word (VLIW) network, adds
only ~1% overhead. Additionally, we implement a sophisticated domain-specific
graph compiler that compiles models from deep-learning frameworks such as
Caffe or TensorFlow to easily target our overlay. We show how our graph
compiler performs architecture-driven software optimizations that
significantly boost the performance of both convolutional and recurrent
neural networks (CNNs/RNNs): we demonstrate a 3x improvement on ResNet-101
and a 12x improvement for long short-term memory (LSTM) cells, compared to
naive implementations. Finally, we describe how we can tailor our hardware
overlay and use our graph compiler to achieve ~900 fps on GoogLeNet on an
Intel Arria 10 1150, the fastest ever reported on comparable FPGAs.
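To make the abstract's notion of an "architecture-driven graph optimization" concrete, here is a minimal Python sketch of one such pass: fusing a convolution with the activation that follows it, so the overlay can execute both in a single pass over the data. This is a hypothetical toy, not the authors' DLA compiler; the Node class, the fuse_conv_relu pass, and the op names are all invented for illustration, and the sketch ignores rewiring of downstream consumers.

from dataclasses import dataclass, field
from typing import List

@dataclass
class Node:
    """One operation in a toy dataflow graph (topologically ordered list)."""
    op: str                                   # e.g. "conv", "relu"
    inputs: List["Node"] = field(default_factory=list)

def fuse_conv_relu(nodes: List[Node]) -> List[Node]:
    """Rewrite conv -> relu chains into single fused nodes."""
    consumed = set()                          # conv nodes absorbed by a fusion
    out = []
    for n in nodes:
        if n.op == "relu" and len(n.inputs) == 1 and n.inputs[0].op == "conv":
            conv = n.inputs[0]
            consumed.add(id(conv))
            # The fused node takes over the conv's inputs directly.
            out.append(Node("fused_conv_relu", conv.inputs))
        else:
            out.append(n)
    # Drop the conv nodes that were folded into fused nodes.
    return [n for n in out if id(n) not in consumed]

# Toy usage: a conv followed by a relu collapses into one node.
conv = Node("conv")
relu = Node("relu", [conv])
print([n.op for n in fuse_conv_relu([conv, relu])])   # ['fused_conv_relu']

Fusion of this general flavor avoids writing intermediate feature maps back to memory; the paper's reported 3x (ResNet-101) and 12x (LSTM) gains come from architecture-specific passes in this spirit, though the abstract does not spell out which transformations its compiler applies.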
@misc{AbdHan18DLA,
author = {Abdelfattah, Mohamed S. and Han, David and Bitar, Andrew and DiCecco, Roberto and O'Connell, Shane and Shanker, Nitika and Chu, Joseph and Prins, Ian and Fender, Joshua and Ling, Andrew C. and Chiu, Gordon R.},
keywords = {FPGA acceleration deep_learning overlay},
title = {DLA: Compiler and FPGA Overlay for Neural Network Inference Acceleration},
url = {http://arxiv.org/abs/1807.06434},
year = 2018
}