Bayesian Neural Networks (BNNs) have been proposed to address the problem of
model uncertainty in training and inference. By replacing point-estimate weights
with probability distributions conditioned on the training data, BNNs alleviate
the overfitting commonly seen in conventional neural networks and allow training
on small datasets through variational inference. The frequent use of Gaussian
random variables in this process requires a properly optimized Gaussian Random
Number Generator (GRNG), and the high hardware cost of conventional GRNGs makes
hardware implementation of BNNs challenging.
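
Editor's note: a minimal sketch of why variational BNN inference is so
GRNG-hungry, assuming the standard mean-field Gaussian posterior with the
reparameterization trick (the general setting the abstract describes; the
function and variable names below are illustrative, not from the paper):

import numpy as np

def bayesian_linear_forward(x, w_mu, w_rho, rng):
    # Each weight carries a learned Gaussian posterior N(mu, sigma^2);
    # softplus keeps the standard deviation positive.
    sigma = np.log1p(np.exp(w_rho))
    # One fresh standard-normal draw per weight, per forward pass:
    # this is the Gaussian-sampling workload a GRNG must supply.
    eps = rng.standard_normal(w_mu.shape)
    w = w_mu + sigma * eps  # reparameterized weight sample
    return x @ w

rng = np.random.default_rng(0)
x = rng.standard_normal((1, 784))
y = bayesian_linear_forward(x, np.zeros((784, 10)), np.full((784, 10), -3.0), rng)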
In this paper, we propose VIBNN, an FPGA-based hardware accelerator design for
variational inference on BNNs. We explore the design space for the massive
Gaussian-variable sampling workload in BNNs. Specifically, we introduce two
high-performance Gaussian (pseudo) random number generators: the RAM-based
Linear Feedback Gaussian Random Number Generator (RLF-GRNG), inspired by the
properties of the binomial distribution and linear-feedback logic; and the
Bayesian Neural Network-oriented Wallace Gaussian Random Number Generator. To
achieve high scalability and efficient memory access, we propose a deeply
pipelined accelerator architecture with fast execution and good hardware
utilization. Experimental results demonstrate that the proposed VIBNN
implementations on an FPGA achieve a throughput of 321,543.4 images/s and an
energy efficiency of up to 52,694.8 images/J while maintaining accuracy similar
to that of their software counterpart.
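
Editor's note: the RLF-GRNG idea rests on the central limit theorem: a sum of
n independent Bernoulli(1/2) bits is Binomial(n, 1/2), which closely
approximates N(n/2, n/4) for moderate n, and such bits are cheap to produce
with linear-feedback logic. A hypothetical software analogue of that principle
(the paper's RAM-based hardware organization is not reproduced here):

import math

def lfsr_bits(state=0xACE1, taps=(16, 14, 13, 11), width=16):
    # Fibonacci LFSR for the maximal-length polynomial
    # x^16 + x^14 + x^13 + x^11 + 1; yields one pseudo-random bit per step.
    mask = (1 << width) - 1
    while True:
        fb = 0
        for t in taps:
            fb ^= (state >> (t - 1)) & 1
        state = ((state << 1) | fb) & mask
        yield state & 1

def clt_gaussian(bits, n=128):
    # Sum of n Bernoulli(1/2) bits ~ Binomial(n, 1/2) ~= N(n/2, n/4);
    # shift and scale to approximate a standard normal variate.
    s = sum(next(bits) for _ in range(n))
    return (s - n / 2) / math.sqrt(n / 4)

bits = lfsr_bits()
samples = [clt_gaussian(bits) for _ in range(4)]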
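
Editor's note: the Wallace family of generators, on which the BNN-oriented
Wallace GRNG builds, avoids transcendental functions entirely: it keeps a pool
of Gaussian variates and renews it by applying orthogonal transforms to
randomly chosen groups, since an orthogonal mix of Gaussians is again
Gaussian. A bare-bones sketch of that mechanism (the pool-maintenance and
correction details of real Wallace generators, and of the paper's variant, are
omitted):

import numpy as np

def wallace_step(pool, rng):
    # Shuffle, then mix disjoint groups of four values with an orthogonal
    # matrix (a scaled 4x4 Hadamard matrix, H/2). Orthogonality preserves
    # the pool's Gaussian statistics, so a whole new batch of variates
    # costs only additions and shifts in hardware.
    rng.shuffle(pool)
    H = 0.5 * np.array([[1,  1,  1,  1],
                        [1, -1,  1, -1],
                        [1,  1, -1, -1],
                        [1, -1, -1,  1]])
    return (pool.reshape(-1, 4) @ H.T).reshape(-1)

rng = np.random.default_rng(1)
pool = rng.standard_normal(1024)    # seed pool of true Gaussians
for _ in range(8):
    pool = wallace_step(pool, rng)  # each pass yields a fresh batch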
@misc{cai2018vibnn,
  author = {Cai, Ruizhe and Ren, Ao and Liu, Ning and Ding, Caiwen and Wang, Luhao and Qian, Xuehai and Pedram, Massoud and Wang, Yanzhi},
  title = {VIBNN: Hardware Acceleration of Bayesian Neural Networks},
  year = {2018},
  doi = {10.1145/3173162.3173212},
  url = {http://arxiv.org/abs/1802.00822},
  note = {cite arxiv:1802.00822}
}