Dynamic Vision Sensors (DVSs) asynchronously stream events in correspondence
with pixels undergoing brightness changes. Unlike classic vision devices, they
produce a sparse representation of the scene. Therefore, to apply standard
computer vision algorithms, events need to be integrated into a frame or
event-surface. This is usually achieved through hand-crafted grids that
reconstruct the frame using ad hoc heuristics. In this paper, we propose
Matrix-LSTM, a grid of Long Short-Term Memory (LSTM) cells that learns
task-dependent event-surfaces end-to-end. Compared to existing reconstruction
approaches, our learned event-surface is more flexible and expressive,
improving on the baselines for optical flow estimation on the MVSEC benchmark
and on the state of the art in event-based object classification on the N-Cars
dataset.
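
To make the idea concrete, below is a minimal PyTorch sketch of a learned
event-surface along these lines: a single weight-shared LSTM is run over each
pixel's temporally ordered event features, and its final hidden state becomes
that pixel's channel vector in a dense grid a standard CNN can consume. All
names, shapes, and the per-pixel Python loop are illustrative assumptions, not
the paper's implementation (which batches events across pixels efficiently).

import torch
import torch.nn as nn

class MatrixLSTMSurface(nn.Module):
    """Hypothetical sketch: one weight-shared LSTM per pixel turns that
    pixel's event sequence into a feature vector; stacking the vectors
    yields a dense, task-trainable event-surface."""

    def __init__(self, in_features=2, hidden=8, height=100, width=120):
        super().__init__()
        self.h, self.w, self.hidden = height, width, hidden
        self.lstm = nn.LSTM(in_features, hidden, batch_first=True)

    def forward(self, xs, ys, feats):
        # xs, ys: (N,) integer pixel coordinates of N events in temporal order
        # feats:  (N, in_features) per-event features, e.g. (polarity, relative timestamp)
        surface = feats.new_zeros(self.hidden, self.h, self.w)
        pix = ys * self.w + xs                   # flatten (y, x) into a pixel id
        for p in pix.unique():                   # naive loop; real code batches pixels
            seq = feats[pix == p].unsqueeze(0)   # (1, T_p, in_features)
            _, (h_n, _) = self.lstm(seq)         # same LSTM weights at every pixel
            surface[:, p // self.w, p % self.w] = h_n[0, 0]
        return surface                           # (hidden, H, W), ready for a CNN

# Toy usage: 5 random events on a 100x120 sensor.
xs = torch.randint(0, 120, (5,))
ys = torch.randint(0, 100, (5,))
feats = torch.rand(5, 2)
surface = MatrixLSTMSurface()(xs, ys, feats)     # torch.Size([8, 100, 120])

Because the surface is produced by a differentiable module, the task loss
(e.g., classification or optical-flow error) can backpropagate through the
LSTM, which is what makes the learned representation task-dependent.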
@misc{cannici2020matrixlstm,
abstract = {Dynamic Vision Sensors (DVSs) asynchronously stream events in correspondence
with pixels undergoing brightness changes. Unlike classic vision devices, they
produce a sparse representation of the scene. Therefore, to apply standard
computer vision algorithms, events need to be integrated into a frame or
event-surface. This is usually achieved through hand-crafted grids that
reconstruct the frame using ad hoc heuristics. In this paper, we propose
Matrix-LSTM, a grid of Long Short-Term Memory (LSTM) cells that learns
task-dependent event-surfaces end-to-end. Compared to existing reconstruction
approaches, our learned event-surface is more flexible and expressive,
improving on the baselines for optical flow estimation on the MVSEC benchmark
and on the state of the art in event-based object classification on the N-Cars
dataset.},
added-at = {2020-01-14T23:33:54.000+0100},
author = {Cannici, Marco and Ciccone, Marco and Romanoni, Andrea and Matteucci, Matteo},
biburl = {https://www.bibsonomy.org/bibtex/2f34b31ded1c9760586387d5a0e652f7b/analyst},
description = {[2001.03455] Matrix-LSTM: a Differentiable Recurrent Surface for Asynchronous Event-Based Data},
interhash = {5e754a784f1edfc7a5e4feac085083ca},
intrahash = {f34b31ded1c9760586387d5a0e652f7b},
keywords = {2020 deep-learning lstm},
note = {cite arxiv:2001.03455. Comment: 13 pages, 6 figures},
timestamp = {2020-01-14T23:33:54.000+0100},
title = {Matrix-LSTM: a Differentiable Recurrent Surface for Asynchronous Event-Based Data},
url = {http://arxiv.org/abs/2001.03455},
year = 2020
}