The development of efficient and robust algorithms for Big Data processing is a demanding task, which has to cope with the characteristics of this type of data (3Vs). Putting such algorithms as processing elements into larger pipelines adds an extra level of complexity, which can be alleviated by relying on a model-based approach including code generation. This allows data analysts to compose such pipelines on a higher level of abstraction, reducing the development effort as well as the risk of
errors. In this chapter, we outline a model-based and adaptive approach to the development of data processing pipelines in heterogeneous processing contexts. It relies on a flexible, tool-supported approach to configuration, which embraces three levels: (a) a heterogeneous processing infrastructure - including reconfigurable hardware, (b) the pipelines as well as (c) the stakeholder applications built upon the pipelines. Furthermore, selected aspects of implementing the approach, which is validated in the context of the financial domain, are presented.
%0 Book Section
%1 eichelberger2016configure
%A Eichelberger, Holger
%A Niederee, Claudia
%A Dollas, Apostolos
%A Ioannou, Ekaterini
%A Qin, Cui
%A Chrysos, Grigorios
%A Hube, Christoph
%A Tran, Tuan
%A Nydriotis, Apostolos
%A Malakonakis, Pavlos
%A Burkhard, Stefan
%A Becker, Tobias
%A Garofalakis, Minos
%B European Project Space on Intelligent Technologies, Software engineering, Computer Vision, Graphics, Optics and Photonics
%D 2016
%E Dell’Olmo, Paolo
%E Brambilla, Marco
%E Raposo, Maria
%I SCITEPRESS
%K IVML easyproducer generation myown qualimaster topologies
%P 124-148
%T Configure, Generate, Run: Model-based Development for Big Data Processing
%U http://scitepress.org/PublicationsDetail.aspx?ID=xDvyqc9+aL8=&t=1
%X The development of efficient and robust algorithms for Big Data processing is a demanding task, which has to cope with the characteristics of this type of data (3Vs). Putting such algorithms as processing elements into larger pipelines adds an extra level of complexity, which can be alleviated by relying on a model-based approach including code generation. This allows data analysts to compose such pipelines on a higher level of abstraction, reducing the development effort as well as the risk of
errors. In this chapter, we outline a model-based and adaptive approach to the development of data processing pipelines in heterogeneous processing contexts. It relies on a flexible, tool-supported approach to configuration, which embraces three levels: (a) a heterogeneous processing infrastructure - including reconfigurable hardware, (b) the pipelines as well as (c) the stakeholder applications built upon the pipelines. Furthermore, selected aspects of implementing the approach, which is validated in the context of the financial domain, are presented.
@incollection{eichelberger2016configure,
  abstract  = {The development of efficient and robust algorithms for Big Data processing is a demanding task, which has to cope with the characteristics of this type of data (3Vs). Putting such algorithms as processing elements into larger pipelines adds an extra level of complexity, which can be alleviated by relying on a model-based approach including code generation. This allows data analysts to compose such pipelines on a higher level of abstraction, reducing the development effort as well as the risk of errors. In this chapter, we outline a model-based and adaptive approach to the development of data processing pipelines in heterogeneous processing contexts. It relies on a flexible, tool-supported approach to configuration, which embraces three levels: (a) a heterogeneous processing infrastructure - including reconfigurable hardware, (b) the pipelines as well as (c) the stakeholder applications built upon the pipelines. Furthermore, selected aspects of implementing the approach, which is validated in the context of the financial domain, are presented.},
  added-at  = {2016-09-22T04:11:01.000+0200},
  author    = {Eichelberger, Holger and Niederee, Claudia and Dollas, Apostolos and Ioannou, Ekaterini and Qin, Cui and Chrysos, Grigorios and Hube, Christoph and Tran, Tuan and Nydriotis, Apostolos and Malakonakis, Pavlos and Burkhard, Stefan and Becker, Tobias and Garofalakis, Minos},
  biburl    = {https://www.bibsonomy.org/bibtex/2b033f38d2bcf29062193949dc0b9d987/eichelbe},
  booktitle = {European Project Space on Intelligent Technologies, Software engineering, Computer Vision, Graphics, Optics and Photonics},
  editor    = {Dell'Olmo, Paolo and Brambilla, Marco and Raposo, Maria},
  interhash = {381d2fbd8c4201d6393c58eaa60deee6},
  intrahash = {b033f38d2bcf29062193949dc0b9d987},
  keywords  = {IVML easyproducer generation myown qualimaster topologies},
  pages     = {124--148},
  publisher = {SCITEPRESS},
  timestamp = {2019-04-29T11:32:35.000+0200},
  title     = {Configure, Generate, Run: Model-based Development for {Big Data} Processing},
  url       = {http://scitepress.org/PublicationsDetail.aspx?ID=xDvyqc9+aL8=&t=1},
  year      = {2016}
}