Serverless computing services, such as Function-as-a-Service (FaaS), hold the attractive promise of a high level of abstraction and high performance, combined with the minimization of operational logic. Several large ecosystems of serverless platforms, both open- and closed-source, aim to realize this promise. Consequently, a lucrative market has emerged. However, the performance trade-offs of these systems are not well-understood. Moreover, it is exactly the high level of abstraction and the opaqueness of the operational-side that make performance evaluation studies of serverless platforms challenging. Learning from the history of IT platforms, we argue that a benchmark for serverless platforms could help address this challenge. We envision a comprehensive serverless benchmark, which we contrast to the narrow focus of prior work in this area. We argue that a comprehensive benchmark will need to take into account more than just runtime overhead, and include notions of cost, realistic workloads, more (open-source) platforms, and cloud integrations. Finally, we show through preliminary real-world experiments how such a benchmark can help compare the performance overhead when running a serverless workload on state-of-the-art platforms.
%0 Conference Paper
%1 10.1145/3375555.3384381
%A van Eyk, Erwin
%A Scheuner, Joel
%A Eismann, Simon
%A Abad, Cristina
%A Iosup, Alexandru
%B Companion of the 2020 ACM/SPEC International Conference on Performance Engineering
%D 2020
%K Metrics_and_benchmarking_methodologies Performance SPEC descartes t_visionposition myown
%T Beyond Microbenchmarks: The SPEC-RG Vision for A Comprehensive Serverless Benchmark
%U https://dl.acm.org/doi/10.1145/3375555.3384381
%X Serverless computing services, such as Function-as-a-Service (FaaS), hold the attractive promise of a high level of abstraction and high performance, combined with the minimization of operational logic. Several large ecosystems of serverless platforms, both open- and closed-source, aim to realize this promise. Consequently, a lucrative market has emerged. However, the performance trade-offs of these systems are not well-understood. Moreover, it is exactly the high level of abstraction and the opaqueness of the operational-side that make performance evaluation studies of serverless platforms challenging. Learning from the history of IT platforms, we argue that a benchmark for serverless platforms could help address this challenge. We envision a comprehensive serverless benchmark, which we contrast to the narrow focus of prior work in this area. We argue that a comprehensive benchmark will need to take into account more than just runtime overhead, and include notions of cost, realistic workloads, more (open-source) platforms, and cloud integrations. Finally, we show through preliminary real-world experiments how such a benchmark can help compare the performance overhead when running a serverless workload on state-of-the-art platforms.
@inproceedings{10.1145/3185768.3186293,
  abstract      = {Serverless computing services, such as Function-as-a-Service (FaaS), hold the attractive promise of a high level of abstraction and high performance, combined with the minimization of operational logic. Several large ecosystems of serverless platforms, both open- and closed-source, aim to realize this promise. Consequently, a lucrative market has emerged. However, the performance trade-offs of these systems are not well-understood. Moreover, it is exactly the high level of abstraction and the opaqueness of the operational-side that make performance evaluation studies of serverless platforms challenging. Learning from the history of IT platforms, we argue that a benchmark for serverless platforms could help address this challenge. We envision a comprehensive serverless benchmark, which we contrast to the narrow focus of prior work in this area. We argue that a comprehensive benchmark will need to take into account more than just runtime overhead, and include notions of cost, realistic workloads, more (open-source) platforms, and cloud integrations. Finally, we show through preliminary real-world experiments how such a benchmark can help compare the performance overhead when running a serverless workload on state-of-the-art platforms.},
  author        = {van Eyk, Erwin and Scheuner, Joel and Eismann, Simon and Abad, Cristina and Iosup, Alexandru},
  title         = {Beyond Microbenchmarks: The {SPEC-RG} Vision for A Comprehensive Serverless Benchmark},
  booktitle     = {Companion of the 2020 ACM/SPEC International Conference on Performance Engineering},
  publisher     = {Association for Computing Machinery},
  year          = {2020},
  doi           = {10.1145/3375555.3384381},
  url           = {https://dl.acm.org/doi/10.1145/3375555.3384381},
  keywords      = {Metrics_and_benchmarking_methodologies Performance SPEC descartes t_visionposition myown},
  added-at      = {2020-04-06T12:15:13.000+0200},
  timestamp     = {2022-09-15T01:05:46.000+0200},
  biburl        = {https://www.bibsonomy.org/bibtex/236f2fb928c33dfa7c8e70de484f9fb4c/simon.eismann},
  interhash     = {b4c2c9f9b153492179fadf98a1605814},
  intrahash     = {36f2fb928c33dfa7c8e70de484f9fb4c},
  internal-note = {NOTE(review): citation key embeds DOI 10.1145/3185768.3186293, which disagrees with the URL/doi 10.1145/3375555.3384381 recorded above; key kept unchanged so existing \cite commands keep working -- confirm which DOI is intended before renaming the key},
}