Here you can find a consolidated (a.k.a. slowly updated) list of my publications. A frequently updated (and possibly noisy) list of works is available on my Google Scholar profile.
Please find below a short list of highlighted publications from my recent activity.
Lomonaco, Vincenzo; Caro, Valerio De; Gallicchio, Claudio; Carta, Antonio; Sardianos, Christos; Varlamis, Iraklis; Tserpes, Konstantinos; Coppola, Massimo; Marpena, Mina; Politi, Sevasti; Schoitsch, Erwin; Bacciu, Davide AI-Toolkit: a Microservices Architecture for Low-Code Decentralized Machine Intelligence Conference Proceedings of 2023 IEEE International Conference on Acoustics, Speech and Signal Processing, 2023. Bacciu, Davide; Carta, Antonio; Sarli, Daniele Di; Gallicchio, Claudio; Lomonaco, Vincenzo; Petroni, Salvatore Towards Functional Safety Compliance of Recurrent Neural Networks Conference Proceedings of the International Conference on AI for People (CAIP 2021), 2021. Bacciu, Davide; Sarli, Daniele Di; Faraji, Pouria; Gallicchio, Claudio; Micheli, Alessio Federated Reservoir Computing Neural Networks Conference Proceedings of the International Joint Conference on Neural Networks (IJCNN 2021), IEEE, 2021. Davide, Bacciu; Stefano, Chessa; Claudio, Gallicchio; Alessandro, Lenzi; Alessio, Micheli; Susanna, Pelagatti A General Purpose Distributed Learning Model for Robotic Ecologies Conference Robot Control - 10th IFAC Symposium on Robot Control, vol. 10, ELSEVIER SCIENCE BV, 2012.

@conference{Lomonaco2023,
title = {AI-Toolkit: a Microservices Architecture for Low-Code Decentralized Machine Intelligence},
author = {Lomonaco, Vincenzo and De Caro, Valerio and Gallicchio, Claudio and Carta, Antonio and Sardianos, Christos and Varlamis, Iraklis and Tserpes, Konstantinos and Coppola, Massimo and Marpena, Mina and Politi, Sevasti and Schoitsch, Erwin and Bacciu, Davide},
year = {2023},
date = {2023-06-04},
urldate = {2023-06-04},
booktitle = {Proceedings of 2023 IEEE International Conference on Acoustics, Speech and Signal Processing},
abstract = {Artificial Intelligence and Machine Learning toolkits such as Scikit-learn, PyTorch and Tensorflow provide today a solid starting point for the rapid prototyping of R\&D solutions. However, they can be hardly ported to heterogeneous decentralised hardware and real-world production environments. A common practice involves outsourcing deployment solutions to scalable cloud infrastructures such as Amazon SageMaker or Microsoft Azure. In this paper, we proposed an open-source microservices-based architecture for decentralised machine intelligence which aims at bringing R\&D and deployment functionalities closer following a low-code approach. Such an approach would guarantee flexible integration of cutting-edge functionalities while preserving complete control over the deployed solutions at negligible costs and maintenance efforts.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
@conference{BacciuCAIP2021,
title = {Towards Functional Safety Compliance of Recurrent Neural Networks},
author = {Bacciu, Davide and Carta, Antonio and Di Sarli, Daniele and Gallicchio, Claudio and Lomonaco, Vincenzo and Petroni, Salvatore},
url = {https://aiforpeople.org/conference/assets/papers/CAIP21-P09.pdf, Open Access PDF},
year = {2021},
date = {2021-11-20},
booktitle = {Proceedings of the International Conference on AI for People (CAIP 2021)},
abstract = {Deploying Autonomous Driving systems requires facing some novel challenges for the Automotive industry. One of the most critical aspects that can severely compromise their deployment is Functional Safety. The ISO 26262 standard provides guidelines to ensure Functional Safety of road vehicles. However, this standard is not suitable to develop Artificial Intelligence based systems such as systems based on Recurrent Neural Networks (RNNs). To address this issue, in this paper we propose a new methodology, composed of three steps. The first step is the robustness evaluation of the RNN against inputs perturbations. Then, a proper set of safety measures must be defined according to the model's robustness, where less robust models will require stronger mitigation. Finally, the functionality of the entire system must be extensively tested according to Safety Of The Intended Functionality (SOTIF) guidelines, providing quantitative results about the occurrence of unsafe scenarios, and by evaluating appropriate Safety Performance Indicators.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
@conference{BacciuIJCNN2021,
title = {Federated Reservoir Computing Neural Networks},
author = {Bacciu, Davide and Di Sarli, Daniele and Faraji, Pouria and Gallicchio, Claudio and Micheli, Alessio},
doi = {10.1109/IJCNN52387.2021.9534035},
year = {2021},
date = {2021-07-18},
urldate = {2021-07-18},
booktitle = {Proceedings of the International Joint Conference on Neural Networks (IJCNN 2021)},
publisher = {IEEE},
abstract = {A critical aspect in Federated Learning is the aggregation strategy for the combination of multiple models, trained on the edge, into a single model that incorporates all the knowledge in the federation. Common Federated Learning approaches for Recurrent Neural Networks (RNNs) do not provide guarantees on the predictive performance of the aggregated model. In this paper we show how the use of Echo State Networks (ESNs), which are efficient state-of-the-art RNN models for time-series processing, enables a form of federation that is optimal in the sense that it produces models mathematically equivalent to the corresponding centralized model. Furthermore, the proposed method is compliant with privacy constraints. The proposed method, which we denote as Incremental Federated Learning, is experimentally evaluated against an averaging strategy on two datasets for human state and activity recognition.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
@conference{11568_193770,
title = {A General Purpose Distributed Learning Model for Robotic Ecologies},
author = {Bacciu, Davide and Chessa, Stefano and Gallicchio, Claudio and Lenzi, Alessandro and Micheli, Alessio and Pelagatti, Susanna},
url = {http://www.ifac-papersonline.net/Detailed/55807.html},
doi = {10.3182/20120905-3-HR-2030.00178},
year = {2012},
date = {2012-01-01},
booktitle = {Robot Control - 10th IFAC Symposium on Robot Control},
journal = {IFAC PROCEEDINGS VOLUMES},
volume = {10},
pages = {435--440},
publisher = {ELSEVIER SCIENCE BV},
abstract = {The design of a learning system for robotic ecologies need to account for some key aspects of the ecology model such as distributivity, heterogeneity of the computational, sensory and actuator capabilities, as well as self-configurability. The paper proposes general guiding principles for learning systems' design that ensue from key ecology properties, and presents a distributed learning system for the Rubicon ecology that draws inspiration from such guidelines. The proposed learning system provides the Rubicon ecology with a set of general-purpose learning services which can be used to learn generic computational tasks that involve predicting information of interest based on dynamic sensorial input streams.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}