@conference{cosmo2024, title = {Constraint-Free Structure Learning with Smooth Acyclic Orientations}, author = {Martina Cinquini and Francesco Landolfi and Riccardo Massidda}, url = {https://openreview.net/forum?id=KWO8LSUC5W}, year = {2024}, date = {2024-05-06}, urldate = {2024-01-01}, booktitle = {The Twelfth International Conference on Learning Representations}, abstract = {The structure learning problem consists of fitting data generated by a Directed Acyclic Graph (DAG) to correctly reconstruct its arcs. In this context, differentiable approaches constrain or regularize an optimization problem with a continuous relaxation of the acyclicity property. The computational cost of evaluating graph acyclicity is cubic in the number of nodes and significantly affects scalability. In this paper, we introduce COSMO, a constraint-free continuous optimization scheme for acyclic structure learning. At the core of our method lies a novel differentiable approximation of an orientation matrix parameterized by a single priority vector. Unlike previous works, our parameterization fits a smooth orientation matrix and the resulting acyclic adjacency matrix without evaluating acyclicity at any step. Despite this absence, we prove that COSMO always converges to an acyclic solution. In addition to being asymptotically faster, our empirical analysis highlights how COSMO's performance on graph reconstruction compares favorably with competing structure learning methods. }, keywords = {causal learning, graph data, structure learning}, pubstate = {published}, tppubtype = {conference} } @conference{nokey, title = {Deep Reinforcement Learning for Network Slice Placement and the DeepNetSlice Toolkit}, author = {Alex Pasquali and Vincenzo Lomonaco and Davide Bacciu and Federica Paganelli}, year = {2024}, date = {2024-05-05}, urldate = {2024-05-05}, booktitle = {Proceedings of the IEEE International Conference on Machine Learning for Communication and Networking 2024 (IEEE ICMLCN 2024)}, publisher = {IEEE}, keywords = {deep graph networks, deep learning for graphs, pervasive artificial intelligence, pervasive computing, reinforcement learning}, pubstate = {forthcoming}, tppubtype = {conference} } @workshop{Ninniri2024, title = {Classifier-free graph diffusion for molecular property targeting}, author = {Matteo Ninniri and Marco Podda and Davide Bacciu}, url = {https://arxiv.org/abs/2312.17397, Arxiv}, year = {2024}, date = {2024-02-27}, booktitle = {4th workshop on Graphs and more Complex structures for Learning and Reasoning (GCLR) at AAAI 2024}, abstract = {This work focuses on the task of property targeting: that is, generating molecules conditioned on target chemical properties to expedite candidate screening for novel drug and materials development. DiGress is a recent diffusion model for molecular graphs whose distinctive feature is allowing property targeting through classifier-based (CB) guidance. While CB guidance may work to generate molecular-like graphs, we hint at the fact that its assumptions apply poorly to the chemical domain. Based on this insight, we propose a classifier-free DiGress (FreeGress), which works by directly injecting the conditioning information into the training process. Classifier-free (CF) guidance is convenient given its less stringent assumptions and since it does not require training an auxiliary property regressor, thus halving the number of trainable parameters in the model. 
We empirically show that our model yields up to 79% improvement in Mean Absolute Error with respect to DiGress on property targeting tasks on QM9 and ZINC-250k benchmarks. As an additional contribution, we propose a simple yet powerful approach to improve the chemical validity of generated samples, based on the observation that certain chemical properties such as molecular weight correlate with the number of atoms in molecules. }, keywords = {deep graph networks, deep learning for graphs, diffusion process, generative model, molecule generation, structured data processing}, pubstate = {published}, tppubtype = {workshop} } @article{lepri2023neural, title = {Neural Autoencoder-Based Structure-Preserving Model Order Reduction and Control Design for High-Dimensional Physical Systems}, author = {Marco Lepri and Davide Bacciu and Cosimo Della Santina}, year = {2023}, date = {2023-12-21}, urldate = {2023-01-01}, journal = {IEEE Control Systems Letters}, publisher = {IEEE}, keywords = {deep learning, dynamical systems, robotics}, pubstate = {published}, tppubtype = {article} } @workshop{Gravina2023b, title = {Effective Non-Dissipative Propagation for Continuous-Time Dynamic Graphs}, author = {Alessio Gravina and Giulio Lovisotto and Claudio Gallicchio and Davide Bacciu and Claas Grohnfeldt}, url = {https://openreview.net/forum?id=zAHFC2LNEe, PDF}, year = {2023}, date = {2023-12-11}, urldate = {2023-12-11}, booktitle = {Temporal Graph Learning Workshop, NeurIPS 2023}, abstract = {Recent research on Deep Graph Networks (DGNs) has broadened the domain of learning on graphs to real-world systems of interconnected entities that evolve over time. This paper addresses prediction problems on graphs defined by a stream of events, possibly irregularly sampled over time, generally referred to as Continuous-Time Dynamic Graphs (C-TDGs). While many predictive problems on graphs may require capturing interactions between nodes at different distances, existing DGNs for C-TDGs are not designed to propagate and preserve long-range information, resulting in suboptimal performance. In this work, we present Continuous-Time Graph Anti-Symmetric Network (CTAN), a DGN for C-TDGs designed within the ordinary differential equations framework that enables efficient propagation of long-range dependencies. We show that our method robustly performs stable and non-dissipative information propagation over dynamically evolving graphs, where the number of ODE discretization steps allows scaling the propagation range. We empirically validate the proposed approach on several real and synthetic graph benchmarks, showing that CTAN leads to improved performance while enabling the propagation of long-range information.}, keywords = {deep graph networks, deep learning for graphs, dynamic graphs, dynamical systems}, pubstate = {published}, tppubtype = {workshop} } @proceedings{Georgiev2023, title = {Neural Algorithmic Reasoning for Combinatorial Optimisation}, author = {Dobrik Georgiev and Danilo Numeroso and Davide Bacciu and Pietro Lio }, year = {2023}, date = {2023-11-27}, urldate = {2023-11-27}, booktitle = {Proceedings of the Learning on Graphs Conference (LOG 2023)}, publisher = {PMLR}, abstract = { Solving NP-hard/complete combinatorial problems with neural networks is a challenging research area that aims to surpass classical approximate algorithms. The long-term objective is to outperform hand-designed heuristics for NP-hard/complete problems by learning to generate superior solutions solely from training data. 
Current neural-based methods for solving CO problems often overlook the inherent "algorithmic" nature of the problems. In contrast, heuristics designed for CO problems, e.g., TSP, frequently leverage well-established algorithms, such as those for finding the minimum spanning tree. In this paper, we propose leveraging recent advancements in neural algorithmic reasoning to improve the learning of CO problems. Specifically, we suggest pre-training our neural model on relevant algorithms before training it on CO instances. Our results demonstrate that, using this learning setup, we achieve superior performance compared to non-algorithmically informed deep learning models.}, keywords = {algorithmic reasoning, deep graph networks, deep learning for graphs}, pubstate = {published}, tppubtype = {proceedings} } @article{errica2023pydgn, title = {PyDGN: a Python Library for Flexible and Reproducible Research on Deep Learning for Graphs}, author = {Federico Errica and Davide Bacciu and Alessio Micheli}, year = {2023}, date = {2023-10-31}, urldate = {2023-01-01}, journal = {Journal of Open Source Software}, volume = {8}, number = {90}, pages = {5713}, keywords = {deep graph networks, deep learning for graphs, graph data, software}, pubstate = {published}, tppubtype = {article} } @conference{Errica2023, title = {Hidden Markov Models for Temporal Graph Representation Learning}, author = {Federico Errica and Alessio Gravina and Davide Bacciu and Alessio Micheli}, editor = {Michel Verleysen}, year = {2023}, date = {2023-10-04}, urldate = {2023-10-04}, booktitle = {Proceedings of the 31st European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning }, keywords = {deep graph networks, generative model, hidden Markov models, learning with structured data}, pubstate = {published}, tppubtype = {conference} } @conference{Landolfi2023, title = { A Tropical View of Graph Neural Networks }, author = {Francesco Landolfi and Davide Bacciu and Danilo Numeroso }, editor = {Michel Verleysen}, year = {2023}, date = {2023-10-04}, urldate = {2023-10-04}, booktitle = {Proceedings of the 31st European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning }, keywords = {deep learning for graphs, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{Ceni2023, title = { Improving Fairness via Intrinsic Plasticity in Echo State Networks }, author = {Andrea Ceni and Davide Bacciu and Valerio De Caro and Claudio Gallicchio and Luca Oneto }, editor = {Michel Verleysen}, year = {2023}, date = {2023-10-04}, urldate = {2023-10-04}, booktitle = {Proceedings of the 31st European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning }, keywords = {intrinsic plasticity, recurrent neural network, reservoir computing, trustworthy AI}, pubstate = {published}, tppubtype = {conference} } @conference{Cossu2023, title = { A Protocol for Continual Explanation of SHAP }, author = {Andrea Cossu and Francesco Spinnato and Riccardo Guidotti and Davide Bacciu}, editor = {Michel Verleysen}, year = {2023}, date = {2023-10-04}, urldate = {2023-10-04}, booktitle = {Proceedings of the 31st European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning }, keywords = {explainable AI, recurrent neural network, reservoir computing, trustworthy AI}, pubstate = {published}, tppubtype = {conference} } @conference{Caro2023, title = { Communication-Efficient Ridge Regression in Federated 
Echo State Networks }, author = {Valerio De Caro and Antonio Di Mauro and Davide Bacciu and Claudio Gallicchio }, editor = {Michel Verleysen}, year = {2023}, date = {2023-10-04}, urldate = {2023-10-04}, booktitle = {Proceedings of the 31st European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning }, keywords = {Echo state networks, federated learning, pervasive artificial intelligence, reservoir computing}, pubstate = {published}, tppubtype = {conference} } @conference{Bacciu2023c, title = {Graph Representation Learning }, author = {Davide Bacciu and Federico Errica and Alessio Micheli and Nicolò Navarin and Luca Pasa and Marco Podda and Daniele Zambon }, editor = {Michel Verleysen}, year = {2023}, date = {2023-10-04}, urldate = {2023-10-04}, booktitle = {Proceedings of the 31st European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning }, keywords = {deep graph networks, deep learning for graphs, graph data}, pubstate = {published}, tppubtype = {conference} } @workshop{Ceni2023c, title = {Randomly Coupled Oscillators}, author = {Andrea Ceni and Andrea Cossu and Jingyue Liu and Maximilian Stölzle and Cosimo Della Santina and Claudio Gallicchio and Davide Bacciu}, year = {2023}, date = {2023-09-18}, booktitle = {Proceedings of the ECML/PKDD Workshop on Deep Learning meets Neuromorphic Hardware}, keywords = {dynamical systems, neuromorphic computing, recurrent neural network, reservoir computing}, pubstate = {published}, tppubtype = {workshop} } @workshop{Gravina2023c, title = {Non-Dissipative Propagation by Randomized Anti-Symmetric Deep Graph Networks}, author = {Alessio Gravina and Claudio Gallicchio and Davide Bacciu}, year = {2023}, date = {2023-09-18}, urldate = {2023-09-18}, booktitle = {Proceedings of the ECML/PKDD Workshop on Deep Learning meets Neuromorphic Hardware}, keywords = {deep graph networks, deep learning for graphs, dynamical systems, graph data}, pubstate = {published}, tppubtype = {workshop} } @conference{Cosenza2023, title = {Graph-based Polyphonic Multitrack Music Generation}, author = {Emanuele Cosenza and Andrea Valenti and Davide Bacciu }, year = {2023}, date = {2023-08-19}, urldate = {2023-08-19}, booktitle = {Proceedings of the 32nd International Joint Conference on Artificial Intelligence (IJCAI 2023)}, keywords = {artificial creativity, deep graph networks, deep learning for graphs, music generation}, pubstate = {published}, tppubtype = {conference} } @conference{Hemati2023, title = {Partial Hypernetworks for Continual Learning}, author = {Hamed Hemati and Vincenzo Lomonaco and Davide Bacciu and Damian Borth}, year = {2023}, date = {2023-08-01}, urldate = {2023-08-01}, booktitle = {Proceedings of the International Conference on Lifelong Learning Agents (CoLLAs 2023)}, publisher = {Proceedings of Machine Learning Research}, keywords = {Continual learning}, pubstate = {published}, tppubtype = {conference} } @conference{Hemati2023b, title = {Class-Incremental Learning with Repetition }, author = {Hamed Hemati and Andrea Cossu and Antonio Carta and Julio Hurtado and Lorenzo Pellegrini and Davide Bacciu and Vincenzo Lomonaco and Damian Borth}, year = {2023}, date = {2023-08-01}, urldate = {2023-08-01}, booktitle = {Proceedings of the International Conference on Lifelong Learning Agents (CoLLAs 2023)}, publisher = {Proceedings of Machine Learning Research}, keywords = {Continual learning}, pubstate = {published}, tppubtype = {conference} } @workshop{nokey, title = {Decentralized Plasticity 
in Reservoir Dynamical Networks for Pervasive Environments}, author = {Valerio De Caro and Davide Bacciu and Claudio Gallicchio }, url = {https://openreview.net/forum?id=5hScPOeDaR, PDF}, year = {2023}, date = {2023-07-29}, urldate = {2023-07-29}, booktitle = {Proceedings of the 2023 ICML Workshop on Localized Learning: Decentralized Model Updates via Non-Global Objectives }, keywords = {Echo state networks, federated learning, intrinsic plasticity, recurrent neural network, unsupervised learning}, pubstate = {published}, tppubtype = {workshop} } @workshop{Ceni2023b, title = {Randomly Coupled Oscillators for Time Series Processing}, author = {Andrea Ceni and Andrea Cossu and Jingyue Liu and Maximilian Stölzle and Cosimo Della Santina and Claudio Gallicchio and Davide Bacciu}, url = {https://openreview.net/forum?id=fmn7PMykEb, PDF}, year = {2023}, date = {2023-07-28}, urldate = {2023-07-28}, booktitle = {Proceedings of the 2023 ICML Workshop on New Frontiers in Learning, Control, and Dynamical Systems }, keywords = {dynamical systems, recurrent neural network, reservoir computing}, pubstate = {published}, tppubtype = {workshop} } @workshop{Massidda2023b, title = {Differentiable Causal Discovery with Smooth Acyclic Orientations}, author = {Riccardo Massidda and Francesco Landolfi and Martina Cinquini and Davide Bacciu}, url = {https://openreview.net/forum?id=IVwWgscehR, PDF}, year = {2023}, date = {2023-07-28}, urldate = {2023-07-28}, booktitle = {Proceedings of the 2023 ICML Workshop on Differentiable Almost Everything: Differentiable Relaxations, Algorithms, Operators, and Simulators }, keywords = {causal learning, structure learning}, pubstate = {published}, tppubtype = {workshop} } @conference{nokey, title = {ECGAN: generative adversarial network for electrocardiography}, author = {Lorenzo Simone and Davide Bacciu }, year = {2023}, date = {2023-06-12}, urldate = {2023-06-12}, booktitle = {Proceedings of Artificial Intelligence In Medicine 2023 (AIME 2023)}, keywords = {bioinformatics, biomedical data, generative model, multivariate time-series}, pubstate = {published}, tppubtype = {conference} } @conference{Lomonaco2023, title = {AI-Toolkit: a Microservices Architecture for Low-Code Decentralized Machine Intelligence}, author = {Vincenzo Lomonaco and Valerio De Caro and Claudio Gallicchio and Antonio Carta and Christos Sardianos and Iraklis Varlamis and Konstantinos Tserpes and Massimo Coppola and Mina Marpena and Sevasti Politi and Erwin Schoitsch and Davide Bacciu}, year = {2023}, date = {2023-06-04}, urldate = {2023-06-04}, booktitle = {Proceedings of 2023 IEEE International Conference on Acoustics, Speech and Signal Processing}, abstract = {Artificial Intelligence and Machine Learning toolkits such as Scikit-learn, PyTorch and Tensorflow provide today a solid starting point for the rapid prototyping of R&D solutions. However, they can be hardly ported to heterogeneous decentralised hardware and real-world production environments. A common practice involves outsourcing deployment solutions to scalable cloud infrastructures such as Amazon SageMaker or Microsoft Azure. In this paper, we proposed an open-source microservices-based architecture for decentralised machine intelligence which aims at bringing R&D and deployment functionalities closer following a low-code approach. 
Such an approach would guarantee flexible integration of cutting-edge functionalities while preserving complete control over the deployed solutions at negligible costs and maintenance efforts.}, keywords = {AI-as-a-service, Continual learning, distributed learning, pervasive artificial intelligence, software}, pubstate = {published}, tppubtype = {conference} } @conference{DeCaro2023, title = {Prediction of Driver's Stress Affection in Simulated Autonomous Driving Scenarios}, author = {Valerio De Caro and Herbert Danzinger and Claudio Gallicchio and Clemens Könczöl and Vincenzo Lomonaco and Mina Marmpena and Sevasti Politi and Omar Veledar and Davide Bacciu}, year = {2023}, date = {2023-06-04}, urldate = {2023-06-04}, booktitle = {Proceedings of 2023 IEEE International Conference on Acoustics, Speech and Signal Processing}, abstract = {We investigate the task of predicting stress affection from physiological data of users experiencing simulations of autonomous driving. We approach this task on two levels of granularity, depending on whether the prediction is performed at the end of the simulation or along the simulation. In the former, denoted as coarse-grained prediction, we employed Decision Trees. In the latter, denoted as fine-grained prediction, we employed Echo State Networks, a Recurrent Neural Network that allows efficient learning from temporal data and hence is suitable for pervasive environments. We conduct experiments on a private dataset of physiological data from people participating in multiple driving scenarios simulating different stressful events. The results show that the proposed model is capable of detecting conditions of event-related cognitive stress, proving the existence of a correlation between stressful events and the physiological data.}, keywords = {activity recognition, biomedical data, Echo state networks, humanistic intelligence, reservoir computing}, pubstate = {published}, tppubtype = {conference} } @conference{Gravina2023, title = {Anti-Symmetric DGN: a stable architecture for Deep Graph Networks}, author = {Alessio Gravina and Davide Bacciu and Claudio Gallicchio}, url = {https://openreview.net/pdf?id=J3Y7cgZOOS}, year = {2023}, date = {2023-05-01}, urldate = {2023-05-01}, booktitle = {Proceedings of the Eleventh International Conference on Learning Representations (ICLR 2023) }, abstract = {Deep Graph Networks (DGNs) currently dominate the research landscape of learning from graphs, due to their efficiency and ability to implement an adaptive message-passing scheme between the nodes. However, DGNs are typically limited in their ability to propagate and preserve long-term dependencies between nodes, i.e., they suffer from the over-squashing phenomenon. As a result, we can expect them to under-perform, since different problems require capturing interactions at different (and possibly large) radii in order to be effectively solved. In this work, we present Anti-Symmetric Deep Graph Networks (A-DGNs), a framework for stable and non-dissipative DGN design, conceived through the lens of ordinary differential equations. We give theoretical proof that our method is stable and non-dissipative, leading to two key results: long-range information between nodes is preserved, and no gradient vanishing or explosion occurs in training. 
We empirically validate the proposed approach on several graph benchmarks, showing that A-DGN yields improved performance and enables effective learning even when dozens of layers are used.}, keywords = {deep graph networks, deep learning for graphs, dynamical systems}, pubstate = {published}, tppubtype = {conference} } @conference{Numeroso2023, title = {Dual Algorithmic Reasoning}, author = {Danilo Numeroso and Davide Bacciu and Petar Veličković}, url = {https://openreview.net/pdf?id=hhvkdRdWt1F}, year = {2023}, date = {2023-05-01}, urldate = {2023-05-01}, booktitle = {Proceedings of the Eleventh International Conference on Learning Representations (ICLR 2023)}, abstract = {Neural Algorithmic Reasoning is an emerging area of machine learning which seeks to infuse algorithmic computation in neural networks, typically by training neural models to approximate steps of classical algorithms. In this context, much of the current work has focused on learning reachability and shortest path graph algorithms, showing that joint learning on similar algorithms is beneficial for generalisation. However, when targeting more complex problems, such "similar" algorithms become more difficult to find. Here, we propose to learn algorithms by exploiting the duality of the underlying algorithmic problem. Many algorithms solve optimisation problems. We demonstrate that simultaneously learning the dual definition of these optimisation problems in algorithmic learning allows for better learning and qualitatively better solutions. Specifically, we exploit the max-flow min-cut theorem to simultaneously learn these two algorithms over synthetically generated graphs, demonstrating the effectiveness of the proposed approach. We then validate the real-world utility of our dual algorithmic reasoner by deploying it on a challenging brain vessel classification task, which likely depends on the vessels’ flow properties. We demonstrate a clear performance gain when using our model within such a context, and empirically show that learning the max-flow and min-cut algorithms together is critical for achieving such a result.}, note = {Notable Spotlight paper}, keywords = {algorithmic reasoning, deep graph networks, deep learning for graphs, featured}, pubstate = {published}, tppubtype = {conference} } @conference{Massidda2023, title = {Causal Abstraction with Soft Interventions}, author = {Riccardo Massidda and Atticus Geiger and Thomas Icard and Davide Bacciu}, year = {2023}, date = {2023-04-17}, urldate = {2023-04-17}, booktitle = {Proceedings of the 2nd Conference on Causal Learning and Reasoning (CLeaR 2023)}, publisher = {PMLR}, keywords = {Bayesian networks, causal learning, explainable AI}, pubstate = {published}, tppubtype = {conference} } @workshop{nokey, title = {Non-Dissipative Propagation by Anti-Symmetric Deep Graph Networks}, author = {Alessio Gravina and Davide Bacciu and Claudio Gallicchio}, url = {https://drive.google.com/file/d/1uPHhjwSa3g_hRvHwx6UnbMLgGN_cAqMu/view, PDF}, year = {2023}, date = {2023-02-13}, urldate = {2023-02-13}, booktitle = {Proceedings of the Ninth International Workshop on Deep Learning on Graphs: Method and Applications (DLG-AAAI’23)}, abstract = {Deep Graph Networks (DGNs) currently dominate the research landscape of learning from graphs, due to the efficiency of their adaptive message-passing scheme between nodes. 
However, DGNs are typically limited in their ability to propagate and preserve long-term dependencies between nodes, i.e., they suffer from the over-squashing phenomenon. This reduces their effectiveness, since predictive problems may require capturing interactions at different, and possibly large, radii in order to be effectively solved. In this work, we present Anti-Symmetric DGN (A-DGN), a framework for stable and non-dissipative DGN design, conceived through the lens of ordinary differential equations. We give theoretical proof that our method is stable and non-dissipative, leading to two key results: long-range information between nodes is preserved, and no gradient vanishing or explosion occurs in training. We empirically validate the proposed approach on several graph benchmarks, showing that A-DGN yields improved performance and enables effective learning even when dozens of layers are used.}, note = {Winner of the Best Student Paper Award at DLG-AAAI23}, keywords = {deep graph networks, deep learning for graphs, dynamical systems}, pubstate = {published}, tppubtype = {workshop} } @conference{Bacciu2023, title = {Generalizing Downsampling from Regular Data to Graphs}, author = {Davide Bacciu and Alessio Conte and Francesco Landolfi}, url = {https://arxiv.org/abs/2208.03523, Arxiv}, year = {2023}, date = {2023-02-07}, urldate = {2023-02-07}, booktitle = {Proceedings of the Thirty-Seventh AAAI Conference on Artificial Intelligence}, abstract = {Downsampling produces coarsened, multi-resolution representations of data and it is used, for example, to produce lossy compression and visualization of large images, reduce computational costs, and boost deep neural representation learning. Unfortunately, due to their lack of a regular structure, there is still no consensus on how downsampling should apply to graphs and linked data. Indeed, reductions in graph data are still needed for the goals described above, but reduction mechanisms do not have the same focus on preserving topological structures and properties, while allowing for resolution-tuning, as is the case in regular data downsampling. In this paper, we take a step in this direction, introducing a unifying interpretation of downsampling in regular and graph data. In particular, we define a graph coarsening mechanism which is a graph-structured counterpart of controllable equispaced coarsening mechanisms in regular data. We prove theoretical guarantees for distortion bounds on path lengths, as well as the ability to preserve key topological properties in the coarsened graphs. We leverage these concepts to define a graph pooling mechanism that we empirically assess in graph classification tasks, providing a greedy algorithm that allows efficient parallel implementation on GPUs, and showing that it compares favorably against pooling methods in the literature. 
}, keywords = {deep learning for graphs, graph data, graph pooling}, pubstate = {published}, tppubtype = {conference} } @article{Bacciu2023b, title = {Deep Graph Networks for Drug Repurposing with Multi-Protein Targets}, author = {Davide Bacciu and Federico Errica and Alessio Gravina and Lorenzo Madeddu and Marco Podda and Giovanni Stilo}, doi = {10.1109/TETC.2023.3238963}, year = {2023}, date = {2023-02-01}, urldate = {2023-02-01}, journal = {IEEE Transactions on Emerging Topics in Computing, 2023}, keywords = {bioinformatics, biomedical data, deep graph networks, deep learning for graphs}, pubstate = {published}, tppubtype = {article} } @article{Lanciano2023extending, title = {Extending OpenStack Monasca for Predictive Elasticity Control}, author = {Giacomo Lanciano and Filippo Galli and Tommaso Cucinotta and Davide Bacciu and Andrea Passarella}, doi = {10.26599/BDMA.2023.9020014}, year = {2023}, date = {2023-01-01}, urldate = {2023-01-01}, journal = {Big Data Mining and Analytics}, keywords = {}, pubstate = {published}, tppubtype = {article} } @article{DECARO2023126638, title = {Continual adaptation of federated reservoirs in pervasive environments}, author = {Valerio De Caro and Claudio Gallicchio and Davide Bacciu}, url = {https://www.sciencedirect.com/science/article/pii/S0925231223007610}, doi = {https://doi.org/10.1016/j.neucom.2023.126638}, issn = {0925-2312}, year = {2023}, date = {2023-01-01}, urldate = {2023-01-01}, journal = {Neurocomputing}, pages = {126638}, abstract = {When performing learning tasks in pervasive environments, the main challenge arises from the need of combining federated and continual settings. The former comes from the massive distribution of devices with privacy-regulated data. The latter is required by the low resources of the participating devices, which may retain data for short periods of time. In this paper, we propose a setup for learning with Echo State Networks (ESNs) in pervasive environments. Our proposal focuses on the use of Intrinsic Plasticity (IP), a gradient-based method for adapting the reservoir’s non-linearity. First, we extend the objective function of IP to include the uncertainty arising from the distribution of the data over space and time. Then, we propose Federated Intrinsic Plasticity (FedIP), which is intended for client–server federated topologies with stationary data, and adapts the learning scheme provided by Federated Averaging (FedAvg) to include the learning rule of IP. Finally, we further extend this algorithm for learning to Federated Continual Intrinsic Plasticity (FedCLIP) to equip clients with CL strategies for dealing with continuous data streams. We evaluate our approach on an incremental setup built upon real-world datasets from human monitoring, where we tune the complexity of the scenario in terms of the distribution of the data over space and time. 
Results show that both our algorithms improve the representation capabilities and the performance of the ESN, while being robust to catastrophic forgetting.}, keywords = {Continual learning, Echo state networks, federated learning, reservoir computing}, pubstate = {published}, tppubtype = {article} } @article{10239346, title = {A 2-phase Strategy For Intelligent Cloud Operations}, author = {Giacomo Lanciano and Remo Andreoli and Tommaso Cucinotta and Davide Bacciu and Andrea Passarella}, doi = {10.1109/ACCESS.2023.3312218}, year = {2023}, date = {2023-01-01}, urldate = {2023-01-01}, journal = {IEEE Access}, pages = {1-1}, keywords = {}, pubstate = {published}, tppubtype = {article} } @conference{Caro2022, title = {Federated Adaptation of Reservoirs via Intrinsic Plasticity}, author = {Valerio {De Caro} and Claudio Gallicchio and Davide Bacciu}, editor = {Michel Verleysen}, url = {https://arxiv.org/abs/2206.11087, Arxiv}, year = {2022}, date = {2022-10-05}, urldate = {2022-10-05}, booktitle = {Proceedings of the 30th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN 2022)}, abstract = {We propose a novel algorithm for performing federated learning with Echo State Networks (ESNs) in a client-server scenario. In particular, our proposal focuses on the adaptation of reservoirs by combining Intrinsic Plasticity with Federated Averaging. The former is a gradient-based method for adapting the reservoir's non-linearity in a local and unsupervised manner, while the latter provides the framework for learning in the federated scenario. We evaluate our approach on real-world datasets from human monitoring, in comparison with the previous approach for federated ESNs existing in literature. Results show that adapting the reservoir with our algorithm provides a significant improvement on the performance of the global model. }, keywords = {Echo state networks, federated learning, intrinsic plasticity, recurrent neural network, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @conference{nokey, title = {Deep Learning for Graphs}, author = {Davide Bacciu and Federico Errica and Nicolò Navarin and Luca Pasa and Daniele Zambon}, editor = {Michel Verleysen}, year = {2022}, date = {2022-10-05}, urldate = {2022-10-05}, booktitle = {Proceedings of the 30th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN 2022)}, keywords = {deep graph networks, deep learning for graphs, learning with structured data, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{Valenti2022c, title = {Modular Representations for Weak Disentanglement}, author = {Andrea Valenti and Davide Bacciu}, editor = {Michel Verleysen}, url = {https://arxiv.org/pdf/2209.05336.pdf}, year = {2022}, date = {2022-10-05}, urldate = {2022-10-05}, booktitle = {Proceedings of the 30th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN 2022)}, abstract = {The recently introduced weakly disentangled representations proposed to relax some constraints of the previous definitions of disentanglement, in exchange for more flexibility. However, at the moment, weak disentanglement can only be achieved by increasing the amount of supervision as the number of factors of variations of the data increase. 
In this paper, we introduce modular representations for weak disentanglement, a novel method that allows to keep the amount of supervised information constant with respect the number of generative factors. The experiments shows that models using modular representations can increase their performance with respect to previous work without the need of additional supervision.}, keywords = {autoencoder, disentanglement, learning-symbolic integration, representation learning}, pubstate = {published}, tppubtype = {conference} } @conference{Matteoni2022, title = {Continual Learning for Human State Monitoring}, author = {Federico Matteoni and Andrea Cossu and Claudio Gallicchio and Vincenzo Lomonaco and Davide Bacciu}, editor = {Michel Verleysen}, url = {https://arxiv.org/pdf/2207.00010, Arxiv}, year = {2022}, date = {2022-10-05}, urldate = {2022-10-05}, booktitle = {Proceedings of the 30th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN 2022)}, abstract = {Continual Learning (CL) on time series data represents a promising but under-studied avenue for real-world applications. We propose two new CL benchmarks for Human State Monitoring. We carefully designed the benchmarks to mirror real-world environments in which new subjects are continuously added. We conducted an empirical evaluation to assess the ability of popular CL strategies to mitigate forgetting in our benchmarks. Our results show that, possibly due to the domain-incremental properties of our benchmarks, forgetting can be easily tackled even with a simple finetuning and that existing strategies struggle in accumulating knowledge over a fixed, held-out, test subject.}, keywords = {Continual learning, humanistic intelligence, multivariate time-series, recurrent neural network}, pubstate = {published}, tppubtype = {conference} } @conference{Massidda2022, title = {Knowledge-Driven Interpretation of Convolutional Neural Networks}, author = {Riccardo Massidda and Davide Bacciu}, year = {2022}, date = {2022-09-20}, urldate = {2022-09-20}, booktitle = {Proceedings of the 2022 European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML-PKDD 2022)}, abstract = {Since the widespread adoption of deep learning solutions in critical environments, the interpretation of artificial neural networks has become a significant issue. To this end, numerous approaches currently try to align human-level concepts with the activation patterns of artificial neurons. Nonetheless, they often understate two related aspects: the distributed nature of neural representations and the semantic relations between concepts. We explicitly tackled this interrelatedness by defining a novel semantic alignment framework to align distributed activation patterns and structured knowledge. In particular, we detailed a solution to assign to both neurons and their linear combinations one or more concepts from the WordNet semantic network. Acknowledging semantic links also enabled the clustering of neurons into semantically rich and meaningful neural circuits. Our empirical analysis of popular convolutional networks for image classification found evidence of the emergence of such neural circuits. Finally, we discovered neurons in neural circuits to be pivotal for the network to perform effectively on semantically related tasks. 
We also contribute by releasing the code that implements our alignment framework.}, keywords = {explainable AI, graph data, neural network interpretation, representation learning}, pubstate = {published}, tppubtype = {conference} } @conference{nokey, title = {Deep Features for CBIR with Scarce Data using Hebbian Learning}, author = {Gabriele Lagani and Davide Bacciu and Claudio Gallicchio and Fabrizio Falchi and Claudio Gennaro and Giuseppe Amato}, url = {https://arxiv.org/abs/2205.08935, Arxiv}, year = {2022}, date = {2022-09-14}, urldate = {2022-09-14}, booktitle = {Proc. of the 19th International Conference on Content-based Multimedia Indexing (CBMI2022)}, abstract = { Features extracted from Deep Neural Networks (DNNs) have proven to be very effective in the context of Content Based Image Retrieval (CBIR). In recent work, biologically inspired Hebbian learning algorithms have shown promise for DNN training. In this contribution, we study the performance of such algorithms in the development of feature extractors for CBIR tasks. Specifically, we consider a semi-supervised learning strategy in two steps: first, an unsupervised pre-training stage is performed using Hebbian learning on the image dataset; second, the network is fine-tuned using supervised Stochastic Gradient Descent (SGD) training. For the unsupervised pre-training stage, we explore the nonlinear Hebbian Principal Component Analysis (HPCA) learning rule. For the supervised fine-tuning stage, we assume sample efficiency scenarios, in which the amount of labeled samples is just a small fraction of the whole dataset. Our experimental analysis, conducted on the CIFAR10 and CIFAR100 datasets, shows that, when few labeled samples are available, our Hebbian approach provides relevant improvements compared to various alternative methods. }, keywords = {}, pubstate = {published}, tppubtype = {conference} } @workshop{nokey, title = {Studying the impact of magnitude pruning on contrastive learning methods}, author = {Francesco Corti and Rahim Entezari and Sara Hooker and Davide Bacciu and Olga Saukh}, year = {2022}, date = {2022-07-23}, urldate = {2022-07-23}, booktitle = {ICML 2022 workshop on Hardware Aware Efficient Training (HAET 2022)}, abstract = {We study the impact of different pruning techniques on the representation learned by deep neural networks trained with contrastive loss functions. Our work finds that at high sparsity levels, contrastive learning results in a higher number of misclassified examples relative to models trained with traditional cross-entropy loss. To understand this pronounced difference, we use metrics such as the number of PIEs, qscore and pdepth to measure the impact of pruning on the learned representation quality. Our analysis suggests that the schedule of the pruning method matters. 
We find that the negative impact of sparsity on the quality of the learned representation is the highest when pruning is introduced early-on in training phase.}, keywords = {efficient learning models, embedded learning, pervasive artificial intelligence, pervasive computing}, pubstate = {published}, tppubtype = {workshop} } @conference{Sangermano2022, title = {Sample Condensation in Online Continual Learning}, author = {Matteo Sangermano and Antonio Carta and Andrea Cossu and Vincenzo Lomonaco and Davide Bacciu }, url = {https://arxiv.org/abs/2206.11849, Arxiv}, year = {2022}, date = {2022-07-18}, urldate = {2022-07-18}, booktitle = {Proceedings of the 2022 IEEE World Congress on Computational Intelligence}, publisher = {IEEE}, abstract = {Online Continual learning is a challenging learning scenario where the model observes a non-stationary stream of data and learns online. The main challenge is to incrementally learn while avoiding catastrophic forgetting, namely the problem of forgetting previously acquired knowledge while learning from new data. A popular solution in these scenario is to use a small memory to retain old data and rehearse them over time. Unfortunately, due to the limited memory size, the quality of the memory will deteriorate over time. In this paper we propose OLCGM, a novel replay-based continual learning strategy that uses knowledge condensation techniques to continuously compress the memory and achieve a better use of its limited size. The sample condensation step compresses old samples, instead of removing them like other replay strategies. As a result, the experiments show that, whenever the memory budget is limited compared to the complexity of the data, OLCGM improves the final accuracy compared to state-of-the-art replay strategies.}, keywords = {Continual learning, generative model}, pubstate = {published}, tppubtype = {conference} } @conference{Valenti2022, title = { Leveraging Relational Information for Learning Weakly Disentangled Representations }, author = {Andrea Valenti and Davide Bacciu }, url = {https://arxiv.org/abs/2205.10056, Arxiv}, year = {2022}, date = {2022-07-18}, urldate = {2022-07-18}, booktitle = {Proceedings of the 2022 IEEE World Congress on Computational Intelligence}, publisher = {IEEE}, abstract = {Disentanglement is a difficult property to enforce in neural representations. This might be due, in part, to a formalization of the disentanglement problem that focuses too heavily on separating relevant factors of variation of the data in single isolated dimensions of the neural representation. We argue that such a definition might be too restrictive and not necessarily beneficial in terms of downstream tasks. In this work, we present an alternative view over learning (weakly) disentangled representations, which leverages concepts from relational learning. We identify the regions of the latent space that correspond to specific instances of generative factors, and we learn the relationships among these regions in order to perform controlled changes to the latent codes. We also introduce a compound generative model that implements such a weak disentanglement approach. 
Our experiments shows that the learned representations can separate the relevant factors of variation in the data, while preserving the information needed for effectively generating high quality data samples.}, keywords = {adversarial learning, autoencoder, generative model, learning-symbolic integration}, pubstate = {published}, tppubtype = {conference} } @conference{nokey, title = {The Infinite Contextual Graph Markov Model}, author = {Daniele Castellana and Federico Errica and Davide Bacciu and Alessio Micheli }, year = {2022}, date = {2022-07-18}, urldate = {2022-07-18}, booktitle = {Proceedings of the 39th International Conference on Machine Learning (ICML 2022)}, keywords = {bayesian learning, deep learning for graphs, generative model, graph data}, pubstate = {published}, tppubtype = {conference} } @workshop{Semola2022, title = {Continual-Learning-as-a-Service (CLaaS): On-Demand Efficient Adaptation of Predictive Models}, author = {Rudy Semola and Vincenzo Lomonaco and Davide Bacciu}, url = {https://arxiv.org/pdf/2206.06957.pdf}, year = {2022}, date = {2022-07-18}, urldate = {2022-07-18}, booktitle = {Proc. of the 1st International Workshop on Pervasive Artificial Intelligence, 2022 IEEE World Congress on Computational Intelligence}, abstract = {Predictive machine learning models nowadays are often updated in a stateless and expensive way. The two main future trends for companies that want to build machine learning-based applications and systems are real-time inference and continual updating. Unfortunately, both trends require a mature infrastructure that is hard and costly to realize on-premise. This paper defines a novel software service and model delivery infrastructure termed Continual Learning-as-a-Service (CLaaS) to address these issues. Specifically, it embraces continual machine learning and continuous integration techniques. It provides support for model updating and validation tools for data scientists without an on-premise solution and in an efficient, stateful and easy-to-use manner. Finally, this CL model service is easy to encapsulate in any machine learning infrastructure or cloud system. This paper presents the design and implementation of a CLaaS instantiation, called LiquidBrain, evaluated in two real-world scenarios. The former is a robotic object recognition setting using the CORe50 dataset while the latter is a named category and attribute prediction using the DeepFashion-C dataset in the fashion domain. Our preliminary results suggest the usability and efficiency of the Continual Learning model services and the effectiveness of the solution in addressing real-world use-cases regardless of where the computation happens in the continuum Edge-Cloud.}, howpublished = {CEUR-WS Proceedings}, keywords = {AI-as-a-service, Continual learning}, pubstate = {published}, tppubtype = {workshop} } @article{DUKIC2022, title = {Inductive-Transductive Learning for Very Sparse Fashion Graphs}, author = {Haris Dukic and Shahab Mokarizadeh and Georgios Deligiorgis and Pierpaolo Sepe and Davide Bacciu and Marco Trincavelli}, doi = {https://doi.org/10.1016/j.neucom.2022.06.050}, issn = {0925-2312}, year = {2022}, date = {2022-06-27}, urldate = {2022-06-27}, journal = {Neurocomputing}, abstract = {The assortments of global retailers are composed of hundreds of thousands of products linked by several types of relationships such as style compatibility, ”bought together”, ”watched together”, etc. 
Graphs are a natural representation for assortments, where products are nodes and relations are edges. Style compatibility relations are produced manually and do not cover the whole graph uniformly. We propose to use inductive learning to enhance a graph encoding style compatibility of a fashion assortment, leveraging rich node information comprising textual descriptions and visual data. Then, we show how the proposed graph enhancement substantially improves the performance on transductive tasks with a minor impact on graph sparsity. Although demonstrated in a challenging and novel industrial application case, the approach we propose is general enough to be applied to any node-level or edge-level prediction task in very sparse, large-scale networks.}, keywords = {deep graph networks, deep learning for graphs, fashion data, learning with structured data, recommendation systems}, pubstate = {published}, tppubtype = {article} } @article{nokey, title = {Graph Neural Network for Context-Aware Recommendation}, author = {Asma Sattar and Davide Bacciu}, doi = {10.1007/s11063-022-10917-3}, year = {2022}, date = {2022-06-22}, urldate = {2022-06-22}, journal = {Neural Processing Letters}, keywords = {deep learning for graphs, graph data, recommendation systems}, pubstate = {published}, tppubtype = {article} } @conference{carta2021ex, title = {Ex-Model: Continual Learning from a Stream of Trained Models}, author = {Antonio Carta and Andrea Cossu and Vincenzo Lomonaco and Davide Bacciu}, url = {https://arxiv.org/pdf/2112.06511.pdf, Arxiv}, year = {2022}, date = {2022-06-20}, urldate = {2022-06-20}, booktitle = {Proceedings of the CVPR 2022 Workshop on Continual Learning }, journal = {arXiv preprint arXiv:2112.06511}, pages = {3790-3799}, organization = {IEEE}, abstract = {Learning continually from non-stationary data streams is a challenging research topic of growing popularity in the last few years. Being able to learn, adapt, and generalize continually in an efficient, effective, and scalable way is fundamental for a sustainable development of Artificial Intelligent systems. However, an agent-centric view of continual learning requires learning directly from raw data, which limits the interaction between independent agents, the efficiency, and the privacy of current approaches. Instead, we argue that continual learning systems should exploit the availability of compressed information in the form of trained models. In this paper, we introduce and formalize a new paradigm named "Ex-Model Continual Learning" (ExML), where an agent learns from a sequence of previously trained models instead of raw data. We further contribute with three ex-model continual learning algorithms and an empirical setting comprising three datasets (MNIST, CIFAR-10 and CORe50), and eight scenarios, where the proposed algorithms are extensively tested. Finally, we highlight the peculiarities of the ex-model paradigm and we point out interesting future research directions. 
}, keywords = {Continual learning, deep learning, trustworthy AI}, pubstate = {published}, tppubtype = {conference} } @conference{Serramazza2022, title = {Learning image captioning as a structured transduction task}, author = {Davide Italo Serramazza and Davide Bacciu}, doi = {doi.org/10.1007/978-3-031-08223-8_20}, year = {2022}, date = {2022-06-20}, urldate = {2022-06-20}, booktitle = {Proceedings of the 23rd International Conference on Engineering Applications of Neural Networks (EANN 2022)}, volume = {1600}, pages = {235–246}, publisher = {Springer}, series = {Communications in Computer and Information Science }, abstract = {Image captioning is a task typically approached by deep encoder-decoder architectures, where the encoder component works on a flat representation of the image while the decoder considers a sequential representation of natural language sentences. As such, these encoder-decoder architectures implement a simple and very specific form of structured transduction, that is a generalization of a predictive problem where the input data and output predictions might have substantially different structures and topologies. In this paper, we explore a generalization of such an approach by addressing the problem as a general structured transduction problem. In particular, we provide a framework that allows considering input and output information with a tree-structured representation. This allows taking into account the hierarchical nature underlying both images and sentences. To this end, we introduce an approach to generate tree-structured representations from images along with an autoencoder working with this kind of data. We empirically assess our approach on both synthetic and realistic tasks.}, keywords = {generative model, image captioning, structured data processing, tree structured data, tree transductions}, pubstate = {published}, tppubtype = {conference} } @conference{Lucchesi2022, title = {Avalanche RL: a Continual Reinforcement Learning Library}, author = {Nicolò Lucchesi and Antonio Carta and Vincenzo Lomonaco and Davide Bacciu}, url = {https://arxiv.org/abs/2202.13657, Arxiv}, year = {2022}, date = {2022-05-23}, urldate = {2022-05-23}, booktitle = {Proceedings of the 21st International Conference on Image Analysis and Processing (ICIAP 2021)}, abstract = {Continual Reinforcement Learning (CRL) is a challenging setting where an agent learns to interact with an environment that is constantly changing over time (the stream of experiences). In this paper, we describe Avalanche RL, a library for Continual Reinforcement Learning which allows to easily train agents on a continuous stream of tasks. Avalanche RL is based on PyTorch and supports any OpenAI Gym environment. Its design is based on Avalanche, one of the more popular continual learning libraries, which allow us to reuse a large number of continual learning strategies and improve the interaction between reinforcement learning and continual learning researchers. Additionally, we propose Continual Habitat-Lab, a novel benchmark and a high-level library which enables the usage of the photorealistic simulator Habitat-Sim for CRL research. Overall, Avalanche RL attempts to unify under a common framework continual reinforcement learning applications, which we hope will foster the growth of the field. 
}, keywords = {Continual learning, reinforcement learning}, pubstate = {published}, tppubtype = {conference} } @article{DBLP:journals/corr/abs-2105-06998, title = {A causal learning framework for the analysis and interpretation of COVID-19 clinical data}, author = {Elisa Ferrari and Luna Gargani and Greta Barbieri and Lorenzo Ghiadoni and Francesco Faita and Davide Bacciu}, url = {https://arxiv.org/abs/2105.06998, Arxiv}, doi = {doi.org/10.1371/journal.pone.0268327}, year = {2022}, date = {2022-05-19}, urldate = {2022-05-19}, journal = {Plos One}, volume = {17}, number = {5}, abstract = {We present a workflow for clinical data analysis that relies on Bayesian Structure Learning (BSL), an unsupervised learning approach, robust to noise and biases, that allows incorporating prior medical knowledge into the learning process and that provides explainable results in the form of a graph showing the causal connections among the analyzed features. The workflow consists of a multi-step approach that goes from identifying the main causes of the patient's outcome through BSL, to the realization of a tool suitable for clinical practice, based on a Binary Decision Tree (BDT), to recognize high-risk patients with information already available at hospital admission time. We evaluate our approach on a feature-rich COVID-19 dataset, showing that the proposed framework provides a schematic overview of the multi-factorial processes that jointly contribute to the outcome. We discuss how these computational findings are confirmed by current understanding of the COVID-19 pathogenesis. Further, our approach yields a highly interpretable tool correctly predicting the outcome of 85% of subjects based exclusively on 3 features: age, a previous history of chronic obstructive pulmonary disease and the PaO2/FiO2 ratio at the time of arrival to the hospital. The inclusion of additional information from 4 routine blood tests (Creatinine, Glucose, pO2 and Sodium) increases predictive accuracy to 94.5%. }, keywords = {Bayesian networks, bioinformatics, biomedical data, explainable AI, unsupervised learning}, pubstate = {published}, tppubtype = {article} } @article{pandelea2022, title = {Modeling Mood Polarity and Declaration Occurrence by Neural Temporal Point Processes}, author = {Davide Bacciu and Davide Morelli and Vlad Pandelea}, doi = {10.1109/TNNLS.2022.3172871}, year = {2022}, date = {2022-05-13}, urldate = {2022-05-13}, journal = {IEEE Transactions on Neural Networks and Learning Systems}, pages = {1-8}, keywords = {biomedical data, multivariate time-series, neural point process, recurrent neural network}, pubstate = {published}, tppubtype = {article} } @workshop{Numeroso2022, title = {Learning heuristics for A*}, author = { Danilo Numeroso and Davide Bacciu and Petar Veličković}, year = {2022}, date = {2022-04-29}, urldate = {2022-04-29}, booktitle = {ICLR 2022 Workshop on Anchoring Machine Learning in Classical Algorithmic Theory (GroundedML 2022)}, abstract = {Path finding in graphs is one of the most studied classes of problems in computer science. In this context, search algorithms are often extended with heuristics for a more efficient search of target nodes. In this work we combine recent advancements in Neural Algorithmic Reasoning to learn efficient heuristic functions for path finding problems on graphs. At training time, we exploit multi-task learning to jointly learn Dijkstra's algorithm and a consistent heuristic function for the A* search algorithm. 
At inference time, we plug our learnt heuristics into the A* algorithm. Results show that running A* over the learnt heuristics value can greatly speed up target node searching compared to Dijkstra, while still finding minimal-cost paths. }, keywords = {algorithmic reasoning, deep learning for graphs, learning-symbolic integration}, pubstate = {published}, tppubtype = {workshop} } @article{Bacciu2022, title = {Explaining Deep Graph Networks via Input Perturbation}, author = {Davide Bacciu and Danilo Numeroso }, doi = {10.1109/TNNLS.2022.3165618}, year = {2022}, date = {2022-04-21}, urldate = {2022-04-21}, journal = {IEEE Transactions on Neural Networks and Learning Systems}, abstract = {Deep Graph Networks are a family of machine learning models for structured data which are finding heavy application in life-sciences (drug repurposing, molecular property predictions) and on social network data (recommendation systems). The privacy and safety-critical nature of such domains motivates the need for developing effective explainability methods for this family of models. So far, progress in this field has been challenged by the combinatorial nature and complexity of graph structures. In this respect, we present a novel local explanation framework specifically tailored to graph data and deep graph networks. Our approach leverages reinforcement learning to generate meaningful local perturbations of the input graph, whose prediction we seek an interpretation for. These perturbed data points are obtained by optimising a multi-objective score taking into account similarities both at a structural level as well as at the level of the deep model outputs. By this means, we are able to populate a set of informative neighbouring samples for the query graph, which is then used to fit an interpretable model for the predictive behaviour of the deep network locally to the query graph prediction. We show the effectiveness of the proposed explainer by a qualitative analysis on two chemistry datasets, TOS and ESOL and by quantitative results on a benchmark dataset for explanations, CYCLIQ. }, keywords = {adversarial examples, deep learning for graphs, explainable AI, generative model, structured data processing, trustworthy AI}, pubstate = {published}, tppubtype = {article} } @article{Collodi2022, title = {Learning with few examples the semantic description of novel human-inspired grasp strategies from RGB data}, author = { Lorenzo Collodi and Davide Bacciu and Matteo Bianchi and Giuseppe Averta}, url = {https://www.researchgate.net/profile/Giuseppe-Averta/publication/358006552_Learning_With_Few_Examples_the_Semantic_Description_of_Novel_Human-Inspired_Grasp_Strategies_From_RGB_Data/links/61eae01e8d338833e3857251/Learning-With-Few-Examples-the-Semantic-Description-of-Novel-Human-Inspired-Grasp-Strategies-From-RGB-Data.pdf, Open Version}, doi = {https://doi.org/10.1109/LRA.2022.3144520}, year = {2022}, date = {2022-04-04}, urldate = {2022-04-04}, journal = { IEEE Robotics and Automation Letters}, pages = { 2573 - 2580}, publisher = {IEEE}, abstract = {Data-driven approaches and human inspiration are fundamental to endow robotic manipulators with advanced autonomous grasping capabilities. 
However, to capitalize upon these two pillars, several aspects need to be considered, which include the number of human examples used for training; the need for having in advance all the required information for classification (hardly feasible in unstructured environments); the trade-off between the task performance and the processing cost. In this paper, we propose an RGB-based pipeline that can identify the object to be grasped and guide the actual execution of the grasping primitive selected through a combination of Convolutional and Gated Graph Neural Networks. We consider a set of human-inspired grasp strategies, which are afforded by the geometrical properties of the objects and identified from a human grasping taxonomy, and propose to learn new grasping skills with only a few examples. We test our framework with a manipulator endowed with an under-actuated soft robotic hand. Even though we use only 2D information to minimize the footprint of the network, we achieve 90% successful identification of the most appropriate human-inspired grasping strategy over ten different classes, of which three were few-shot learned, outperforming an ideal model trained with all the classes, in sample-scarce conditions.}, keywords = {deep learning for graphs, graph data, learning-symbolic integration, robotics}, pubstate = {published}, tppubtype = {article} } @article{Gravina2022, title = {Controlling astrocyte-mediated synaptic pruning signals for schizophrenia drug repurposing with Deep Graph Networks}, author = {Alessio Gravina and Jennifer L. Wilson and Davide Bacciu and Kevin J. Grimes and Corrado Priami}, url = {https://www.biorxiv.org/content/10.1101/2021.10.07.463459v1, BioRxiv}, doi = {10.1371/journal.pcbi.1009531}, year = {2022}, date = {2022-04-01}, urldate = {2022-04-01}, journal = {Plos Computational Biology}, volume = {18}, number = {5}, abstract = {Schizophrenia is a debilitating psychiatric disorder, leading to both physical and social morbidity. Worldwide, 1% of the population is struggling with the disease, with 100,000 new cases annually in the United States alone. Despite its importance, the goal of finding effective treatments for schizophrenia remains a challenging task, and previous work conducted expensive large-scale phenotypic screens. This work investigates the benefits of Machine Learning for graphs to optimize drug phenotypic screens and predict compounds that mitigate abnormal brain reduction induced by excessive glial phagocytic activity in schizophrenia subjects. Given a compound and its concentration as input, we propose a method that predicts a score associated with three possible compound effects, i.e., reduce, increase, or not influence phagocytosis. We leverage a high-throughput screening to prove experimentally that our method achieves good generalization capabilities. The screening involves 2218 compounds at five different concentrations. Then, we analyze the usability of our approach in a practical setting, i.e., prioritizing the selection of compounds in the SWEETLEAD library. We provide a list of 64 compounds from the library that have the most potential clinical utility for glial phagocytosis mitigation. 
Lastly, we propose a novel approach to computationally validate their utility as possible therapies for schizophrenia.}, keywords = {bioinformatics, biomedical data, deep learning for graphs, structured data processing}, pubstate = {published}, tppubtype = {article} } @conference{decaro2022aiasaservice, title = {AI-as-a-Service Toolkit for Human-Centered Intelligence in Autonomous Driving}, author = {Valerio De Caro and Saira Bano and Achilles Machumilane and Alberto Gotta and Pietro Cassará and Antonio Carta and Christos Sardianos and Christos Chronis and Iraklis Varlamis and Konstantinos Tserpes and Vincenzo Lomonaco and Claudio Gallicchio and Davide Bacciu}, url = {https://arxiv.org/pdf/2202.01645.pdf, arxiv}, year = {2022}, date = {2022-03-21}, urldate = {2022-03-21}, booktitle = {Proceedings of the 20th International Conference on Pervasive Computing and Communications (PerCom 2022)}, keywords = {activity recognition, AI-as-a-service, deep learning, humanistic intelligence, machine vision, Sequential data}, pubstate = {published}, tppubtype = {conference} } @book{BacciuBook2022, title = {Deep Learning in Biology and Medicine}, author = {Davide Bacciu and Paulo J. G. Lisboa and Alfredo Vellido}, doi = {doi.org/10.1142/q0322 }, isbn = {978-1-80061-093-4}, year = {2022}, date = {2022-02-01}, urldate = {2022-02-01}, publisher = {World Scientific Publisher}, abstract = {Biology, medicine and biochemistry have become data-centric fields for which Deep Learning methods are delivering groundbreaking results. Addressing high impact challenges, Deep Learning in Biology and Medicine provides an accessible and organic collection of Deep Learning essays on bioinformatics and medicine. It caters for a wide readership, ranging from machine learning practitioners and data scientists seeking methodological knowledge to address biomedical applications, to life science specialists in search of a gentle reference for advanced data analytics. With contributions from internationally renowned experts, the book covers foundational methodologies in a wide spectrum of life sciences applications, including electronic health record processing, diagnostic imaging, text processing, as well as omics-data processing. This survey of consolidated problems is complemented by a selection of advanced applications, including cheminformatics and biomedical interaction network analysis. A modern and mindful approach to the use of data-driven methodologies in the life sciences also requires careful consideration of the associated societal, ethical, legal and transparency challenges, which are covered in the concluding chapters of this book.}, keywords = {artificial intelligence, bioinformatics, biomedical data, deep learning, featured}, pubstate = {published}, tppubtype = {book} } @article{Castellana2021, title = {A Tensor Framework for Learning in Structured Domains}, author = {Daniele Castellana and Davide Bacciu}, editor = {Kerstin Bunte and Niccolo Navarin and Luca Oneto}, doi = {10.1016/j.neucom.2021.05.110}, year = {2022}, date = {2022-01-22}, urldate = {2022-01-22}, journal = {Neurocomputing}, volume = {470}, pages = {405-426}, abstract = {Learning machines for structured data (e.g., trees) are intrinsically based on their capacity to learn representations by aggregating information from the multi-way relationships emerging from the structure topology. 
While complex aggregation functions are desirable in this context to increase the expressiveness of the learned representations, the modelling of higher-order interactions among structure constituents is unfeasible, in practice, due to the exponential number of parameters required. Therefore, the common approach is to define models which rely only on first-order interactions among structure constituents. In this work, we leverage tensor theory to define a framework for learning in structured domains. Such a framework is built on the observation that more expressive models require a tensor parameterisation. This observation is the stepping stone for the application of tensor decompositions in the context of recursive models. From this point of view, the advantage of using tensor decompositions is twofold since it allows limiting the number of model parameters while injecting inductive biases that do not ignore higher-order interactions. We apply the proposed framework on probabilistic and neural models for structured data, defining different models which leverage tensor decompositions. The experimental validation clearly shows the advantage of these models compared to first-order and full-tensorial models.}, keywords = {deep learning, structured data processing, tensor factorization, tensor neural networks, tree structured data}, pubstate = {published}, tppubtype = {article} } @article{Carta2022, title = {Catastrophic Forgetting in Deep Graph Networks: a Graph Classification benchmark}, author = {Antonio Carta and Andrea Cossu and Federico Errica and Davide Bacciu}, doi = {10.3389/frai.2022.824655}, year = {2022}, date = {2022-01-11}, urldate = {2022-01-11}, journal = {Frontiers in Artificial Intelligence}, abstract = { In this work, we study the phenomenon of catastrophic forgetting in the graph representation learning scenario. The primary objective of the analysis is to understand whether classical continual learning techniques for flat and sequential data have a tangible impact on performance when applied to graph data. To do so, we experiment with a structure-agnostic model and a deep graph network in a robust and controlled environment on three different datasets. The benchmark is complemented by an investigation on the effect of structure-preserving regularization techniques on catastrophic forgetting. We find that replay is the most effective strategy so far, and it also benefits the most from the use of regularization. Our findings suggest interesting future research at the intersection of the continual and graph representation learning fields. Finally, we provide researchers with a flexible software framework to reproduce our results and carry out further experiments. }, keywords = {Continual learning, deep learning for graphs, graph data, structured data processing}, pubstate = {published}, tppubtype = {article} } @article{10.3389/frai.2022.829842, title = {Is Class-Incremental Enough for Continual Learning?}, author = {Andrea Cossu and Gabriele Graffieti and Lorenzo Pellegrini and Davide Maltoni and Davide Bacciu and Antonio Carta and Vincenzo Lomonaco}, url = {https://www.frontiersin.org/article/10.3389/frai.2022.829842}, doi = {10.3389/frai.2022.829842}, issn = {2624-8212}, year = {2022}, date = {2022-01-01}, urldate = {2022-01-01}, journal = {Frontiers in Artificial Intelligence}, volume = {5}, abstract = {The ability of a model to learn continually can be empirically assessed in different continual learning scenarios. 
Each scenario defines the constraints and the opportunities of the learning environment. Here, we challenge the current trend in the continual learning literature to experiment mainly on class-incremental scenarios, where classes present in one experience are never revisited. We posit that an excessive focus on this setting may be limiting for future research on continual learning, since class-incremental scenarios artificially exacerbate catastrophic forgetting, at the expense of other important objectives like forward transfer and computational efficiency. In many real-world environments, in fact, repetition of previously encountered concepts occurs naturally and contributes to softening the disruption of previous knowledge. We advocate for a more in-depth study of alternative continual learning scenarios, in which repetition is integrated by design in the stream of incoming information. Starting from already existing proposals, we describe the advantages such class-incremental with repetition scenarios could offer for a more comprehensive assessment of continual learning models.}, keywords = {}, pubstate = {published}, tppubtype = {article} } @article{atzeni2022, title = {A Systematic Review of Wi-Fi and Machine Learning Integration with Topic Modeling Techniques}, author = {Daniele Atzeni and Davide Bacciu and Daniele Mazzei and Giuseppe Prencipe}, url = {https://www.mdpi.com/1424-8220/22/13/4925}, doi = {10.3390/s22134925}, issn = {1424-8220}, year = {2022}, date = {2022-01-01}, urldate = {2022-01-01}, journal = {Sensors}, volume = {22}, number = {13}, abstract = {Wireless networks have drastically influenced our lifestyle, changing our workplaces and society. Among the variety of wireless technology, Wi-Fi surely plays a leading role, especially in local area networks. The spread of mobiles and tablets, and more recently, the advent of Internet of Things, have resulted in a multitude of Wi-Fi-enabled devices continuously sending data to the Internet and between each other. At the same time, Machine Learning has proven to be one of the most effective and versatile tools for the analysis of fast streaming data. This systematic review aims at studying the interaction between these technologies and how it has developed throughout their lifetimes. We used Scopus, Web of Science, and IEEE Xplore databases to retrieve paper abstracts and leveraged a topic modeling technique, namely, BERTopic, to analyze the resulting document corpus. After these steps, we inspected the obtained clusters and computed statistics to characterize and interpret the topics they refer to. Our results include both the applications of Wi-Fi sensing and the variety of Machine Learning algorithms used to tackle them. 
We also report how Wi-Fi advances have affected sensing applications and the choice of the most suitable Machine Learning models.}, keywords = {indoor user movement forecasting, pervasive computing, wireless sensor networks}, pubstate = {published}, tppubtype = {article} } @inproceedings{qskit2022, title = {Deep Reinforcement Learning Quantum Control on IBMQ Platforms and Qiskit Pulse}, author = {Rudy Semola and Lorenzo Moro and Davide Bacciu and Enrico Prati}, doi = {10.1109/QCE53715.2022.00108}, year = {2022}, date = {2022-01-01}, urldate = {2022-01-01}, booktitle = {2022 IEEE International Conference on Quantum Computing and Engineering (QCE)}, pages = {759-762}, keywords = {quantum computing, reinforcement learning}, pubstate = {published}, tppubtype = {inproceedings} } @conference{Lanciano2021, title = {Predictive Auto-scaling with OpenStack Monasca}, author = {Giacomo Lanciano and Filippo Galli and Tommaso Cucinotta and Davide Bacciu and Andrea Passarella}, url = {https://arxiv.org/abs/2111.02133, Arxiv}, doi = {10.1145/3468737.3494104}, year = {2021}, date = {2021-12-06}, urldate = {2021-12-06}, booktitle = {Proceedings of the 14th IEEE/ACM International Conference on Utility and Cloud Computing (UCC 2021)}, pages = {1-10}, abstract = {Cloud auto-scaling mechanisms are typically based on reactive automation rules that scale a cluster whenever some metric, e.g., the average CPU usage among instances, exceeds a predefined threshold. Tuning these rules becomes particularly cumbersome when scaling up a cluster involves non-negligible times to bootstrap new instances, as frequently happens in production cloud services. To deal with this problem, we propose an architecture for auto-scaling cloud services based on the status in which the system is expected to evolve in the near future. Our approach leverages time-series forecasting techniques, like those based on machine learning and artificial neural networks, to predict the future dynamics of key metrics, e.g., resource consumption metrics, and apply a threshold-based scaling policy on them. The result is a predictive automation policy that is able, for instance, to automatically anticipate peaks in the load of a cloud application and trigger ahead of time appropriate scaling actions to accommodate the expected increase in traffic. We prototyped our approach as an open-source OpenStack component, which relies on, and extends, the monitoring capabilities offered by Monasca, resulting in the addition of predictive metrics that can be leveraged by orchestration components like Heat or Senlin. We show experimental results using a recurrent neural network and a multi-layer perceptron as predictors, which are compared with a simple linear regression and a traditional non-predictive auto-scaling policy. However, the proposed framework allows for the easy customization of the prediction policy as needed. 
}, keywords = {cloud computing, pervasive computing, recurrent neural network, Sequential data}, pubstate = {published}, tppubtype = {conference} } @article{Cossu2021b, title = {Continual Learning for Recurrent Neural Networks: an Empirical Evaluation}, author = {Andrea Cossu and Antonio Carta and Vincenzo Lomonaco and Davide Bacciu}, url = {https://arxiv.org/abs/2103.07492, Arxiv}, year = {2021}, date = {2021-12-03}, urldate = {2021-12-03}, journal = {Neural Networks}, volume = {143}, pages = {607-627}, abstract = { Learning continuously throughout the model's lifetime is fundamental to deploying machine learning solutions that are robust to drifts in the data distribution. Advances in Continual Learning (CL) with recurrent neural networks could pave the way to a large number of applications where incoming data is non-stationary, like natural language processing and robotics. However, the existing body of work on the topic is still fragmented, with approaches which are application-specific and whose assessment is based on heterogeneous learning protocols and datasets. In this paper, we organize the literature on CL for sequential data processing by providing a categorization of the contributions and a review of the benchmarks. We propose two new benchmarks for CL with sequential data based on existing datasets, whose characteristics resemble real-world applications. We also provide a broad empirical evaluation of CL and Recurrent Neural Networks in the class-incremental scenario, by testing their ability to mitigate forgetting with a number of different strategies which are not specific to sequential data processing. Our results highlight the key role played by the sequence length and the importance of a clear specification of the CL scenario. }, keywords = {Continual learning, deep learning, recurrent neural network, Sequential data}, pubstate = {published}, tppubtype = {article} } @conference{BacciuCAIP2021, title = {Towards Functional Safety Compliance of Recurrent Neural Networks}, author = {Davide Bacciu and Antonio Carta and Daniele Di Sarli and Claudio Gallicchio and Vincenzo Lomonaco and Salvatore Petroni}, url = {https://aiforpeople.org/conference/assets/papers/CAIP21-P09.pdf, Open Access PDF}, year = {2021}, date = {2021-11-20}, booktitle = {Proceedings of the International Conference on AI for People (CAIP 2021)}, abstract = {Deploying Autonomous Driving systems requires facing some novel challenges for the Automotive industry. One of the most critical aspects that can severely compromise their deployment is Functional Safety. The ISO 26262 standard provides guidelines to ensure Functional Safety of road vehicles. However, this standard is not suitable for developing Artificial Intelligence-based systems such as systems based on Recurrent Neural Networks (RNNs). To address this issue, in this paper we propose a new methodology, composed of three steps. The first step is the robustness evaluation of the RNN against input perturbations. Then, a proper set of safety measures must be defined according to the model’s robustness, where less robust models will require stronger mitigation. 
Finally, the functionality of the entire system must be extensively tested according to Safety Of The Intended Functionality (SOTIF) guidelines, providing quantitative results about the occurrence of unsafe scenarios and evaluating appropriate Safety Performance Indicators.}, keywords = {distributed learning, humanistic intelligence, internet of things, recurrent neural network, trustworthy AI}, pubstate = {published}, tppubtype = {conference} } @article{Carta2021b, title = {Encoding-based Memory for Recurrent Neural Networks}, author = {Antonio Carta and Alessandro Sperduti and Davide Bacciu}, url = {https://arxiv.org/abs/2001.11771, Arxiv}, doi = {10.1016/j.neucom.2021.04.051}, year = {2021}, date = {2021-10-07}, urldate = {2021-10-07}, journal = {Neurocomputing}, volume = {456}, pages = {407-420}, publisher = {Elsevier}, abstract = {Learning to solve sequential tasks with recurrent models requires the ability to memorize long sequences and to extract task-relevant features from them. In this paper, we study the memorization subtask from the point of view of the design and training of recurrent neural networks. We propose a new model, the Linear Memory Network, which features an encoding-based memorization component built with a linear autoencoder for sequences. We extend the memorization component with a modular memory that encodes the hidden state sequence at different sampling frequencies. Additionally, we provide a specialized training algorithm that initializes the memory to efficiently encode the hidden activations of the network. The experimental results on synthetic and real-world datasets show that specializing the training algorithm to train the memorization component always improves the final performance whenever the memorization of long sequences is necessary to solve the problem. }, keywords = {autoencoder, deep learning, memory networks, recurrent neural network, Sequential data}, pubstate = {published}, tppubtype = {article} } @article{Averta2021, title = {Learning to Prevent Grasp Failure with Soft Hands: From Online Prediction to Dual-Arm Grasp Recovery}, author = {Giuseppe Averta and Federica Barontini and Irene Valdambrini and Paolo Cheli and Davide Bacciu and Matteo Bianchi}, doi = {10.1002/aisy.202100146}, year = {2021}, date = {2021-10-07}, urldate = {2021-10-07}, journal = {Advanced Intelligent Systems}, abstract = {Soft hands allow simplifying the grasp planning needed to achieve a successful grasp, thanks to their intrinsic adaptability. At the same time, their usage poses new challenges, related to the adoption of classical sensing techniques originally developed for rigid end effectors, which provide fundamental information, such as the detection of object slippage. In this regard, model-based approaches for the processing of the gathered information are hard to use, due to the difficulties in modeling hand–object interaction when softness is involved. To overcome these limitations, in this article, we propose to combine distributed tactile sensing and machine learning (recurrent neural network) to detect sliding conditions for a soft robotic hand mounted on a robotic manipulator, targeting the prediction of the grasp failure event and the direction of sliding. The outcomes of these predictions allow for an online triggering of a compensatory action performed with a second robotic arm–hand system, to prevent the failure. 
Although the network is trained only with spherical and cylindrical objects, we demonstrate the high generalization capabilities of our framework, achieving correct prediction of the failure direction in 75% of cases and 85% successful regrasps on a selection of 12 objects of common use.}, keywords = {deep learning, machine vision, recurrent neural network, robotics, Sequential data}, pubstate = {published}, tppubtype = {article} } @conference{Cossu2021, title = {Continual Learning with Echo State Networks}, author = {Andrea Cossu and Davide Bacciu and Antonio Carta and Claudio Gallicchio and Vincenzo Lomonaco}, editor = {Michel Verleysen}, url = {https://arxiv.org/abs/2105.07674, Arxiv}, doi = {10.14428/esann/2021.ES2021-80}, year = {2021}, date = {2021-10-06}, urldate = {2021-10-06}, booktitle = {Proceedings of the 29th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN 2021)}, pages = {275-280}, abstract = { Continual Learning (CL) refers to a learning setup where data is non-stationary and the model has to learn without forgetting existing knowledge. The study of CL for sequential patterns revolves around trained recurrent networks. In this work, instead, we introduce CL in the context of Echo State Networks (ESNs), where the recurrent component is kept fixed. We provide the first evaluation of catastrophic forgetting in ESNs and we highlight the benefits of using CL strategies which are not applicable to trained recurrent models. Our results confirm the ESN as a promising model for CL and open the way to its use in streaming scenarios. }, keywords = {Continual learning, Echo state networks, recurrent neural network, Sequential data}, pubstate = {published}, tppubtype = {conference} } @conference{Bacciu2021c, title = {Deep learning for graphs}, author = {Davide Bacciu and Filippo Maria Bianchi and Benjamin Paassen and Cesare Alippi}, editor = {Michel Verleysen}, doi = {10.14428/esann/2021.ES2021-5}, year = {2021}, date = {2021-10-06}, urldate = {2021-10-06}, booktitle = {Proceedings of the 29th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN 2021)}, pages = {89-98}, abstract = { Deep learning for graphs encompasses all those models endowed with multiple layers of abstraction, which operate on data represented as graphs. The most common building blocks of these models are graph encoding layers, which compute a vector embedding for each node in a graph based on a sum of messages received from its neighbors. However, the family also includes architectures with decoders from vectors to graphs and models that process time-varying graphs and hypergraphs. In this paper, we provide an overview of the key concepts in the field, point towards open questions, and frame the contributions of the ESANN 2021 special session into the broader context of deep learning for graphs. 
}, keywords = {deep learning, deep learning for graphs, graph data, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{Dukic2021, title = {Inductive learning for product assortment graph completion}, author = {Haris Dukic and Georgios Deligiorgis and Pierpaolo Sepe and Davide Bacciu and Marco Trincavelli}, editor = {Michel Verleysen}, doi = {10.14428/esann/2021.ES2021-73}, year = {2021}, date = {2021-10-06}, urldate = {2021-10-06}, booktitle = {Proceedings of the 29th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN 2021)}, pages = {129-134}, abstract = { Global retailers have assortments that contain hundreds of thousands of products that can be linked by several types of relationships like style compatibility, "bought together", "watched together", etc. Graphs are a natural representation for assortments, where products are nodes and relations are edges. Relations like style compatibility are often produced by a manual process and therefore do not uniformly cover the whole graph. We propose to use inductive learning to enhance a graph encoding style compatibility of a fashion assortment, leveraging rich node information comprising textual descriptions and visual data. Then, we show how the proposed graph enhancement substantially improves the performance on transductive tasks with a minor impact on graph sparsity. }, keywords = {deep learning for graphs, graph data, recommendation systems}, pubstate = {published}, tppubtype = {conference} } @conference{Valenti2021b, title = {Calliope - A Polyphonic Music Transformer}, author = {Andrea Valenti and Stefano Berti and Davide Bacciu}, editor = {Michel Verleysen}, abstract = {The polyphonic nature of music makes the application of deep learning to music modelling a challenging task. On the other hand, the Transformer architecture seems to be a good fit for this kind of data. In this work, we present Calliope, a novel autoencoder model based on Transformers for the efficient modelling of multi-track sequences of polyphonic music. The experiments show that our model is able to improve the state of the art on musical sequence reconstruction and generation, with remarkably good results especially on long sequences. 
}, doi = {10.14428/esann/2021.ES2021-63}, year = {2021}, date = {2021-10-06}, urldate = {2021-10-06}, booktitle = {Proceedings of the 29th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN 2021)}, pages = {405-410}, keywords = {artificial creativity, autoencoder, deep learning, generative model, music generation, transformer}, pubstate = {published}, tppubtype = {conference} } @periodical{Bacciu2021e, title = {Supporting Privacy Preservation by Distributed and Federated Learning on the Edge}, author = {Davide Bacciu and Patrizio Dazzi and Alberto Gotta}, editor = {Erwin Schoitsch and Georgios Mylonas}, url = {https://ercim-news.ercim.eu/en127/r-i/supporting-privacy-preservation-by-distributed-and-federated-learning-on-the-edge}, year = {2021}, date = {2021-09-30}, urldate = {2021-09-30}, issuetitle = {ERCIM News}, volume = {127}, keywords = {artificial intelligence, Continual learning, edge AI, federated learning, humanistic intelligence, reservoir computing, trustworthy AI}, pubstate = {published}, tppubtype = {periodical} } @article{Bacciu2021b, title = {K-Plex Cover Pooling for Graph Neural Networks}, author = {Davide Bacciu and Alessio Conte and Roberto Grossi and Francesco Landolfi and Andrea Marino}, editor = {Annalisa Appice and Sergio Escalera and José A. Gámez and Heike Trautmann}, url = {https://link.springer.com/article/10.1007/s10618-021-00779-z, Published version}, doi = {10.1007/s10618-021-00779-z}, year = {2021}, date = {2021-09-13}, urldate = {2021-09-13}, journal = {Data Mining and Knowledge Discovery}, abstract = {Graph pooling methods provide mechanisms for structure reduction that are intended to ease the diffusion of context between nodes further in the graph, and that typically leverage community discovery mechanisms or node and edge pruning heuristics. In this paper, we introduce a novel pooling technique which borrows from classical results in graph theory that is non-parametric and generalizes well to graphs of different nature and connectivity patterns. Our pooling method, named KPlexPool, builds on the concepts of graph covers and k-plexes, i.e. pseudo-cliques where each node can miss up to k links. The experimental evaluation on molecular and social graph classification benchmarks shows that KPlexPool achieves state-of-the-art performance against both parametric and non-parametric pooling methods in the literature, despite generating pooled graphs based solely on topological information.}, note = {Also accepted as a paper at the European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases (ECML-PKDD 2021)}, keywords = {deep learning, deep learning for graphs, graph data, graph pooling, structured data processing}, pubstate = {published}, tppubtype = {article} } @workshop{Macher2021, title = {Dependable Integration Concepts for Human-Centric AI-based Systems}, author = {G. Macher and S. Akarmazyan and E. Armengaud and D. Bacciu and C. Calandra and H. Danzinger and P. Dazzi and C. Davalas and M.C. De Gennaro and A. Dimitriou and J. Dobaj and M. Dzambic and L. Giraudi and S. Girbal and D. Michail and R. Peroglio and R. Potenza and F. Pourdanesh and M. Seidl and C. Sardianos and K. Tserpes and J. Valtl and I. Varlamis and O. 
Veledar }, year = {2021}, date = {2021-09-07}, urldate = {2021-09-07}, booktitle = {Proceedings of the 40th International Conference on Computer Safety, Reliability and Security (SafeComp 2021)}, pages = {11-23}, publisher = {Springer}, note = {Invited discussion paper}, keywords = {dependable AI, humanistic intelligence, trustworthy AI}, pubstate = {published}, tppubtype = {workshop} } @workshop{Macher2021b, title = {Dependable Integration Concepts for Human-Centric AI-based Systems}, author = {Georg Macher and Eric Armengaud and Davide Bacciu and Jürgen Dobaj and Maid Dzambic and Matthias Seidl and Omar Veledar}, year = {2021}, date = {2021-09-07}, booktitle = {Proceedings of the 16th International Workshop on Dependable Smart Embedded Cyber-Physical Systems and Systems-of-Systems (DECSoS 2021)}, abstract = {The rising demand to integrate adaptive, cloud-based and/or AI-based systems is also increasing the need for associated dependability concepts. However, the practical processes and methods covering the whole life cycle still need to be instantiated. The assurance of dependability continues to be an open issue with no common solution. That is especially the case for novel AI and/or dynamical runtime-based approaches. This work focuses on engineering methods and design patterns that support the development of dependable AI-based autonomous systems. The paper presents the related body of knowledge of the TEACHING project and multiple automotive domain regulation activities and industrial working groups. It also considers the dependable architectural concepts and their impactful applicability to different scenarios to ensure the dependability of AI-based Cyber-Physical Systems of Systems (CPSoS) in the automotive domain. The paper shines a light on potential paths for dependable integration of AI-based systems into the automotive domain through identified analysis methods and targets. }, keywords = {dependable AI, humanistic intelligence, trustworthy AI}, pubstate = {published}, tppubtype = {workshop} } @article{Resta2021, title = {Occlusion-based Explanations in Deep Recurrent Models for Biomedical Signals}, author = {Michele Resta and Anna Monreale and Davide Bacciu}, editor = {Fabio Aiolli and Mirko Polato}, doi = {10.3390/e23081064}, year = {2021}, date = {2021-09-01}, urldate = {2021-09-01}, journal = {Entropy}, volume = {23}, number = {8}, pages = {1064}, abstract = { The biomedical field is characterized by an ever-increasing production of sequential data, which often come in the form of biosignals capturing the time evolution of physiological processes, such as blood pressure and brain activity. This has motivated a large body of research dealing with the development of machine learning techniques for the predictive analysis of such biosignals. Unfortunately, in high-stakes decision making, such as clinical diagnosis, the opacity of machine learning models becomes a crucial aspect to be addressed in order to increase the trust and adoption of AI technology. In this paper, we propose a model-agnostic explanation method, based on occlusion, that enables learning the influence of the input on the model predictions. We specifically target problems involving the predictive analysis of time-series data and the models which are typically used to deal with data of such nature, i.e. recurrent neural networks. 
Our approach is able to provide two different kinds of explanations: one suitable for technical experts, who need to verify the quality and correctness of machine learning models, and one suited to physicians, who need to understand the rationale underlying the prediction to make informed decisions. Extensive experiments on different physiological data demonstrate the effectiveness of our approach in both classification and regression tasks. }, note = {Special issue on Representation Learning}, keywords = {biomedical data, explainable AI, recurrent neural network, Sequential data}, pubstate = {published}, tppubtype = {article} } @conference{Bacciu2021d, title = {TEACHING - Trustworthy autonomous cyber-physical applications through human-centred intelligence}, editor = {Davide Bacciu and Siranush Akarmazyan and Eric Armengaud and Manlio Bacco and George Bravos and Calogero Calandra and Emanuele Carlini and Antonio Carta and Pietro Cassara and Massimo Coppola and Charalampos Davalas and Patrizio Dazzi and Maria Carmela Degennaro and Daniele Di Sarli and Jürgen Dobaj and Claudio Gallicchio and Sylvain Girbal and Alberto Gotta and Riccardo Groppo and Vincenzo Lomonaco and Georg Macher and Daniele Mazzei and Gabriele Mencagli and Dimitrios Michail and Alessio Micheli and Roberta Peroglio and Salvatore Petroni and Rosaria Potenza and Farank Pourdanesh and Christos Sardianos and Konstantinos Tserpes and Fulvio Tagliabò and Jakob Valtl and Iraklis Varlamis and Omar Veledar}, doi = {10.1109/COINS51742.2021.9524099}, year = {2021}, date = {2021-08-23}, urldate = {2021-08-23}, booktitle = {Proceedings of the 2021 IEEE International Conference on Omni-Layer Intelligent Systems (COINS)}, abstract = {This paper discusses the perspective of the H2020 TEACHING project on the next generation of autonomous applications running in a distributed and highly heterogeneous environment comprising both virtual and physical resources spanning the edge-cloud continuum. TEACHING puts forward a human-centred vision leveraging the physiological, emotional, and cognitive state of the users as a driver for the adaptation and optimization of the autonomous applications. It does so by building a distributed, embedded and federated learning system complemented by methods and tools to enforce its dependability, security and privacy preservation. The paper discusses the main concepts of the TEACHING approach and singles out the main AI-related research challenges associated with it. Further, we provide a discussion of the design choices for the TEACHING system to tackle the aforementioned challenges.}, keywords = {artificial intelligence, Continual learning, federated learning, humanistic intelligence, reservoir computing, trustworthy AI}, pubstate = {published}, tppubtype = {conference} } @workshop{Rosasco2021, title = {Distilled Replay: Overcoming Forgetting through Synthetic Samples}, author = {Andrea Rosasco and Antonio Carta and Andrea Cossu and Vincenzo Lomonaco and Davide Bacciu}, url = {https://arxiv.org/abs/2103.15851, Arxiv}, year = {2021}, date = {2021-08-19}, urldate = {2021-08-19}, booktitle = {IJCAI 2021 Workshop on Continual Semi-Supervised Learning (CSSL 2021)}, abstract = {Replay strategies are Continual Learning techniques which mitigate catastrophic forgetting by keeping a buffer of patterns from previous experience, which are interleaved with new data during training. 
The amount of patterns stored in the buffer is a critical parameter which largely influences the final performance and the memory footprint of the approach. This work introduces Distilled Replay, a novel replay strategy for Continual Learning which is able to mitigate forgetting by keeping a very small buffer (up to one pattern per class) of highly informative samples. Distilled Replay builds the buffer through a distillation process which compresses a large dataset into a tiny set of informative examples. We show the effectiveness of our Distilled Replay against naive replay, which randomly samples patterns from the dataset, on four popular Continual Learning benchmarks.}, keywords = {Continual learning, dataset distillation, deep learning}, pubstate = {published}, tppubtype = {workshop} } @conference{Atzeni2021, title = {Modeling Edge Features with Deep Bayesian Graph Networks}, author = {Daniele Atzeni and Davide Bacciu and Federico Errica and Alessio Micheli}, doi = {10.1109/IJCNN52387.2021.9533430}, year = {2021}, date = {2021-07-18}, urldate = {2021-07-18}, booktitle = {Proceedings of the International Joint Conference on Neural Networks (IJCNN 2021)}, publisher = {IEEE}, organization = {IEEE}, abstract = {We propose an extension of the Contextual Graph Markov Model, a deep and probabilistic machine learning model for graphs, to model the distribution of edge features. Our approach is architectural, as we introduce an additional Bayesian network mapping edge features into discrete states to be used by the original model. In doing so, we are also able to build richer graph representations even in the absence of edge features, which is confirmed by the performance improvements on standard graph classification benchmarks. Moreover, we successfully test our proposal in a graph regression scenario where edge features are of fundamental importance, and we show that the learned edge representation provides substantial performance improvements against the original model on three link prediction tasks. By keeping the computational complexity linear in the number of edges, the proposed model is amenable to large-scale graph processing.}, keywords = {deep learning for graphs, generative model, hidden Markov models, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{Numeroso2021, title = {MEG: Generating Molecular Counterfactual Explanations for Deep Graph Networks}, author = {Danilo Numeroso and Davide Bacciu}, year = {2021}, date = {2021-07-18}, urldate = {2021-07-18}, booktitle = {Proceedings of the International Joint Conference on Neural Networks (IJCNN 2021)}, organization = {IEEE}, keywords = {deep learning for graphs, explainable AI, graph data, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{BacciuIJCNN2021, title = {Federated Reservoir Computing Neural Networks}, author = {Davide Bacciu and Daniele Di Sarli and Pouria Faraji and Claudio Gallicchio and Alessio Micheli}, doi = {10.1109/IJCNN52387.2021.9534035}, year = {2021}, date = {2021-07-18}, urldate = {2021-07-18}, booktitle = {Proceedings of the International Joint Conference on Neural Networks (IJCNN 2021)}, publisher = {IEEE}, abstract = {A critical aspect in Federated Learning is the aggregation strategy for the combination of multiple models, trained on the edge, into a single model that incorporates all the knowledge in the federation. 
Common Federated Learning approaches for Recurrent Neural Networks (RNNs) do not provide guarantees on the predictive performance of the aggregated model. In this paper we show how the use of Echo State Networks (ESNs), which are efficient state-of-the-art RNN models for time-series processing, enables a form of federation that is optimal in the sense that it produces models mathematically equivalent to the corresponding centralized model. Furthermore, the proposed method is compliant with privacy constraints. The proposed method, which we denote as Incremental Federated Learning, is experimentally evaluated against an averaging strategy on two datasets for human state and activity recognition.}, keywords = {activity recognition, distributed learning, Echo state networks, federated learning, internet of things, pervasive computing, randomized networks, reservoir computing, Sequential data}, pubstate = {published}, tppubtype = {conference} } @conference{BacciuPoddaIJCNN2021, title = {GraphGen-Redux: a Fast and Lightweight Recurrent Model for Labeled Graph Generation}, author = {Davide Bacciu and Marco Podda}, doi = {10.1109/IJCNN52387.2021.9533743}, year = {2021}, date = {2021-07-18}, urldate = {2021-07-18}, booktitle = {Proceedings of the International Joint Conference on Neural Networks (IJCNN 2021)}, organization = {IEEE}, abstract = {The problem of labeled graph generation is gaining attention in the Deep Learning community. The task is challenging due to the sparse and discrete nature of graph spaces. Several approaches have been proposed in the literature, most of which require to transform the graphs into sequences that encode their structure and labels and to learn the distribution of such sequences through an auto-regressive generative model. Among this family of approaches, we focus on the Graphgen model. The preprocessing phase of Graphgen transforms graphs into unique edge sequences called Depth-First Search (DFS) codes, such that two isomorphic graphs are assigned the same DFS code. Each element of a DFS code is associated with a graph edge: specifically, it is a quintuple comprising one node identifier for each of the two endpoints, their node labels, and the edge label. Graphgen learns to generate such sequences auto-regressively and models the probability of each component of the quintuple independently. While effective, the independence assumption made by the model is too loose to capture the complex label dependencies of real-world graphs precisely. By introducing a novel graph preprocessing approach, we are able to process the labeling information of both nodes and edges jointly. The corresponding model, which we term Graphgen-redux, improves upon the generative performances of Graphgen in a wide range of datasets of chemical and social graphs. 
In addition, it uses approximately 78% fewer parameters than the vanilla variant and requires 50% fewer epochs of training on average.}, keywords = {deep learning, deep learning for graphs, generative model, graph data}, pubstate = {published}, tppubtype = {conference} } @conference{Errica2021, title = {Graph Mixture Density Networks}, author = {Federico Errica and Davide Bacciu and Alessio Micheli}, url = {https://proceedings.mlr.press/v139/errica21a.html, PDF}, year = {2021}, date = {2021-07-18}, urldate = {2021-07-18}, booktitle = {Proceedings of the 38th International Conference on Machine Learning (ICML 2021)}, pages = {3025-3035}, publisher = {PMLR}, keywords = {deep learning for graphs, generative model, graph data, structured data processing}, pubstate = {published}, tppubtype = {conference} } @workshop{lomonaco2021avalanche, title = {Avalanche: an End-to-End Library for Continual Learning}, author = {Vincenzo Lomonaco and Lorenzo Pellegrini and Andrea Cossu and Antonio Carta and Gabriele Graffieti and Tyler L Hayes and Matthias De Lange and Marc Masana and Jary Pomponi and Gido van de Ven and Martin Mundt and Qi She and Keiland Cooper and Jeremy Forest and Eden Belouadah and Simone Calderara and German I Parisi and Fabio Cuzzolin and Andreas Tolias and Simone Scardapane and Luca Antiga and Subutai Amhad and Adrian Popescu and Christopher Kanan and Joost van de Weijer and Tinne Tuytelaars and Davide Bacciu and Davide Maltoni}, url = {https://arxiv.org/abs/2104.00405, Arxiv}, year = {2021}, date = {2021-06-19}, urldate = {2021-06-19}, booktitle = {Proceedings of the CVPR 2021 Workshop on Continual Learning }, pages = {3600-3610}, publisher = {IEEE}, keywords = {Continual learning, deep learning, featured, software}, pubstate = {published}, tppubtype = {workshop} } @conference{Sattar2021, title = {Context-aware Graph Convolutional Autoencoder}, author = {Asma Sattar and Davide Bacciu }, doi = {10.1007/978-3-030-85030-2_23}, year = {2021}, date = {2021-06-16}, urldate = {2021-06-16}, booktitle = {Proceedings of the 16th International Work Conference on Artificial Neural Networks (IWANN 2021)}, volume = {12862}, pages = { 279-290}, publisher = {Springer}, series = {LNCS}, abstract = {Recommendation problems can be addressed as link prediction tasks in a bipartite graph between user and item nodes, labelled with rating on edges. Existing matrix completion approaches model the user’s opinion on items by ignoring context information that can instead be associated with the edges of the bipartite graph. Context is an important factor to be considered as it heavily affects opinions and preferences. Following this line of research, this paper proposes a graph convolutional auto-encoder approach which considers users’ opinion on items as well as the static node features and context information on edges. Our graph encoder produces a representation of users and items from the perspective of context, static features, and rating opinion. 
The empirical analysis on three real-world datasets shows that the proposed approach outperforms recent state-of-the-art recommendation systems.}, keywords = {deep learning for graphs, graph data, recommendation systems}, pubstate = {published}, tppubtype = {conference} } @conference{Bacciu2021, title = {Benchmarking Reservoir and Recurrent Neural Networks for Human State and Activity Recognition}, author = {Davide Bacciu and Daniele Di Sarli and Claudio Gallicchio and Alessio Micheli and Niccolo Puccinelli}, doi = {10.1007/978-3-030-85099-9_14}, year = {2021}, date = {2021-06-16}, urldate = {2021-06-16}, booktitle = {Proceedings of the 16th International Work Conference on Artificial Neural Networks (IWANN 2021)}, volume = {12862}, pages = {168-179}, publisher = {Springer}, abstract = {Monitoring of human states from streams of sensor data is an appealing applicative area for Recurrent Neural Network (RNN) models. In such a scenario, Echo State Network (ESN) models from the Reservoir Computing paradigm can represent good candidates due to the efficient training algorithms, which, compared to fully trainable RNNs, definitely ease embedding on edge devices. In this paper, we provide an experimental analysis aimed at assessing the performance of ESNs on tasks of human state and activity recognition, in both shallow and deep setups. Our analysis is conducted in comparison with vanilla RNNs, Long Short-Term Memory, Gated Recurrent Units, and their deep variations. Our empirical results on several datasets clearly indicate that, despite their simplicity, ESNs are able to achieve a level of accuracy that is competitive with those models that require full adaptation of the parameters. From a broader perspective, our analysis also points out that recurrent networks can be a first choice for the class of tasks under consideration, in particular in their deep and gated variants.}, keywords = {activity recognition, Echo state networks, recurrent neural network, reservoir computing}, pubstate = {published}, tppubtype = {conference} } @unpublished{Ferrari2021, title = {Addressing Fairness, Bias and Class Imbalance in Machine Learning: the FBI-loss}, author = {Elisa Ferrari and Davide Bacciu}, url = {https://arxiv.org/abs/2105.06345, Arxiv}, year = {2021}, date = {2021-05-13}, urldate = {2021-05-13}, abstract = {Resilience to class imbalance and confounding biases, together with the assurance of fairness guarantees, are highly desirable properties of autonomous decision-making systems with real-life impact. Many different targeted solutions have been proposed to address these three problems separately; however, a unifying perspective seems to be missing. With this work, we provide a general formalization, showing that they are different expressions of unbalance. Following this intuition, we formulate a unified loss correction to address issues related to Fairness, Biases and Imbalances (FBI-loss). The correction capabilities of the proposed approach are assessed on three real-world benchmarks, each associated with one of the issues under consideration, and on a family of synthetic data in order to better investigate the effectiveness of our loss on tasks with different complexities. 
The empirical results highlight that the flexible formulation of the FBI-loss also leads to competitive performance with respect to literature solutions specialised for the individual problems.}, howpublished = {Online on Arxiv}, keywords = {bioinformatics, biomedical data, deep learning, trustworthy AI}, pubstate = {published}, tppubtype = {unpublished} } @workshop{Carta2021, title = {Catastrophic Forgetting in Deep Graph Networks: an Introductory Benchmark for Graph Classification}, author = {Antonio Carta and Andrea Cossu and Federico Errica and Davide Bacciu}, year = {2021}, date = {2021-04-12}, urldate = {2021-04-12}, booktitle = {The Web Conference 2021 Workshop on Graph Learning Benchmarks (GLB21)}, abstract = {In this work, we study the phenomenon of catastrophic forgetting in the graph representation learning scenario. The primary objective of the analysis is to understand whether classical continual learning techniques for flat and sequential data have a tangible impact on performance when applied to graph data. To do so, we experiment with a structure-agnostic model and a deep graph network in a robust and controlled environment on three different datasets. The benchmark is complemented by an investigation on the effect of structure-preserving regularization techniques on catastrophic forgetting. We find that replay is the most effective strategy so far, and it also benefits the most from the use of regularization. Our findings suggest interesting future research at the intersection of the continual and graph representation learning fields. Finally, we provide researchers with a flexible software framework to reproduce our results and carry out further experiments.}, keywords = {Continual learning, deep learning for graphs, structured data processing}, pubstate = {published}, tppubtype = {workshop} } @article{errica_deep_2021, title = {A deep graph network-enhanced sampling approach to efficiently explore the space of reduced representations of proteins}, author = {Federico Errica and Marco Giulini and Davide Bacciu and Roberto Menichetti and Alessio Micheli and Raffaello Potestio}, doi = {10.3389/fmolb.2021.637396}, year = {2021}, date = {2021-02-28}, urldate = {2021-02-28}, journal = {Frontiers in Molecular Biosciences}, volume = {8}, pages = {136}, publisher = {Frontiers}, keywords = {deep learning, deep learning for graphs, graph data, structured data processing}, pubstate = {published}, tppubtype = {article} } @article{Bontempi2021, title = {The CLAIRE COVID-19 initiative: approach, experiences and recommendations}, author = {Gianluca Bontempi and Ricardo Chavarriaga and Hans De Canck and Emanuela Girardi and Holger Hoos and Iarla Kilbane-Dawe and Tonio Ball and Ann Nowé and Jose Sousa and Davide Bacciu and Marco Aldinucci and Manlio De Domenico and Alessandro Saffiotti and Marco Maratea}, doi = {10.1007/s10676-020-09567-7}, year = {2021}, date = {2021-02-09}, journal = {Ethics and Information Technology}, keywords = {artificial intelligence, bioinformatics, biomedical data}, pubstate = {published}, tppubtype = {article} } @article{Valenti2021, title = {A Deep Classifier for Upper-Limbs Motor Anticipation Tasks in an Online BCI Setting}, author = {Andrea Valenti and Michele Barsotti and Davide Bacciu and Luca Ascari}, url = {https://www.mdpi.com/2306-5354/8/2/21, Open Access}, doi = {10.3390/bioengineering8020021}, year = {2021}, date = {2021-02-05}, urldate = {2021-02-05}, journal = {Bioengineering}, keywords = {autoencoder, biomedical data, deep learning, Sequential data}, 
pubstate = {published}, tppubtype = {article} } @article{BacciuNCA2020, title = {Topographic mapping for quality inspection and intelligent filtering of smart-bracelet data}, author = {Davide Bacciu and Gioele Bertoncini and Davide Morelli}, doi = {10.1007/s00521-020-05600-4}, year = {2021}, date = {2021-01-04}, urldate = {2021-01-04}, journal = {Neural Computing and Applications}, keywords = {biomedical data, data visualization, explainable AI, internet of things, multivariate time-series, self-organizing map}, pubstate = {published}, tppubtype = {article} } @article{CRECCHI2021, title = {FADER: Fast Adversarial Example Rejection}, author = {Francesco Crecchi and Marco Melis and Angelo Sotgiu and Davide Bacciu and Battista Biggio}, url = {https://arxiv.org/abs/2010.09119, Arxiv}, doi = {10.1016/j.neucom.2021.10.082}, issn = {0925-2312}, year = {2021}, date = {2021-01-01}, urldate = {2021-01-01}, journal = {Neurocomputing}, keywords = {adversarial examples, adversarial machine learning, deep learning, detection, evasion attacks, rbf networks}, pubstate = {published}, tppubtype = {article} } @workshop{tomographyNeurips2020, title = {Generative Tomography Reconstruction}, author = {Matteo Ronchetti and Davide Bacciu}, url = {https://arxiv.org/pdf/2010.14933.pdf, PDF}, year = {2020}, date = {2020-12-11}, urldate = {2020-12-11}, booktitle = {34th Conference on Neural Information Processing Systems (NeurIPS 2020), Workshop on Deep Learning and Inverse Problems}, abstract = {We propose an end-to-end differentiable architecture for tomography reconstruction that directly maps a noisy sinogram into a denoised reconstruction. Compared to existing approaches, our end-to-end architecture produces more accurate reconstructions while using fewer parameters and less time. We also propose a generative model that, given a noisy sinogram, can sample realistic reconstructions. This generative model can be used as a prior inside an iterative process that, by taking into consideration the physical model, can reduce artifacts and errors in the reconstructions.}, keywords = {adversarial learning, biomedical data, deep learning, generative model, inverse problems, machine vision}, pubstate = {published}, tppubtype = {workshop} } @workshop{kplexWS2020, title = {K-plex Cover Pooling for Graph Neural Networks}, author = {Davide Bacciu and Alessio Conte and Roberto Grossi and Francesco Landolfi and Andrea Marino}, year = {2020}, date = {2020-12-11}, urldate = {2020-12-11}, booktitle = {34th Conference on Neural Information Processing Systems (NeurIPS 2020), Workshop on Learning Meets Combinatorial Algorithms}, abstract = {We introduce a novel pooling technique which borrows from classical results in graph theory that is non-parametric and generalizes well to graphs of different nature and connectivity pattern. Our pooling method, named KPlexPool, builds on the concepts of graph covers and $k$-plexes, i.e. pseudo-cliques where each node can miss up to $k$ links. The experimental evaluation on molecular and social graph classification shows that KPlexPool achieves state-of-the-art performance, supporting the intuition that well-founded graph-theoretic approaches can be effectively integrated in learning models for graphs. 
}, keywords = {deep learning, deep learning for graphs, graph data, graph pooling, structured data processing}, pubstate = {published}, tppubtype = {workshop} } @workshop{megWS2020, title = {Explaining Deep Graph Networks with Molecular Counterfactuals}, author = {Davide Bacciu and Danilo Numeroso}, url = {https://arxiv.org/pdf/2011.05134.pdf, Arxiv}, year = {2020}, date = {2020-12-11}, urldate = {2020-12-11}, booktitle = {34th Conference on Neural Information Processing Systems (NeurIPS 2020), Workshop on Machine Learning for Molecules - Accepted as Contributed Talk (Oral)}, abstract = {We present a novel approach to tackle explainability of deep graph networks in the context of molecule property prediction tasks, named MEG (Molecular Explanation Generator). We generate informative counterfactual explanations for a specific prediction under the form of (valid) compounds with high structural similarity and different predicted properties. We discuss preliminary results showing how the model can convey non-ML experts with key insights into the learning model focus in the neighborhood of a molecule. }, keywords = {deep learning for graphs, explainable AI, graph data, structured data processing}, pubstate = {published}, tppubtype = {workshop} } @workshop{CartaNeuripsWS2020, title = { Short-Term Memory Optimization in Recurrent Neural Networks by Autoencoder-based Initialization }, author = {Antonio Carta and Alessandro Sperduti and Davide Bacciu }, url = {https://arxiv.org/abs/2011.02886, Arxiv}, year = {2020}, date = {2020-12-11}, urldate = {2020-12-11}, booktitle = {34th Conference on Neural Information Processing Systems (NeurIPS 2020), Workshop on Beyond BackPropagation: Novel Ideas for Training Neural Architectures}, abstract = {Training RNNs to learn long-term dependencies is difficult due to vanishing gradients. We explore an alternative solution based on explicit memorization using linear autoencoders for sequences, which allows to maximize the short-term memory and that can be solved with a closed-form solution without backpropagation. We introduce an initialization schema that pretrains the weights of a recurrent neural network to approximate the linear autoencoder of the input sequences and we show how such pretraining can better support solving hard classification tasks with long sequences. We test our approach on sequential and permuted MNIST. We show that the proposed approach achieves a much lower reconstruction error for long sequences and a better gradient propagation during the finetuning phase. 
}, keywords = {deep learning, memory networks, recurrent neural network, Sequential data}, pubstate = {published}, tppubtype = {workshop} } @conference{CastellanaCOLING2020, title = {Learning from Non-Binary Constituency Trees via Tensor Decomposition}, author = {Daniele Castellana and Davide Bacciu}, year = {2020}, date = {2020-12-08}, urldate = {2020-12-08}, booktitle = {Proceedings of the 2020 International Conference on Computational Linguistics (COLING 2020)}, keywords = {natural language processing, tensor factorization, tensor neural networks, tree structured data}, pubstate = {published}, tppubtype = {conference} } @conference{smc2020, title = {ROS-Neuro Integration of Deep Convolutional Autoencoders for EEG Signal Compression in Real-time BCIs}, author = {Andrea Valenti and Michele Barsotti and Raffaello Brondi and Davide Bacciu and Luca Ascari}, url = {https://arxiv.org/abs/2008.13485, Arxiv}, year = {2020}, date = {2020-10-11}, urldate = {2020-10-11}, booktitle = {Proceedings of the 2020 IEEE International Conference on Systems, Man, and Cybernetics (SMC)}, publisher = {IEEE}, abstract = { Typical EEG-based BCI applications require the computation of complex functions over the noisy EEG channels to be carried out in an efficient way. Deep learning algorithms are capable of learning flexible nonlinear functions directly from data, and their constant processing latency is perfect for their deployment into online BCI systems. However, it is crucial for the jitter of the processing system to be as low as possible, in order to avoid unpredictable behaviour that can ruin the system's overall usability. In this paper, we present a novel encoding method, based on deep convolutional autoencoders, that is able to perform efficient compression of the raw EEG inputs. We deploy our model in a ROS-Neuro node, thus making it suitable for integration in ROS-based BCI and robotic systems in real world scenarios. The experimental results show that our system is capable of generating meaningful compressed encodings that preserve the original information contained in the raw input. They also show that the ROS-Neuro node is able to produce such encodings at a steady rate, with minimal jitter. We believe that our system can represent an important step towards the development of an effective BCI processing pipeline fully standardized in the ROS-Neuro framework. }, keywords = {autoencoder, biomedical data, deep learning, Sequential data}, pubstate = {published}, tppubtype = {conference} } @article{gentleGraphs2020, title = {A Gentle Introduction to Deep Learning for Graphs}, author = {Davide Bacciu and Federico Errica and Alessio Micheli and Marco Podda}, url = {https://arxiv.org/abs/1912.12693, Arxiv https://doi.org/10.1016/j.neunet.2020.06.006, Original Paper}, doi = {10.1016/j.neunet.2020.06.006}, year = {2020}, date = {2020-09-01}, urldate = {2020-09-01}, journal = {Neural Networks}, volume = {129}, pages = {203-221}, publisher = {Elsevier}, abstract = {The adaptive processing of graph data is a long-standing research topic which has been lately consolidated as a theme of major interest in the deep learning community. The snap increase in the amount and breadth of related research has come at the price of little systematization of knowledge and attention to earlier literature. This work is designed as a tutorial introduction to the field of deep learning for graphs.
It favours a consistent and progressive introduction of the main concepts and architectural aspects over an exposition of the most recent literature, for which the reader is referred to available surveys. The paper takes a top-down view to the problem, introducing a generalized formulation of graph representation learning based on a local and iterative approach to structured information processing. It introduces the basic building blocks that can be combined to design novel and effective neural models for graphs. The methodological exposition is complemented by a discussion of interesting research challenges and applications in the field. }, keywords = {deep learning, deep learning for graphs, featured, graph data, structured data processing}, pubstate = {published}, tppubtype = {article} } @article{jmlrCGMM20, title = {Probabilistic Learning on Graphs via Contextual Architectures}, author = {Davide Bacciu and Federico Errica and Alessio Micheli}, editor = {Pushmeet Kohli}, url = {http://jmlr.org/papers/v21/19-470.html, Paper}, year = {2020}, date = {2020-07-27}, urldate = {2020-07-27}, journal = {Journal of Machine Learning Research}, volume = {21}, number = {134}, pages = {1−39}, abstract = {We propose a novel methodology for representation learning on graph-structured data, in which a stack of Bayesian Networks learns different distributions of a vertex's neighborhood. Through an incremental construction policy and layer-wise training, we can build deeper architectures with respect to typical graph convolutional neural networks, with benefits in terms of context spreading between vertices. First, the model learns from graphs via maximum likelihood estimation without using target labels. Then, a supervised readout is applied to the learned graph embeddings to deal with graph classification and vertex classification tasks, showing competitive results against neural models for graphs. The computational complexity is linear in the number of edges, facilitating learning on large scale data sets. By studying how depth affects the performances of our model, we discover that a broader context generally improves performances. 
In turn, this leads to a critical analysis of some benchmarks used in literature.}, keywords = {deep learning, deep learning for graphs, graph data, hidden tree Markov model, structured data processing}, pubstate = {published}, tppubtype = {article} } @conference{Wcci20Tensor, title = {Generalising Recursive Neural Models by Tensor Decomposition}, author = {Daniele Castellana and Davide Bacciu}, url = {https://arxiv.org/abs/2006.10021, Arxiv}, year = {2020}, date = {2020-07-19}, urldate = {2020-07-19}, booktitle = {Proceedings of the 2020 IEEE World Congress on Computational Intelligence}, keywords = {deep learning, structured data processing, tensor factorization, tensor neural networks, tree structured data}, pubstate = {published}, tppubtype = {conference} } @conference{Wcci20CL, title = {Continual Learning with Gated Incremental Memories for Sequential Data Processing}, author = {Andrea Cossu and Antonio Carta and Davide Bacciu}, url = {https://arxiv.org/pdf/2004.04077.pdf, Arxiv}, doi = {10.1109/IJCNN48605.2020.9207550}, year = {2020}, date = {2020-07-19}, urldate = {2020-07-19}, booktitle = {Proceedings of the 2020 IEEE World Congress on Computational Intelligence}, keywords = {Continual learning, deep learning, recurrent neural network, Sequential data}, pubstate = {published}, tppubtype = {conference} } @conference{ecai2020, title = { Learning a Latent Space of Style-Aware Music Representations by Adversarial Autoencoders}, author = {Andrea Valenti and Antonio Carta and Davide Bacciu}, url = {https://arxiv.org/abs/2001.05494}, year = {2020}, date = {2020-06-08}, booktitle = {Proceedings of the 24th European Conference on Artificial Intelligence (ECAI 2020)}, keywords = {artificial creativity, autoencoder, deep learning, generative model, music generation}, pubstate = {published}, tppubtype = {conference} } @conference{ecml2020LMN, title = {Incremental training of a recurrent neural network exploiting a multi-scale dynamic memory}, author = {Antonio Carta and Alessandro Sperduti and Davide Bacciu}, year = {2020}, date = {2020-06-05}, urldate = {2020-06-05}, booktitle = {Proceedings of the European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases 2020 (ECML-PKDD 2020)}, publisher = {Springer International Publishing}, abstract = {The effectiveness of recurrent neural networks can be largely influenced by their ability to store into their dynamical memory information extracted from input sequences at different frequencies and timescales. Such a feature can be introduced into a neural architecture by an appropriate modularization of the dynamic memory. In this paper we propose a novel incrementally trained recurrent architecture targeting explicitly multi-scale learning. First, we show how to extend the architecture of a simple RNN by separating its hidden state into different modules, each subsampling the network hidden activations at different frequencies. Then, we discuss a training algorithm where new modules are iteratively added to the model to learn progressively longer dependencies. Each new module works at a slower frequency than the previous ones and it is initialized to encode the subsampled sequence of hidden activations. 
Experimental results on synthetic and real-world datasets on speech recognition and handwritten characters show that the modular architecture and the incremental training algorithm improve the ability of recurrent neural networks to capture long-term dependencies.}, keywords = {autoencoder, deep learning, memory networks, recurrent neural network}, pubstate = {published}, tppubtype = {conference} } @conference{aistats2020, title = {A Deep Generative Model for Fragment-Based Molecule Generation}, author = {Marco Podda and Davide Bacciu and Alessio Micheli}, url = {https://arxiv.org/abs/2002.12826}, year = {2020}, date = {2020-06-03}, urldate = {2020-06-03}, booktitle = {Proceedings of the 23rd International Conference on Artificial Intelligence and Statistics (AISTATS 2020) }, abstract = {Molecule generation is a challenging open problem in cheminformatics. Currently, deep generative approaches addressing the challenge belong to two broad categories, differing in how molecules are represented. One approach encodes molecular graphs as strings of text, and learn their corresponding character-based language model. Another, more expressive, approach operates directly on the molecular graph. In this work, we address two limitations of the former: generation of invalid or duplicate molecules. To improve validity rates, we develop a language model for small molecular substructures called fragments, loosely inspired by the well-known paradigm of Fragment-Based Drug Design. In other words, we generate molecules fragment by fragment, instead of atom by atom. To improve uniqueness rates, we present a frequency-based clustering strategy that helps to generate molecules with infrequent fragments. We show experimentally that our model largely outperforms other language model-based competitors, reaching state-of-the-art performances typical of graph-based approaches. Moreover, generated molecules display molecular properties similar to those in the training sample, even in absence of explicit task-specific supervision.}, keywords = {deep learning for graphs, generative model, graph data, molecule generation, recurrent neural network, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{iclr19, title = {A Fair Comparison of Graph Neural Networks for Graph Classification}, author = {Federico Errica and Marco Podda and Davide Bacciu and Alessio Micheli}, url = {https://openreview.net/pdf?id=HygDF6NFPB, PDF https://iclr.cc/virtual_2020/poster_HygDF6NFPB.html, Talk https://github.com/diningphil/gnn-comparison, Code}, year = {2020}, date = {2020-04-30}, booktitle = {Proceedings of the Eighth International Conference on Learning Representations (ICLR 2020)}, abstract = {Experimental reproducibility and replicability are critical topics in machine learning. Authors have often raised concerns about their lack in scientific publications to improve the quality of the field. Recently, the graph representation learning field has attracted the attention of a wide research community, which resulted in a large stream of works. As such, several Graph Neural Network models have been developed to effectively tackle graph classification. However, experimental procedures often lack rigorousness and are hardly reproducible. Motivated by this, we provide an overview of common practices that should be avoided to fairly compare with the state of the art. 
To counter this troubling trend, we ran more than 47000 experiments in a controlled and uniform framework to re-evaluate five popular models across nine common benchmarks. Moreover, by comparing GNNs with structure-agnostic baselines we provide convincing evidence that, on some datasets, structural information has not been exploited yet. We believe that this work can contribute to the development of the graph learning field, by providing a much needed grounding for rigorous evaluations of graph classification models.}, keywords = {deep learning, deep learning for graphs, graph data, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{esann20Podda, title = { Biochemical Pathway Robustness Prediction with Graph Neural Networks }, author = {Marco Podda and Alessio Micheli and Davide Bacciu and Paolo Milazzo}, editor = {Michel Verleysen}, year = {2020}, date = {2020-04-21}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'20)}, keywords = {bioinformatics, biomedical data, deep learning for graphs, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{esann20Errica, title = { Theoretically Expressive and Edge-aware Graph Learning }, author = {Federico Errica and Davide Bacciu and Alessio Micheli}, editor = {Michel Verleysen}, url = {https://arxiv.org/abs/2001.09005}, year = {2020}, date = {2020-04-21}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'20)}, abstract = {We propose a new Graph Neural Network that combines recent advancements in the field. We give theoretical contributions by proving that the model is strictly more general than the Graph Isomorphism Network and the Gated Graph Neural Network, as it can approximate the same functions and deal with arbitrary edge values. Then, we show how a single node information can flow through the graph unchanged. 
}, keywords = {deep learning for graphs, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{esann20Crecchi, title = { Perplexity-free Parametric t-SNE}, author = {Francesco Crecchi and Cyril de Bodt and Davide Bacciu and Michel Verleysen and Lee John}, editor = {Michel Verleysen}, year = {2020}, date = {2020-04-21}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'20)}, keywords = {data visualization, manifold learning, neural networks, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @conference{esann20Tutorial, title = {Tensor Decompositions in Deep Learning}, author = {Davide Bacciu and Danilo Mandic}, editor = {Michel Verleysen}, url = {https://arxiv.org/abs/2002.11835}, year = {2020}, date = {2020-04-21}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'20)}, keywords = {deep learning, structured data processing, tensor factorization}, pubstate = {published}, tppubtype = {conference} } @conference{esann20Castellana, title = { Tensor Decompositions in Recursive Neural Networks for Tree-Structured Data }, author = {Daniele Castellana and Davide Bacciu}, editor = {Michel Verleysen}, url = {https://arxiv.org/pdf/2006.10619.pdf, Arxiv}, year = {2020}, date = {2020-04-21}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'20)}, keywords = {deep learning, structured data processing, tensor factorization, tree structured data}, pubstate = {published}, tppubtype = {conference} } @inbook{graphsBDDL2020, title = {Deep Learning for Graphs}, author = {Davide Bacciu and Alessio Micheli}, editor = {Luca Oneto and Nicolo Navarin and Alessandro Sperduti and Davide Anguita }, url = {https://link.springer.com/chapter/10.1007/978-3-030-43883-8_5}, doi = {10.1007/978-3-030-43883-8_5}, isbn = {978-3-030-43883-8}, year = {2020}, date = {2020-04-04}, booktitle = {Recent Trends in Learning From Data: Tutorials from the INNS Big Data and Deep Learning Conference (INNSBDDL2019)}, volume = {896}, pages = {99-127}, publisher = {Springer International Publishing}, series = {Studies in Computational Intelligence Series}, abstract = {We introduce an overview of methods for learning in structured domains covering foundational works developed within the last twenty years to deal with a whole range of complex data representations, including hierarchical structures, graphs and networks, and giving special attention to recent deep learning models for graphs. While we provide a general introduction to the field, we explicitly focus on the neural network paradigm showing how, across the years, these models have been extended to the adaptive processing of incrementally more complex classes of structured data. 
The ultimate aim is to show how to cope with the fundamental issue of learning adaptive representations for samples with varying size and topology.}, keywords = {deep learning for graphs, generative model, graph data, molecule generation, recurrent neural network, structured data processing}, pubstate = {published}, tppubtype = {inbook} } @article{aime20Confound, title = {Measuring the effects of confounders in medical supervised classification problems: the Confounding Index (CI)}, author = {Elisa Ferrari and Alessandra Retico and Davide Bacciu}, url = {https://arxiv.org/abs/1905.08871}, doi = {10.1016/j.artmed.2020.101804}, year = {2020}, date = {2020-03-01}, journal = {Artificial Intelligence in Medicine}, volume = {103}, abstract = {Over the years, there has been growing interest in using Machine Learning techniques for biomedical data processing. When tackling these tasks, one needs to bear in mind that biomedical data depends on a variety of characteristics, such as demographic aspects (age, gender, etc) or the acquisition technology, which might be unrelated with the target of the analysis. In supervised tasks, failing to match the ground truth targets with respect to such characteristics, called confounders, may lead to very misleading estimates of the predictive performance. Many strategies have been proposed to handle confounders, ranging from data selection, to normalization techniques, up to the use of training algorithm for learning with imbalanced data. However, all these solutions require the confounders to be known a priori. To this aim, we introduce a novel index that is able to measure the confounding effect of a data attribute in a bias-agnostic way. This index can be used to quantitatively compare the confounding effects of different variables and to inform correction methods such as normalization procedures or ad-hoc-prepared learning algorithms. The effectiveness of this index is validated on both simulated data and real-world neuroimaging data. }, keywords = {artificial intelligence, bioinformatics, biomedical data, explainable AI, statistics}, pubstate = {published}, tppubtype = {article} } @article{neucompEsann19, title = {Edge-based sequential graph generation with recurrent neural networks}, author = {Davide Bacciu and Alessio Micheli and Marco Podda}, url = {https://arxiv.org/abs/2002.00102v1}, year = {2019}, date = {2019-12-31}, journal = {Neurocomputing}, abstract = { Graph generation with Machine Learning is an open problem with applications in various research fields. In this work, we propose to cast the generative process of a graph into a sequential one, relying on a node ordering procedure. We use this sequential process to design a novel generative model composed of two recurrent neural networks that learn to predict the edges of graphs: the first network generates one endpoint of each edge, while the second network generates the other endpoint conditioned on the state of the first. We test our approach extensively on five different datasets, comparing with two well-known baselines coming from graph literature, and two recurrent approaches, one of which holds state of the art performances. Evaluation is conducted considering quantitative and qualitative characteristics of the generated samples. Results show that our approach is able to yield novel, and unique graphs originating from very different distributions, while retaining structural properties very similar to those in the training sample. 
Under the proposed evaluation framework, our approach is able to reach performances comparable to the current state of the art on the graph generation task. }, keywords = {deep learning for graphs, generative model, graph data, structured data processing}, pubstate = {published}, tppubtype = {article} } @conference{ssci19, title = {Sequential Sentence Embeddings for Semantic Similarity}, author = {Davide Bacciu and Antonio Carta}, doi = {10.1109/SSCI44817.2019.9002824}, year = {2019}, date = {2019-12-06}, urldate = {2019-12-06}, booktitle = {Proceedings of the 2019 IEEE Symposium Series on Computational Intelligence (SSCI'19)}, publisher = {IEEE}, abstract = { Sentence embeddings are distributed representations of sentences intended to be general features to be effectively used as input for deep learning models across different natural language processing tasks. State-of-the-art sentence embeddings for semantic similarity are computed with a weighted average of pretrained word embeddings, hence completely ignoring the contribution of word ordering within a sentence in defining its semantics. We propose a novel approach to compute sentence embeddings for semantic similarity that exploits a linear autoencoder for sequences. The method can be trained in closed form and it is easy to fit on unlabeled sentences. Our method provides a grounded approach to identify and subtract common discourse from a sentence and its embedding, to remove associated uninformative features. Unlike similar methods in the literature (e.g. the popular Smooth Inverse Frequency approach), our method is able to account for word order. We show that our estimate of the common discourse vector improves the results on two different semantic similarity benchmarks when compared to related approaches from the literature.}, keywords = {autoencoder, deep learning, memory networks, natural language, recurrent neural network}, pubstate = {published}, tppubtype = {conference} } @online{treccani19, title = {Reti neurali e linguaggio. Le insidie nascoste di un'algebra delle parole}, author = {Davide Bacciu}, editor = {Mirko Tavosanis}, url = {http://www.treccani.it/magazine/lingua_italiana/speciali/IA/02_Bacciu.html}, year = {2019}, date = {2019-12-03}, urldate = {2019-12-03}, organization = {Lingua Italiana - Treccani}, keywords = {artificial intelligence, natural language processing, neural networks}, pubstate = {published}, tppubtype = {online} } @conference{aiia2019, title = {A non-negative factorization approach to node pooling in graph convolutional neural networks}, author = {Davide Bacciu and Luigi {Di Sotto}}, url = {https://arxiv.org/pdf/1909.03287.pdf}, year = {2019}, date = {2019-11-22}, booktitle = {Proceedings of the 18th International Conference of the Italian Association for Artificial Intelligence (AIIA 2019)}, publisher = {Springer-Verlag}, series = {Lecture Notes in Artificial Intelligence}, keywords = {deep learning, deep learning for graphs, graph data, hidden tree Markov model, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{clic2019, title = {Suitable doesn’t mean attractive. 
Human-based evaluation of automatically generated headlines}, author = {Michele Cafagna and Lorenzo {De Mattei} and Davide Bacciu and Malvina Nissim}, editor = {Raffaella Bernardi and Roberto Navigli and Giovanni Semeraro}, url = {http://ceur-ws.org/Vol-2481/paper13.pdf}, year = {2019}, date = {2019-11-15}, urldate = {2019-11-15}, booktitle = {Proceedings of the 6th Italian Conference on Computational Linguistics (CLiC-it 2019)}, volume = {2481 }, publisher = {CEUR}, series = {AI*IA series}, abstract = {We train three different models to generate newspaper headlines from a portion of the corresponding article. The articles are obtained from two mainstream Italian newspapers. In order to assess the models’ performance, we set up a human-based evaluation where 30 different native speakers expressed their judgment over a variety of aspects. The outcome shows that (i) pointer networks perform better than standard sequence to sequence models, creating mostly correct and appropriate titles; (ii) the suitability of a headline to its article for pointer networks is on par or better than the gold headline; (iii) gold headlines are still by far more inviting than generated headlines to read the whole article, highlighting the contrast between human creativity and content appropriateness.}, keywords = {natural language, recurrent neural network, text generation}, pubstate = {published}, tppubtype = {conference} } @conference{lmnArx18, title = {Linear Memory Networks}, author = {Davide Bacciu and Antonio Carta and Alessandro Sperduti}, url = {https://arxiv.org/pdf/1811.03356.pdf}, doi = {10.1007/978-3-030-30487-4_40}, year = {2019}, date = {2019-09-17}, urldate = {2019-09-17}, booktitle = {Proceedings of the 28th International Conference on Artificial Neural Networks (ICANN 2019), }, volume = {11727}, pages = {513-525 }, publisher = {Springer-Verlag}, series = {Lecture Notes in Computer Science}, abstract = {Recurrent neural networks can learn complex transduction problems that require maintaining and actively exploiting a memory of their inputs. Such models traditionally consider memory and input-output functionalities indissolubly entangled. We introduce a novel recurrent architecture based on the conceptual separation between the functional input-output transformation and the memory mechanism, showing how they can be implemented through different neural components. By building on such conceptualization, we introduce the Linear Memory Network, a recurrent model comprising a feedforward neural network, realizing the non-linear functional transformation, and a linear autoencoder for sequences, implementing the memory component. The resulting architecture can be efficiently trained by building on closed-form solutions to linear optimization problems. Further, by exploiting equivalence results between feedforward and recurrent neural networks we devise a pretraining schema for the proposed architecture. Experiments on polyphonic music datasets show competitive results against gated recurrent networks and other state of the art models. 
}, keywords = {autoencoder, deep learning, memory networks, recurrent neural network}, pubstate = {published}, tppubtype = {conference} } @misc{automatica2019, title = {Autonomous Grasping with SoftHands: Combining Human Inspiration, Deep Learning and Embodied Machine Intelligence}, author = {Della Santina Cosimo and Averta Giuseppe and Arapi Visar and Settimi Alessandro and Catalano Manuel Giuseppe and Bacciu Davide and Bicchi Antonio and Bianchi Matteo}, year = {2019}, date = {2019-09-11}, booktitle = {Oral contribution to AUTOMATICA.IT 2019}, keywords = {deep learning, robotics}, pubstate = {published}, tppubtype = {presentation} } @article{rubicon2019CI, title = {An Ambient Intelligence Approach for Learning in Smart Robotic Environments}, author = {Bacciu Davide and Di Rocco Maurizio and Dragone Mauro and Gallicchio Claudio and Micheli Alessio and Saffiotti Alessandro}, doi = {10.1111/coin.12233}, year = {2019}, date = {2019-07-31}, journal = {Computational Intelligence}, abstract = {Smart robotic environments combine traditional (ambient) sensing devices and mobile robots. This combination extends the type of applications that can be considered, reduces their complexity, and enhances the individual values of the devices involved by enabling new services that cannot be performed by a single device. In order to reduce the amount of preparation and pre-programming required for their deployment in real world applications, it is important to make these systems self-learning, self-configuring, and self-adapting. The solution presented in this paper is based upon a type of compositional adaptation where (possibly multiple) plans of actions are created through planning and involve the activation of pre-existing capabilities. All the devices in the smart environment participate in a pervasive learning infrastructure, which is exploited to recognize which plans of actions are most suited to the current situation. The system is evaluated in experiments run in a real domestic environment, showing its ability to pro-actively and smoothly adapt to subtle changes in the environment and in the habits and preferences of their user(s).}, note = {Early View (Online Version of Record before inclusion in an issue) }, keywords = {ambient assisted living, Echo state networks, feature selection, multivariate time-series, pervasive computing, planning, recurrent neural network, reservoir computing, robotic ecology, wireless sensor networks}, pubstate = {published}, tppubtype = {article} } @conference{ijcnn2019, title = {Bayesian Tensor Factorisation for Bottom-up Hidden Tree Markov Models}, author = {Daniele Castellana and Davide Bacciu}, url = {https://arxiv.org/pdf/1905.13528.pdf}, year = {2019}, date = {2019-07-15}, urldate = {2019-07-15}, booktitle = {Proceedings of the 2019 International Joint Conference on Neural Networks (IJCNN 2019)}, publisher = {IEEE}, abstract = {The Bottom-Up Hidden Tree Markov Model is a highly expressive model for tree-structured data. Unfortunately, it cannot be used in practice due to the intractable size of its state-transition matrix. We propose a new approximation which relies on the Tucker factorisation of tensors. The probabilistic interpretation of such approximation allows us to define a new probabilistic model for tree-structured data. Hence, we define the new approximated model and we derive its learning algorithm. Then, we empirically assess the effective power of the new model by evaluating it on two different tasks.
In both cases, our model outperforms the other approximated model known in the literature.}, keywords = {graphical models, hidden tree Markov model, structured data processing, tree structured data; tensor factorization; Bayesian learning}, pubstate = {published}, tppubtype = {conference} } @article{neucomBayesHTMM, title = {Bayesian Mixtures of Hidden Tree Markov Models for Structured Data Clustering}, author = {Bacciu Davide and Castellana Daniele}, url = {https://doi.org/10.1016/j.neucom.2018.11.091}, doi = {10.1016/j.neucom.2018.11.091}, isbn = {0925-2312}, year = {2019}, date = {2019-05-21}, journal = {Neurocomputing}, volume = {342}, pages = {49-59}, abstract = {The paper deals with the problem of unsupervised learning with structured data, proposing a mixture model approach to cluster tree samples. First, we discuss how to use the Switching-Parent Hidden Tree Markov Model, a compositional model for learning tree distributions, to define a finite mixture model where the number of components is fixed by a hyperparameter. Then, we show how to relax such an assumption by introducing a Bayesian non-parametric mixture model where the number of necessary hidden tree components is learned from data. Experimental validation on synthetic and real datasets show the benefit of mixture models over simple hidden tree models in clustering applications. Further, we provide a characterization of the behaviour of the two mixture models for different choices of their hyperparameters.}, keywords = {graphical models, hidden tree Markov model, structured data processing, tree structured data, unsupervised learning}, pubstate = {published}, tppubtype = {article} } @conference{esann19Attacks, title = {Detecting Black-box Adversarial Examples through Nonlinear Dimensionality Reduction}, author = {Francesco Crecchi and Davide Bacciu and Battista Biggio }, editor = {Michel Verleysen}, url = {https://arxiv.org/pdf/1904.13094.pdf}, year = {2019}, date = {2019-04-24}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'19)}, publisher = {i6doc.com}, address = {Louvain-la-Neuve, Belgium}, keywords = {adversarial attacks, deep learning, manifold learning}, pubstate = {published}, tppubtype = {conference} } @conference{esann19GraphGen, title = {Graph generation by sequential edge prediction}, author = {Davide Bacciu and Alessio Micheli and Marco Podda}, editor = {Michel Verleysen}, url = {https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2019-107.pdf}, year = {2019}, date = {2019-04-24}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'19)}, publisher = {i6doc.com}, address = {Louvain-la-Neuve, Belgium}, keywords = {deep learning for graphs, generative model, graph data, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{esann19Tutorial, title = {Societal Issues in Machine Learning: When Learning from Data is Not Enough}, author = { Davide Bacciu and Battista Biggio and Francesco Crecchi and Paulo J. G. Lisboa and José D. 
Martin and Luca Oneto and Alfredo Vellido}, editor = {Michel Verleysen}, url = {https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2019-6.pdf}, year = {2019}, date = {2019-04-24}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'19)}, publisher = {i6doc.com}, address = {Louvain-la-Neuve, Belgium}, keywords = {adversarial attacks, deep learning, explainable AI}, pubstate = {published}, tppubtype = {conference} } @article{tnnnls_dropin2019, title = {Augmenting Recurrent Neural Networks Resilience by Dropout}, author = {Davide Bacciu and Francesco Crecchi}, doi = {10.1109/TNNLS.2019.2899744}, year = {2019}, date = {2019-03-31}, urldate = {2019-03-31}, journal = {IEEE Transactions on Neural Networks and Learning Systems}, abstract = {The paper discusses the simple idea that dropout regularization can be used to efficiently induce resiliency to missing inputs at prediction time in a generic neural network. We show how the approach can be effective on tasks where imputation strategies often fail, namely involving recurrent neural networks and scenarios where whole sequences of input observations are missing. The experimental analysis provides an assessment of the accuracy-resiliency tradeoff in multiple recurrent models, including reservoir computing methods, and comprising real-world ambient intelligence and biomedical time series.}, keywords = {ambient assisted living, deep learning, Echo state networks, recurrent neural network, reservoir computing}, pubstate = {published}, tppubtype = {article} } @article{ral2019, title = {Learning from humans how to grasp: a data-driven architecture for autonomous grasping with anthropomorphic soft hands}, author = {Della Santina Cosimo and Arapi Visar and Averta Giuseppe and Damiani Francesca and Fiore Gaia and Settimi Alessandro and Catalano Manuel Giuseppe and Bacciu Davide and Bicchi Antonio and Bianchi Matteo}, url = {https://ieeexplore.ieee.org/document/8629968}, doi = {10.1109/LRA.2019.2896485}, issn = {2377-3766}, year = {2019}, date = {2019-02-01}, journal = {IEEE Robotics and Automation Letters}, pages = {1-8}, note = {Also accepted for presentation at ICRA 2019}, keywords = {convolutional neural network, deep learning, image understanding, machine vision, recurrent neural network, robotics}, pubstate = {published}, tppubtype = {article} } @conference{inns2019, title = {Deep Tree Transductions - A Short Survey}, author = {Bacciu Davide and Bruno Antonio}, editor = {Luca Oneto and Nicol{\`o} Navarin and Alessandro Sperduti and Davide Anguita}, url = {https://arxiv.org/abs/1902.01737}, doi = {10.1007/978-3-030-16841-4_25}, year = {2019}, date = {2019-01-04}, urldate = {2019-01-04}, booktitle = {Proceedings of the 2019 INNS Big Data and Deep Learning (INNSBDDL 2019)}, pages = {236--245}, publisher = {Springer International Publishing}, series = {Recent Advances in Big Data and Deep Learning}, abstract = {The paper surveys recent extensions of Long Short-Term Memory networks to handle tree structures from the perspective of learning non-trivial forms of isomorph structured transductions. It provides a discussion of modern TreeLSTM models, showing the effect of the bias induced by the direction of tree processing.
An empirical analysis is performed on real-world benchmarks, highlighting how there is no single model adequate to effectively approach all transduction problems.}, keywords = {deep learning, recurrent neural network, structured data processing, tree structured data}, pubstate = {published}, tppubtype = {conference} } @article{frontNeurob18, title = {DeepDynamicHand: A deep neural architecture for labeling hand manipulation strategies in video sources exploiting temporal information }, author = {Visar Arapi and Cosimo Della Santina and Davide Bacciu and Matteo Bianchi and Antonio Bicchi}, url = {https://www.frontiersin.org/articles/10.3389/fnbot.2018.00086/full}, doi = {10.3389/fnbot.2018.00086}, year = {2018}, date = {2018-12-17}, urldate = {2018-12-17}, journal = {Frontiers in Neurorobotics}, volume = {12}, pages = {86}, abstract = {Humans are capable of complex manipulation interactions with the environment, relying on the intrinsic adaptability and compliance of their hands. Recently, soft robotic manipulation has attempted to reproduce such an extraordinary behavior, through the design of deformable yet robust end-effectors. To this goal, the investigation of human behavior has become crucial to correctly inform technological developments of robotic hands that can successfully exploit environmental constraint as humans actually do. Among the different tools robotics can leverage on to achieve this objective, deep learning has emerged as a promising approach for the study and then the implementation of neuro-scientific observations on the artificial side. However, current approaches tend to neglect the dynamic nature of hand pose recognition problems, limiting the effectiveness of these techniques in identifying sequences of manipulation primitives underpinning action generation, e.g. during purposeful interaction with the environment. In this work, we propose a vision-based supervised Hand Pose Recognition method which, for the first time, takes into account temporal information to identify meaningful sequences of actions in grasping and manipulation tasks . More specifically, we apply Deep Neural Networks to automatically learn features from hand posture images that consist of frames extracted from grasping and manipulation task videos with objects and external environmental constraints. For training purposes, videos are divided into intervals, each associated to a specific action by a human supervisor. The proposed algorithm combines a Convolutional Neural Network to detect the hand within each video frame and a Recurrent Neural Network to predict the hand action in the current frame, while taking into consideration the history of actions performed in the previous frames. Experimental validation has been performed on two datasets of dynamic hand-centric strategies, where subjects regularly interact with objects and environment. Proposed architecture achieved a very good classification accuracy on both datasets, reaching performance up to 94%, and outperforming state of the art techniques. The outcomes of this study can be successfully applied to robotics, e.g for planning and control of soft anthropomorphic manipulators. 
}, keywords = {convolutional neural network, deep learning, image understanding, machine vision, recurrent neural network, robotics}, pubstate = {published}, tppubtype = {article} } @conference{ssci2018, title = {Text Summarization as Tree Transduction by Top-Down TreeLSTM}, author = {Bacciu Davide and Bruno Antonio}, url = {https://arxiv.org/abs/1809.09096}, doi = {10.1109/SSCI.2018.8628873}, year = {2018}, date = {2018-11-18}, urldate = {2018-11-18}, booktitle = {Proceedings of the 2018 IEEE Symposium Series on Computational Intelligence (SSCI'18)}, pages = {1411-1418}, publisher = {IEEE}, abstract = {Extractive compression is a challenging natural language processing problem. This work contributes by formulating neural extractive compression as a parse tree transduction problem, rather than a sequence transduction task. Motivated by this, we introduce a deep neural model for learning structure-to-substructure tree transductions by extending the standard Long Short-Term Memory, considering the parent-child relationships in the structural recursion. The proposed model can achieve state of the art performance on sentence compression benchmarks, both in terms of accuracy and compression rate. }, keywords = {deep learning, deep learning for graphs, neural networks, structured data processing, tree structured data, tree transductions}, pubstate = {published}, tppubtype = {conference} } @article{naturescirep2018, title = {A machine learning approach to estimating preterm infants survival: development of the Preterm Infants Survival Assessment (PISA) predictor}, author = {Podda Marco and Bacciu Davide and Micheli Alessio and Bellu Roberto and Placidi Giulia and Gagliardi Luigi }, url = {https://doi.org/10.1038/s41598-018-31920-6}, doi = {10.1038/s41598-018-31920-6}, year = {2018}, date = {2018-09-13}, urldate = {2018-09-13}, journal = {Nature Scientific Reports}, volume = {8}, abstract = {Estimation of mortality risk of very preterm neonates is carried out in clinical and research settings. We aimed at elaborating a prediction tool using machine learning methods. We developed models on a cohort of 23747 neonates <30 weeks gestational age, or <1501 g birth weight, enrolled in the Italian Neonatal Network in 2008–2014 (development set), using 12 easily collected perinatal variables. We used a cohort from 2015–2016 (N = 5810) as a test set. Among several machine learning methods we chose artificial Neural Networks (NN). The resulting predictor was compared with logistic regression models. In the test cohort, NN had a slightly better discrimination than logistic regression (P < 0.002). The differences were greater in subgroups of neonates (at various gestational age or birth weight intervals, singletons). Using a cutoff of death probability of 0.5, logistic regression misclassified 67/5810 neonates (1.2 percent) more than NN. In conclusion our study – the largest published so far – shows that even in this very simplified scenario, using only limited information available up to 5 minutes after birth, a NN approach had a small but significant advantage over current approaches. 
The software implementing the predictor is made freely available to the community.}, keywords = {bioinformatics, biomedical data, neural networks, support vector machine}, pubstate = {published}, tppubtype = {article} } @workshop{learnaut18, title = {Learning Tree Distributions by Hidden Markov Models}, author = {Bacciu Davide and Castellana Daniele}, editor = {Rémi Eyraud and Jeffrey Heinz and Guillaume Rabusseau and Matteo Sammartino }, url = {https://arxiv.org/abs/1805.12372}, year = {2018}, date = {2018-07-13}, booktitle = {Proceedings of the FLOC 2018 Workshop on Learning and Automata (LearnAut'18)}, keywords = {graphical models, hidden tree Markov model, structured data processing, tree structured data}, pubstate = {published}, tppubtype = {workshop} } @article{neurocomp2017, title = {Randomized neural networks for preference learning with physiological data}, author = {Bacciu Davide and Colombo Michele and Morelli Davide and Plans David}, editor = {Fabio Aiolli and Luca Oneto and Michael Biehl }, url = {https://authors.elsevier.com/a/1Wxbz_L2Otpsb3}, doi = {10.1016/j.neucom.2017.11.070}, year = {2018}, date = {2018-07-12}, journal = {Neurocomputing}, volume = {298}, pages = {9-20}, abstract = {The paper discusses the use of randomized neural networks to learn a complete ordering between samples of heart-rate variability data by relying solely on partial and subject-dependent information concerning pairwise relations between samples. We confront two approaches, i.e. Extreme Learning Machines and Echo State Networks, assessing the effectiveness in exploiting hand-engineered heart-rate variability features versus using raw beat-to-beat sequential data. Additionally, we introduce a weight sharing architecture and a preference learning error function whose performance is compared with a standard architecture realizing pairwise ranking as a binary-classification task. The models are evaluated on real-world data from a mobile application realizing a guided breathing exercise, using a dataset of over 54K exercising sessions. Results show how a randomized neural model processing information in its raw sequential form can outperform its vectorial counterpart, increasing accuracy in predicting the correct sample ordering by about 20%. 
Further, the experiments highlight the importance of using weight sharing architectures to learn smooth and generalizable complete orders induced by the preference relation.}, keywords = {activity recognition, biomedical data, Echo state networks, preference learning, randomized networks, reservoir computing}, pubstate = {published}, tppubtype = {article} } @conference{icml2018, title = {Contextual Graph Markov Model: A Deep and Generative Approach to Graph Processing}, author = {Bacciu Davide and Errica Federico and Micheli Alessio}, url = {https://arxiv.org/abs/1805.10636}, year = {2018}, date = {2018-07-11}, urldate = {2018-07-11}, booktitle = {Proceedings of the 35th International Conference on Machine Learning (ICML 2018)}, keywords = {deep learning, deep learning for graphs, graph data, hidden tree Markov model, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{ijcnn2018, title = {Concentric ESN: Assessing the Effect of Modularity in Cycle Reservoirs}, author = {Bacciu Davide and Bongiorno Andrea}, url = {https://arxiv.org/abs/1805.09244}, year = {2018}, date = {2018-07-09}, urldate = {2018-07-09}, booktitle = {Proceedings of the 2018 International Joint Conference on Neural Networks (IJCNN 2018) }, pages = {1-9}, publisher = {IEEE}, keywords = {deep learning, Echo state networks, reservoir computing}, pubstate = {published}, tppubtype = {conference} } @conference{esann2018Tree, title = {Mixture of Hidden Markov Models as Tree Encoder}, author = {Bacciu Davide and Castellana Daniele}, editor = {Michel Verleysen}, isbn = {978-287587047-6}, year = {2018}, date = {2018-04-26}, urldate = {2018-04-26}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'18)}, pages = {543-548}, publisher = {i6doc.com}, address = {Louvain-la-Neuve, Belgium}, abstract = {The paper introduces a new probabilistic tree encoder based on a mixture of Bottom-up Hidden Tree Markov Models. The ability to recognise similar structures in data is experimentally assessed both in clusterization and classification tasks. The results of these preliminary experiments suggest that the model can be successfully used to compress the tree structural and label patterns in a vectorial representation.}, keywords = {graphical models, hidden tree Markov model, structured data processing, tree structured data, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @conference{esann2018Tut, title = {Bioinformatics and medicine in the era of deep learning}, author = {Bacciu Davide and Lisboa Paulo JG and Martin Jose D and Stoean Ruxandra and Vellido Alfredo}, editor = {Michel Verleysen}, url = {http://arxiv.org/abs/1802.09791}, isbn = {978-287587047-6}, year = {2018}, date = {2018-04-26}, urldate = {2018-04-26}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'18)}, pages = {345-354}, publisher = {i6doc.com}, address = {Louvain-la-Neuve, Belgium}, abstract = {Many of the current scientific advances in the life sciences have their origin in the intensive use of data for knowledge discovery. In no area this is so clear as in bioinformatics, led by technological breakthroughs in data acquisition technologies. 
It has been argued that bioinformatics could quickly become the field of research generating the largest data repositories, beating other data-intensive areas such as high-energy physics or astroinformatics. Over the last decade, deep learning has become a disruptive advance in machine learning, giving new life to the long-standing connectionist paradigm in artificial intelligence. Deep learning methods are ideally suited to large-scale data and, therefore, they should be ideally suited to knowledge discovery in bioinformatics and biomedicine at large. In this brief paper, we review key aspects of the application of deep learning in bioinformatics and medicine, drawing from the themes covered by the contributions to an ESANN 2018 special session devoted to this topic.}, keywords = {bioinformatics, biomedical data, deep learning}, pubstate = {published}, tppubtype = {conference} } @article{tnnlsTreeKer17, title = {Generative Kernels for Tree-Structured Data}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, doi = {10.1109/TNNLS.2017.2785292}, issn = {2162-2388}, year = {2018}, date = {2018-01-15}, journal = {IEEE Transactions on Neural Networks and Learning Systems}, abstract = {The paper presents a family of methods for the design of adaptive kernels for tree-structured data that exploits the summarization properties of hidden states of hidden Markov models for trees. We introduce a compact and discriminative feature space based on the concept of hidden states multisets and we discuss different approaches to estimate such hidden state encoding. We show how it can be used to build an efficient and general tree kernel based on Jaccard similarity. Further, we derive an unsupervised convolutional generative kernel using a topology induced on the Markov states by a tree topographic mapping. The paper provides an extensive empirical assessment on a variety of structured data learning tasks, comparing the predictive accuracy and computational efficiency of state-of-the-art generative, adaptive and syntactical tree kernels. The results show that the proposed generative approach has a good tradeoff between computational complexity and predictive performance, in particular when considering the soft matching introduced by the topographic mapping.}, keywords = {hidden tree Markov model, kernel methods, structured data processing, tree kernel, tree structured data}, pubstate = {published}, tppubtype = {article} } @conference{dl2017, title = {Hidden Tree Markov Networks: Deep and Wide Learning for Structured Data}, author = {Bacciu Davide}, url = {https://arxiv.org/abs/1711.07784}, year = {2017}, date = {2017-11-27}, urldate = {2017-11-27}, booktitle = {Proc. of the 2017 IEEE Symposium Series on Computational Intelligence (SSCI'17)}, publisher = {IEEE}, keywords = {deep learning, hidden tree Markov model, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{iml2017, title = {On the Need of Machine Learning as a Service for the Internet of Things}, author = {Bacciu Davide and Chessa Stefano and Gallicchio Claudio and Micheli Alessio}, isbn = {978-1-4503-5243-7}, year = {2017}, date = {2017-10-18}, booktitle = {Proc.
of the International Conference on Internet of Things and Machine Learning (IML 2017)}, publisher = {ACM}, series = {International Conference Proceedings Series (ICPS)}, keywords = {internet of things, pervasive computing, reservoir computing}, pubstate = {published}, tppubtype = {conference} } @article{eaai2017, title = {A Learning System for Automatic Berg Balance Scale Score Estimation}, author = {Bacciu Davide and Chessa Stefano and Gallicchio Claudio and Micheli Alessio and Pedrelli Luca and Ferro Erina and Fortunati Luigi and La Rosa Davide and Palumbo Filippo and Vozzi Federico and Parodi Oberdan}, url = {http://www.sciencedirect.com/science/article/pii/S0952197617302026}, doi = {https://doi.org/10.1016/j.engappai.2017.08.018}, year = {2017}, date = {2017-08-24}, urldate = {2017-08-24}, journal = {Engineering Applications of Artificial Intelligence}, volume = {66}, pages = {60-74}, abstract = {The objective of this work is the development of a learning system for the automatic assessment of balance abilities in elderly people. The system is based on estimating the Berg Balance Scale (BBS) score from the stream of sensor data gathered by a Wii Balance Board. The scientific challenge tackled by our investigation is to assess the feasibility of exploiting the richness of the temporal signals gathered by the balance board for inferring the complete BBS score based on data from a single BBS exercise. The relation between the data collected by the balance board and the BBS score is inferred by neural networks for temporal data, modeled in particular as Echo State Networks within the Reservoir Computing (RC) paradigm, as a result of a comprehensive comparison among different learning models. The proposed system is able to estimate the complete BBS score directly from temporal data on exercise #10 of the BBS test, with ≈10 s of duration. Experimental results on real-world data show an absolute error below 4 BBS score points (i.e. below 7% of the whole BBS range), resulting in a favorable trade-off between predictive performance and user’s required time with respect to previous works in literature. Results achieved by RC models also compare well with respect to different related learning models.
Overall, the proposed system stands out as an effective tool for an accurate automated assessment of balance abilities in the elderly, being unobtrusive, easy to use and suitable for autonomous usage.}, keywords = {ambient assisted living, Echo state networks, multivariate time-series, reservoir computing}, pubstate = {published}, tppubtype = {article} } @inbook{iotBook17, title = {Internet of Robotic Things - Converging Sensing/Actuating, Hyperconnectivity, Artificial Intelligence and IoT Platforms}, author = {Vermesan Ovidiu and Broring Arne and Tragos Elias and Serrano Martin and Bacciu Davide and Chessa Stefano and Gallicchio Claudio and Micheli Alessio and Dragone Mauro and Saffiotti Alessandro and Simoens Pieter and Cavallo Filippo and Bahr Roy}, editor = {Ovidiu Vermesan and Joel Bacquet}, url = {http://www.riverpublishers.com/downloadchapter.php?file=RP_9788793609105C4.pdf}, doi = {10.13052/rp-9788793609105}, isbn = {9788793609105}, year = {2017}, date = {2017-06-28}, booktitle = {Cognitive Hyperconnected Digital Transformation: Internet of Things Intelligence Evolution}, pages = {97-155}, publisher = {River Publishers}, chapter = {4}, keywords = {internet of things, pervasive computing, reservoir computing}, pubstate = {published}, tppubtype = {inbook} } @article{jrie2017, title = {Reliability and human factors in Ambient Assisted Living environments: The DOREMI case study}, author = {Palumbo Filippo and La Rosa Davide and Ferro Erina and Bacciu Davide and Gallicchio Claudio and Micheli Alessio and Chessa Stefano and Vozzi Federico and Parodi Oberdan}, doi = {10.1007/s40860-017-0042-1}, isbn = {2199-4668}, year = {2017}, date = {2017-06-17}, journal = {Journal of Reliable Intelligent Environments}, volume = {3}, number = {3}, pages = {139–157}, publisher = {Springer}, abstract = {Malnutrition, sedentariness, and cognitive decline in elderly people represent the target areas addressed by the DOREMI project. It aimed at developing a systemic solution for the elderly, able to prolong their functional and cognitive capacity by empowering, stimulating, and unobtrusively monitoring the daily activities according to well-defined “Active Ageing” life-style protocols. Besides the key features of DOREMI in terms of technological and medical protocol solutions, this work is focused on the analysis of the impact of such a solution on the daily life of users and how the users’ behaviour modifies the expected results of the system in a long-term perspective. To this end, we analyse the reliability of the whole system in terms of human factors and their effects on the reliability requirements identified before starting the experimentation in the pilot sites.
After giving an overview of the technological solutions we adopted in the project, this paper concentrates on the activities conducted during the two pilot site studies (32 test sites across UK and Italy), the users’ experience of the entire system, and how human factors influenced its overall reliability.}, keywords = {activity recognition, ambient assisted living, biomedical data, pervasive computing, reservoir computing}, pubstate = {published}, tppubtype = {article} } @conference{ijcnn2017, title = {DropIn: Making Neural Networks Robust to Missing Inputs by Dropout}, author = {Bacciu Davide and Crecchi Francesco and Morelli Davide}, url = {https://arxiv.org/abs/1705.02643}, doi = {10.1109/IJCNN.2017.7966106}, isbn = {978-1-5090-6182-2}, year = {2017}, date = {2017-05-19}, urldate = {2017-05-19}, booktitle = {Proceedings of the 2017 International Joint Conference on Neural Networks (IJCNN 2017)}, pages = {2080-2087}, publisher = {IEEE}, abstract = {The paper presents a novel, principled approach to train recurrent neural networks from the Reservoir Computing family that are robust to missing part of the input features at prediction time. By building on the ensembling properties of Dropout regularization, we propose a methodology, named DropIn, which efficiently trains a neural model as a committee machine of subnetworks, each capable of predicting with a subset of the original input features. We discuss the application of the DropIn methodology in the context of Reservoir Computing models, targeting applications characterized by input sources that are unreliable or prone to be disconnected, such as in pervasive wireless sensor networks and ambient intelligence. We provide an experimental assessment using real-world data from such application domains, showing how the DropIn methodology makes it possible to maintain predictive performance comparable to that of a model without missing features, even when 20%–50% of the inputs are not available.}, keywords = {ambient assisted living, deep learning, Echo state networks, recurrent neural network, reservoir computing}, pubstate = {published}, tppubtype = {conference} } @conference{esann2017, title = {ELM Preference Learning for Physiological Data}, author = {Bacciu Davide and Colombo Michele and Morelli Davide and Plans David}, editor = {Michel Verleysen}, isbn = {978-2-875870384}, year = {2017}, date = {2017-04-28}, urldate = {2017-04-28}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'17)}, pages = {99-104}, publisher = {i6doc.com}, address = {Louvain-la-Neuve, Belgium}, keywords = {activity recognition, biomedical data, preference learning, randomized networks}, pubstate = {published}, tppubtype = {conference} } @article{jlamp2016, title = {An Experience in using Machine Learning for Short-term Predictions in Smart Transportation Systems}, author = {Bacciu Davide and Carta Antonio and Gnesi Stefania and Semini Laura}, editor = {Alberto Lluch Lafuente and Maurice ter Beek}, doi = {10.1016/j.jlamp.2016.11.002}, issn = {2352-2208}, year = {2017}, date = {2017-01-01}, journal = {Journal of Logical and Algebraic Methods in Programming}, volume = {87}, pages = {52-66}, publisher = {Elsevier}, abstract = {Bike-sharing systems (BSS) are a means of smart transportation with the benefit of a positive impact on urban mobility.
To improve the satisfaction of a user of a BSS, it is useful to inform her/him on the status of the stations at run time, and indeed most of the current systems provide this information in terms of the number of bicycles parked in each docking station by means of web services. However, when the departure station is empty, the user could also be happy to know how the situation will evolve and, in particular, if a bike is going to arrive (and vice versa when the arrival station is full). To fulfill this expectation, we envisage services able to predict whether a bike currently in use is likely to be returned at the station where she/he is waiting. The goal of this paper is hence to analyze the feasibility of these services. To this end, we put forward the idea of using Machine Learning methodologies, proposing and comparing different solutions.}, keywords = {big data, bike sharing system, kernel methods, software engineering, support vector machine}, pubstate = {published}, tppubtype = {article} } @conference{ie2016, title = {Detecting socialization events in ageing people: the experience of the DOREMI project}, author = {Bacciu Davide and Chessa Stefano and Ferro Erina and Fortunati Luigi and Gallicchio Claudio and La Rosa Davide and Llorente Miguel and Micheli Alessio and Palumbo Filippo and Parodi Oberdan and Valenti Andrea and Vozzi Federico}, doi = {10.1109/IE.2016.28}, issn = {2472-7571}, year = {2016}, date = {2016-10-27}, urldate = {2016-10-27}, booktitle = {Proceedings of the IEEE 12th International Conference on Intelligent Environments (IE 2016)}, pages = {132-135}, publisher = {IEEE}, address = {London, UK}, abstract = {The detection of socialization events is useful to build indicators of the social isolation of people, an important indicator in e-health applications. On the other hand, it is rather difficult to achieve with non-invasive solutions. This paper reports on the current work in progress on the technological solution for the detection of socialization events adopted in the DOREMI project.}, keywords = {activity recognition, ambient assisted living, pervasive computing, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @article{icfNca15, title = {Unsupervised feature selection for sensor time-series in pervasive computing applications}, author = {Bacciu Davide}, url = {http://pages.di.unipi.it/bacciu/wp-content/uploads/sites/12/2016/04/nca2015.pdf}, doi = {10.1007/s00521-015-1924-x}, issn = {1433-3058}, year = {2016}, date = {2016-07-01}, urldate = {2016-07-01}, journal = {Neural Computing and Applications}, volume = {27}, number = {5}, pages = {1077-1091}, publisher = {Springer London}, abstract = {The paper introduces an efficient feature selection approach for multivariate time-series of heterogeneous sensor data within a pervasive computing scenario. An iterative filtering procedure is devised to reduce information redundancy measured in terms of time-series cross-correlation. The algorithm is capable of identifying nonredundant sensor sources in an unsupervised fashion even in the presence of a large proportion of noisy features. In particular, the proposed feature selection process does not require expert intervention to determine the number of selected features, which is a key advancement with respect to time-series filters in the literature.
The characteristics of the proposed algorithm allow enriching learning systems in pervasive computing applications with a fully automated feature selection mechanism that can be triggered and performed at run time during system operation. A comparative experimental analysis on real-world data from three pervasive computing applications is provided, showing that the algorithm addresses major limitations of unsupervised filters in the literature when dealing with sensor time-series. Specifically, an assessment is presented both in terms of reduction of time-series redundancy and in terms of preservation of informative features with respect to associated supervised learning tasks.}, keywords = {ambient assisted living, Echo state networks, feature selection, multivariate time-series, pervasive computing, structured data processing, wireless sensor networks}, pubstate = {published}, tppubtype = {article} } @conference{fun2016, title = {An Investigation into Cybernetic Humor, or: Can Machines Laugh?}, author = {Bacciu Davide and Gervasi Vincenzo and Prencipe Giuseppe}, editor = {Erik D. Demaine and Fabrizio Grandoni}, url = {http://drops.dagstuhl.de/opus/volltexte/2016/5882}, doi = {10.4230/LIPIcs.FUN.2016.3}, issn = {1868-8969}, year = {2016}, date = {2016-06-10}, booktitle = {Proceedings of the 8th International Conference on Fun with Algorithms (FUN'16)}, volume = {49}, pages = {1-15}, publisher = {Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik}, series = {Leibniz International Proceedings in Informatics (LIPIcs)}, abstract = {The mechanisms of humour have been the subject of much study and investigation, from ancient times up to our days. Much of this work is based on literary theories, put forward by some of the most eminent philosophers and thinkers of all times, or medical theories, investigating the impact of humor on brain activity or behaviour. Recent functional neuroimaging studies, for instance, have investigated the process of comprehending and appreciating humor by examining functional activity in distinctive regions of brains stimulated by joke corpora. Yet, there is precious little work on the computational side, possibly due to the less hilarious nature of computer scientists as compared to men of letters and sawbones. In this paper, we set out to investigate whether literary theories of humour can stand the test of algorithmic laughter. Or, in other words, we ask ourselves the vexed question: Can machines laugh? We attempt to answer that question by testing whether an algorithm - namely, a neural network - can "understand" humour, and in particular whether it is possible to automatically identify abstractions that are predicted to be relevant by established literary theories about the mechanisms of humor. Notice that we do not focus here on distinguishing humorous from serious statements - a feat that is clearly way beyond the capabilities of the average human voter, not to mention the average machine - but rather on identifying the underlying mechanisms and triggers that are postulated to exist by literary theories, by verifying if similar mechanisms can be learned by machines.
}, keywords = {deep learning, natural language, recurrent neural network, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{Amato2016, title = {A Benchmark Dataset for Human Activity Recognition and Ambient Assisted Living}, author = {Amato Giuseppe and Bacciu Davide and Chessa Stefano and Dragone Mauro and Gallicchio Claudio and Gennaro Claudio and Lozano Hector and Micheli Alessio and Renteria Arantxa and Vairo Claudio}, doi = {10.1007/978-3-319-40114-0_1}, isbn = {978-3-319-40113-3}, year = {2016}, date = {2016-06-03}, booktitle = {Proceedings of the 7th International Conference on Ambient Intelligence (ISAMI'16)}, volume = {476}, pages = {1-9}, publisher = {Springer}, series = {Advances in Intelligent Systems and Computing}, abstract = {We present a data benchmark for the assessment of human activity recognition solutions, collected as part of the EU FP7 RUBICON project, and available to the scientific community. The dataset provides fully annotated data pertaining to numerous user activities and comprises synchronized data streams collected from a highly sensor-rich home environment. A baseline activity recognition performance obtained through an Echo State Network approach is provided along with the dataset.}, keywords = {activity recognition, ambient assisted living, cognitive robotics, Echo state networks, multivariate time-series, robotic ecology, structured data processing, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @conference{esann2016, title = {A reservoir activation kernel for trees}, author = {Bacciu Davide and Gallicchio Claudio and Micheli Alessio }, editor = {M. Verleysen}, url = {https://www.researchgate.net/profile/Claudio_Gallicchio/publication/313236954_A_Reservoir_Activation_Kernel_for_Trees/links/58a9db0892851cf0e3c6b8df/A-Reservoir-Activation-Kernel-for-Trees.pdf}, isbn = {978-287587027-}, year = {2016}, date = {2016-04-29}, urldate = {2016-04-29}, booktitle = {Proceedings of the European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'16)}, pages = {29-34}, publisher = { i6doc.com}, keywords = {Echo state networks, kernel methods, reservoir computing, structured data processing, tree kernel, tree structured data}, pubstate = {published}, tppubtype = {conference} } @online{ercim2016, title = {Adopting a Machine Learning Approach in the Design of Smart Transportation Systems}, author = {Bacciu Davide and Carta Antonio and Gnesi Stefania and Semini Laura }, editor = {Rob van der Me and Ariona Shashaj}, url = {http://ercim-news.ercim.eu/en105/special/adopting-a-machine-learning-approach-in-the-design-of-smart-transportation-systems}, issn = {0926-4981 }, year = {2016}, date = {2016-04-01}, urldate = {2016-04-01}, organization = {ERCIM News Magazine}, keywords = {big data, bike sharing system, kernel methods, software engineering, support vector machine}, pubstate = {published}, tppubtype = {online} } @misc{icities2015, title = {Preventing cognitive decline, sedentariness and malnutrition: the DOREMI approach}, author = {Parodi Oberdan and Vozzi Federico and Ferro Erina and Fortunati Luigi and Micheli Alessio and Gallicchio Claudio and Bacciu Davide and Chessa Stefano and Ascolese Antonio}, year = {2015}, date = {2015-10-29}, booktitle = {The CINI Annual Workshop on ICT for Smart Cities and Communities (I-CiTies 2015)}, note = {Palermo, October 29-30, 2015}, keywords = {activity recognition, ambient assisted living}, pubstate = {published}, 
tppubtype = {presentation} } @article{bacciuJirs15, title = {Robotic Ubiquitous Cognitive Ecology for Smart Homes}, author = {Amato Giuseppe and Bacciu Davide and Broxvall Mathias and Chessa Stefano and Coleman Sonya and Di Rocco Maurizio and Dragone Mauro and Gallicchio Claudio and Gennaro Claudio and Lozano Hector and McGinnity T Martin and Micheli Alessio and Ray AK and Renteria Arantxa and Saffiotti Alessandro and Swords David and Vairo Claudio and Vance Philip}, url = {http://dx.doi.org/10.1007/s10846-015-0178-2}, doi = {10.1007/s10846-015-0178-2}, issn = {0921-0296}, year = {2015}, date = {2015-01-01}, journal = {Journal of Intelligent & Robotic Systems}, volume = {80}, number = {1}, pages = {57-81}, publisher = {Springer Netherlands}, abstract = {Robotic ecologies are networks of heterogeneous robotic devices pervasively embedded in everyday environments, where they cooperate to perform complex tasks. While their potential makes them increasingly popular, one fundamental problem is how to make them both autonomous and adaptive, so as to reduce the amount of preparation, pre-programming and human supervision that they require in real world applications. The project RUBICON develops learning solutions which yield cheaper, adaptive and efficient coordination of robotic ecologies. The approach we pursue builds upon a unique combination of methods from cognitive robotics, machine learning, planning and agent-based control, and wireless sensor networks. This paper illustrates the innovations advanced by RUBICON in each of these fronts before describing how the resulting techniques have been integrated and applied to a proof of concept smart home scenario. The resulting system is able to provide useful services and pro-actively assist the users in their activities. RUBICON learns through an incremental and progressive approach driven by the feedback received from its own activities and from the user, while also self-organizing the manner in which it uses available sensors, actuators and other functional components in the process. This paper summarises some of the lessons learned by adopting such an approach and outlines promising directions for future work.}, keywords = {activity recognition, ambient assisted living, cognitive robotics, Echo state networks, multivariate time-series, networked robotics, pervasive computing, planning, reservoir computing, robotic ecology, structured data processing, wireless sensor networks}, pubstate = {published}, tppubtype = {article} } @article{Dragone:2015:CRE:2827370.2827596, title = {A Cognitive Robotic Ecology Approach to Self-configuring and Evolving AAL Systems}, author = {Dragone Mauro and Amato Giuseppe and Bacciu Davide and Chessa Stefano and Coleman Sonya and Di Rocco Maurizio and Gallicchio Claudio and Gennaro Claudio and Lozano Hector and Maguire Liam and McGinnity Martin and Micheli Alessio and O'Hare Gregory M.P. 
and Renteria Arantxa and Saffiotti Alessandro and Vairo Claudio and Vance Philip}, url = {http://dx.doi.org/10.1016/j.engappai.2015.07.004}, doi = {10.1016/j.engappai.2015.07.004}, issn = {0952-1976}, year = {2015}, date = {2015-01-01}, urldate = {2015-01-01}, journal = {Engineering Applications of Artificial Intelligence}, volume = {45}, number = {C}, pages = {269--280}, publisher = {Pergamon Press, Inc.}, address = {Tarrytown, NY, USA}, keywords = {ambient assisted living, cognitive robotics, Echo state networks, multivariate time-series, pervasive computing, planning, reservoir computing, robotic ecology, wireless sensor networks}, pubstate = {published}, tppubtype = {article} } @inbook{Bacciu2015, title = {Probabilistic Modeling in Machine Learning}, author = {Davide Bacciu and Paulo J.G. Lisboa and Alessandro Sperduti and Thomas Villmann}, editor = {Janusz Kacprzyk and Witold Pedrycz}, url = {http://dx.doi.org/10.1007/978-3-662-43505-2_31}, doi = {10.1007/978-3-662-43505-2_31}, isbn = {978-3-662-43505-2}, year = {2015}, date = {2015-01-01}, urldate = {2015-01-01}, pages = {545--575}, publisher = {Springer Berlin Heidelberg}, address = {Berlin, Heidelberg}, keywords = {Bayesian networks, generative model, graphical models, hidden Markov models}, pubstate = {published}, tppubtype = {inbook} } @conference{11568_775269, title = {Smart environments and context-awareness for lifestyle management in a healthy active ageing framework}, author = {Bacciu Davide and Chessa Stefano and Gallicchio Claudio and Micheli Alessio and Ferro Erina and Fortunati Luigi and Palumbo Filippo and Parodi Oberdan and Vozzi Federico and Hanke Sten and Kropf Johannes and Kreiner Karl}, url = {http://springerlink.com/content/0302-9743/copyright/2005/}, doi = {10.1007/978-3-319-23485-4_6}, year = {2015}, date = {2015-01-01}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {9273}, pages = {54--66}, publisher = {Springer Verlag}, keywords = {activity recognition, ambient assisted living, biomedical data, multivariate time-series, pervasive computing, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @conference{11568_774434, title = {ESNigma: efficient feature selection for Echo State Networks}, author = {Bacciu Davide and Benedetti Filippo and Micheli Alessio}, url = {https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2015-104.pdf}, year = {2015}, date = {2015-01-01}, urldate = {2015-01-01}, booktitle = {Proceedings of the 23rd European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'15)}, pages = {189--194}, publisher = {i6doc.com publ.}, abstract = {The paper introduces a feature selection wrapper designed specifically for Echo State Networks. It defines a feature scoring heuristic, applicable to generic subset search algorithms, which reduces the need for model retraining with respect to wrappers in the literature.
The experimental assessment on real-world noisy sequential data shows that the proposed method can identify a compact set of relevant, highly predictive features with as little as 60% of the time required by the original wrapper.}, keywords = {Echo state networks, feature selection, multivariate time-series, pervasive computing, reservoir computing, structured data processing, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @conference{11568_766969, title = {Using a Machine Learning Approach to Implement and Evaluate Product Line Features}, author = {Bacciu Davide and Gnesi Stefania and Semini Laura}, url = {http://dx.doi.org/10.4204/EPTCS.188.8}, doi = {10.4204/EPTCS.188.8}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings 11th International Workshop on Automated Specification and Verification of Web Systems, WWV 2015}, journal = {ELECTRONIC PROCEEDINGS IN THEORETICAL COMPUTER SCIENCE}, volume = {188}, pages = {75--83}, series = {Electronic Proceedings in Theoretical Computer Science (EPTCS)}, abstract = {Bike-sharing systems are a means of smart transportation in urban environments with the benefit of a positive impact on urban mobility. In this paper we are interested in studying and modeling the behavior of features that permit the end user to access, with her/his web browser, the status of the Bike-Sharing system. In particular, we address features able to make a prediction on the system state. We propose to use a machine learning approach to analyze usage patterns and learn computational models of such features from logs of system usage. On the one hand, machine learning methodologies provide a powerful and general means to implement a wide choice of predictive features. On the other hand, trained machine learning models are provided with a measure of predictive performance that can be used as a metric to assess the cost-performance trade-off of the feature. This provides a principled way to assess the runtime behavior of different components before putting them into operation.}, keywords = {big data, bike sharing system, kernel methods, software engineering, support vector machine}, pubstate = {published}, tppubtype = {conference} } @article{nca2014, title = {An experimental characterization of reservoir computing in ambient assisted living applications}, author = {Bacciu Davide and Barsocchi Paolo and Chessa Stefano and Gallicchio Claudio and Micheli Alessio}, url = {http://dx.doi.org/10.1007/s00521-013-1364-4, Publisher version https://archive.ics.uci.edu/ml/datasets/Indoor+User+Movement+Prediction+from+RSS+data, Dataset @ UCI}, doi = {10.1007/s00521-013-1364-4}, issn = {0941-0643}, year = {2014}, date = {2014-01-01}, urldate = {2014-01-01}, journal = {Neural Computing and Applications}, volume = {24}, number = {6}, pages = {1451-1464}, publisher = {Springer London}, abstract = {In this paper, we present an introduction and critical experimental evaluation of a reservoir computing (RC) approach for ambient assisted living (AAL) applications. Such an empirical analysis jointly addresses the issues of efficiency, by analyzing different system configurations toward the embedding into computationally constrained wireless sensor devices, and of efficacy, by analyzing the predictive performance on real-world applications. First, the approach is assessed on a validation scheme where training, validation and test data are sampled in homogeneous ambient conditions, i.e., from the same set of rooms.
Then, an external test set is introduced, involving a new setting, i.e., a novel environment, which was not available in the first phase of model training and validation. The specific test-bed considered in the paper allows us to investigate the capability of the RC approach to discriminate among user movement trajectories from received signal strength indicator sensor signals. This capability can be exploited in various AAL applications targeted at learning user indoor habits, such as in the proposed indoor movement forecasting task. Such a joint analysis of the efficiency/efficacy trade-off provides novel insight into the concrete successful exploitation of RC for AAL tasks and for their distributed implementation into wireless sensor networks.}, keywords = {ambient assisted living, Echo state networks, indoor user movement forecasting, reservoir computing, wireless sensor networks}, pubstate = {published}, tppubtype = {article} } @conference{11568_665864, title = {Modeling Bi-directional Tree Contexts by Generative Transductions}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, url = {http://dx.doi.org/10.1007/978-3-319-12637-1_68}, doi = {10.1007/978-3-319-12637-1_68}, year = {2014}, date = {2014-01-01}, booktitle = {Neural Information Processing}, journal = {LECTURE NOTES IN COMPUTER SCIENCE}, volume = {8834}, pages = {543--550}, publisher = {Springer International Publishing}, abstract = {We introduce an approach to integrate bi-directional contexts in a generative tree model by means of structured transductions. We show how this can be efficiently realized as the composition of a top-down and a bottom-up generative model for trees, that are trained independently within a circular encoding-decoding scheme. The resulting input-driven generative model is shown to capture information concerning bi-directional contexts within its state-space. An experimental evaluation using the Jaccard generative kernel for trees is presented, indicating that the approach can achieve state of the art performance on tree classification benchmarks.}, keywords = {generative model, graphical models, hidden tree Markov model, kernel methods, tree kernel, tree structured data}, pubstate = {published}, tppubtype = {conference} } @conference{11568_588269, title = {Learning context-aware mobile robot navigation in home environments}, author = {Bacciu Davide and Gallicchio Claudio and Micheli Alessio and Di Rocco Maurizio and Saffiotti Alessandro}, doi = {10.1109/IISA.2014.6878733}, isbn = {9781479961702}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the 5th International Conference on Information, Intelligence, Systems and Applications (IISA 2014)}, pages = {57--62}, publisher = {IEEE}, abstract = {We present an approach to make planning adaptive in order to enable context-aware mobile robot navigation. We integrate a model-based planner with a distributed learning system based on reservoir computing, to yield personalized planning and resource allocations that account for user preferences and environmental changes. We demonstrate our approach in a real robot ecology, and show that the learning system can effectively exploit historical data about navigation performance to modify the models in the planner, without any prior information concerning the phenomenon being modeled. The plans produced by the adapted CL fail more rarely than the ones generated by a non-adaptive planner.
The distributed learning system handles the new learning task autonomously, and is able to automatically identify the sensorial information most relevant for the task, thus reducing the communication and computational overhead of the predictive task}, keywords = {ambient assisted living, Echo state networks, feature selection, multivariate time-series, pervasive computing, planning, recurrent neural network, reservoir computing, robotic ecology, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @conference{11568_586070, title = {Integrating bi-directional contexts in a generative kernel for trees}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, doi = {10.1109/IJCNN.2014.6889768}, year = {2014}, date = {2014-01-01}, urldate = {2014-01-01}, booktitle = {Neural Networks (IJCNN), 2014 International Joint Conference on}, pages = {4145--4151}, publisher = {IEEE}, keywords = {generative model, graphical models, hidden tree Markov model, kernel methods, structured data processing, tree kernel, tree structured data, tree transductions}, pubstate = {published}, tppubtype = {conference} } @conference{icfEann14, title = {An Iterative Feature Filter for Sensor Timeseries in Pervasive Computing Applications}, author = {Bacciu Davide}, doi = {10.1007/978-3-319-11071-4_4}, year = {2014}, date = {2014-01-01}, urldate = {2014-01-01}, booktitle = {Communications in Computer and Information Science - Engineering Applications of Neural Networks}, journal = {COMMUNICATIONS IN COMPUTER AND INFORMATION SCIENCE}, volume = {459}, pages = {39--48}, publisher = {Springer International Publishing}, abstract = {The paper discusses an efficient feature selection approach for multivariate timeseries of heterogeneous sensor data within a pervasive computing scenario. An iterative filtering procedure is devised to reduce information redundancy measured in terms of timeseries cross-correlation. The algorithm is capable of identifying non-redundant sensor sources in an unsupervised fashion even in presence of a large proportion of noisy features. 
A comparative experimental analysis on real-world data from pervasive computing applications is provided, showing that the algorithm addresses major limitations of unsupervised filters in literature when dealing with sensor timeseries.}, keywords = {ambient assisted living, feature selection, multivariate time-series, pervasive computing, structured data processing, unsupervised learning, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @article{gmtsdII2012, title = {Compositional Generative Mapping for Tree-Structured Data - Part II: Topographic Projection Model}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6395856}, doi = {10.1109/TNNLS.2012.2228226}, issn = {2162-237X}, year = {2013}, date = {2013-02-01}, journal = {Neural Networks and Learning Systems, IEEE Transactions on}, volume = {24}, number = {2}, pages = {231 -247}, keywords = {generative topographic mapping, hidden Markov models, hidden tree Markov model, self-organizing map, tree structured data}, pubstate = {published}, tppubtype = {article} } @article{bgm2013, title = {Efficient identification of independence networks using mutual information}, author = {Bacciu Davide and Etchells Terence A and Lisboa Paulo JG and Whittaker Joe}, url = {http://dx.doi.org/10.1007/s00180-012-0320-6}, doi = {10.1007/s00180-012-0320-6}, issn = {0943-4062}, year = {2013}, date = {2013-01-01}, journal = {Computational Statistics}, volume = {28}, number = {2}, pages = {621-646}, publisher = {Springer-Verlag}, keywords = {Bayesian networks, graphical models, mutual information, PC algorithm}, pubstate = {published}, tppubtype = {article} } @article{bacciuNeuroComp2013, title = {An input–output hidden Markov model for tree transductions}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro }, url = {http://www.sciencedirect.com/science/article/pii/S0925231213001914}, doi = {10.1016/j.neucom.2012.12.044}, issn = {0925-2312}, year = {2013}, date = {2013-01-01}, journal = {Neurocomputing}, volume = {112}, pages = {34--46}, keywords = {hidden Markov models, hidden tree Markov model, structured data processing, tree transductions}, pubstate = {published}, tppubtype = {article} } @article{di2013italian, title = {Italian Machine Learning and Data Mining research: The last years}, author = {Di Mauro Nicola and Frasconi Paolo and Angiulli Fabrizio and Bacciu Davide and de Gemmis Marco and Esposito Floriana and Fanizzi Nicola and Ferilli Stefano and Gori Marco and Lisi Francesca A and others}, url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6353263}, doi = {10.3233/IA-130050}, year = {2013}, date = {2013-01-01}, journal = {Intelligenza Artificiale}, volume = {7}, number = {2}, pages = {77--89}, publisher = {IOS Press}, keywords = {graphical models, recurrent neural network, structured data processing}, pubstate = {published}, tppubtype = {article} } @conference{11568_238038, title = {Distributed Neural Computation over WSN in Ambient Intelligence}, author = {Bacciu Davide and Gallicchio Claudio and Lenzi Alessandro and Chessa Stefano and Micheli Alessio and Pelagatti Susanna and Vairo Claudio }, doi = {10.1007/978-3-319-00566-9_19}, year = {2013}, date = {2013-01-01}, booktitle = {Advances in Intelligent Systems and Computing - Ambient Intelligence - Software and Applications}, journal = {ADVANCES IN INTELLIGENT SYSTEMS AND COMPUTING}, volume = {219}, pages = {147--154}, publisher = {Springer Verlag}, 
keywords = {ambient assisted living, Echo state networks, multivariate time-series, pervasive computing, reservoir computing, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @conference{11568_159900, title = {An Experimental Evaluation of Reservoir Computation for Ambient Assisted Living}, author = {Bacciu Davide and Chessa Stefano and Gallicchio Claudio and Micheli Alessio and Barsocchi Paolo}, doi = {10.1007/978-3-642-35467-0_5}, year = {2013}, date = {2013-01-01}, booktitle = {Neural Nets and Surroundings - 22nd Italian Workshop on Neural Nets}, journal = {SMART INNOVATION, SYSTEMS AND TECHNOLOGIES}, volume = {19}, pages = {41--50}, publisher = {Springer}, keywords = {Echo state networks, indoor user movement forecasting, pervasive computing, recurrent neural network, reservoir computing, structured data processing, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @article{gmtsdI2012, title = {Compositional Generative Mapping for Tree-Structured Data - Part I: Bottom-Up Probabilistic Modeling of Trees}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6353263}, doi = {10.1109/TNNLS.2012.2222044}, issn = {2162-237X}, year = {2012}, date = {2012-12-01}, journal = {Neural Networks and Learning Systems, IEEE Transactions on}, volume = {23}, number = {12}, pages = {1987-2002}, keywords = {hidden Markov models, hidden tree Markov model, tree structured data}, pubstate = {published}, tppubtype = {article} } @conference{11568_193770, title = {A General Purpose Distributed Learning Model for Robotic Ecologies}, author = {Bacciu Davide and Chessa Stefano and Gallicchio Claudio and Lenzi Alessandro and Micheli Alessio and Pelagatti Susanna}, url = {http://www.ifac-papersonline.net/Detailed/55807.html}, doi = {10.3182/20120905-3-HR-2030.00178}, year = {2012}, date = {2012-01-01}, booktitle = {Robot Control - 10th IFAC Symposium on Robot Control}, journal = {IFAC PROCEEDINGS VOLUMES}, volume = {10}, pages = {435--440}, publisher = {ELSEVIER SCIENCE BV}, abstract = {The design of a learning system for robotic ecologies needs to account for some key aspects of the ecology model such as distributivity, heterogeneity of the computational, sensory and actuator capabilities, as well as self-configurability. The paper proposes general guiding principles for learning systems' design that ensue from key ecology properties, and presents a distributed learning system for the RUBICON ecology that draws inspiration from such guidelines.
The proposed learning system provides the RUBICON ecology with a set of general-purpose learning services which can be used to learn generic computational tasks that involve predicting information of interest based on dynamic sensorial input streams.}, keywords = {distributed learning, multivariate time-series, pervasive computing, recurrent neural network, reservoir computing, robotic ecology, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @conference{11568_465481, title = {Discovering Hidden Pathways in Bioinformatics}, author = {Lisboa Paulo J G and Jarman Ian H and Etchells Terence A and Chambers Simon J and Bacciu Davide and Whittaker Joe and Garibaldi Jon M and Ortega-Martorell Sandra and Vellido Alfredo and Ellis Ian O}, doi = {10.1007/978-3-642-35686-5_5}, year = {2012}, date = {2012-01-01}, booktitle = {Lecture Notes in Computer Science - Computational Intelligence Methods for Bioinformatics and Biostatistics}, journal = {LECTURE NOTES IN COMPUTER SCIENCE}, volume = {7548}, pages = {49--60}, abstract = {The elucidation of biological networks regulating the metabolic basis of disease is critical for understanding disease progression and for identifying therapeutic targets. In molecular biology, this process often starts by clustering expression profiles which are candidates for disease phenotypes. However, each cluster may comprise several overlapping processes that are active in the cluster. This paper outlines empirical results using methods for blind source separation to map the pathways of biomarkers driving independent, hidden processes that underpin the clusters. The method is applied to a protein expression data set measured in tissue from breast cancer patients (n=1,076).}, keywords = {Bayesian networks, biomedical data, graphical models, mutual information, PC algorithm}, pubstate = {published}, tppubtype = {conference} } @misc{11568_466873, title = {Robotic UBIquitous COgnitive Networks}, author = {Abdel-Naby Same and Amato Giuseppe and Bacciu Davide and Broxvall Mathias and Chessa Stefano and Coleman Sonya and Di Rocco Maurizio and Dragone Mauro and Gallicchio Claudio and Gennaro Claudio and Guzman Roberto and Lopez Raul and Lozano Hector and Maguire Liam and McGinnity T Martin and Micheli Alessio and O'Hare Greg MP and Pecora Federico and Ray AK and Renteria Arantxa and Saffiotti Alessandro and Swords David and Vairo Claudio}, year = {2012}, date = {2012-01-01}, booktitle = {Poster in the 5th International Conference on Cognitive Systems (CogSys 2012)}, keywords = {cognitive robotics, pervasive computing, planning, robotic ecology}, pubstate = {published}, tppubtype = {presentation} } @conference{11568_466867, title = {Self-Sustaining Learning for Robotic Ecologies}, author = {Bacciu Davide and Broxvall Mathias and Coleman Sonya and Dragone Mauro and Gallicchio Claudio and Gennaro Claudio and Guzman Roberto and Lopez Raul and Lozano-Peiteado Hector and Ray AK and Renteria Arantxa and Saffiotti Alessandro and Vairo Claudio}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the 1st International Conference on Sensor Networks, SENSORNETS 2012}, pages = {99--103}, abstract = {The most common use of wireless sensor networks (WSNs) is to collect environmental data from a specific area, and to channel it to a central processing node for on-line or off-line analysis. The WSN technology, however, can be used for much more ambitious goals.
We claim that merging the concepts and technology of WSN with the concepts and technology of distributed robotics and multi-agent systems can open new ways to design systems able to provide intelligent services in our homes and working places. We also claim that endowing these systems with learning capabilities can greatly increase their viability and acceptability, by simplifying design, customization and adaptation to changing user needs. To support these claims, we illustrate our architecture for an adaptive robotic ecology, named RUBICON, consisting of a network of sensors, effectors and mobile robots.}, keywords = {activity recognition, ambient assisted living, cognitive robotics, planning, recurrent neural network, reservoir computing, robotic ecology, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @conference{11568_156516, title = {A Generative Multiset Kernel for Structured Data}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, doi = {10.1007/978-3-642-33269-2_8}, year = {2012}, date = {2012-01-01}, urldate = {2012-01-01}, booktitle = {Artificial Neural Networks and Machine Learning - ICANN 2012 proceedings, Springer LNCS series}, journal = {LECTURE NOTES IN COMPUTER SCIENCE}, volume = {7552}, pages = {57--64}, publisher = {Springer-Verlag}, address = {BERLIN HEIDELBERG}, abstract = {The paper introduces a novel approach for defining efficient generative kernels for structured data based on the concept of multisets and Jaccard similarity. The multiset feature space allows enhancing the adaptive kernel with syntactic information on structure matching. The proposed approach is validated using an input-driven hidden Markov model for trees as the generative model, but it is general enough to be straightforwardly applicable to any probabilistic latent variable model. The experimental evaluation shows that the proposed Jaccard kernel has a superior classification performance with respect to the Fisher Kernel, while consistently reducing the computational requirements.}, keywords = {generative model, graphical models, hidden tree Markov model, kernel methods, structured data processing, support vector machine, tree kernel, tree structured data}, pubstate = {published}, tppubtype = {conference} } @conference{11568_152836, title = {Input-Output Hidden Markov Models for Trees}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, year = {2012}, date = {2012-01-01}, urldate = {2012-01-01}, booktitle = {ESANN 2012 - The 20th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning - Proceedings}, pages = {25--30}, publisher = {Ciaco scrl - i6doc.com}, abstract = {The paper introduces an input-driven generative model for tree-structured data that extends the bottom-up hidden tree Markov model with non-homogeneous transition and emission probabilities. The advantage of introducing an input-driven dynamics in structured-data processing is experimentally investigated.
The results of this preliminary analysis suggest that input-driven models can capture more discriminative structural information than non-input-driven approaches.}, keywords = {generative model, graphical models, hidden tree Markov model, structured data processing, tree structured data, tree transductions}, pubstate = {published}, tppubtype = {conference} } @article{soco2011, title = {Clustering of protein expression data: a benchmark of statistical and neural approaches}, author = {Jarman Ian H and Etchells Terence A and Bacciu Davide and Garibaldi John M and Ellis Ian O and Lisboa Paulo JG}, url = {http://dx.doi.org/10.1007/s00500-010-0596-9}, doi = {10.1007/s00500-010-0596-9}, issn = {1432-7643}, year = {2011}, date = {2011-01-01}, journal = {Soft Computing - A Fusion of Foundations, Methodologies and Applications}, volume = {15}, number = {8}, pages = {1459--1469}, publisher = {Springer}, keywords = {biomedical data, clustering, neural networks, statistics, unsupervised learning}, pubstate = {published}, tppubtype = {article} } @conference{11568_202140, title = {Predicting User Movements in Heterogeneous Indoor Environments by Reservoir Computing}, author = {Bacciu Davide and Gallicchio Claudio and Micheli Alessio and Barsocchi Paolo and Chessa Stefano}, url = {http://ijcai-11.iiia.csic.es/files/proceedings/Space,%20Time%20and%20Ambient%20Intelligence%20Proceeding.pdf}, year = {2011}, date = {2011-01-01}, urldate = {2011-01-01}, booktitle = {Proceedings of the IJCAI Workshop on Space, Time and Ambient Intelligence (STAMI)}, pages = {1--6}, keywords = {activity recognition, ambient assisted living, Echo state networks, indoor user movement forecasting, multivariate time-series, pervasive computing, recurrent neural network, reservoir computing, structured data processing, wireless sensor networks}, pubstate = {published}, tppubtype = {conference} } @conference{11568_145907, title = {Adaptive Tree Kernel by Multinomial Generative Topographic Mapping}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, url = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?tp=&arnumber=6033423&contentType=Conference+Publications&refinements%3D4294413850%26sortType%3Dasc_p_Sequence%26filter%3DAND%28p_IS_Number%3A6033131%29}, doi = {10.1109/IJCNN.2011.6033423}, year = {2011}, date = {2011-01-01}, urldate = {2011-01-01}, booktitle = {Proceedings of the International Joint Conference on Neural Networks}, pages = {1651--1658}, publisher = {IEEE}, address = {Piscataway (NJ)}, keywords = {generative model, generative topographic mapping, graphical models, hidden tree Markov model, kernel methods, structured data processing, tree kernel, tree structured data}, pubstate = {published}, tppubtype = {conference} } @techreport{11568_254437, title = {A Bottom-up Hidden Tree Markov Model}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, url = {http://compass2.di.unipi.it/TR/Files/TR-10-08.pdf.gz}, year = {2010}, date = {2010-04-01}, urldate = {2010-04-01}, volume = {TR-10-08}, number = {TR-10-08}, pages = {1--22}, institution = {Università di Pisa}, keywords = {generative model, graphical models, hidden tree Markov model, structured data processing, tree structured data}, pubstate = {published}, tppubtype = {techreport} } @conference{11568_465483, title = {Different Methodologies for Patient Stratification Using Survival Data}, author = {Fernandes Ana S and Bacciu Davide and Jarman Ian H and Etchells Terence A and Fonseca Jose M and Lisboa Paulo JG}, doi = 
{10.1007/978-3-642-14571-1_21}, year = {2010}, date = {2010-01-01}, booktitle = {Lecture Notes in Computer Science - Computational Intelligence Methods for Bioinformatics and Biostatistics}, journal = {LECTURE NOTES IN COMPUTER SCIENCE}, volume = {6160}, pages = {276--290}, abstract = {Clinical characterization of breast cancer patients related to their risk and profiles is an important part of making correct prognostic assessments. This paper first proposes a prognostic index obtained by applying a flexible non-linear time-to-event model, and compares it to a widely used linear survival estimator. This index underpins different stratification methodologies including informed clustering utilising the principle of learning metrics, regression trees and recursive application of the log-rank test. The missing data issue was overcome using multiple imputation, which was applied to a neural network model of survival fitted to a data set for breast cancer (n=743). It was found that the three methodologies broadly agree, albeit with important differences.}, keywords = {biomedical data, clustering, competitive repetition suppression learning, neural networks, statistics, survival analysis, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @conference{11568_465482, title = {Adaptive fuzzy-valued service selection}, author = {Bacciu Davide and Buscemi Maria Grazia and Mkrtchyan Lusine}, doi = {10.1145/1774088.1774598}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the 2010 ACM Symposium on Applied Computing - SAC '10}, journal = {PROCEEDINGS OF THE .. ACM SYMPOSIUM ON APPLIED COMPUTING}, pages = {2467--2471}, abstract = {Service composition concerns both integration of heterogeneous distributed applications and dynamic selection of services. QoS-aware selection enables a service requester with certain QoS requirements to classify services according to their QoS guarantees. In this paper we present a method that allows for a fuzzy-valued description of QoS parameters. Fuzzy sets are suited to specify both the QoS preferences raised by a service requester, such as 'response time must be as low as possible and cannot be more than 1000ms', and the approximate estimates a provider can make on the QoS capabilities of its services, like 'availability is roughly between 95% and 99%'. We propose a matchmaking procedure based on a fuzzy-valued similarity measure that, given the specifications of QoS parameters of the requester and the providers, selects the most appropriate service among several functionally-equivalent ones.
We also devise a method for the dynamic update of service offers by means of runtime monitoring of the actual QoS performance.}, keywords = {fuzzy graph matching, fuzzy reasoning, service matchmaking, web service}, pubstate = {published}, tppubtype = {conference} } @conference{11568_136433, title = {Compositional Generative Mapping of Structured Data}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, doi = {10.1109/IJCNN.2010.5596606}, year = {2010}, date = {2010-01-01}, urldate = {2010-01-01}, booktitle = {Proceedings of the 2010 IEEE International Joint Conference on Neural Networks (IJCNN'10)}, pages = {1359--1366}, publisher = {IEEE}, keywords = {generative topographic mapping, graphical models, hidden tree Markov model, structured data processing, tree structured data, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @conference{11568_142187, title = {Bottom-Up Generative Modeling of Tree-Structured Data}, author = {Bacciu Davide and Micheli Alessio and Sperduti Alessandro}, doi = {10.1007/978-3-642-17537-4_80}, year = {2010}, date = {2010-01-01}, booktitle = {LNCS 6443: Neural Information Processing. Theory and Algorithms. Part I}, journal = {LECTURE NOTES IN COMPUTER SCIENCE}, volume = {6443}, pages = {660--668}, publisher = {Springer-Verlag}, address = {BERLIN HEIDELBERG}, keywords = {generative model, graphical models, hidden tree Markov model, structured data processing, tree structured data}, pubstate = {published}, tppubtype = {conference} } @techreport{matchmakingTR09, title = {Adaptive Service Selection - A Fuzzy-valued Matchmaking Approach}, author = {Bacciu Davide and Buscemi Maria Grazia and Mkrtchyan Lusine}, url = {http://eprints.adm.unipi.it/id/eprint/2241}, year = {2009}, date = {2009-10-01}, urldate = {2009-10-01}, number = {TR-09-21}, institution = {Dipartimento di Informatica, Università di Pisa}, type = {Technical Report}, keywords = {fuzzy graph matching, fuzzy reasoning, service matchmaking, web service}, pubstate = {published}, tppubtype = {techreport} } @article{patrec2009, title = {Expansive competitive learning for kernel vector quantization}, author = {Bacciu Davide and Starita Antonina}, url = {http://dx.doi.org/10.1016/j.patrec.2009.01.002}, doi = {10.1016/j.patrec.2009.01.002}, issn = {0167-8655}, year = {2009}, date = {2009-01-01}, journal = {Pattern Recognition Letters}, volume = {30}, number = {6}, pages = {641--651}, publisher = {Elsevier}, keywords = {clustering, competitive repetition suppression learning, kernel methods, neural networks, statistics, unsupervised learning}, pubstate = {published}, tppubtype = {article} } @conference{11568_466869, title = {Model-based and model-free clustering: a case study of protein expression data for breast cancer}, author = {Lisboa Paulo JG and Jarman Ian H and Etchells Terence A and Bacciu Davide and Garibaldi John M}, year = {2009}, date = {2009-01-01}, booktitle = {PROCEEDINGS OF THE 2009 UK WORKSHOP ON COMPUTATIONAL INTELLIGENCE}, keywords = {biomedical data, clustering, competitive repetition suppression learning, feature selection, neural networks, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @conference{11568_465485, title = {p-Health in Breast Oncology: A Framework for Predictive and Participatory e-Systems}, author = {Fernandes Ana S and Bacciu Davide and Jarman Ian H and Etchells Terence A and Fonseca Jose M and Paulo J G Lisboa}, doi = {10.1109/DeSE.2009.68}, year = {2009}, date = {2009-01-01}, booktitle = {2009 Second 
International Conference on Developments in eSystems Engineering}, pages = {123--129}, publisher = {IEEE}, abstract = {Maintaining the financial sustainability of healthcare provision makes developments in e-Systems of the utmost priority in healthcare. In particular, it leads to a radical review of healthcare delivery for the future as personalised, preventive, predictive and participatory, or p-Health. It is a vision that places e-Systems at the core of healthcare delivery, in contrast to current practice. This view of the demands of the 21st century sets an agenda that builds upon advances in engineering devices and computing infrastructure, but also computational intelligence and new models for communication between healthcare providers and the public. This paper gives an overview of p-Health with reference to decision support in breast cancer.}, keywords = {biomedical data, clustering, neural networks}, pubstate = {published}, tppubtype = {conference} } @conference{11568_465484, title = {Patient stratification with competing risks by multivariate Fisher distance}, author = {Bacciu Davide and Jarman Ian H and Etchells Terence A and Lisboa Paulo J G}, doi = {10.1109/IJCNN.2009.5179077}, year = {2009}, date = {2009-01-01}, urldate = {2009-01-01}, booktitle = {2009 International Joint Conference on Neural Networks}, pages = {3453--3460}, publisher = {IEEE}, abstract = {Early characterization of patients with respect to their predicted response to treatment is a fundamental step towards the delivery of effective, personalized care. Starting from the results of a time-to-event model with competing risks using the framework of partial logistic artificial neural networks with automatic relevance determination (PLANNCR-ARD), we discuss an effective semi-supervised approach to patient stratification with application to Acute Myeloid Leukaemia (AML) data (n = 509) acquired prospectively by the GIMEMA consortium. Multiple prognostic indices provided by the survival model are exploited to build a metric based on the Fisher information matrix. Cluster number estimation is then performed in the Fisher-induced affine space, yielding to the discovery of a stratification of the patients into groups characterized by significantly different mortality risks following induction therapy in AML. The proposed model is shown to be able to cluster the input data, while promoting specificity of both target outcomes, namely Complete Remission (CR) and Induction Death (ID). 
This generic clustering methodology generates an affine transformation of the data space that is coherent with the prognostic information predicted by the PLANNCR-ARD model.}, keywords = {biomedical data, clustering, competitive repetition suppression learning, generative model, neural networks, statistics, survival analysis, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @article{coreTNN2008, title = {Competitive Repetition Suppression (CoRe) Clustering: A Biologically Inspired Learning Model With Application to Robust Clustering}, author = {Bacciu Davide and Starita Antonina}, url = {http://dx.doi.org/10.1016/j.patrec.2009.01.002}, doi = {10.1109/TNN.2008.2004407}, issn = {1045-9227}, year = {2008}, date = {2008-11-01}, urldate = {2008-11-01}, journal = {Neural Networks, IEEE Transactions on}, volume = {19}, number = {11}, pages = {1922 -1941}, keywords = {biologically inspired learning, clustering, competitive repetition suppression learning, neural networks, soft competitive learning, unsupervised learning}, pubstate = {published}, tppubtype = {article} } @incollection{bacciu2010unsupervised, title = {Unsupervised Breast Cancer Class Discovery: a Comparative Study on Model-based and Neural Clustering}, author = {Bacciu Davide and Biganzoli Elia and Lisboa Paulo JG and Starita Antonina}, year = {2008}, date = {2008-01-01}, pages = {13-26}, publisher = {KES Rapid Research Results Series}, keywords = {biologically inspired learning, clustering, competitive repetition suppression learning, feature selection}, pubstate = {published}, tppubtype = {incollection} } @incollection{bacciu2008discovering, title = {Discovering Strategic Behaviors in Multi-Agent Scenarios by Ontology-Driven Mining}, author = {Bacciu Davide and Bellandi Andrea and Romei Andrea and Furletti Barbara and Grossi Valerio}, year = {2008}, date = {2008-01-01}, pages = {171 - 198}, publisher = {INTECH Open Access Publisher}, keywords = {planning}, pubstate = {published}, tppubtype = {incollection} } @conference{11568_466669, title = {Fuzzy Admission Control with Similarity Evaluation for VoWLAN with QoS Support}, author = {Bacciu Davide and Botta Alessio and Badia Leonardo }, doi = {10.1109/WONS.2008.4459355}, year = {2008}, date = {2008-01-01}, booktitle = {2008 Fifth Annual Conference on Wireless on Demand Network Systems and Services}, pages = {57--64}, publisher = {IEEE}, abstract = {In this paper, we make use of a fuzzy approach to determine a soft Admission Control mechanism for Voice-over-Internet-Protocol services over Wireless Local Area Network. In such a system, complicated interactions between service provider and clients take place, since the network capacity constraints must be matched with users' preferences and needs. Most of the difficulties in dealing with these interactions stem from the fact that it is very difficult to define both the load condition of the network and the users' requirements in a crisp manner. To this end, we define a framework in which the provider expresses the network status and the clients describe their preferences by means of an approach based on Fuzzy Set Theory. In this way, we are able to develop an Admission Control strategy, based on Similarity Evaluation techniques, that enforces the soft constraints expressed by the two parties. 
The obtained framework is numerically evaluated, showing the benefit of employing Fuzzy Set Theory with respect to the traditional crisp approach.}, keywords = {fuzzy graph matching, fuzzy reasoning, service matchmaking}, pubstate = {published}, tppubtype = {conference} } @conference{11568_466667, title = {Convergence Behavior of Competitive Repetition-Suppression Clustering}, author = {Bacciu Davide and Starita Antonina}, doi = {10.1007/978-3-540-69158-7_52}, year = {2008}, date = {2008-01-01}, booktitle = {Neural Information Processing, Lecture Notes in Computer Science}, volume = {4984}, pages = {497--506}, publisher = {Springer}, abstract = {Competitive Repetition-suppression (CoRe) clustering is a bio-inspired learning algorithm that is capable of automatically determining the unknown cluster number from the data. In a previous work it has been shown how CoRe clustering represents a robust generalization of rival penalized competitive learning (RPCL) by means of M-estimators. This paper studies the convergence behavior of the CoRe model, based on the analysis proposed for the distance-sensitive RPCL (DSRPCL) algorithm. Furthermore, a global minimum criterion for learning vector quantization in kernel space is proposed and used to assess the correct location property of the CoRe algorithm.}, keywords = {clustering, competitive repetition suppression learning, neural networks, statistics, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @conference{11568_465487, title = {Are Model-based Clustering and Neural Clustering Consistent? A Case Study from Bioinformatics}, author = {Bacciu Davide and Biganzoli Elia and Lisboa Paulo JG and Starita Antonina}, doi = {10.1007/978-3-540-85565-1-23}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the 12th International Conference on Knowledge-Based and Intelligent Information & Engineering Systems (KES'08)}, journal = {LECTURE NOTES IN COMPUTER SCIENCE}, volume = {5178}, pages = {181--188}, publisher = {Springer}, abstract = {A novel neural network clustering algorithm, CoRe, is benchmarked against previously published results on a breast cancer data set obtained by applying the method of Partition Around Medoids (PAM). The data serve to compare the sample partitions obtained with the neural network, PAM and model-based algorithms, namely Gaussian Mixture Model (GMM), Variational Bayesian Gaussian Mixture (VBG) and Variational Bayesian Mixtures with Splitting (VBS). It is found that CoRe, on the one hand, agrees with the previously published partitions; on the other hand, it supports the existence of a supplementary cluster that we hypothesize to be an additional tumor subgroup with respect to those previously identified by PAM.}, keywords = {biomedical data, clustering, competitive repetition suppression learning, feature selection, neural networks, statistics, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @phdthesis{11568_466874, title = {A Perceptual Learning Model to Discover the Hierarchical Latent Structure of Image Collections}, author = {Bacciu Davide}, url = {http://e-theses.imtlucca.it/id/eprint/7}, doi = {10.6092/imtlucca/e-theses/7}, year = {2008}, date = {2008-01-01}, urldate = {2008-01-01}, publisher = {IMT Lucca}, abstract = {Biology has been an unparalleled source of inspiration for the work of researchers in several scientific and engineering fields including computer vision.
The starting point of this thesis is the neurophysiological properties of the human early visual system, in particular, the cortical mechanism that mediates learning by exploiting information about stimuli repetition. Repetition has long been considered a fundamental correlate of skill acquisition and memory formation in biological as well as computational learning models. However, recent studies have shown that biological neural networks have different ways of exploiting repetition in forming memory maps. The thesis focuses on a perceptual learning mechanism called repetition suppression, which exploits the temporal distribution of neural activations to drive an efficient neural allocation for a set of stimuli. This explores the neurophysiological hypothesis that repetition suppression serves as an unsupervised perceptual learning mechanism that can drive efficient memory formation by reducing the overall size of stimuli representation while strengthening the responses of the most selective neurons. This interpretation of repetition differs from its traditional role in computational learning models, where it is mainly used to induce convergence and reach training stability, without using this information to provide focus for the neural representations of the data. The first part of the thesis introduces a novel computational model with repetition suppression, which forms an unsupervised competitive system termed CoRe, for Competitive Repetition-suppression learning. The model is applied to general problems in the fields of computational intelligence and machine learning. Particular emphasis is placed on validating the model as an effective tool for the unsupervised exploration of bio-medical data. In particular, it is shown that the repetition suppression mechanism efficiently addresses the issues of automatically estimating the number of clusters within the data, as well as filtering noise and irrelevant input components in high-dimensional data, e.g. gene expression levels from DNA Microarrays. The CoRe model produces relevance estimates for each covariate, which is useful, for instance, to discover the best discriminating bio-markers. The description of the model includes a theoretical analysis using Huber’s robust statistics to show that the model is robust to outliers and noise in the data. The convergence properties of the model are also studied. It is shown that, besides its biological underpinning, the CoRe model has useful properties in terms of asymptotic behavior. By exploiting a kernel-based formulation for the CoRe learning error, a theoretically sound motivation is provided for the model’s ability to avoid local minima of its loss function. To do this, a necessary and sufficient condition for global error minimization in vector quantization is generalized by extending it to distance metrics in generic Hilbert spaces. This leads to the derivation of a family of kernel-based algorithms that address the local minima issue of unsupervised vector quantization in a principled way. The experimental results show that the algorithm can achieve a consistent performance gain compared with state-of-the-art learning vector quantizers, while retaining a lower computational complexity (linear with respect to the dataset size). Bridging the gap between the low-level representation of the visual content and the underlying high-level semantics is a major research issue of current interest.
The second part of the thesis focuses on this problem by introducing a hierarchical and multi-resolution approach to visual content understanding. On the spatial level, CoRe learning is used to pool together the local visual patches by organizing them into perceptually meaningful intermediate structures. On the semantic level, it provides an extension of the probabilistic Latent Semantic Analysis (pLSA) model that allows discovery and organization of the visual topics into a hierarchy of aspects. The proposed hierarchical pLSA model is shown to effectively address the unsupervised discovery of relevant visual classes from pictorial collections, at the same time learning to segment the image regions containing the discovered classes. Furthermore, by drawing on a recent pLSA-based image annotation system, the hierarchical pLSA model is extended to process and represent multi-modal collections comprising textual and visual data. The results of the experimental evaluation show that the proposed model learns to attach textual labels (available only at the level of the whole image) to the discovered image regions, while increasing the precision/recall performance with respect to a flat pLSA annotation model.}, keywords = {biomedical data, clustering, competitive repetition suppression learning, feature selection, generative model, graphical models, image understanding, latent topic model, neural networks, statistics, unsupervised learning}, pubstate = {published}, tppubtype = {phdthesis} } @conference{11568_466674, title = {A Fuzzy Approach for Negotiating Quality of Services}, author = {Bacciu Davide and Botta Alessio and Melgratti Hernan}, doi = {10.1007/978-3-540-75336-0_13}, year = {2007}, date = {2007-01-01}, booktitle = {TRUSTWORTHY GLOBAL COMPUTING, Lecture Notes in Computer Science}, journal = {LECTURE NOTES IN COMPUTER SCIENCE}, volume = {4661}, pages = {200--217}, publisher = {Springer Verlag}, abstract = {A central point when integrating services concerns the description, agreement and enforcement of the quality aspect of service interaction, usually known as a Service Level Agreement (SLA). This paper presents a framework for SLA negotiation based on fuzzy sets. We propose (i) a request language for clients to describe quality preferences, (ii) a publication language for providers to define the qualities of their offered services, and (iii) a decision procedure for granting any client request with an SLA contract fitting the requestor's requirements.
We start with a restricted framework in which the different qualities of a service are handled independently (treating them as orthogonal) and then we propose an extension that allows clients and providers to express dependencies among different qualities.}, keywords = {fuzzy graph matching, fuzzy reasoning, service matchmaking, software engineering, web service}, pubstate = {published}, tppubtype = {conference} } @conference{11568_466675, title = {Fuzzy Agreement for Network Service Contracts}, author = {Bacciu Davide and Badia Leonardo and Botta Alessio}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the 6th International Conference on Computational Intelligence in Economics & Finance (CIEF 2007)}, keywords = {fuzzy graph matching, fuzzy reasoning, service matchmaking}, pubstate = {published}, tppubtype = {conference} } @conference{11568_466673, title = {Augmenting the Distributed Evaluation of Path Queries via Information Granules}, author = {Bacciu Davide and Botta Alessio and Stefanescu Dan}, url = {http://mlg07.dsi.unifi.it/pdf/16_Botta.pdf}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the 5th International Workshop on Mining and Learning with Graphs (MLG'07)}, pages = {105--109}, keywords = {fuzzy graph matching, fuzzy reasoning, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{11568_466672, title = {A framework for semantic querying of distributed data-graphs via information granules}, author = {Bacciu Davide and Botta Alessio and Stefanescu Dan}, url = {http://pages.di.unipi.it/bacciu/wp-content/uploads/sites/12/2016/04/bbs_ISC07.pdf http://dl.acm.org/citation.cfm?id=1647449.1647477}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the 10th IASTED International Conference on Intelligent Systems and Control}, pages = {161--166}, publisher = {ACTA PRESS}, address = {Anaheim, CA, USA}, abstract = {Regular path queries (RPQ) represent a common and convenient way to access and extract knowledge represented as labeled and weighted data-graphs. In this paper, we look to enhance the information representation in data-graphs and RPQs by augmenting their expressive power with the use of semantically meaningful knowledge in the form of information granules. We extend a recent distributed algorithm for the evaluation of RPQs on spatial networks by introducing fuzzy weights in place of crisp values both in the data-graphs and the query formulation. Moreover, we describe two alternative strategies for determining the costs of the paths computed by the fuzzy RPQ evaluation process. A spatial network case study is used to illustrate the soundness of the approach.}, keywords = {fuzzy graph matching, fuzzy reasoning, structured data processing}, pubstate = {published}, tppubtype = {conference} } @conference{11568_466670, title = {A Robust Bio-Inspired Clustering Algorithm for the Automatic Determination of Unknown Cluster Number}, author = {Bacciu Davide and Starita Antonina}, doi = {10.1109/IJCNN.2007.4371148}, year = {2007}, date = {2007-01-01}, urldate = {2007-01-01}, booktitle = {2007 International Joint Conference on Neural Networks}, pages = {1314--1319}, publisher = {IEEE}, abstract = {The paper introduces a robust clustering algorithm that can automatically determine the unknown cluster number from noisy data without any a priori information.
We show how our clustering algorithm can be derived from a general learning theory, named CoRe learning, that models a cortical memory mechanism called repetition suppression. Moreover, we describe the relationship of CoRe clustering with Rival Penalized Competitive Learning (RPCL), showing how CoRe extends this model by strengthening the rival penalization estimation by means of robust loss functions. Finally, we present the results of simulations concerning the unsupervised segmentation of noisy images.}, keywords = {biomedical data, clustering, competitive repetition suppression learning, recurrent neural network, statistics, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @techreport{11568_255939, title = {Feature-wise Competitive Repetition Suppression Learning for Gene Data Clustering and Feature Ranking}, author = {Bacciu Davide and Micheli Alessio and Starita Antonina}, url = {http://compass2.di.unipi.it/TR/Files/TR-07-04.pdf.gz}, year = {2007}, date = {2007-01-01}, urldate = {2007-01-01}, volume = {TR-07-04}, pages = {1--14}, institution = {Università di Pisa}, keywords = {biomedical data, clustering, competitive repetition suppression learning, feature selection, kernel methods, neural networks}, pubstate = {published}, tppubtype = {techreport} } @conference{11568_116977, title = {Simultaneous clustering and feature ranking by competitive repetition suppression learning with application to gene data analysis}, author = {Bacciu Davide and Micheli Alessio and Starita Antonina}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the Third International Conference on Computational Intelligence in Medicine and Healthcare (CIMED 2007)}, keywords = {biomedical data, clustering, competitive repetition suppression learning, feature selection}, pubstate = {published}, tppubtype = {conference} } @conference{11568_466676, title = {Competitive Repetition-suppression (CoRe) Learning}, author = {Bacciu Davide and Starita Antonina}, doi = {10.1007/11840817_14}, year = {2006}, date = {2006-01-01}, urldate = {2006-01-01}, booktitle = {ARTIFICIAL NEURAL NETWORKS - ICANN 2006, PT 1, Lecture Notes in Computer Science}, journal = {LECTURE NOTES IN COMPUTER SCIENCE}, volume = {4131}, pages = {130--139}, publisher = {Springer Verlag}, abstract = {The paper introduces Competitive Repetition-suppression (CoRe) learning, a novel paradigm inspired by a cortical mechanism of perceptual learning called repetition suppression. CoRe learning is an unsupervised, soft-competitive [1] model with conscience [2] that can be used for self-generating compact neural representations of the input stimuli. The key idea underlying the development of CoRe learning is to exploit the temporal distribution of neuron activations as a source of training information and to drive memory formation.
As a case study, the paper reports the CoRe learning rules that have been derived for the unsupervised training of a Radial Basis Function network.}, keywords = {clustering, competitive repetition suppression learning, recurrent neural network, unsupervised learning}, pubstate = {published}, tppubtype = {conference} } @conference{11568_466877, title = {Assessment of stroke patients by whole-body isometric force-torque measurements II: software design of the ALLADIN Diagnostic Device}, author = {Cinkelj J and Mihelj M and Bacciu Davide and Jurak M and Guglielmelli Eugenio and Toth A and De Lafonteyne J and Verschelde J and Mazzoleni S and Van Vaerenbergh J and Ruijter S D and Munih M}, year = {2005}, date = {2005-01-01}, booktitle = {Proceedings of the 3rd European Medical and Biological Engineering Conference}, journal = {IFMBE PROCEEDINGS (CD)}, volume = {1}, publisher = {IFMBE}, keywords = {biomedical data}, pubstate = {published}, tppubtype = {conference} } @conference{11568_466876, title = {A RLWPR network for learning the internal model of an anthropomorphic robot arm}, author = {Bacciu Davide and Zollo Loredana and Guglielmelli Eugenio and Leoni Fabio and Starita Antonina}, doi = {10.1109/IROS.2004.1389362}, year = {2004}, date = {2004-01-01}, urldate = {2004-01-01}, booktitle = {Proceedings of the 2004 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, volume = {1}, pages = {260--265}, publisher = {IEEE}, abstract = {Studies of human motor control suggest that humans develop internal models of the arm during the execution of voluntary movements. In particular, the internal model consists of the inverse dynamic model of the musculoskeletal system and intervenes in the feedforward loop of the motor control system to improve reactivity and stability in rapid movements. In this paper, an interaction control scheme inspired by biological motor control, i.e. coactivation-based compliance control in the joint space, is revisited, and a feedforward module capable of learning the manipulator inverse dynamics online is presented. A novel recurrent learning paradigm is proposed which derives from an interesting functional equivalence between locally weighted regression networks and Takagi-Sugeno-Kang fuzzy systems. The proposed learning paradigm has been named recurrent locally weighted regression networks and strengthens the computational power of feedforward locally weighted regression networks. Simulation results are reported to validate the control scheme.}, keywords = {recurrent neural network}, pubstate = {published}, tppubtype = {conference} } @mastersthesis{mscThesis03, title = {Neural Architectures for Learning the Internal Model of an Anthropomorphic Robot Arm}, author = {Bacciu Davide}, year = {2003}, date = {2003-12-16}, urldate = {2003-12-16}, school = {M.Sc. Thesis in Computer Science, Università di Pisa}, note = {In Italian}, keywords = {cognitive robotics, recurrent neural network}, pubstate = {published}, tppubtype = {mastersthesis} }