Here you can find a consolidated (a.k.a. slowly updated) list of my publications. A frequently updated (and possibly noisy) list of works is available on my Google Scholar profile.
Please find below a short list of highlight publications for my recent activity.
Lepri, Marco; Bacciu, Davide; Santina, Cosimo Della Neural Autoencoder-Based Structure-Preserving Model Order Reduction and Control Design for High-Dimensional Physical Systems Journal Article In: IEEE Control Systems Letters, 2023. Collodi, Lorenzo; Bacciu, Davide; Bianchi, Matteo; Averta, Giuseppe Learning with few examples the semantic description of novel human-inspired grasp strategies from RGB data Journal Article In: IEEE Robotics and Automation Letters, pp. 2573 - 2580, 2022. Averta, Giuseppe; Barontini, Federica; Valdambrini, Irene; Cheli, Paolo; Bacciu, Davide; Bianchi, Matteo Learning to Prevent Grasp Failure with Soft Hands: From Online Prediction to Dual-Arm Grasp Recovery Journal Article In: Advanced Intelligent Systems, 2021. Cosimo, Della Santina; Giuseppe, Averta; Visar, Arapi; Alessandro, Settimi; Giuseppe, Catalano Manuel; Davide, Bacciu; Antonio, Bicchi; Matteo, Bianchi Autonomous Grasping with SoftHands: Combining Human Inspiration, Deep Learning and Embodied Machine Intelligence Presentation 11.09.2019. Cosimo, Della Santina; Visar, Arapi; Giuseppe, Averta; Francesca, Damiani; Gaia, Fiore; Alessandro, Settimi; Giuseppe, Catalano Manuel; Davide, Bacciu; Antonio, Bicchi; Matteo, Bianchi Learning from humans how to grasp: a data-driven architecture for autonomous grasping with anthropomorphic soft hands Journal Article In: IEEE Robotics and Automation Letters, pp. 1-8, 2019, ISSN: 2377-3766, (Also accepted for presentation at ICRA 2019). Arapi, Visar; Santina, Cosimo Della; Bacciu, Davide; Bianchi, Matteo; Bicchi, Antonio DeepDynamicHand: A deep neural architecture for labeling hand manipulation strategies in video sources exploiting temporal information Journal Article In: Frontiers in Neurorobotics, vol. 12, pp. 86, 2018.
@article{lepri2023neural,
  title     = {Neural Autoencoder-Based Structure-Preserving Model Order Reduction and Control Design for High-Dimensional Physical Systems},
  author    = {Lepri, Marco and Bacciu, Davide and Della Santina, Cosimo},
  journal   = {IEEE Control Systems Letters},
  publisher = {IEEE},
  year      = {2023},
  date      = {2023-12-21},
  urldate   = {2023-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
@article{Collodi2022,
  title     = {Learning with few examples the semantic description of novel human-inspired grasp strategies from {RGB} data},
  author    = {Collodi, Lorenzo and Bacciu, Davide and Bianchi, Matteo and Averta, Giuseppe},
  url       = {https://www.researchgate.net/profile/Giuseppe-Averta/publication/358006552_Learning_With_Few_Examples_the_Semantic_Description_of_Novel_Human-Inspired_Grasp_Strategies_From_RGB_Data/links/61eae01e8d338833e3857251/Learning-With-Few-Examples-the-Semantic-Description-of-Novel-Human-Inspired-Grasp-Strategies-From-RGB-Data.pdf},
  doi       = {10.1109/LRA.2022.3144520},
  journal   = {IEEE Robotics and Automation Letters},
  pages     = {2573--2580},
  publisher = {IEEE},
  year      = {2022},
  date      = {2022-04-04},
  urldate   = {2022-04-04},
  abstract  = {Data-driven approaches and human inspiration are fundamental to endow robotic manipulators with advanced autonomous grasping capabilities. However, to capitalize upon these two pillars, several aspects need to be considered, which include the number of human examples used for training; the need for having in advance all the required information for classification (hardly feasible in unstructured environments); the trade-off between the task performance and the processing cost. In this paper, we propose a RGB-based pipeline that can identify the object to be grasped and guide the actual execution of the grasping primitive selected through a combination of Convolutional and Gated Graph Neural Networks. We consider a set of human-inspired grasp strategies, which are afforded by the geometrical properties of the objects and identified from a human grasping taxonomy, and propose to learn new grasping skills with only a few examples. We test our framework with a manipulator endowed with an under-actuated soft robotic hand. Even though we use only 2D information to minimize the footprint of the network, we achieve 90% of successful identifications of the most appropriate human-inspired grasping strategy over ten different classes, of which three were few-shot learned, outperforming an ideal model trained with all the classes, in sample-scarce conditions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
@article{Averta2021,
  title     = {Learning to Prevent Grasp Failure with Soft Hands: From Online Prediction to Dual-Arm Grasp Recovery},
  author    = {Averta, Giuseppe and Barontini, Federica and Valdambrini, Irene and Cheli, Paolo and Bacciu, Davide and Bianchi, Matteo},
  doi       = {10.1002/aisy.202100146},
  journal   = {Advanced Intelligent Systems},
  year      = {2021},
  date      = {2021-10-07},
  urldate   = {2021-10-07},
  abstract  = {Soft hands allow to simplify the grasp planning to achieve a successful grasp, thanks to their intrinsic adaptability. At the same time, their usage poses new challenges, related to the adoption of classical sensing techniques originally developed for rigid end defectors, which provide fundamental information, such as to detect object slippage. Under this regard, model-based approaches for the processing of the gathered information are hard to use, due to the difficulties in modeling hand–object interaction when softness is involved. To overcome these limitations, in this article, we proposed to combine distributed tactile sensing and machine learning (recurrent neural network) to detect sliding conditions for a soft robotic hand mounted on a robotic manipulator, targeting the prediction of the grasp failure event and the direction of sliding. The outcomes of these predictions allow for an online triggering of a compensatory action performed with a second robotic arm–hand system, to prevent the failure. Despite the fact that the network is trained only with spherical and cylindrical objects, we demonstrate high generalization capabilities of our framework, achieving a correct prediction of the failure direction in 75% of cases, and a 85% of successful regrasps, for a selection of 12 objects of common use.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
@misc{automatica2019,
  title     = {Autonomous Grasping with {SoftHands}: Combining Human Inspiration, Deep Learning and Embodied Machine Intelligence},
  author    = {Della Santina, Cosimo and Averta, Giuseppe and Arapi, Visar and Settimi, Alessandro and Catalano, Manuel Giuseppe and Bacciu, Davide and Bicchi, Antonio and Bianchi, Matteo},
  booktitle = {Oral contribution to AUTOMATICA.IT 2019},
  year      = {2019},
  date      = {2019-09-11},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {presentation}
}
@article{ral2019,
  title     = {Learning from humans how to grasp: a data-driven architecture for autonomous grasping with anthropomorphic soft hands},
  author    = {Della Santina, Cosimo and Arapi, Visar and Averta, Giuseppe and Damiani, Francesca and Fiore, Gaia and Settimi, Alessandro and Catalano, Manuel Giuseppe and Bacciu, Davide and Bicchi, Antonio and Bianchi, Matteo},
  url       = {https://ieeexplore.ieee.org/document/8629968},
  doi       = {10.1109/LRA.2019.2896485},
  issn      = {2377-3766},
  journal   = {IEEE Robotics and Automation Letters},
  pages     = {1--8},
  year      = {2019},
  date      = {2019-02-01},
  note      = {Also accepted for presentation at ICRA 2019},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
@article{frontNeurob18,
  title     = {{DeepDynamicHand}: A deep neural architecture for labeling hand manipulation strategies in video sources exploiting temporal information},
  author    = {Arapi, Visar and Della Santina, Cosimo and Bacciu, Davide and Bianchi, Matteo and Bicchi, Antonio},
  url       = {https://www.frontiersin.org/articles/10.3389/fnbot.2018.00086/full},
  doi       = {10.3389/fnbot.2018.00086},
  journal   = {Frontiers in Neurorobotics},
  volume    = {12},
  pages     = {86},
  year      = {2018},
  date      = {2018-12-17},
  urldate   = {2018-12-17},
  abstract  = {Humans are capable of complex manipulation interactions with the environment, relying on the intrinsic adaptability and compliance of their hands. Recently, soft robotic manipulation has attempted to reproduce such an extraordinary behavior, through the design of deformable yet robust end-effectors. To this goal, the investigation of human behavior has become crucial to correctly inform technological developments of robotic hands that can successfully exploit environmental constraint as humans actually do. Among the different tools robotics can leverage on to achieve this objective, deep learning has emerged as a promising approach for the study and then the implementation of neuro-scientific observations on the artificial side. However, current approaches tend to neglect the dynamic nature of hand pose recognition problems, limiting the effectiveness of these techniques in identifying sequences of manipulation primitives underpinning action generation, e.g. during purposeful interaction with the environment. In this work, we propose a vision-based supervised Hand Pose Recognition method which, for the first time, takes into account temporal information to identify meaningful sequences of actions in grasping and manipulation tasks . More specifically, we apply Deep Neural Networks to automatically learn features from hand posture images that consist of frames extracted from grasping and manipulation task videos with objects and external environmental constraints. For training purposes, videos are divided into intervals, each associated to a specific action by a human supervisor. The proposed algorithm combines a Convolutional Neural Network to detect the hand within each video frame and a Recurrent Neural Network to predict the hand action in the current frame, while taking into consideration the history of actions performed in the previous frames. 
Experimental validation has been performed on two datasets of dynamic hand-centric strategies, where subjects regularly interact with objects and environment. Proposed architecture achieved a very good classification accuracy on both datasets, reaching performance up to 94%, and outperforming state of the art techniques. The outcomes of this study can be successfully applied to robotics, e.g for planning and control of soft anthropomorphic manipulators. },
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}