Francesco Lanza

Researching Machine Learning, Multi-Agent Systems, Machine Consciousness, Human-Robot Teaming Cooperation and Trust Theory, Cognitive Robotics
Mail francesco.lanza@unipa.it

 

Bibliography

2020

  • [DOI] A. Chella, F. Lanza, A. Pipitone, and V. Seidita, “The inner life of a robot in human-robot teaming,” in 2020 IEEE International Conference on Human-Machine Systems (ICHMS), 2020, pp. 1-4.
    [Bibtex]
    @inproceedings{9209477,
      author    = {Chella, Antonio and Lanza, Francesco and Pipitone, Arianna and Seidita, Valeria},
      booktitle = {2020 IEEE International Conference on Human-Machine Systems (ICHMS)},
      title     = {The Inner Life of a Robot in Human-Robot Teaming},
      year      = {2020},
      month     = sep,
      pages     = {1--4},
      doi       = {10.1109/ICHMS49158.2020.9209477},
      abstract  = {Giving the robot a “human” inner life, such as the capability to think about itself and to understand what the other team members are doing, would increase the efficiency of trustworthy interactions with the other members of the team. Our long-term research goal is to provide the robot with a computational model of inner life helping the robot to reason about itself, its capabilities, its environment and its teammates. Robot inner speech is a part of the research goal. In this paper, we summarize the results obtained in this direction.},
      keywords  = {Human-Robot Teaming Interaction;Cognitive Architecture;Self-modeling;Introspection;Inner Speech;BDI agents.},
    }
  • [DOI] F. Lanza, P. Hammer, V. Seidita, P. Wang, and A. Chella, “Agents in dynamic contexts, a system for learning plans,” in Proceedings of the ACM Symposium on Applied Computing, 2020.
    [Bibtex]
    @inproceedings{lanza2020sac,
      author    = {Lanza, Francesco and Hammer, Patrick and Seidita, Valeria and Wang, Pei and Chella, Antonio},
      title     = {Agents in Dynamic Contexts, a System for Learning Plans},
      booktitle = {Proceedings of the {ACM} Symposium on Applied Computing},
      year      = {2020},
      doi       = {10.1145/3341105.3374083},
      isbn      = {9781450368667},
      keywords  = {BDI,Human-robot interaction,Jason,Planning},
      abstract  = {Reproducing the human ability to cooperate and collaborate in a dynamic environment is a significant challenge in the field of human-robot teaming interaction. Generally, in this context, a robot has to adapt itself to handle unforeseen situations. The problem is runtime planning when some factors are not known before the execution starts. This work aims to show and discuss a method to handle this kind of situation. Our idea is to use the Belief-Desire-Intention agent paradigm, its the Jason reasoning cycle and a Non-Axiomatic Reasoning System. The result is a novel method that gives the robot the ability to select the best plan.},
    }
  • J. de Berardinis, G. Pizzuto, F. Lanza, A. Chella, J. Meira, and A. Cangelosi, “At your service: coffee beans recommendation from a robot assistant,” arXiv preprint arXiv:2008.13585, 2020.
    [Bibtex]
    @article{de2020your,
      author        = {de Berardinis, Jacopo and Pizzuto, Gabriella and Lanza, Francesco and Chella, Antonio and Meira, Jorge and Cangelosi, Angelo},
      title         = {At Your Service: Coffee Beans Recommendation From a Robot Assistant},
      journal       = {arXiv preprint arXiv:2008.13585},
      year          = {2020},
      eprint        = {2008.13585},
      archiveprefix = {arXiv},
    }
  • [DOI] F. Lanza, V. Seidita, and A. Chella, “Agents and robots for collaborating and supporting physicians in healthcare scenarios,” Journal of Biomedical Informatics, vol. 108, p. 103483, 2020.
    [Bibtex]
    @article{lanza2020jbi,
      author   = {Lanza, Francesco and Seidita, Valeria and Chella, Antonio},
      title    = {Agents and Robots for Collaborating and Supporting Physicians in Healthcare Scenarios},
      journal  = {Journal of Biomedical Informatics},
      volume   = {108},
      pages    = {103483},
      year     = {2020},
      doi      = {10.1016/j.jbi.2020.103483},
      issn     = {1532-0464},
      url      = {http://www.sciencedirect.com/science/article/pii/S1532046420301118},
      keywords = {Human-robot interaction,Multi-agent systems,Patient monitoring,Robots in Emergency Care for COVID-19,Robots in therapy},
      abstract = {Monitoring patients through robotics telehealth systems is an interesting scenario where patients' conditions, and their environment, are dynamic and unknown variables. We propose to improve telehealth systems' features to include the ability to serve patients with their needs, operating as human caregivers. The objective is to support the independent living of patients at home without losing the opportunity to monitor their health status. Application scenarios are several, and they spread from simple clinical assisting scenarios to an emergency one. For instance, in the case of a nursing home, the system would support in continuously monitoring the elderly patients. In contrast, in the case of an epidemic diffusion, such as COVID-19 pandemic, the system may help in all the early triage phases, significantly reducing the risk of contagion. However, the system has to let medical assistants perform actions remotely such as changing therapies or interacting with patients that need support. The paper proposes and describes a multi-agent architecture for intelligent medical care. We propose to use the beliefs-desires-intentions agent architecture, part of it is devised to be deployed in a robot. The result is an intelligent system that may allow robots the ability to select the most useful plan for unhandled situations and to communicate the choice to the physician for his validation and permission.},
    }

2019

  • F. Lanza, S. Vinanzi, V. Seidita, A. Cangelosi, and A. Chella, “A global workspace theory model for trust estimation in human-robot interaction,” in CEUR Workshop Proceedings, 2019, p. 104–112.
    [Bibtex]
    @inproceedings{lanza2019aic,
      author       = {Lanza, Francesco and Vinanzi, Samuele and Seidita, Valeria and Cangelosi, Angelo and Chella, Antonio},
      title        = {A Global Workspace Theory Model for Trust Estimation in Human-Robot Interaction},
      booktitle    = {{CEUR} Workshop Proceedings},
      volume       = {2483},
      pages        = {104--112},
      year         = {2019},
      issn         = {1613-0073},
      organization = {CEUR-WS},
      keywords     = {Cognitive Robotics,Emotions,Global Workspace Theory,Human-Robot Interaction,Theory of Mind,Trust},
      abstract     = {Successful and genuine social connections between humans are based on trust, even more when the people involved have to collaborate to reach a shared goal. With the advent of new findings and technologies in the field of robotics, it appears that this same key factor that regulates relationships between humans also applies with the same importance to human-robot interactions (HRI). Previous studies have proven the usefulness of a robot able to estimate the trustworthiness of its human collaborators and in this position paper we discuss a method to extend an existing state-of-the-art trust model with considerations based on social cues such as emotions. The proposed model follows the Global Workspace Theory (GWT) principles to build a novel system able to combine multiple specialised expert systems to determine whether the partner can be considered trustworthy or not. Positive results would demonstrate the usefulness of using constructive biases to enhance the teaming skills of social robots.},
    }
  • A. Chella, F. Lanza, A. Pipitone, and V. Seidita, “Human-robot teaming: Perspective on analysis and implementation issues,” in CEUR Workshop Proceedings, 2019.
    [Bibtex]
    @inproceedings{chella2019human,
      author       = {Chella, Antonio and Lanza, Francesco and Pipitone, Arianna and Seidita, Valeria},
      title        = {Human-Robot Teaming: Perspective on Analysis and Implementation Issues},
      booktitle    = {{CEUR} Workshop Proceedings},
      volume       = {2352},
      year         = {2019},
      issn         = {1613-0073},
      organization = {CEUR-WS},
      abstract     = {Interaction in a human-robot team in a changing environment is a big challenge. Several essential aspects that deserve investigation are at the base for efficient interactions. Among them the ability to produce a self-model and to apply elements from the theory of mind. This case is much more cumbersome than just implementing a system in which the various parts have to co-operate and collaborate to achieve a common goal. In the human-robot team, some factors that cannot be known before the execution phase intervene. Our goal is to investigate how a human-human team works and replicate it on the robot by defining a new cognitive architecture which attempts to model all the involved issues. This means enabling the robot with the capability to understand the world around, itself and the other, human or robot as well. In this paper, we present the first step towards the creation of a multi-agent architecture to realize human-robot teaming interaction.},
    }
  • C. Castelfranchi, A. Chella, R. Falcone, F. Lanza, and V. Seidita, Endowing robots with self-modeling abilities for trustful human-robot interactions, 2019.
    [Bibtex]
    @inproceedings{castelfranchi2endowing,
      author    = {Castelfranchi, Cristiano and Chella, Antonio and Falcone, Rino and Lanza, Francesco and Seidita, Valeria},
      title     = {Endowing Robots with Self-Modeling Abilities for Trustful Human-Robot Interactions},
      booktitle = {{CEUR} Workshop Proceedings},
      volume    = {2404},
      pages     = {22--28},
      year      = {2019},
      issn      = {1613-0073},
      abstract  = {Robots involved in collaborative and cooperative tasks with humans cannot be programmed in all their functions. They are autonomous entities acting in a dynamic and often partially known environment. How to interact with the humans and the decision process are determined by the knowledge on the environment, on the other and on itself. Also, the level of trust that each member of the team places in the other is crucial to creating a fruitful collaborative relationship. We hypothesize that one of the main components of a trustful relationship resides in the self-modeling abilities of the robot. The paper illustrates how employing the model of trust by Falcone and Castelfranchi to include self-modeling skills in the NAO humanoid robot involved in trustworthy interactions. Self-modeling skills are then implemented employing features by the BDI paradigm.},
    }
  • [DOI] A. Chella, F. Lanza, and V. Seidita, “Decision Process in Human-Agent Interaction: Extending Jason Reasoning Cycle,” in Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 2019, p. 320–339.
    [Bibtex]
    @inproceedings{chella2018decision,
      author       = {Chella, Antonio and Lanza, Francesco and Seidita, Valeria},
      title        = {Decision Process in Human-Agent Interaction: Extending {Jason} Reasoning Cycle},
      booktitle    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
      series       = {LNAI},
      volume       = {11375},
      pages        = {320--339},
      year         = {2019},
      doi          = {10.1007/978-3-030-25693-7_17},
      isbn         = {9783030256920},
      issn         = {1611-3349},
      organization = {Springer, Cham},
      keywords     = {BDI agent,Human-agent interaction,Jason},
      abstract     = {The main characteristic of an agent is acting on behalf of humans. Then, agents are employed as modeling paradigms for complex systems and their implementation. Today we are witnessing a growing increase in systems complexity, mainly when the presence of human beings and their interactions with the system introduces a dynamic variable not easily manageable during design phases. Design and implementation of this type of systems highlight the problem of making the system able to decide in autonomy. In this work we propose an implementation, based on Jason, of a cognitive architecture whose modules allow structuring the decision-making process by the internal states of the agents, thus combining aspects of self-modeling and theory of the mind.},
    }
  • F. Lanza, V. Seidita, C. Diliberto, P. Zanardi, and A. Chella, “Inside the robot’s mind during human-robot interaction,” in CEUR Workshop Proceedings, 2019, p. 54–67.
    [Bibtex]
    @inproceedings{lanza2019inside,
      author       = {Lanza, Francesco and Seidita, Valeria and Diliberto, Cristina and Zanardi, Paolo and Chella, Antonio},
      title        = {Inside the Robot's Mind During Human-Robot Interaction},
      booktitle    = {{CEUR} Workshop Proceedings},
      volume       = {2483},
      pages        = {54--67},
      year         = {2019},
      issn         = {1613-0073},
      organization = {CEUR-WS},
      keywords     = {Agent reasoning cycle,Cognitive architecture,Decision process,Human robot interaction},
      abstract     = {Humans and robots collaborating and cooperating for pursuing a shared objective need to rely on the other for carrying out an effective decision process and for updating knowledge when necessary in a dynamic environment. Robots have to behave as they were human teammates. To model the cognitive process of robots during the interaction, we developed a cognitive architecture that we implemented employing the BDI (belief, desire, intention) agent paradigm. In this paper, we focus on how to let the robot show to the human its reasoning process and how its knowledge on the work environment grows. We realized a framework whose heart is a simulator that serves the human as a window on the robot's mind.},
    }

2018

  • A. Chella, F. Lanza, and V. Seidita, “Representing and developing knowledge using Jason, CArtAgO and OWL,” in CEUR Workshop Proceedings, 2018, p. 147–152.
    [Bibtex]
    @inproceedings{chella2018woa,
      author    = {Chella, Antonio and Lanza, Francesco and Seidita, Valeria},
      title     = {Representing and Developing Knowledge using {Jason}, {CArtAgO} and {OWL}},
      booktitle = {{CEUR} Workshop Proceedings},
      volume    = {2215},
      pages     = {147--152},
      year      = {2018},
      issn      = {1613-0073},
      abstract  = {Contexts where agents and humans are required to collaborate and cooperate in a human-like fashion are complex systems where a high degree of self-adaptability of every component is demanding. A fundamental ingredient when developing and implementing this kind of systems is the knowledge representation. Knowledge of the goals, the environment, other agents' capabilities and task and of itself, is crucial in deciding which action to perform to reach an objective and to behave in a self-adaptive way. The problem of knowledge modeling and representation becomes more and more urgent if the agents' operation domain changes at runtime. Knowledge has to be updated and handled while the system is in execution. In this paper, we present a way for implementing a controlled semantic system to manage the belief base of a multi-agent system at runtime. Our work is based on the development of a specific approach for interfacing Jason, CArtAgO and Jena; the knowledge base representation employs OWL Ontology.},
    }
  • [DOI] A. Chella, F. Lanza, A. Pipitone, and V. Seidita, “Knowledge acquisition through introspection in Human-Robot Cooperation,” Biologically Inspired Cognitive Architectures, vol. 25, p. 1–7, 2018.
    [Bibtex]
    @article{chella2018knowledge,
      author    = {Chella, Antonio and Lanza, Francesco and Pipitone, Arianna and Seidita, Valeria},
      title     = {Knowledge Acquisition Through Introspection in Human-Robot Cooperation},
      journal   = {Biologically Inspired Cognitive Architectures},
      volume    = {25},
      pages     = {1--7},
      year      = {2018},
      doi       = {10.1016/j.bica.2018.07.016},
      issn      = {2212-683X},
      publisher = {Elsevier},
      keywords  = {Cognitive agent,Cognitive architecture,Introspection,Knowledge acquisition,Ontology},
      abstract  = {When cooperating with a team including humans, robots have to understand and update semantic information concerning the state of the environment. The run-time evaluation and acquisition of new concepts fall in the critical mass learning. It is a cognitive skill that enables the robot to show environmental awareness to complete its tasks successfully. A kind of self-consciousness emerges: the robot activates the introspective mental processes inferring if it owns a domain concept or not, and correctly blends the conceptual meaning of new entities. Many works attempt to simulate human brain functions leading to neural network implementation of consciousness; regrettably, some of these produce accurate model that however do not provide means for creating virtual agents able to interact with a human in a teamwork in a human-like fashion, hence including aspects such as self-conscious abilities, trust, emotions and motivations. We propose a method that, based on a cognitive architecture for human-robot teaming interaction, endows a robot with the ability to model its knowledge about the environment it is interacting with and to acquire new knowledge when it occurs.},
    }
  • A. Chella, F. Lanza, and V. Seidita, “A cognitive architecture for human-robot teaming interaction,” in Proceedings of the 6th international workshop on artificial intelligence and cognition, Palermo, 2018.
    [Bibtex]
    @inproceedings{aic2018,
      author    = {Chella, Antonio and Lanza, Francesco and Seidita, Valeria},
      title     = {A Cognitive Architecture for Human-Robot Teaming Interaction},
      booktitle = {Proceedings of the 6th International Workshop on Artificial Intelligence and Cognition},
      address   = {Palermo},
      month     = jul,
      eventdate = {2018-07-02/2018-07-04},
      year      = {2018},
    }
  • A. Chella, F. Lanza, and V. Seidita, “Human-agent interaction, the system level using jason,” in Proceedings of the 6th international workshop on engineering multi-agent systems (emas 2018), Stockholm, 2018.
    [Bibtex]
    @inproceedings{emas2018,
      author    = {Chella, Antonio and Lanza, Francesco and Seidita, Valeria},
      title     = {Human-Agent Interaction, the System Level Using {JASON}},
      booktitle = {Proceedings of the 6th International Workshop on Engineering Multi-Agent Systems ({EMAS} 2018)},
      address   = {Stockholm},
      month     = jul,
      year      = {2018},
    }
  • A. Pipitone, F. Lanza, V. Seidita, and A. Chella, “Inner speech for a self-conscious robot,” in CEUR Workshop Proceedings, 2018.
    [Bibtex]
    @inproceedings{pipitone2019inner,
      author    = {Pipitone, Arianna and Lanza, Francesco and Seidita, Valeria and Chella, Antonio},
      title     = {Inner Speech for a Self-Conscious Robot},
      booktitle = {{CEUR} Workshop Proceedings},
      volume    = {2287},
      year      = {2018},
      issn      = {1613-0073},
      keywords  = {Cognitive Architecture,Inner Speech,Robot Self-Consciousness,Robot Thought},
      abstract  = {The experience self-conscious thinking in the verbose form of inner speech is a common one. Such a covert dialogue accompanies the introspection of mental life and fulfills important roles in our cognition, such as self-regulation, self-restructuring, and re-focusing on attentional resources. Although the functional underpinning and the phenomenology of inner speech are largely investigated in psychological and philosophical fields, robotic research generally does not address such a form of self-conscious behavior. Existing models of inner speech inspire computational tools to provide the robot with a form of self-consciousness. Here, the most widespread psychological models of inner speech are reviewed, and a robot architecture implementing such a capability is outlined.},
    }