click-collect

Experimental 2D grid-world environment developed with the MATRX package.
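
For orientation, below is a minimal sketch of how a MATRX world is typically built and launched, using the standard MATRX `WorldBuilder` API. The grid size, agents, and key bindings here are hypothetical placeholders for illustration only; the actual click-collect world configuration lives in this repository's source.

```python
# Minimal MATRX world sketch (illustrative only; not this repo's actual setup).
from matrx import WorldBuilder
from matrx.agents import HumanAgentBrain, PatrollingAgentBrain
from matrx.actions.move_actions import MoveNorth, MoveEast, MoveSouth, MoveWest

# Build a small 2D grid world with the MATRX API and web visualizer enabled.
builder = WorldBuilder(shape=[10, 10], tick_duration=0.2,
                       run_matrx_api=True, run_matrx_visualizer=True)

# One autonomous agent patrolling between two waypoints (hypothetical placement).
builder.add_agent([0, 0], PatrollingAgentBrain(waypoints=[(0, 0), (9, 9)]),
                  name="agent")

# One human-controlled agent, steered with WASD keys in the visualizer.
key_action_map = {'w': MoveNorth.__name__, 'd': MoveEast.__name__,
                  's': MoveSouth.__name__, 'a': MoveWest.__name__}
builder.add_human_agent([9, 0], HumanAgentBrain(), name="human",
                        key_action_map=key_action_map)

if __name__ == "__main__":
    builder.startup()            # start the MATRX API and visualizer
    world = builder.get_world()
    world.run(builder.api_info)  # run until a world-termination condition fires
    builder.stop()
```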

Demo: https://youtu.be/89dSfAgS0LY

Related paper: How Should an AI Trust its Human Teammates? Exploring Possible Cues of Artificial Trust

Please cite:

```bibtex
@article{10.1145/3635475,
author = {Jorge, Carolina Centeio and Jonker, Catholijn M. and Tielman, Myrthe L.},
title = {How Should an AI Trust its Human Teammates? Exploring Possible Cues of Artificial Trust},
year = {2024},
issue_date = {March 2024},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {14},
number = {1},
issn = {2160-6455},
url = {https://doi.org/10.1145/3635475},
doi = {10.1145/3635475},
abstract = {In teams composed of humans, we use trust in others to make decisions, such as what to do next, who to help and who to ask for help. When a team member is artificial, they should also be able to assess whether a human teammate is trustworthy for a certain task. We see trustworthiness as the combination of (1) whether someone will do a task and (2) whether they can do it. With building beliefs in trustworthiness as an ultimate goal, we explore which internal factors (krypta) of the human may play a role (e.g., ability, benevolence, and integrity) in determining trustworthiness, according to existing literature. Furthermore, we investigate which observable metrics (manifesta) an agent may take into account as cues for the human teammate’s krypta in an online 2D grid-world experiment (n = 54). Results suggest that cues of ability, benevolence and integrity influence trustworthiness. However, we observed that trustworthiness is mainly influenced by human’s playing strategy and cost-benefit analysis, which deserves further investigation. This is a first step towards building informed beliefs of human trustworthiness in human-AI teamwork.},
journal = {ACM Trans. Interact. Intell. Syst.},
month = {jan},
articleno = {5},
numpages = {26},
keywords = {human-agent interaction, human-AI teams, hybrid teams, teamwork, trustworthiness, artificial trust}
}
```