@inproceedings{0d53ecebc6d74354b5adf7b6cce70a3c,
  title         = {The Duet of Representations and How Explanations Exacerbate It},
  abstract      = {An algorithm effects a causal representation of relations between features and labels in the human{\textquoteright}s perception. Such a representation might conflict with the human{\textquoteright}s prior belief. Explanations can direct the human{\textquoteright}s attention to the conflicting feature and away from other relevant features. This leads to causal overattribution and may adversely affect the human{\textquoteright}s information processing. In a field experiment we implemented an XGBoost-trained model as a decision-making aid for counselors at a public employment service to predict candidates{\textquoteright} risk of long-term unemployment. The treatment group of counselors was also provided with SHAP. The results show that the quality of the human{\textquoteright}s decision-making is worse when a feature on which the human holds a conflicting prior belief is displayed as part of the explanation.},
  keywords      = {biases, causal representations, communication, conflict, epistemic standpoint, explanations, human-AI interaction, information processing, prior beliefs, salience},
  author        = {Wan, Charles and Belo, Rodrigo and Zejnilovi{\'c}, Leid and Lavado, Susana},
  note          = {Publisher Copyright: {\textcopyright} 2023, The Author(s), under exclusive license to Springer Nature Switzerland AG.; 1st World Conference on eXplainable Artificial Intelligence, xAI 2023 ; Conference date: 26-07-2023 Through 28-07-2023},
  year          = {2023},
  month         = oct,
  doi           = {10.1007/978-3-031-44067-0_10},
  language      = {English},
  isbn          = {9783031440663},
  series        = {Communications in Computer and Information Science},
  publisher     = {Springer Science and Business Media Deutschland GmbH},
  pages         = {181--197},
  editor        = {Longo, Luca},
  booktitle     = {Explainable Artificial Intelligence - 1st World Conference, xAI 2023, Proceedings},
  address       = {Germany},
  internal-note = {NOTE(review): address should be the publisher's city, not a country; auto-export artifact -- confirm the correct city (likely Cham, per Springer Nature) before relying on it},
}