@inbook{bef343483de741d99a1ee3492afdc06a,
title = "Is it possible to program artificial emotions? A basis for behaviours with moral connotation?",
abstract = "The fact that machines can recognize emotions, or even be programmed with something functionally similar to an emotion, does not mean that they exhibit moral behaviour. The laws defined by Isaac Asimov are of little use if a machine agent has to make decisions in complex scenarios. It must be borne in mind that morality is primarily a group phenomenon. It serves to regulate the relationship among individuals having different motivations regarding the cohesion and benefit of that group. Concomitantly, it moderates expectations about one another. It is necessary to make sure agents do not hide malevolent purposes, that they are capable of acknowledging errors and to act accordingly. One must begin somewhere, even without presently possessing a detailed knowledge of human morality, to the extent of programming ethical machines in full possession of all the functions of justification and argumentation that underlie decisions. This chapter will discuss the bringing out of a moral lexicon shareable by most cultures. The specific case of guilt and the capacity to recognize it is present in all cultures. It can be computer-simulated and can be a starting point for exploring this field.",
author = "Pereira, {Lu{\'i}s Moniz} and Lopes, {Ant{\'o}nio Barata}",
year = "2020",
doi = "10.1007/978-3-030-39630-5_12",
language = "English",
isbn = "978-3-030-39629-9",
series = "Studies in Applied Philosophy, Epistemology and Rational Ethics",
publisher = "Springer",
pages = "87--92",
booktitle = "Machine Ethics",
address = "Netherlands",
}