Creative Commons Attribution 4.0 International license
Human oversight is a key safeguard for AI systems, intended to mitigate risks by adding a human layer of safety and control. Oversight personnel should, for example, detect malfunctions or violations of fundamental rights such as discriminatory decision-making and intervene accordingly. Human oversight is also central to AI governance and ethics, and is mandated by Articles 14 and 26 of the EU AI Act for high-risk AI. This Dagstuhl Seminar brought together experts from artificial intelligence, human-computer interaction, human factors and psychology, philosophy and ethics, and law to explore conceptual, technical, legal, and practical dimensions of human oversight of AI. Across the seminar, participants provided perspective talks from the different disciplines and engaged in working groups and use-case specific discussions in order to establish a science of human oversight of AI systems. The main outcome of this seminar is a general framework that outlines the architecture, processes, and sociotechnical design dimensions of human oversight of AI systems.
@article{langer_et_al:DagRep.15.6.189,
  author    = {Langer, Markus and Dachselt, Raimund and Liao, Q. Vera and Miller, Tim and Tintarev, Nava},
  title     = {Challenges of Human Oversight: Achieving Human Control of {AI}-Based Systems ({Dagstuhl} Seminar 25272)},
  journal   = {Dagstuhl Reports},
  year      = {2026},
  volume    = {15},
  number    = {6},
  pages     = {189--204},
  editor    = {Langer, Markus and Dachselt, Raimund and Liao, Q. Vera and Miller, Tim and Tintarev, Nava},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address   = {Dagstuhl, Germany},
  issn      = {2192-5283},
  url       = {https://drops.dagstuhl.de/entities/document/10.4230/DagRep.15.6.189},
  urn       = {urn:nbn:de:0030-drops-255735},
  doi       = {10.4230/DagRep.15.6.189},
  annote    = {Keywords: artificial intelligence, explainable ai, human oversight, norms and regulations, safety}
}