Artificial intelligence continues to impact a broad range of domains, application areas, and people. However, interpretability, understandability, responsibility, accountability, and fairness of the algorithms' results - all crucial for increasing humans' trust in these systems - are still largely missing. The purpose of this seminar is to understand how these components factor into a holistic view of trust. Further, this seminar seeks to identify design guidelines and best practices for building interactive visualization systems that calibrate trust.
@Article{chau_et_al:DagRep.12.8.103,
  author =    {Chau, Polo and Endert, Alex and Keim, Daniel A. and Oelke, Daniela},
  title =     {{Interactive Visualization for Fostering Trust in ML (Dagstuhl Seminar 22351)}},
  pages =     {103--116},
  journal =   {Dagstuhl Reports},
  ISSN =      {2192-5283},
  year =      {2023},
  volume =    {12},
  number =    {8},
  editor =    {Chau, Polo and Endert, Alex and Keim, Daniel A. and Oelke, Daniela},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address =   {Dagstuhl, Germany},
  URL =       {https://drops.dagstuhl.de/entities/document/10.4230/DagRep.12.8.103},
  URN =       {urn:nbn:de:0030-drops-177161},
  doi =       {10.4230/DagRep.12.8.103},
  annote =    {Keywords: accountability, artificial intelligence, explainability, fairness, interactive visualization, machine learning, responsibility, trust, understandability}
}