Artificial intelligence (AI), and machine learning algorithms in particular, are of increasing importance in many application areas. However, interpretability and understandability, as well as responsibility, accountability, and fairness of the algorithms' results, all of which are crucial for increasing human trust in these systems, are still largely missing. Major industry players, including Google, Microsoft, and Apple, have become aware of this gap and have recently published their own guidelines for the use of AI in order to promote fairness, trust, interpretability, and related goals. Interactive visualization is one technology that may help to increase trust in AI systems. During the seminar, we discussed the requirements for trustworthy AI systems as well as the technological possibilities that interactive visualization offers for increasing human trust in AI.
@Article{oelke_et_al:DagRep.10.4.37,
  author    = {Oelke, Daniela and Keim, Daniel A. and Chau, Polo and Endert, Alex},
  title     = {{Interactive Visualization for Fostering Trust in AI (Dagstuhl Seminar 20382)}},
  pages     = {37--42},
  journal   = {Dagstuhl Reports},
  ISSN      = {2192-5283},
  year      = {2021},
  volume    = {10},
  number    = {4},
  editor    = {Oelke, Daniela and Keim, Daniel A. and Chau, Polo and Endert, Alex},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address   = {Dagstuhl, Germany},
  URL       = {https://drops.dagstuhl.de/entities/document/10.4230/DagRep.10.4.37},
  URN       = {urn:nbn:de:0030-drops-137360},
  doi       = {10.4230/DagRep.10.4.37},
  annote    = {Keywords: accountability, artificial intelligence, explainability, fairness, interactive visualization, machine learning, responsibility, trust, understandability}
}