This paper summarizes two previously conducted studies on the development and certification of safe Artificial Intelligence (AI) systems from the practitioner's viewpoint. Both studies point towards a common theme: AI certification will mainly rely on the analysis of the processes used to create AI systems. While additional techniques, such as methods from the field of eXplainable AI (XAI) and formal verification, hold considerable promise and can assist in creating safe AI systems, they do not provide comprehensive solutions to the existing problems regarding AI certification.
@InProceedings{fresz_et_al:OASIcs.SAIA.2024.13,
  author    = {Fresz, Benjamin and Brajovic, Danilo and Huber, Marco F.},
  title     = {{AI Certification: Empirical Investigations into Possible Cul-De-Sacs and Ways Forward}},
  booktitle = {Symposium on Scaling AI Assessments (SAIA 2024)},
  pages     = {13:1--13:4},
  series    = {Open Access Series in Informatics (OASIcs)},
  ISBN      = {978-3-95977-357-7},
  ISSN      = {2190-6807},
  year      = {2025},
  volume    = {126},
  editor    = {G\"{o}rge, Rebekka and Haedecke, Elena and Poretschkin, Maximilian and Schmitz, Anna},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address   = {Dagstuhl, Germany},
  URL       = {https://drops.dagstuhl.de/entities/document/10.4230/OASIcs.SAIA.2024.13},
  URN       = {urn:nbn:de:0030-drops-227533},
  doi       = {10.4230/OASIcs.SAIA.2024.13},
  annote    = {Keywords: AI certification, eXplainable AI (XAI), safe AI, trustworthy AI, AI documentation}
}