Automatic assessment tools (AATs) are software systems used in teaching environments to automatically evaluate code written by students. We have been using such a system since 2017, in multiple courses and across multiple evaluation types. This paper presents a set of lessons learned from that experience. These recommendations should help teachers and instructors who use, or wish to use, AATs in creating assessments that give students useful feedback for improving their work and reduce the likelihood of unfair evaluations.
@InProceedings{cipriano_et_al:OASIcs.ICPEC.2024.3,
  author =    {Cipriano, Bruno Pereira and Alves, Pedro},
  title =     {{Seven Years Later: Lessons Learned in Automated Assessment}},
  booktitle = {5th International Computer Programming Education Conference (ICPEC 2024)},
  pages =     {3:1--3:14},
  series =    {Open Access Series in Informatics (OASIcs)},
  ISBN =      {978-3-95977-347-8},
  ISSN =      {2190-6807},
  year =      {2024},
  volume =    {122},
  editor =    {Santos, Andr\'{e} L. and Pinto-Albuquerque, Maria},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address =   {Dagstuhl, Germany},
  URL =       {https://drops.dagstuhl.de/entities/document/10.4230/OASIcs.ICPEC.2024.3},
  URN =       {urn:nbn:de:0030-drops-209725},
  doi =       {10.4230/OASIcs.ICPEC.2024.3},
  annote =    {Keywords: learning to program, automatic assessment tools, unit testing, feedback, large language models}
}