When solving decision and optimisation problems, many competing algorithms (model and solver choices) have complementary strengths. Typically, no single algorithm works well for all instances of a problem. Automated algorithm selection has been shown to work very well for choosing a suitable algorithm for a given instance. However, the cost of training can be prohibitively large, because it requires running the candidate algorithms on a representative set of training instances. In this work, we explore reducing this cost by training on only a subset of the instances. We approach this problem in three ways: using active learning to decide which instances to label based on prediction uncertainty, augmenting the algorithm predictors with a timeout predictor, and collecting training data under a progressively increasing timeout. We evaluate combinations of these approaches on six datasets from ASLib and report the reduction in labelling cost achieved by each option.
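The core idea behind the active-learning option is to label (i.e., run the candidate algorithms on) only the instances where the current selector is most uncertain. Below is a minimal sketch of that loop under stated assumptions: a scikit-learn random-forest selector and least-confidence sampling. The data, loop structure, and all names here are illustrative, not the authors' implementation.

```python
# Sketch of uncertainty-based active learning for algorithm selection.
# Toy data stands in for ASLib instance features and best-algorithm labels.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)
n_instances, n_features, n_algorithms = 200, 10, 3
X = rng.normal(size=(n_instances, n_features))          # instance features
true_best = rng.integers(0, n_algorithms, n_instances)  # oracle labels

# Seed the labelled pool with a small random sample; in practice each
# label is obtained by running every candidate algorithm on the instance.
labelled = list(rng.choice(n_instances, size=20, replace=False))
unlabelled = [i for i in range(n_instances) if i not in labelled]

model = RandomForestClassifier(n_estimators=100, random_state=0)
batch_size = 10

for _ in range(5):  # a few active-learning rounds
    model.fit(X[labelled], true_best[labelled])
    # Least-confidence sampling: uncertainty = 1 - max class probability.
    proba = model.predict_proba(X[unlabelled])
    uncertainty = 1.0 - proba.max(axis=1)
    # Query the batch of unlabelled instances the selector is least sure of.
    query = np.argsort(uncertainty)[-batch_size:]
    for q in sorted(query, reverse=True):      # pop high indices first
        labelled.append(unlabelled.pop(q))     # label = run the algorithms

model.fit(X[labelled], true_best[labelled])
print(f"labelled {len(labelled)} of {n_instances} instances")
```

The hoped-for saving is that the selector reaches comparable quality while the expensive labelling step (solver runs up to the timeout) is performed on only a fraction of the training set.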
@InProceedings{kus_et_al:LIPIcs.CP.2024.38,
  author    = {Ku\c{s}, Erdem and Akg\"{u}n, \"{O}zg\"{u}r and Dang, Nguyen and Miguel, Ian},
  title     = {{Frugal Algorithm Selection}},
  booktitle = {30th International Conference on Principles and Practice of Constraint Programming (CP 2024)},
  pages     = {38:1--38:16},
  series    = {Leibniz International Proceedings in Informatics (LIPIcs)},
  ISBN      = {978-3-95977-336-2},
  ISSN      = {1868-8969},
  year      = {2024},
  volume    = {307},
  editor    = {Shaw, Paul},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address   = {Dagstuhl, Germany},
  URL       = {https://drops.dagstuhl.de/entities/document/10.4230/LIPIcs.CP.2024.38},
  URN       = {urn:nbn:de:0030-drops-207239},
  doi       = {10.4230/LIPIcs.CP.2024.38},
  annote    = {Keywords: Algorithm Selection, Active Learning}
}