Many instances of decision making under objective uncertainty decompose into two steps: predicting the objective function, and then optimizing for the best feasible action under the estimated objective vector. We study the problem of ensembling models for the optimization of uncertain linear objectives under arbitrary constraints. We are given a collection of predictive models mapping a feature space to multi-dimensional real-valued predictions, which form the coefficients of a linear objective that we would like to optimize. We give two ensembling methods that can provably result in transparent decisions that strictly improve on all of the initial policies. The first operates in the "white box" setting, in which we have access to the underlying prediction models; the second operates in the "black box" setting, in which we observe only the decisions the constituent models induce in the downstream optimization problem, not their underlying point predictions. The ensembles are transparent, or trustworthy, in the sense that the user can reliably predict long-term ensemble rewards even when the instance-by-instance predictions are imperfect.
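For concreteness, here is a minimal sketch of the predict-then-optimize setup the abstract describes, written in Python with SciPy. It illustrates only the problem setup (each constituent model's predictions become the coefficients of a linear program), not the paper's ensembling algorithms; the two toy models, the budget constraint, and all names are hypothetical.

```python
# Predict-then-optimize sketch: a model predicts the coefficients c(x) of a
# linear objective, and the decision is the feasible action maximizing c(x).a.
import numpy as np
from scipy.optimize import linprog

def decide(predict, x, A_ub, b_ub):
    """Map a feature vector x to a feasible action by maximizing the
    predicted linear objective c(x) . a subject to A_ub a <= b_ub."""
    c = predict(x)  # predicted objective coefficients for this instance
    # linprog minimizes, so negate c to maximize; actions bounded in [0, 1]
    res = linprog(-c, A_ub=A_ub, b_ub=b_ub, bounds=(0, 1))
    return res.x

# Two hypothetical constituent models mapping features to coefficient vectors.
model_1 = lambda x: np.array([1.0, 2.0, 0.5]) * x.mean()
model_2 = lambda x: np.array([0.8, 2.5, 0.3]) * x.mean()

x = np.array([0.2, 0.7, 0.1])
A_ub, b_ub = np.ones((1, 3)), np.array([1.0])  # budget constraint: sum(a) <= 1

# In the "black box" setting, an ensemble sees only these induced decisions;
# in the "white box" setting it also sees the coefficient predictions c(x).
actions = [decide(m, x, A_ub, b_ub) for m in (model_1, model_2)]
```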
@InProceedings{globusharris_et_al:LIPIcs.FORC.2025.14,
  author =    {Globus-Harris, Ira and Gupta, Varun and Kearns, Michael and Roth, Aaron},
  title =     {{Model Ensembling for Constrained Optimization}},
  booktitle = {6th Symposium on Foundations of Responsible Computing (FORC 2025)},
  pages =     {14:1--14:17},
  series =    {Leibniz International Proceedings in Informatics (LIPIcs)},
  ISBN =      {978-3-95977-367-6},
  ISSN =      {1868-8969},
  year =      {2025},
  volume =    {329},
  editor =    {Bun, Mark},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address =   {Dagstuhl, Germany},
  URL =       {https://drops.dagstuhl.de/entities/document/10.4230/LIPIcs.FORC.2025.14},
  URN =       {urn:nbn:de:0030-drops-231412},
  doi =       {10.4230/LIPIcs.FORC.2025.14},
  annote =    {Keywords: model ensembling, trustworthy AI, decision-making under uncertainty}
}