@comment{License: Creative Commons Attribution 4.0 International (CC BY 4.0)}
@comment{Abstract: In collaborative active learning, where multiple agents try to learn labels from a common hypothesis, we introduce an innovative framework for incentivized collaboration. Here, rational agents aim to obtain labels for their data sets while keeping label complexity at a minimum. We focus on designing (strict) individually rational (IR) collaboration protocols, ensuring that agents cannot reduce their expected label complexity by acting individually. We first show that given any optimal active learning algorithm, the collaboration protocol that runs the algorithm as is over the entire data is already IR. However, computing the optimal algorithm is NP-hard. We therefore provide collaboration protocols that achieve (strict) IR and are comparable with the best known tractable approximation algorithm in terms of label complexity.}
@inproceedings{cohen_et_al:LIPIcs.FORC.2024.2,
  author    = {Cohen, Lee and Shao, Han},
  title     = {Incentivized Collaboration in Active Learning},
  booktitle = {5th Symposium on Foundations of Responsible Computing (FORC 2024)},
  pages     = {2:1--2:20},
  series    = {Leibniz International Proceedings in Informatics (LIPIcs)},
  volume    = {295},
  editor    = {Rothblum, Guy N.},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address   = {Dagstuhl, Germany},
  year      = {2024},
  isbn      = {978-3-95977-319-5},
  issn      = {1868-8969},
  doi       = {10.4230/LIPIcs.FORC.2024.2},
  url       = {https://drops.dagstuhl.de/entities/document/10.4230/LIPIcs.FORC.2024.2},
  urn       = {urn:nbn:de:0030-drops-200851},
  annote    = {Keywords: pool-based active learning, individual rationality, incentives, Bayesian, collaboration}
}