Sufficiency and separation are two fundamental criteria in classification fairness. For binary classifiers, these concepts correspond to subgroup calibration and equalized odds, respectively, and are known to be incompatible except in trivial cases. In this work, we explore a relaxation of these criteria based on f-divergences between distributions—essentially the same relaxation studied in the literature on approximate multicalibration—analyze their relationships, and derive implications for fair representations and downstream uses (post-processing) of representations. We show that when a protected attribute is determinable from features present in the data, the (relaxed) criteria of sufficiency and separation exhibit a tradeoff, forming a convex Pareto frontier. Moreover, we prove that when a protected attribute is not fully encoded in the data, achieving full sufficiency may be impossible. This finding not only strengthens the case against "fairness through unawareness" but also highlights an important caveat for work on (multi-)calibration.
@inproceedings{benger_et_al:LIPIcs.FORC.2025.19,
  author    = {Benger, Etam and Ligett, Katrina},
  title     = {Mapping the Tradeoffs and Limitations of Algorithmic Fairness},
  booktitle = {6th Symposium on Foundations of Responsible Computing ({FORC} 2025)},
  pages     = {19:1--19:20},
  series    = {Leibniz International Proceedings in Informatics (LIPIcs)},
  volume    = {329},
  editor    = {Bun, Mark},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address   = {Dagstuhl, Germany},
  year      = {2025},
  isbn      = {978-3-95977-367-6},
  issn      = {1868-8969},
  doi       = {10.4230/LIPIcs.FORC.2025.19},
  urn       = {urn:nbn:de:0030-drops-231465},
  url       = {https://drops.dagstuhl.de/entities/document/10.4230/LIPIcs.FORC.2025.19},
  annote    = {Keywords: Algorithmic fairness, information theory, sufficiency-separation tradeoff},
}
Feedback for Dagstuhl Publishing