Machine learning algorithms in high-dimensional settings are highly susceptible to even a small fraction of structured outliers, making robust optimization techniques essential. Within the ε-contamination model, where an adversary may inspect and replace up to an ε-fraction of the samples, determining the optimal rates for robust stochastic convex optimization (SCO) has been a fundamental open problem. We develop novel algorithms that achieve minimax-optimal excess risk (up to logarithmic factors) under this model. Our approach improves over existing algorithms, which are not only suboptimal but also require stringent assumptions such as Lipschitz continuity and smoothness of individual sample functions; by contrast, our algorithms assume only population-level smoothness of the loss. Moreover, they can be adapted to the case in which the covariance parameter is unknown, and extended to nonsmooth population risks via convolutional smoothing. We complement our algorithmic developments with a tight information-theoretic lower bound for robust SCO.
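To orient the reader, here is a minimal LaTeX sketch of the setting (the symbols D, f, F, X, and β are our own notation for illustration, not taken from the paper): the learner receives an i.i.d. sample in which an adversary has replaced at most an ε-fraction of the points, and must output a point with small excess population risk; convolutional smoothing replaces a nonsmooth population risk by its convolution with a uniform kernel.

  % eps-contamination: at most an eps-fraction of an i.i.d. sample is replaced.
  \[ z_1, \dots, z_n \overset{\text{i.i.d.}}{\sim} \mathcal{D}, \qquad
     \tilde{S} = \{\tilde{z}_1, \dots, \tilde{z}_n\} \ \text{with}\
     \#\{\, i : \tilde{z}_i \neq z_i \,\} \le \varepsilon n. \]
  % Goal: small excess population risk of the output point \hat{x}.
  \[ F(\hat{x}) - \min_{x \in \mathcal{X}} F(x), \qquad
     F(x) := \mathbb{E}_{z \sim \mathcal{D}}\,[\, f(x, z) \,]. \]
  % Convolutional smoothing: convolve F with a uniform kernel on the unit ball,
  % yielding a smooth surrogate that is uniformly close to F for small beta.
  \[ F_\beta(x) := \mathbb{E}_{u \sim \mathrm{Unif}(\mathbb{B}^d)}\,[\, F(x + \beta u) \,]. \]

For an L-Lipschitz population risk F, such a surrogate is (by a standard argument) within βL of F everywhere and is smooth, which is what allows algorithms designed for smooth population risks to be run on F_β in place of F.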
@InProceedings{gao_et_al:LIPIcs.FORC.2025.9,
  author =    {Gao, Changyu and Lowy, Andrew and Zhou, Xingyu and Wright, Stephen J.},
  title =     {{Optimal Rates for Robust Stochastic Convex Optimization}},
  booktitle = {6th Symposium on Foundations of Responsible Computing (FORC 2025)},
  pages =     {9:1--9:21},
  series =    {Leibniz International Proceedings in Informatics (LIPIcs)},
  ISBN =      {978-3-95977-367-6},
  ISSN =      {1868-8969},
  year =      {2025},
  volume =    {329},
  editor =    {Bun, Mark},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address =   {Dagstuhl, Germany},
  URL =       {https://drops.dagstuhl.de/entities/document/10.4230/LIPIcs.FORC.2025.9},
  URN =       {urn:nbn:de:0030-drops-231369},
  doi =       {10.4230/LIPIcs.FORC.2025.9},
  annote =    {Keywords: Adversarial Robustness, Machine Learning, Optimization Algorithms, Robust Optimization, Stochastic Convex Optimization}
}