License: Creative Commons Attribution 3.0 Unported (CC BY 3.0)
A parallel batched data structure is designed to process synchronized batches of operations on the data structure using a parallel program. In this paper, we propose parallel combining, a technique that implements a concurrent data structure from a parallel batched one. The idea is that we explicitly synchronize concurrent operations into batches: one of the processes becomes a combiner which collects concurrent requests and initiates a parallel batched algorithm involving the owners (clients) of the collected requests. Intuitively, the cost of synchronizing the concurrent calls can be compensated by running the parallel batched algorithm. We validate the intuition via two applications. First, we use parallel combining to design a concurrent data structure optimized for read-dominated workloads, taking a dynamic graph data structure as an example. Second, we use a novel parallel batched priority queue to build a concurrent one. In both cases, we obtain performance gains with respect to the state-of-the-art algorithms.
@inproceedings{aksenov_et_al:LIPIcs.OPODIS.2018.11,
  author    = {Aksenov, Vitaly and Kuznetsov, Petr and Shalyto, Anatoly},
  title     = {Parallel Combining: Benefits of Explicit Synchronization},
  booktitle = {22nd International Conference on Principles of Distributed Systems ({OPODIS} 2018)},
  pages     = {11:1--11:16},
  series    = {Leibniz International Proceedings in Informatics ({LIPIcs})},
  isbn      = {978-3-95977-098-9},
  issn      = {1868-8969},
  year      = {2019},
  volume    = {125},
  editor    = {Cao, Jiannong and Ellen, Faith and Rodrigues, Luis and Ferreira, Bernardo},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address   = {Dagstuhl, Germany},
  url       = {https://drops.dagstuhl.de/entities/document/10.4230/LIPIcs.OPODIS.2018.11},
  urn       = {urn:nbn:de:0030-drops-100713},
  doi       = {10.4230/LIPIcs.OPODIS.2018.11},
  annote    = {Keywords: concurrent data structure, parallel batched data structure, combining},
}