/*
 * Written by Josh Dybnis and released to the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 *
 * Implementation of the lock-free skiplist data-structure created by Maurice Herlihy, Yossi Lev,
 * and Nir Shavit. See Herlihy's and Shavit's book "The Art of Multiprocessor Programming".
 * http://www.amazon.com/Art-Multiprocessor-Programming-Maurice-Herlihy/dp/0123705916/
 *
 * See also Keir Fraser's dissertation "Practical Lock Freedom".
 * www.cl.cam.ac.uk/techreports/UCAM-CL-TR-579.pdf
 *
 * I've generalized the data structure to support update operations like set() and CAS() in addition to
 * the normal add() and remove() operations.
 *
 * Warning: This code is written for the x86 memory-model. The algorithm depends on certain stores
 * and loads being ordered. This code won't work correctly on platforms with weaker memory models if
 * you don't add memory barriers in the right places.
 */

// Setting MAX_LEVEL to 0 essentially makes this data structure the Harris-Michael lock-free list (in list.c).
// NOTE(review): orphaned fragment of the skiplist_t struct definition (the
// surrounding lines were lost in extraction). A NULL key_type means keys are
// compared directly as integer values; otherwise key_type->cmp()/clone() are
// used — see find_preds and sl_cas below.
43 const datatype_t *key_type;
46 static const map_impl_t sl_map_impl = {
47 (map_alloc_t)sl_alloc, (map_cas_t)sl_cas, (map_get_t)sl_lookup, (map_remove_t)sl_remove,
48 (map_count_t)sl_count, (map_print_t)sl_print, (map_free_t)sl_free
51 const map_impl_t *MAP_TYPE_SKIPLIST = &sl_map_impl;
53 static int random_level (void) {
54 unsigned r = nbd_rand();
58 r |= 1 << (MAX_LEVEL+1);
60 int n = __builtin_ctz(r)-1;
61 assert(n <= MAX_LEVEL);
65 static node_t *node_alloc (int level, void *key, uint64_t val) {
66 assert(level >= 0 && level <= MAX_LEVEL);
67 size_t sz = sizeof(node_t) + (level + 1) * sizeof(node_t *);
68 node_t *item = (node_t *)nbd_malloc(sz);
72 item->top_level = level;
76 skiplist_t *sl_alloc (const datatype_t *key_type) {
77 skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
78 sl->key_type = key_type;
79 sl->head = node_alloc(MAX_LEVEL, NULL, 0);
80 memset(sl->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
84 void sl_free (skiplist_t *sl) {
85 node_t *item = sl->head->next[0];
87 node_t *next = (node_t *)STRIP_TAG(item->next[0]);
93 uint64_t sl_count (skiplist_t *sl) {
95 node_t *item = sl->head->next[0];
97 if (!IS_TAGGED(item->next[0])) {
100 item = (node_t *)STRIP_TAG(item->next[0]);
// find_preds: core search routine for the skiplist. Searches for <key> from
// the top level down and returns the matching node, or NULL if no live match
// exists. When <preds>/<succs> are non-NULL they are filled with the
// predecessor and successor of the key's position at each level from 0 up to
// <n>; n == -1 is a special case used by sl_remove (fill preds for every
// level up to the found item's top_level). When <help_remove> is set, tagged
// (logically removed) nodes found along the way are physically unlinked and
// their memory is defer-freed.
//
// NOTE(review): this region was damaged in extraction — every line below
// carries a fused original line number and some lines are missing. The
// surviving tokens are preserved verbatim; restore from upstream before
// compiling.
105 static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, void *key, int help_remove) {
106 node_t *pred = sl->head;
108 TRACE("s2", "find_preds: searching for key %p in skiplist (head is %p)", key, pred);
110 int start_level = MAX_LEVEL;
112 // Optimization for small lists. No need to traverse empty higher levels.
114 while (pred->next[start_level+1] != NULL) {
115 start_level += start_level - 1;
116 if (EXPECT_FALSE(start_level >= MAX_LEVEL)) {
117 start_level = MAX_LEVEL;
// Make sure the search starts no lower than the highest level the caller
// asked preds/succs for.
121 if (EXPECT_FALSE(start_level < n)) {
126 // Traverse the levels of <sl> from the top level to the bottom
127 for (int level = start_level; level >= 0; --level) {
128 TRACE("s3", "find_preds: level %llu", level, 0);
129 item = pred->next[level];
// A tagged link out of <pred> means <pred> itself is being removed; the
// whole search must restart from the head.
130 if (EXPECT_FALSE(IS_TAGGED(item))) {
131 TRACE("s2", "find_preds: pred %p is marked for removal (item %p); retry", pred, item);
132 return find_preds(preds, succs, n, sl, key, help_remove); // retry
134 while (item != NULL) {
135 node_t *next = item->next[level];
137 // A tag means an item is logically removed but not physically unlinked yet.
138 while (EXPECT_FALSE(IS_TAGGED(next))) {
140 // Skip over logically removed items.
// NOTE(review): <item->next> without a [level] subscript looks like a
// transcription/truncation of item->next[level] — confirm against upstream.
142 item = (node_t *)STRIP_TAG(item->next);
143 if (EXPECT_FALSE(item == NULL))
145 TRACE("s3", "find_preds: skipping marked item %p (next is %p)", item, next);
146 next = item->next[level];
150 // Unlink logically removed items.
152 TRACE("s3", "find_preds: unlinking marked item %p; next is %p", item, next);
// Swing <pred>'s link past the dead node. The thread whose CAS wins takes
// responsibility for freeing the node (below).
153 if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) == item) {
154 item = (node_t *)STRIP_TAG(next);
155 if (EXPECT_FALSE(item == NULL))
157 next = item->next[level];
158 TRACE("s3", "find_preds: now the current item is %p next is %p", item, next);
160 // The thread that completes the unlink should free the memory.
162 if (sl->key_type != NULL) {
163 nbd_defer_free((void*)other->key);
165 nbd_defer_free(other);
168 TRACE("s3", "find_preds: lost race to unlink item %p from pred %p", item, pred);
169 TRACE("s3", "find_preds: pred's link changed to %p", other, 0);
// If <pred>'s link is now tagged, <pred> itself is being removed; restart.
170 if (IS_TAGGED(other))
171 return find_preds(preds, succs, n, sl, key, help_remove); // retry
173 if (EXPECT_FALSE(item == NULL))
175 next = item->next[level];
179 if (EXPECT_FALSE(item == NULL))
182 TRACE("s4", "find_preds: visiting item %p (next is %p)", item, next);
183 TRACE("s4", "find_preds: key %p val %p", STRIP_TAG(item->key), item->val);
// Key comparison: raw integer compare when no key_type is installed,
// otherwise the user-supplied comparator.
185 if (EXPECT_TRUE(sl->key_type == NULL)) {
186 d = (uint64_t)item->key - (uint64_t)key;
188 d = sl->key_type->cmp(item->key, key);
192 TRACE("s4", "find_preds: found pred %p item %p", pred, item);
200 // The cast to unsigned is for the case when n is -1.
201 if ((unsigned)level <= (unsigned)n) {
211 // fill in empty levels
// The found item is taller than where the search started; its predecessor
// at each untraversed upper level is simply the head node.
212 if (n == -1 && item != NULL) {
213 for (int level = start_level + 1; level <= item->top_level; ++level) {
214 preds[level] = sl->head;
219 TRACE("s2", "find_preds: found matching item %p in skiplist, pred is %p", item, pred);
222 TRACE("s2", "find_preds: found proper place for key %p in skiplist, pred is %p. returning null", key, pred);
226 // Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
227 uint64_t sl_lookup (skiplist_t *sl, void *key) {
228 TRACE("s1", "sl_lookup: searching for key %p in skiplist %p", key, sl);
229 node_t *item = find_preds(NULL, NULL, 0, sl, key, FALSE);
231 // If we found an <item> matching the <key> return its value.
233 uint64_t val = item->val;
234 if (val != DOES_NOT_EXIST) {
235 TRACE("s1", "sl_lookup: found item %p. val %p. returning item", item, item->val);
240 TRACE("l1", "sl_lookup: no item in the skiplist matched the key", 0, 0);
241 return DOES_NOT_EXIST;
// sl_cas: generalized compare-and-set update, the workhorse behind add/set/
// replace. <expectation> is either a concrete previous value or one of the
// sentinels CAS_EXPECT_DOES_NOT_EXIST / CAS_EXPECT_EXISTS /
// CAS_EXPECT_WHATEVER. Returns the item's previous value; DOES_NOT_EXIST
// means either a successful insert of a new item or a failed expectation
// against an absent key.
//
// NOTE(review): this region was damaged in extraction — fused line numbers
// and missing lines; tokens preserved verbatim. Restore from upstream before
// compiling.
244 uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expectation, uint64_t new_val) {
245 TRACE("s1", "sl_cas: key %p skiplist %p", key, sl);
246 TRACE("s1", "sl_cas: expectation %p new value %p", expectation, new_val);
// Values must be positive so they cannot collide with DOES_NOT_EXIST or the
// negative CAS_EXPECT_* sentinels.
247 ASSERT((int64_t)new_val > 0);
249 node_t *preds[MAX_LEVEL+1];
250 node_t *nexts[MAX_LEVEL+1];
251 node_t *new_item = NULL;
252 int n = random_level();
254 node_t *old_item = find_preds(preds, nexts, n, sl, key, TRUE);
255 if (old_item == NULL) {
257 // There was not an item in the skiplist that matches the key.
258 if (EXPECT_FALSE((int64_t)expectation > 0 || expectation == CAS_EXPECT_EXISTS)) {
259 TRACE("l1", "sl_cas: the expectation was not met, the skiplist was not changed", 0, 0);
260 return DOES_NOT_EXIST; // failure
263 ASSERT(expectation == CAS_EXPECT_DOES_NOT_EXIST || expectation == CAS_EXPECT_WHATEVER);
265 // First insert <new_item> into the bottom level.
266 TRACE("s3", "sl_cas: attempting to insert item between %p and %p", preds[0], nexts[0]);
// Clone the key when the list owns its keys (key_type installed).
267 void *new_key = (sl->key_type == NULL) ? key : sl->key_type->clone(key);
268 new_item = node_alloc(n, new_key, new_val);
269 node_t *pred = preds[0];
270 node_t *next = new_item->next[0] = nexts[0];
// Pre-link the upper levels before the level-0 CAS publishes the node.
271 for (int level = 1; level <= new_item->top_level; ++level) {
272 new_item->next[level] = nexts[level];
// The bottom-level CAS is what makes the insert visible to other threads.
274 node_t *other = SYNC_CAS(&pred->next[0], next, new_item);
276 TRACE("s3", "sl_cas: successfully inserted item %p at level 0", new_item, 0);
// Lost the race to another writer: discard the speculative node and retry.
279 TRACE("s3", "sl_cas: failed to change pred's link: expected %p found %p", next, other);
280 if (sl->key_type != NULL) {
287 // Found an item in the skiplist that matches the key.
288 uint64_t old_item_val = old_item->val;
290 // If the item's value is DOES_NOT_EXIST it means another thread removed the node out from under us.
291 if (EXPECT_FALSE(old_item_val == DOES_NOT_EXIST)) {
292 TRACE("s2", "sl_cas: lost a race, found an item but another thread removed it. retry", 0, 0);
296 if (EXPECT_FALSE(expectation == CAS_EXPECT_DOES_NOT_EXIST)) {
297 TRACE("s1", "sl_cas: found an item %p in the skiplist that matched the key. the expectation was "
298 "not met, the skiplist was not changed", old_item, old_item_val);
299 return old_item_val; // failure
302 // Use a CAS and not a SWAP. If the node is in the process of being removed and we used a SWAP, we could
303 // replace DOES_NOT_EXIST with our value. Then another thread that is updating the value could think it
304 // succeeded and return our value even though we indicated that the node has been removed. If the CAS
305 // fails it means another thread either removed the node or updated its value.
306 uint64_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val);
307 if (ret_val == old_item_val) {
308 TRACE("s1", "sl_cas: the CAS succeeded. updated the value of the item", 0, 0);
309 return ret_val; // success
311 TRACE("s2", "sl_cas: lost a race. the CAS failed. another thread changed the item's value", 0, 0);
313 old_item_val = ret_val;
317 // Link <new_item> into <sl> from the bottom up.
318 for (int level = 1; level <= new_item->top_level; ++level) {
319 node_t *pred = preds[level];
320 node_t *next = nexts[level];
322 TRACE("s3", "sl_cas: attempting to insert item between %p and %p", pred, next);
323 node_t *other = SYNC_CAS(&pred->next[level], next, new_item);
325 TRACE("s3", "sl_cas: successfully inserted item %p at level %llu", new_item, level);
// CAS failed: recompute preds/nexts for this item's levels and retry.
328 TRACE("s3", "sl_cas: failed to change pred's link: expected %p found %p", next, other);
329 find_preds(preds, nexts, new_item->top_level, sl, key, TRUE);
333 // Update <new_item>'s next pointer
335 // There is no need to continue linking in the item if another thread removed it.
// The volatile-qualified access forces a fresh read of the link; another
// thread may have tagged it for removal since it was last read.
336 node_t *old_next = ((volatile node_t *)new_item)->next[level];
337 if (IS_TAGGED(old_next))
338 return DOES_NOT_EXIST; // success
340 // Use a CAS so we do not inadvertently stomp on a mark another thread placed on the item.
341 if (old_next == next || SYNC_CAS(&new_item->next[level], old_next, next) == old_next)
346 return DOES_NOT_EXIST; // success
// sl_remove: remove the item matching <key>. Tags the item's next pointers
// from the top level down — installing the tag at level 0 decides which of
// several racing removers wins — swaps the value to DOES_NOT_EXIST, unlinks
// the item from its predecessors, and defer-frees its memory. Returns the
// removed value, or DOES_NOT_EXIST if no live item matched (or another
// thread's removal won the race).
//
// NOTE(review): this region was damaged in extraction — fused line numbers
// and missing lines; tokens preserved verbatim. Restore from upstream before
// compiling.
349 uint64_t sl_remove (skiplist_t *sl, void *key) {
350 TRACE("s1", "sl_remove: removing item with key %p from skiplist %p", key, sl);
351 node_t *preds[MAX_LEVEL+1];
// n == -1 asks find_preds to fill preds for every level of the found item.
352 node_t *item = find_preds(preds, NULL, -1, sl, key, TRUE);
354 TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the skiplist", 0, 0);
355 return DOES_NOT_EXIST;
358 // Mark and unlink <item> at each level of <sl> from the top down. If multiple threads try to concurrently remove
359 // the same item only one of them should succeed. Marking the bottom level establishes which of them succeeds.
360 for (int level = item->top_level; level > 0; --level) {
362 node_t *old_next = item->next[level];
// Loop until either our tag is installed or another thread's tag is seen.
365 old_next = SYNC_CAS(&item->next[level], next, TAG_VALUE(next));
366 if (IS_TAGGED(old_next)) {
367 TRACE("s2", "sl_remove: %p is already marked for removal by another thread at level %llu", item, level);
370 } while (next != old_next);
372 node_t *pred = preds[level];
373 TRACE("s2", "sl_remove: linking the item's pred %p to the item's successor %p", pred, STRIP_TAG(next));
374 node_t *other = NULL;
// Physically unlink <item> at this level.
375 if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) != item) {
376 TRACE("s1", "sl_remove: unlink failed; pred's link changed from %p to %p", item, other);
377 // If our former predecessor now points past us we know another thread unlinked us. Otherwise, we need
378 // to search for a new set of preds.
380 continue; // <pred> points past <item> to the end of the list; go on to the next level.
383 if (!IS_TAGGED(other)) {
// Compare keys to tell whether <other> is already past <item> (i.e. some
// other thread finished the unlink at this level for us).
384 if (EXPECT_TRUE(sl->key_type == NULL)) {
385 d = (uint64_t)item->key - (uint64_t)other->key;
387 d = sl->key_type->cmp(item->key, other->key);
391 node_t *temp = find_preds(preds, NULL, level, sl, key, TRUE);
393 return DOES_NOT_EXIST; // Another thread removed the item we were targeting.
394 level++; // Redo this level.
// Level 0: installing this tag is the point where this remover "wins".
400 node_t *old_next = item->next[0];
403 old_next = SYNC_CAS(&item->next[0], next, TAG_VALUE(next));
404 if (IS_TAGGED(old_next)) {
405 TRACE("s2", "sl_remove: %p is already marked for removal by another thread at level 0", item, 0);
406 return DOES_NOT_EXIST;
408 } while (next != old_next);
409 TRACE("s1", "sl_remove: marked item %p removed at level 0", item, 0);
411 // Atomically swap out the item's value in case another thread is updating the item while we are
412 // removing it. This establishes which one occurs first, the update or the remove.
413 uint64_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST);
414 TRACE("s2", "sl_remove: replaced item %p's value with DOES_NOT_EXIT", item, 0);
416 node_t *pred = preds[0];
417 TRACE("s2", "sl_remove: linking the item's pred %p to the item's successor %p", pred, STRIP_TAG(next));
418 if (SYNC_CAS(&pred->next[0], item, STRIP_TAG(next))) {
419 TRACE("s2", "sl_remove: unlinked item %p from the skiplist at level 0", item, 0);
420 // The thread that completes the unlink should free the memory.
421 if (sl->key_type != NULL) {
422 nbd_defer_free(item->key);
424 nbd_defer_free(item);
// sl_print: debugging aid. First dumps every non-empty level as a chain of
// node addresses (links tagged for removal are prefixed with '*'), then walks
// the bottom level printing each node with its key, top_level, and per-level
// links. Not synchronized; intended for quiescent lists.
//
// NOTE(review): this region was damaged in extraction — fused line numbers
// and missing lines (the function also appears to continue past the visible
// end). Tokens preserved verbatim; restore from upstream before compiling.
429 void sl_print (skiplist_t *sl) {
430 for (int level = MAX_LEVEL; level >= 0; --level) {
431 node_t *item = sl->head;
432 if (item->next[level] == NULL)
434 printf("(%d) ", level);
437 node_t *next = item->next[level];
438 printf("%s%p ", IS_TAGGED(next) ? "*" : "", item);
439 item = (node_t *)STRIP_TAG(next);
// Second pass: per-item detail, walking the bottom level.
448 node_t *item = sl->head;
451 int is_marked = IS_TAGGED(item->next[0]);
452 printf("%s%p:%p ", is_marked ? "*" : "", item, item->key);
453 if (item != sl->head) {
454 printf("[%d]", item->top_level);
458 for (int level = 1; level <= item->top_level; ++level) {
459 node_t *next = (node_t *)STRIP_TAG(item->next[level]);
// NOTE(review): testing item->next[0] while printing level <level> may be a
// transcription error for item->next[level] — confirm against upstream.
460 is_marked = IS_TAGGED(item->next[0]);
461 printf(" %p%s", next, is_marked ? "*" : "");
462 if (item == sl->head && item->next[level] == NULL)
467 item = (node_t *)STRIP_TAG(item->next[0]);