2 * Written by Josh Dybnis and released to the public domain, as explained at
3 * http://creativecommons.org/licenses/publicdomain
 * Implementation of the lock-free skiplist data-structure created by Maurice Herlihy, Yossi Lev,
 * and Nir Shavit. See Herlihy's and Shavit's book "The Art of Multiprocessor Programming".
 * http://www.amazon.com/Art-Multiprocessor-Programming-Maurice-Herlihy/dp/0123705916/
 * See also Keir Fraser's dissertation "Practical Lock Freedom".
 * www.cl.cam.ac.uk/techreports/UCAM-CL-TR-579.pdf
 * This code is written for the x86 memory-model. The algorithm depends on certain stores and
 * loads being ordered. Be careful, this code probably won't work correctly on platforms with
 * weaker memory models if you don't add memory barriers in the right places.
26 // Setting MAX_LEVEL to 0 essentially makes this data structure the Harris-Michael lock-free list
// Pick a random height for a new node. Counting the trailing zero bits of a
// random word yields a geometric distribution (each extra level is half as
// likely), which is what a skiplist needs for expected O(log n) searches.
static int random_level (void) {
    unsigned r = nbd_rand();
    // NOTE(review): lines between the rand() call and this one are not visible
    // here; presumably they guard against r == 0 / clamp the result — confirm
    // against the full source.
    int n = __builtin_ctz(r)-1;
    assert(n <= MAX_LEVEL);
// Allocate a skiplist node of height <level> holding a copy of the key and the
// given value. The next-pointer array is allocated inline at the end of the
// node, one slot per level from 0 through <level>. Caller owns the node.
node_t *node_alloc (int level, const void *key_data, uint32_t key_len, uint64_t value) {
    assert(level >= 0 && level <= MAX_LEVEL);
    size_t sz = sizeof(node_t) + (level + 1) * sizeof(node_t *);
    node_t *item = (node_t *)nbd_malloc(sz);
    // ns_alloc copies the key bytes, so the caller's buffer need not outlive the node.
    item->key = ns_alloc(key_data, key_len);
    item->top_level = level;
65 skiplist_t *sl_alloc (void) {
66 skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
67 sl->head = node_alloc(MAX_LEVEL, " ", 0, 0);
68 memset(sl->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
// Search <sl> for the key. On return, preds[i]/succs[i] (when the arrays are
// non-NULL) hold the node before/after the key's position at each level up to
// <n>; n == -1 requests predecessors at every level of the found item. Returns
// the matching node, or NULL if the key is absent. When <help_remove> is set,
// tagged (logically removed) nodes encountered along the way are physically
// unlinked as a side effect.
static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, const void *key_data, uint32_t key_len, int help_remove) {
    node_t *pred = sl->head;
    TRACE("s3", "find_preds: searching for key %p in sl (head is %p)", key_data, pred);

    int start_level = MAX_LEVEL;

    // Optimization for small lists. No need to traverse empty higher levels.
    // NOTE(review): indexing next[start_level+1] with start_level == MAX_LEVEL
    // would read past the head's link array; presumably start_level is lowered
    // first in lines not visible here — confirm against the full source.
    while (pred->next[start_level+1] != NULL) {
        // Grow the starting level geometrically until an empty level is found.
        start_level += start_level - 1;
        if (EXPECT_FALSE(start_level >= MAX_LEVEL)) {
            start_level = MAX_LEVEL;
    // Make sure the search starts at least as high as the caller needs.
    if (EXPECT_FALSE(start_level < n)) {

    // Traverse the levels of <sl> from the top level to the bottom
    for (int level = start_level; level >= 0; --level) {
        TRACE("s3", "find_preds: level %llu", level, 0);
        item = pred->next[level];
        if (EXPECT_FALSE(IS_TAGGED(item))) {
            // <pred>'s own link is tagged: <pred> is being removed. Restart
            // the whole search from the head.
            TRACE("s3", "find_preds: pred %p is marked for removal (item %p); retry", pred, item);
            return find_preds(preds, succs, n, sl, key_data, key_len, help_remove); // retry
        while (item != NULL) {
            node_t *next = item->next[level];
            TRACE("s3", "find_preds: visiting item %p (next %p)", item, next);
            TRACE("s3", "find_preds: key %p", item->key, 0);

            // A tag means an item is logically removed but not physically unlinked yet.
            while (EXPECT_FALSE(IS_TAGGED(next))) {

                // Skip over logically removed items.
                item = (node_t *)STRIP_TAG(item->next);
                if (EXPECT_FALSE(item == NULL))
                next = item->next[level];

                // Unlink logically removed items. The CAS swings <pred>'s link
                // past <item> to <item>'s (untagged) successor.
                if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) == item) {
                    item = (node_t *)STRIP_TAG(next);
                    if (EXPECT_FALSE(item == NULL))
                    next = item->next[level];
                    TRACE("s3", "find_preds: unlinked item %p from pred %p", item, pred);
                    TRACE("s3", "find_preds: now item is %p next is %p", item, next);

                    // The thread that completes the unlink should free the memory.
                    // Freeing is deferred until the bottom-level unlink so no level
                    // still links to the node.
                    if (level == 0) { nbd_defer_free(other); }
                TRACE("s3", "find_preds: lost race to unlink from pred %p; its link changed to %p", pred, other);
                if (IS_TAGGED(other))
                    return find_preds(preds, succs, n, sl, key_data, key_len, help_remove); // retry
                if (EXPECT_FALSE(item == NULL))
                next = item->next[level];

            if (EXPECT_FALSE(item == NULL))

            // If we reached the key (or passed where it should be), we found a pred. Save it and continue down.
            x = ns_cmp_raw(item->key, key_data, key_len);
            TRACE("s3", "find_preds: found pred %p item %p", pred, item);

            // The comparison is unsigned for the case when n is -1.
            if ((unsigned)level <= (unsigned)n) {

    // For a full-height request (n == -1), the head is the predecessor at any
    // of the item's levels above where the search started.
    if (n == -1 && item != NULL) {
        for (int level = start_level + 1; level <= item->top_level; ++level) {
            preds[level] = sl->head;
    return x == 0 ? item : NULL;
173 // Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
174 uint64_t sl_lookup (skiplist_t *sl, const void *key_data, uint32_t key_len) {
175 TRACE("s3", "sl_lookup: searching for key %p in sl %p", key, sl);
176 node_t *item = find_preds(NULL, NULL, 0, sl, key_data, key_len, FALSE);
178 // If we found an <item> matching the <key> return its value.
179 return item != NULL ? item->value : DOES_NOT_EXIST;
// Insert the <key> if it doesn't already exist in <sl>
// Returns the existing value when the key is already present; otherwise links
// a new node in at the bottom level (the linearization point), then at each
// higher level from the bottom up.
uint64_t sl_add (skiplist_t *sl, const void *key_data, uint32_t key_len, uint64_t value) {
    // NOTE(review): TRACE references 'key' but the parameter is 'key_data';
    // this only compiles when tracing is disabled — confirm against full source.
    TRACE("s3", "sl_add: inserting key %p value %p", key, value);
    node_t *preds[MAX_LEVEL+1];
    node_t *nexts[MAX_LEVEL+1];

    // Pick the new node's height up front so find_preds can record
    // predecessors/successors for every level the node will link into.
    int n = random_level();

    node_t *next = find_preds(preds, nexts, n, sl, key_data, key_len, TRUE);

    // If a node matching <key> already exists in <sl>, return its value.
    TRACE("s3", "sl_add: there is already an item %p (value %p) with the same key", nexts[0], nexts[0]->value);
    // Free the speculatively allocated node left over from a retry, if any.
    if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
    return nexts[0]->value;

    // First insert <item> into the bottom level.
    if (EXPECT_TRUE(item == NULL)) { item = node_alloc(n, key_data, key_len, value); }
    node_t *pred = preds[0];
    item->next[0] = next = nexts[0];
    TRACE("s3", "sl_add: attempting to insert item between %p and %p", pred, next);
    // Point the new node at its successors before it becomes reachable.
    for (int level = 1; level <= item->top_level; ++level) {
        item->next[level] = nexts[level];
    // The bottom-level CAS makes the node visible: it is the insert's
    // linearization point.
    node_t *other = SYNC_CAS(&pred->next[0], next, item);
    TRACE("s3", "sl_add: successfully inserted item %p at level 0", item, 0);
    TRACE("s3", "sl_add: failed to change pred's link: expected %p found %p", next, other);

    // Insert <item> into <sl> from the bottom level up.
    for (int level = 1; level <= item->top_level; ++level) {
        node_t *pred = preds[level];
        node_t *next = nexts[level];

        TRACE("s3", "sl_add: attempting to insert item between %p and %p", pred, next);
        node_t *other = SYNC_CAS(&pred->next[level], next, item);
        TRACE("s3", "sl_add: successfully inserted item %p at level %llu", item, level);
        TRACE("s3", "sl_add: failed to change pred's link: expected %p found %p", next, other);
        // Lost the race at this level: recompute predecessors/successors and retry.
        find_preds(preds, nexts, item->top_level, sl, key_data, key_len, TRUE);

        // Update <item>'s next pointer
        // There is no need to continue linking in the item if another thread removed it.
        node_t *old_next = ((volatile node_t *)item)->next[level];
        if (IS_TAGGED(old_next))

        // Use a CAS so we do not inadvertently stomp on a mark another thread placed on the item.
        if (old_next == next || SYNC_CAS(&item->next[level], old_next, next) == old_next)
// Remove the node matching the key from <sl>. Returns the removed node's value,
// or DOES_NOT_EXIST if the key is absent or another thread removed it first.
uint64_t sl_remove (skiplist_t *sl, const void *key_data, uint32_t key_len) {
    TRACE("s3", "sl_remove: removing item with key %p from sl %p", key_data, sl);
    node_t *preds[MAX_LEVEL+1];
    // n == -1 asks find_preds for the predecessor at every level of the item.
    node_t *item = find_preds(preds, NULL, -1, sl, key_data, key_len, TRUE);
    TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the sl", 0, 0);
    return DOES_NOT_EXIST;

    // Mark <item> removed at each level of <sl> from the top down. This must be atomic. If multiple threads
    // try to remove the same item only one of them should succeed. Marking the bottom level establishes which of
    // them it is: the thread whose fetch-and-or first tags the level-0 link wins.
    for (int level = item->top_level; level >= 0; --level) {
        if (EXPECT_FALSE(IS_TAGGED(item->next[level]))) {
            TRACE("s3", "sl_remove: %p is already marked for removal by another thread", item, 0);
            return DOES_NOT_EXIST;
        // Atomically set the tag bit; the returned prior value tells us whether
        // we were the thread that placed it.
        node_t *next = SYNC_FETCH_AND_OR(&item->next[level], TAG);
        if (EXPECT_FALSE(IS_TAGGED(next))) {
            TRACE("s3", "sl_remove: lost race -- %p is already marked for removal by another thread", item, 0);
            return DOES_NOT_EXIST;

    // Read the value after winning the mark race so exactly one remover reports it.
    uint64_t value = item->value;

    // Unlink <item> from the top down.
    int level = item->top_level;
        node_t *pred = preds[level];
        node_t *next = item->next[level];
        TRACE("s3", "sl_remove: link item's pred %p to it's successor %p", pred, STRIP_TAG(next));
        node_t *other = NULL;
        if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) != item) {
            TRACE("s3", "sl_remove: unlink failed; pred's link changed from %p to %p", item, other);
            // By marking the item earlier, we logically removed it. It is safe to leave the item partially
            // unlinked. Another thread will finish physically removing it from <sl>.

    // The thread that completes the unlink should free the memory.
    nbd_defer_free(item);
// Debug printer: dump each non-empty level of <sl> from the top down as a chain
// of node addresses, then walk the bottom level printing each node's key and
// its per-level links. A '*' marks a node whose link is tagged (logically removed).
void sl_print (skiplist_t *sl) {
    for (int level = MAX_LEVEL; level >= 0; --level) {
        node_t *item = sl->head;
        // Skip levels with no nodes at all.
        if (item->next[level] == NULL)
        printf("(%d) ", level);
            node_t *next = item->next[level];
            printf("%s%p ", IS_TAGGED(next) ? "*" : "", item);
            // Strip the tag bit so traversal continues past removed nodes.
            item = (node_t *)STRIP_TAG(next);

    // Bottom-level walk with details: key text and the node's height/links.
    node_t *item = sl->head;
        int is_marked = IS_TAGGED(item->next[0]);
        printf("%s%p:%s ", is_marked ? "*" : "", item, (char *)ns_data(item->key));
        if (item != sl->head) {
            printf("[%d]", item->top_level);
        for (int level = 1; level <= item->top_level; ++level) {
            node_t *next = (node_t *)STRIP_TAG(item->next[level]);
            // NOTE(review): this re-checks next[0] rather than next[level];
            // possibly intentional (the bottom-level mark defines removal),
            // but confirm against the full source.
            is_marked = IS_TAGGED(item->next[0]);
            printf(" %p%s", next, is_marked ? "*" : "");
            if (item == sl->head && item->next[level] == NULL)
        // Advance along the bottom level, stripping any removal tag.
        item = (node_t *)STRIP_TAG(item->next[0]);