2 * Written by Josh Dybnis and released to the public domain, as explained at
3 * http://creativecommons.org/licenses/publicdomain
5 * Implementation of the lock-free skiplist data-structure created by Maurice Herlihy, Yossi Lev,
6 * and Nir Shavit. See Herlihy's and Shavit's book "The Art of Multiprocessor Programming".
7 * http://www.amazon.com/Art-Multiprocessor-Programming-Maurice-Herlihy/dp/0123705916/
9 * See also Keir Fraser's dissertation "Practical Lock Freedom".
10 * www.cl.cam.ac.uk/techreports/UCAM-CL-TR-579.pdf
12 * This code is written for the x86 memory-model. The algorithm depends on certain stores and
13 * loads being ordered. Be careful, this code probably won't work correctly on platforms with
14 * weaker memory models if you don't add memory barriers in the right places.
26 // Setting MAX_LEVEL to 0 essentially makes this data structure the Harris-Michael lock-free list
// Choose a random top level for a new node. Trailing-zero counting of a
// random word yields a geometric distribution: each successive level is
// about half as likely as the one below, which keeps the skiplist balanced
// in expectation.
// NOTE(review): the original line numbers jump (42->46, 46->48), so some
// statements and the function's return are not visible in this listing.
41 static int random_level (void) {
42 unsigned r = nbd_rand();
// Force a bit on at position MAX_LEVEL+1 so that __builtin_ctz() below is
// never applied to 0 (undefined behavior) and the computed level can never
// exceed MAX_LEVEL.
46 r |= 1 << (MAX_LEVEL+1);
// Number of trailing zero bits (minus one) is the geometric variate.
48 int n = __builtin_ctz(r)-1;
49 assert(n <= MAX_LEVEL);
// Allocate a skiplist node with <level>+1 next pointers (one per level it
// participates in). The next-pointer array is sized into the same
// allocation as the node itself.
// NOTE(review): lines are missing from this listing (57, 62, 64-66); the
// assignment of <value> and the return statement are not visible here.
53 node_t *node_alloc (int level, const void *key_data, uint32_t key_len, uint64_t value) {
54 assert(level >= 0 && level <= MAX_LEVEL);
55 size_t sz = sizeof(node_t) + (level + 1) * sizeof(node_t *);
56 node_t *item = (node_t *)nbd_malloc(sz);
58 // If <key_len> is -1 it indicates <key_data> is an integer and not a pointer
// Integer keys are stored inline, tagged so they can be told apart from
// pointer keys; string keys are copied into a separately allocated buffer.
59 item->key = (key_len == (unsigned)-1)
60 ? (void *)TAG_VALUE(key_data)
61 : ns_alloc(key_data, key_len);
63 item->top_level = level;
// Allocate an empty skiplist. The head is a sentinel node of maximum height
// whose next pointers are all NULL.
// NOTE(review): the memset sizes the array with sizeof(skiplist_t *) but
// head->next holds node_t pointers -- the sizes coincide on real targets,
// but sizeof(node_t *) is what is meant; confirm and fix upstream.
// NOTE(review): the return statement (original lines 71-72) is not visible
// in this listing.
67 skiplist_t *sl_alloc (void) {
68 skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
69 sl->head = node_alloc(MAX_LEVEL, " ", 0, 0);
70 memset(sl->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
// Core search routine. Walks <sl> from the top level down looking for
// <key_data>, filling <preds>/<succs> (when non-NULL) with the predecessor
// and successor at each of the bottom <n>+1 levels. Returns the matching
// node, or NULL if no node with an equal key exists (see the final return).
// Along the way it may help unlink logically removed (tagged) nodes when
// <help_remove> is set. By convention n == -1 is used by the removal path:
// it requests predecessors at every level of the found item (see the fixup
// loop near the end). Retries are expressed as tail-recursive calls.
// NOTE(review): many original lines are missing from this listing (76, 78,
// 80-82, 87-94, 102, 107, 110, 112, 115, 117-119, 121, 125, 129, 132, 136,
// 138, 140-145, 150, 152-158, 161-168, 172-173); the declarations of
// <item>, <x>, and <other>, several break/goto statements, and the
// pred-advance logic are not visible here.
74 static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, const void *key_data, uint32_t key_len, int help_remove) {
75 node_t *pred = sl->head;
77 TRACE("s3", "find_preds: searching for key %p in sl (head is %p)", key_data, pred);
79 int start_level = MAX_LEVEL;
81 // Optimization for small lists. No need to traverse empty higher levels.
// NOTE(review): with start_level initialized to MAX_LEVEL (visible above),
// this growth loop looks inconsistent -- next[start_level+1] indexes past
// the head's array and "start_level += start_level - 1" only makes sense
// growing from a small seed. Missing lines (78, 80-82) probably held the
// real initialization; verify against the full source.
83 while (pred->next[start_level+1] != NULL) {
84 start_level += start_level - 1;
85 if (EXPECT_FALSE(start_level >= MAX_LEVEL)) {
86 start_level = MAX_LEVEL;
90 if (EXPECT_FALSE(start_level < n)) {
95 // Traverse the levels of <sl> from the top level to the bottom
96 for (int level = start_level; level >= 0; --level) {
97 TRACE("s3", "find_preds: level %llu", level, 0)
98 item = pred->next[level];
// A tagged link out of <pred> means <pred> itself is being removed; the
// whole search must restart from the head.
99 if (EXPECT_FALSE(IS_TAGGED(item))) {
100 TRACE("s3", "find_preds: pred %p is marked for removal (item %p); retry", pred, item);
101 return find_preds(preds, succs, n, sl, key_data, key_len, help_remove); // retry
103 while (item != NULL) {
104 node_t *next = item->next[level];
105 TRACE("s3", "find_preds: visiting item %p (next %p)", item, next);
106 TRACE("s3", "find_preds: key %p", STRIP_TAG(item->key), item->value);
108 // A tag means an item is logically removed but not physically unlinked yet.
109 while (EXPECT_FALSE(IS_TAGGED(next))) {
111 // Skip over logically removed items.
// NOTE(review): item->next here is indexed without [level] in this
// listing -- likely next[level] in the full source; confirm.
113 item = (node_t *)STRIP_TAG(item->next);
114 if (EXPECT_FALSE(item == NULL))
116 next = item->next[level];
120 // Unlink logically removed items.
// CAS pred's link past the marked node; only the winner of this race is
// responsible for freeing it (below).
122 if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) == item) {
123 item = (node_t *)STRIP_TAG(next);
124 if (EXPECT_FALSE(item == NULL))
126 next = item->next[level];
127 TRACE("s3", "find_preds: unlinked item %p from pred %p", item, pred);
128 TRACE("s3", "find_preds: now item is %p next is %p", item, next);
130 // The thread that completes the unlink should free the memory.
// Only defer the free once, when the bottom-level link is removed; higher
// levels of the same node share the one allocation.
131 if (level == 0) { nbd_defer_free(other); }
133 TRACE("s3", "find_preds: lost race to unlink from pred %p; its link changed to %p", pred, other);
// If pred's link became tagged, pred is being removed too -- restart.
134 if (IS_TAGGED(other))
135 return find_preds(preds, succs, n, sl, key_data, key_len, help_remove); // retry
137 if (EXPECT_FALSE(item == NULL))
139 next = item->next[level];
143 if (EXPECT_FALSE(item == NULL))
146 // If we reached the key (or passed where it should be), we found a pred. Save it and continue down.
// Tagged keys are inline integers: compare by subtraction. Otherwise
// compare the stored key bytes against the caller's key.
147 x = (IS_TAGGED(item->key))
148 ? (STRIP_TAG(item->key) - (uint64_t)key_data)
149 : ns_cmp_raw(item->key, key_data, key_len);
151 TRACE("s3", "find_preds: found pred %p item %p", pred, item);
159 // The cast to unsigned is for the case when n is -1.
160 if ((unsigned)level <= (unsigned)n) {
// Removal path (n == -1): if the found item is taller than where the search
// started, its upper levels can only hang off the head sentinel.
169 if (n == -1 && item != NULL) {
170 for (int level = start_level + 1; level <= item->top_level; ++level) {
171 preds[level] = sl->head;
// x is the last key comparison: zero means an exact match was found.
174 return x == 0 ? item : NULL;
177 // Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
// Look up <key_data> and return its value, or DOES_NOT_EXIST if absent.
// Passes help_remove=FALSE so the read path never does unlink CASes.
// NOTE(review): the TRACE below references "key", but the parameter is
// named key_data -- this only compiles when TRACE expands to nothing;
// should be key_data.
178 uint64_t sl_lookup (skiplist_t *sl, const void *key_data, uint32_t key_len) {
179 TRACE("s3", "sl_lookup: searching for key %p in sl %p", key, sl);
// n=0: no predecessor/successor arrays are needed, only the match itself.
180 node_t *item = find_preds(NULL, NULL, 0, sl, key_data, key_len, FALSE);
182 // If we found an <item> matching the <key> return its value.
183 return item != NULL ? item->value : DOES_NOT_EXIST;
186 // Insert the <key> if it doesn't already exist in <sl>
// Insert <key_data>/<value> into <sl> if the key is not already present.
// If it is present, the existing value is returned and no insert happens.
// The node is linked into the bottom level first (that CAS is the
// linearization point), then into each higher level from the bottom up.
// NOTE(review): many original lines are missing from this listing (191,
// 193, 195, 197, 201-202, 210, 212, 214-215, 217-219, 224, 227, 229-230,
// 233-235, 237, 241-242, 245-251); the declaration of <item>, the
// match-check condition guarding the early return, retry loops, and the
// final return are not visible here.
187 uint64_t sl_add (skiplist_t *sl, const void *key_data, uint32_t key_len, uint64_t value) {
188 TRACE("s3", "sl_add: inserting key %p value %p", key_data, value);
189 node_t *preds[MAX_LEVEL+1];
190 node_t *nexts[MAX_LEVEL+1];
// Pick the new node's height up front so find_preds can fill exactly the
// levels we will link into.
192 int n = random_level();
194 node_t *next = find_preds(preds, nexts, n, sl, key_data, key_len, TRUE);
196 // If a node matching <key> already exists in <sl>, return its value.
198 TRACE("s3", "sl_add: there is already an item %p (value %p) with the same key", nexts[0], nexts[0]->value);
// A node allocated on a previous iteration is no longer needed.
199 if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
200 return nexts[0]->value;
203 // First insert <item> into the bottom level.
204 if (EXPECT_TRUE(item == NULL)) { item = node_alloc(n, key_data, key_len, value); }
205 node_t *pred = preds[0];
206 item->next[0] = next = nexts[0];
207 TRACE("s3", "sl_add: attempting to insert item between %p and %p", pred, next);
// Pre-point every upper level at its successor before the node becomes
// reachable via the bottom-level CAS below.
208 for (int level = 1; level <= item->top_level; ++level) {
209 item->next[level] = nexts[level];
// Linearization point: the node is in the list once this CAS succeeds.
211 node_t *other = SYNC_CAS(&pred->next[0], next, item);
213 TRACE("s3", "sl_add: successfully inserted item %p at level 0", item, 0);
216 TRACE("s3", "sl_add: failed to change pred's link: expected %p found %p", next, other);
220 // Insert <item> into <sl> from the bottom level up.
221 for (int level = 1; level <= item->top_level; ++level) {
222 node_t *pred = preds[level];
223 node_t *next = nexts[level];
225 TRACE("s3", "sl_add: attempting to insert item between %p and %p", pred, next);
226 node_t *other = SYNC_CAS(&pred->next[level], next, item);
228 TRACE("s3", "sl_add: successfully inserted item %p at level %llu", item, level);
231 TRACE("s3", "sl_add: failed to change pred's link: expected %p found %p", next, other);
// Re-search to refresh preds/nexts for this node's full height, then retry.
232 find_preds(preds, nexts, item->top_level, sl, key_data, key_len, TRUE);
236 // Update <item>'s next pointer
238 // There is no need to continue linking in the item if another thread removed it.
// volatile read: re-fetch the link each retry instead of using a cached value.
239 node_t *old_next = ((volatile node_t *)item)->next[level];
240 if (IS_TAGGED(old_next))
243 // Use a CAS so we do not inadvertently stomp on a mark another thread placed on the item.
244 if (old_next == next || SYNC_CAS(&item->next[level], old_next, next) == old_next)
// Remove the node matching <key_data> from <sl>. Returns the removed
// node's value, or DOES_NOT_EXIST if no such key is present or another
// thread won the race to remove it. Removal is two-phase: first mark
// (tag) the node's next pointers top-down -- the bottom-level mark decides
// which of several racing removers logically owns the removal -- then
// physically unlink it from each level and defer-free the memory.
// NOTE(review): original lines are missing from this listing (256, 259-260,
// 263, 267, 269-270, 274, 276-279, 281, 284, 293-297, 300-302); the
// NULL-check guarding the first early return, loop braces, the unlink
// loop's decrement, and the final "return value" are not visible here.
252 uint64_t sl_remove (skiplist_t *sl, const void *key_data, uint32_t key_len) {
253 TRACE("s3", "sl_remove: removing item with key %p from sl %p", key_data, sl);
254 node_t *preds[MAX_LEVEL+1];
// n == -1 asks find_preds for predecessors at every level of the found item.
255 node_t *item = find_preds(preds, NULL, -1, sl, key_data, key_len, TRUE);
257 TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the sl", 0, 0);
258 return DOES_NOT_EXIST;
261 // Mark <item> removed at each level of <sl> from the top down. This must be atomic. If multiple threads
262 // try to remove the same item only one of them should succeed. Marking the bottom level establishes which of
264 for (int level = item->top_level; level >= 0; --level) {
// Cheap pre-check before the atomic RMW: someone already marked this level.
265 if (EXPECT_FALSE(IS_TAGGED(item->next[level]))) {
266 TRACE("s3", "sl_remove: %p is already marked for removal by another thread", item, 0);
268 return DOES_NOT_EXIST;
// Atomically set the tag bit; the returned old value tells us whether we
// set it first or lost the race.
271 node_t *next = SYNC_FETCH_AND_OR(&item->next[level], TAG);
272 if (EXPECT_FALSE(IS_TAGGED(next))) {
273 TRACE("s3", "sl_remove: lost race -- %p is already marked for removal by another thread", item, 0);
275 return DOES_NOT_EXIST;
// Read the value after winning all the marks, before the node is unlinked.
280 uint64_t value = item->value;
282 // Unlink <item> from the top down.
283 int level = item->top_level;
285 node_t *pred = preds[level];
286 node_t *next = item->next[level];
287 TRACE("s3", "sl_remove: link item's pred %p to it's successor %p", pred, STRIP_TAG(next));
288 node_t *other = NULL;
289 if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) != item) {
290 TRACE("s3", "sl_remove: unlink failed; pred's link changed from %p to %p", item, other);
291 // By marking the item earlier, we logically removed it. It is safe to leave the item partially
292 // unlinked. Another thread will finish physically removing it from <sl>.
298 // The thread that completes the unlink should free the memory.
// Deferred (epoch/RCU-style) so concurrent readers still traversing the
// node do not see freed memory.
299 nbd_defer_free(item);
// Debug dump of <sl> to stdout. First pass: one line per non-empty level,
// printing each node's address ("*" prefix marks a tagged/removed link).
// Second pass: walk the bottom level printing each node's key, height, and
// its next pointer at every level.
// NOTE(review): original lines are missing from this listing (307, 309,
// 313-318, 320, 322, 325, 327, 330-332, 338-341, 343+); loop braces, the
// bottom-level loop header, and the end of the function are not visible
// here -- this block runs past the end of the provided chunk.
303 void sl_print (skiplist_t *sl) {
304 for (int level = MAX_LEVEL; level >= 0; --level) {
305 node_t *item = sl->head;
// Skip levels with no nodes at all.
306 if (item->next[level] == NULL)
308 printf("(%d) ", level);
310 node_t *next = item->next[level];
311 printf("%s%p ", IS_TAGGED(next) ? "*" : "", item);
// Strip the mark bit before following the pointer.
312 item = (node_t *)STRIP_TAG(next);
// Second pass: detailed per-node dump along the bottom level.
319 node_t *item = sl->head;
321 int is_marked = IS_TAGGED(item->next[0]);
// Tagged keys are inline integers; otherwise print the key's string data.
323 if (IS_TAGGED(item->key)) {
324 printf("%s%p:%llx ", is_marked ? "*" : "", item, STRIP_TAG(item->key));
326 printf("%s%p:%s ", is_marked ? "*" : "", item, (char *)ns_data(item->key));
328 if (item != sl->head) {
329 printf("[%d]", item->top_level);
333 for (int level = 1; level <= item->top_level; ++level) {
334 node_t *next = (node_t *)STRIP_TAG(item->next[level]);
// NOTE(review): this re-reads next[0]'s mark for every level -- probably
// meant to test next[level]; confirm against the full source.
335 is_marked = IS_TAGGED(item->next[0]);
336 printf(" %p%s", next, is_marked ? "*" : "");
337 if (item == sl->head && item->next[level] == NULL)
342 item = (node_t *)STRIP_TAG(item->next[0]);