2 * Written by Josh Dybnis and released to the public domain, as explained at
3 * http://creativecommons.org/licenses/publicdomain
5 * Implementation of the lock-free skiplist data-structure created by Maurice Herlihy, Yossi Lev,
6 * and Nir Shavit. See Herlihy's and Shavit's book "The Art of Multiprocessor Programming".
7 * http://www.amazon.com/Art-Multiprocessor-Programming-Maurice-Herlihy/dp/0123705916/
9 * See also Keir Fraser's dissertation "Practical Lock Freedom".
10 * www.cl.cam.ac.uk/techreports/UCAM-CL-TR-579.pdf
12 * This code is written for the x86 memory model. The algorithm depends on certain stores and
13 * loads being ordered. Be careful, this code probably won't work correctly on platforms with
14 * weaker memory models if you don't add memory barriers in the right places.
26 // Setting MAX_LEVEL to 0 essentially makes this data structure the Harris-Michael lock-free list
// Choose the top level for a new node. Counting the trailing zero bits of a
// random word yields a geometric distribution -- each higher level is about
// half as likely as the one below it -- which is what keeps the skiplist's
// expected search cost logarithmic.
// NOTE(review): lines are elided from this chunk. The handling of small /
// odd values of <r> (note __builtin_ctz(0) is undefined behavior, and <n>
// can be -1 when r is odd), the clamp of <n> to MAX_LEVEL, and the return
// statement are not visible here -- confirm against the full source.
41 static int random_level (void) {
42 unsigned r = nbd_rand();
// ctz counts trailing zero bits of the random word; presumably an elided
// branch deals with n < 0 / n > MAX_LEVEL before the assert -- TODO confirm.
45 int n = __builtin_ctz(r)-1;
50 assert(n <= MAX_LEVEL);
// Allocate a skiplist node tall enough to hold <level>+1 next links (the
// links are a variable-length array hanging off the end of node_t).
// Keys follow the nstring convention used elsewhere in this codebase:
// key_len == (unsigned)-1 means <key_data> is an integer stored directly in
// item->key behind TAG_VALUE; otherwise the key bytes are copied into a
// freshly allocated nstring via ns_alloc.
// NOTE(review): elided lines -- any zeroing of the new node, the assignment
// of item->value from <value>, and the `return item;` are not visible in
// this chunk; confirm against the full source.
54 node_t *node_alloc (int level, const void *key_data, uint32_t key_len, uint64_t value) {
55 assert(level >= 0 && level <= MAX_LEVEL);
// Size = fixed header plus one next pointer per level, 0..level inclusive.
56 size_t sz = sizeof(node_t) + (level + 1) * sizeof(node_t *);
57 node_t *item = (node_t *)nbd_malloc(sz);
59 // If <key_len> is -1 it indicates <key_data> is an integer and not a pointer
60 item->key = (key_len == (unsigned)-1)
61 ? (void *)TAG_VALUE(key_data)
62 : ns_alloc(key_data, key_len);
64 item->top_level = level;
68 skiplist_t *sl_alloc (void) {
69 skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
70 sl->head = node_alloc(MAX_LEVEL, " ", 0, 0);
71 memset(sl->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
// Core search routine. Walks <sl> from the top level down looking for
// <key_data>/<key_len>, filling preds[]/succs[] (when non-NULL) with the
// predecessor and successor at each of the bottom <n>+1 levels. Returns the
// node whose key matches, or NULL if the key is not present.
// <n> == -1 is the sl_remove variant (see the unsigned-compare note below).
// <help_remove> selects whether tagged (logically removed) nodes found along
// the way are physically unlinked as a courtesy to the remover.
// NOTE(review): this chunk has elided lines -- the declarations of <item>,
// <x>, and <other>, several closing braces, and the break/goto statements
// after the NULL checks are not visible; comments below describe only what
// is shown.
75 static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, const void *key_data, uint32_t key_len, int help_remove) {
76 node_t *pred = sl->head;
78 TRACE("s3", "find_preds: searching for key %p in sl (head is %p)", key_data, pred);
80 int start_level = MAX_LEVEL;
82 // Optimization for small lists. No need to traverse empty higher levels.
// Probe upward with exponentially growing steps until an empty level is
// found, clamping at MAX_LEVEL. Presumably an elided line re-seeds
// start_level to a small constant before this loop (it is MAX_LEVEL just
// above, which would make next[start_level+1] out of bounds) -- TODO confirm.
84 while (pred->next[start_level+1] != NULL) {
85 start_level += start_level - 1;
86 if (EXPECT_FALSE(start_level >= MAX_LEVEL)) {
87 start_level = MAX_LEVEL;
// Never start below the levels the caller asked us to record.
91 if (EXPECT_FALSE(start_level < n)) {
96 // Traverse the levels of <sl> from the top level to the bottom
97 for (int level = start_level; level >= 0; --level) {
98 TRACE("s3", "find_preds: level %llu", level, 0);
99 item = pred->next[level];
// A tagged link out of <pred> means <pred> itself is logically removed;
// the whole search restarts from the head. The retry is a tail call, so it
// behaves like a loop.
100 if (EXPECT_FALSE(IS_TAGGED(item))) {
101 TRACE("s3", "find_preds: pred %p is marked for removal (item %p); retry", pred, item);
102 return find_preds(preds, succs, n, sl, key_data, key_len, help_remove); // retry
// Walk rightward along this level until we reach or pass the key.
104 while (item != NULL) {
105 node_t *next = item->next[level];
106 TRACE("s3", "find_preds: visiting item %p (next %p)", item, next);
107 TRACE("s3", "find_preds: key %p", STRIP_TAG(item->key), item->value);
109 // A tag means an item is logically removed but not physically unlinked yet.
110 while (EXPECT_FALSE(IS_TAGGED(next))) {
// When not helping (fast lookups), just step over removed items without
// touching any links. NOTE(review): the `if (!help_remove)` guard itself is
// on an elided line -- confirm.
112 // Skip over logically removed items.
114 item = (node_t *)STRIP_TAG(item->next);
115 if (EXPECT_FALSE(item == NULL))
117 next = item->next[level];
121 // Unlink logically removed items.
// CAS swings pred's link past <item> to item's (untagged) successor. On
// success we adopt the successor as the new <item> and keep scanning.
123 if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) == item) {
124 item = (node_t *)STRIP_TAG(next);
125 if (EXPECT_FALSE(item == NULL))
127 next = item->next[level];
128 TRACE("s3", "find_preds: unlinked item %p from pred %p", item, pred);
129 TRACE("s3", "find_preds: now item is %p next is %p", item, next);
131 // The thread that completes the unlink should free the memory.
// Only the level-0 unlink frees: that is the last link keeping the node
// reachable, so exactly one thread defers the free.
132 if (level == 0) { nbd_defer_free(other); }
134 TRACE("s3", "find_preds: lost race to unlink from pred %p; its link changed to %p", pred, other);
// Lost the CAS race. If pred's link is now tagged, pred is being removed
// too -- restart from the head. Otherwise resume from whatever the link
// points at now.
135 if (IS_TAGGED(other))
136 return find_preds(preds, succs, n, sl, key_data, key_len, help_remove); // retry
138 if (EXPECT_FALSE(item == NULL))
140 next = item->next[level];
144 if (EXPECT_FALSE(item == NULL))
147 // If we reached the key (or passed where it should be), we found a pred. Save it and continue down.
// <x> is a three-way comparison: tagged keys are inline integers compared by
// subtraction, others go through ns_cmp_raw.
148 x = (IS_TAGGED(item->key))
149 ? (STRIP_TAG(item->key) - (uint64_t)key_data)
150 : ns_cmp_raw(item->key, key_data, key_len);
152 TRACE("s3", "find_preds: found pred %p item %p", pred, item);
160 // The cast to unsigned is for the case when n is -1.
// n == -1 (sl_remove) makes (unsigned)n huge, so preds/succs are recorded
// at every level.
161 if ((unsigned)level <= (unsigned)n) {
// For the remove path, also point the levels above where we started
// searching at the head, so the caller can unlink a tall item completely.
170 if (n == -1 && item != NULL) {
171 for (int level = start_level + 1; level <= item->top_level; ++level) {
172 preds[level] = sl->head;
// x == 0 means the bottom-level scan stopped exactly on the key.
175 return x == 0 ? item : NULL;
178 // Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
179 uint64_t sl_lookup (skiplist_t *sl, const void *key_data, uint32_t key_len) {
180 TRACE("s3", "sl_lookup: searching for key %p in sl %p", key, sl);
181 node_t *item = find_preds(NULL, NULL, 0, sl, key_data, key_len, FALSE);
183 // If we found an <item> matching the <key> return its value.
184 return item != NULL ? item->value : DOES_NOT_EXIST;
187 // Insert the <key> if it doesn't already exist in <sl>
// Returns the existing item's value if the key is already present (the
// pre-allocated node, if any, is freed); otherwise links a new node of
// random height into the list, bottom level first, and continues linking
// the upper levels afterward.
// NOTE(review): this chunk has elided lines -- the declaration of <item>,
// the retry loop around the bottom-level CAS, the `if (next != NULL)` test
// guarding the already-exists branch, and the function's return at the end
// are not visible; comments below describe only what is shown.
188 uint64_t sl_add (skiplist_t *sl, const void *key_data, uint32_t key_len, uint64_t value) {
189 TRACE("s3", "sl_add: inserting key %p value %p", key_data, value);
190 node_t *preds[MAX_LEVEL+1];
191 node_t *nexts[MAX_LEVEL+1];
// The new node's height is chosen up front so find_preds can record
// predecessors for exactly the levels we will link.
193 int n = random_level();
195 node_t *next = find_preds(preds, nexts, n, sl, key_data, key_len, TRUE);
197 // If a node matching <key> already exists in <sl>, return its value.
199 TRACE("s3", "sl_add: there is already an item %p (value %p) with the same key", nexts[0], nexts[0]->value);
// A node allocated on a previous iteration of the (elided) retry loop is
// no longer needed.
200 if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
201 return nexts[0]->value;
204 // First insert <item> into the bottom level.
// Allocate lazily: only the first attempt allocates; retries reuse <item>.
205 if (EXPECT_TRUE(item == NULL)) { item = node_alloc(n, key_data, key_len, value); }
206 node_t *pred = preds[0];
207 item->next[0] = next = nexts[0];
208 TRACE("s3", "sl_add: attempting to insert item between %p and %p", pred, next);
// Pre-point all of item's upper links at the successors found by
// find_preds, BEFORE the node becomes reachable via the level-0 CAS.
209 for (int level = 1; level <= item->top_level; ++level) {
210 item->next[level] = nexts[level];
// Linearization point of a successful insert: splicing into level 0.
212 node_t *other = SYNC_CAS(&pred->next[0], next, item);
214 TRACE("s3", "sl_add: successfully inserted item %p at level 0", item, 0);
// CAS failed: someone changed pred's bottom link; the elided loop retries
// from find_preds.
217 TRACE("s3", "sl_add: failed to change pred's link: expected %p found %p", next, other);
221 // Insert <item> into <sl> from the bottom level up.
222 for (int level = 1; level <= item->top_level; ++level) {
223 node_t *pred = preds[level];
224 node_t *next = nexts[level];
226 TRACE("s3", "sl_add: attempting to insert item between %p and %p", pred, next);
227 node_t *other = SYNC_CAS(&pred->next[level], next, item);
229 TRACE("s3", "sl_add: successfully inserted item %p at level %llu", item, level);
// Failed to link this level; re-run find_preds to get fresh preds/nexts
// and (per the elided control flow) retry this level.
232 TRACE("s3", "sl_add: failed to change pred's link: expected %p found %p", next, other);
233 find_preds(preds, nexts, item->top_level, sl, key_data, key_len, TRUE);
237 // Update <item>'s next pointer
239 // There is no need to continue linking in the item if another thread removed it.
// volatile re-read: another thread may have tagged the link since we
// last looked.
240 node_t *old_next = ((volatile node_t *)item)->next[level];
241 if (IS_TAGGED(old_next))
244 // Use a CAS so we do not inadvertently stomp on a mark another thread placed on the item.
245 if (old_next == next || SYNC_CAS(&item->next[level], old_next, next) == old_next)
// Remove the item matching <key_data>/<key_len> from <sl>.
// Returns the removed item's value, or DOES_NOT_EXIST if the key is absent
// or another thread won the race to remove it. Removal is two-phase: first
// tag (logically remove) every level's next link top-down, then physically
// unlink and defer-free the node.
// NOTE(review): elided lines -- the `if (item == NULL)` test before the
// first TRACE, the closing braces of the marking loop, the loop construct
// around the top-down unlink, and the final `return value;` are not visible
// in this chunk.
253 uint64_t sl_remove (skiplist_t *sl, const void *key_data, uint32_t key_len) {
254 TRACE("s3", "sl_remove: removing item with key %p from sl %p", key_data, sl);
255 node_t *preds[MAX_LEVEL+1];
// n == -1 asks find_preds to record predecessors at EVERY level of the
// found item, which the unlink phase below needs.
256 node_t *item = find_preds(preds, NULL, -1, sl, key_data, key_len, TRUE);
258 TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the sl", 0, 0);
259 return DOES_NOT_EXIST;
262 // Mark <item> removed at each level of <sl> from the top down. This must be atomic. If multiple threads
263 // try to remove the same item only one of them should succeed. Marking the bottom level establishes which of
265 for (int level = item->top_level; level >= 0; --level) {
// Cheap pre-check before the atomic RMW: if the link is already tagged,
// another remover beat us.
266 if (EXPECT_FALSE(IS_TAGGED(item->next[level]))) {
267 TRACE("s3", "sl_remove: %p is already marked for removal by another thread", item, 0);
269 return DOES_NOT_EXIST;
// Atomically set the tag bit; the returned prior value tells us whether
// we were the thread that placed the mark at this level.
272 node_t *next = SYNC_FETCH_AND_OR(&item->next[level], TAG);
273 if (EXPECT_FALSE(IS_TAGGED(next))) {
274 TRACE("s3", "sl_remove: lost race -- %p is already marked for removal by another thread", item, 0);
276 return DOES_NOT_EXIST;
// We own the removal: capture the value before the node can be freed.
281 uint64_t value = item->value;
283 // Unlink <item> from the top down.
284 int level = item->top_level;
286 node_t *pred = preds[level];
287 node_t *next = item->next[level];
288 TRACE("s3", "sl_remove: link item's pred %p to it's successor %p", pred, STRIP_TAG(next));
289 node_t *other = NULL;
// Swing pred past <item> to its untagged successor.
290 if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) != item) {
291 TRACE("s3", "sl_remove: unlink failed; pred's link changed from %p to %p", item, other);
292 // By marking the item earlier, we logically removed it. It is safe to leave the item partially
293 // unlinked. Another thread will finish physically removing it from <sl>.
299 // The thread that completes the unlink should free the memory.
300 nbd_defer_free(item);
304 void sl_print (skiplist_t *sl) {
305 for (int level = MAX_LEVEL; level >= 0; --level) {
306 node_t *item = sl->head;
307 if (item->next[level] == NULL)
309 printf("(%d) ", level);
311 node_t *next = item->next[level];
312 printf("%s%p ", IS_TAGGED(next) ? "*" : "", item);
313 item = (node_t *)STRIP_TAG(next);
320 node_t *item = sl->head;
322 int is_marked = IS_TAGGED(item->next[0]);
324 if (IS_TAGGED(item->key)) {
325 printf("%s%p:%llx ", is_marked ? "*" : "", item, STRIP_TAG(item->key));
327 printf("%s%p:%s ", is_marked ? "*" : "", item, (char *)ns_data(item->key));
329 if (item != sl->head) {
330 printf("[%d]", item->top_level);
334 for (int level = 1; level <= item->top_level; ++level) {
335 node_t *next = (node_t *)STRIP_TAG(item->next[level]);
336 is_marked = IS_TAGGED(item->next[0]);
337 printf(" %p%s", next, is_marked ? "*" : "");
338 if (item == sl->head && item->next[level] == NULL)
343 item = (node_t *)STRIP_TAG(item->next[0]);