/*
 * Written by Josh Dybnis and released to the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 *
 * C implementation of Cliff Click's lock-free hash table from
 * http://www.azulsystems.com/events/javaone_2008/2008_CodingNonBlock.pdf
 * http://sourceforge.net/projects/high-scale-lib
 *
 * Note: This code uses synchronous atomic operations because that is all that x86 provides.
 * Every atomic operation is also an implicit full memory barrier. The upshot is that it simplifies
 * the code a bit, but it won't be as fast as it could be on platforms like SPARC that provide
 * weaker operations which would still do the job.
 */

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
// Project-local headers provide TRACE, EXPECT_FALSE, MASK, TAG_VALUE, STRIP_TAG, IS_TAGGED,
// CACHE_LINE_SIZE, DOES_NOT_EXIST, the SYNC_* atomic wrappers, murmur32(), and the nbd_*
// memory allocation routines.
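// For illustration only (an assumption, not necessarily this project's actual definitions): on
// GCC for x86 the SYNC_* wrappers could map directly onto the __sync builtins, each of which
// acts as a full memory barrier:
//
//   #define SYNC_CAS(addr, expected, new)  __sync_val_compare_and_swap(addr, expected, new)
//   #define SYNC_ADD(addr, n)              __sync_add_and_fetch(addr, n)
//   #define SYNC_FETCH_AND_OR(addr, bits)  __sync_fetch_and_or(addr, bits)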
#define COPIED_VALUE (-1)                     // sentinel: this entry's value has moved to the next table
#define TOMBSTONE    STRIP_TAG(COPIED_VALUE)  // sentinel: the value for this key was deleted
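// A worked example (assuming the value tag is the most significant bit, consistent with the
// assert in hti_copy_entry() that COPIED_VALUE == TAG_VALUE(TOMBSTONE)): on 64-bit values,
//
//   TOMBSTONE    = 0x7FFFFFFFFFFFFFFF   (all ones with the tag bit stripped)
//   COPIED_VALUE = 0xFFFFFFFFFFFFFFFF   (TOMBSTONE with the tag bit set, i.e. -1)
//
// so tagging a TOMBSTONE during a copy produces COPIED_VALUE directly, and the entry needs no
// further work.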
#define ENTRIES_PER_BUCKET     (CACHE_LINE_SIZE/sizeof(entry_t))
#define ENTRIES_PER_COPY_CHUNK (ENTRIES_PER_BUCKET * 2)
#define MIN_SCALE              (__builtin_ctz(ENTRIES_PER_BUCKET) + 2) // min 4 buckets
#define MAX_BUCKETS_TO_PROBE   250
#define GET_PTR(x) ((string_t *)((x) & MASK(48))) // the low-order 48 bits are a pointer to a string_t
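// A sketch of the 64-bit key layout this macro assumes (see ht_key_equals() below):
//
//    63              48 47                                0
//   +------------------+----------------------------------+
//   | 16-bit hash frag | 48-bit pointer to a string_t     |
//   +------------------+----------------------------------+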
typedef struct ht_entry {
    uint64_t key;   // 16 bits of hash + 48-bit string_t pointer, or DOES_NOT_EXIST
    uint64_t value; // the caller's value, possibly tagged, or a sentinel
} entry_t;
typedef struct string {
    uint32_t len;  // matches the sizeof(uint32_t) + key_len allocation in hti_compare_and_set()
    char val[];    // <len> bytes of key data
} string_t;
typedef struct hash_table_i {
    volatile entry_t *table;
    hash_table_t *ht; // parent ht
    struct hash_table_i *next;
    struct hash_table_i *next_free;
    unsigned int scale; // log2 of the table size (these field types are assumed from usage below)
    int max_probe;
    int count; // TODO: make these counters distributed
    int num_entries_copied;
    int scan;
} hash_table_i_t;
static int hti_copy_entry
    (hash_table_i_t *ht1, volatile entry_t *e, uint32_t e_key_hash, hash_table_i_t *ht2);
// Choose the next bucket to probe using the high-order bits of <key_hash>.
static inline int get_next_ndx(int old_ndx, uint32_t key_hash, int ht_scale) {
    int incr = (key_hash >> (32 - ht_scale));
    incr += !incr; // If the increment is 0, make it 1.
    return (old_ndx + incr) & MASK(ht_scale);
}
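// Worked example (hypothetical numbers): with ht_scale == 4 (a 16-entry table) and
// key_hash == 0xA0000000, incr == key_hash >> 28 == 10, so successive probes visit
// ndx, ndx+10, ndx+20, ... reduced mod 16 by MASK(4). Different keys that collide on the same
// starting index usually follow different probe sequences.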
// A key is made up of two parts. The 48 low-order bits are a pointer to a null-terminated string.
// The high-order 16 bits are taken from the hash of that string. The bits from the hash are used
// as a quick check to rule out non-equal keys without doing a complete string compare.
static inline int ht_key_equals (uint64_t a, uint32_t b_hash, const char *b_value, uint32_t b_len) {
    if ((b_hash >> 16) != (a >> 48)) // high-order 16 bits are from the hash value
        return FALSE;
    const string_t *a_key = GET_PTR(a);
    return a_key->len == b_len && memcmp(a_key->val, b_value, b_len) == 0;
}
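// Note on the quick check: for non-equal keys with a well-distributed hash, the 16-bit fragments
// match only about 1 time in 2^16, so the full memcmp() above is almost always skipped.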
// Lookup <key> in <hti>.
//
// Return the entry that <key> is in, or if <key> isn't in <hti> return the entry that it would be
// in if it were inserted into <hti>. If there is no room for <key> in <hti> then return NULL, to
// indicate that the caller should look in <hti->next>.
//
// Record if the entry being returned is empty. Otherwise the caller will have to waste time with
// ht_key_equals() to confirm that it did not lose a race to fill an empty entry.
static volatile entry_t *hti_lookup (hash_table_i_t *hti, uint32_t key_hash, const char *key_val, uint32_t key_len, int *is_empty) {
    TRACE("h0", "hti_lookup(key \"%s\" in hti %p)", key_val, hti);
    *is_empty = 0;

    // Probe one cache line at a time
    int ndx = key_hash & MASK(hti->scale); // the first entry to search
    for (int i = 0; i < hti->max_probe; ++i) {

        // The start of the bucket is the first entry in the cache line.
        volatile entry_t *bucket = hti->table + (ndx & ~(ENTRIES_PER_BUCKET-1));

        // Start searching at the indexed entry. Then loop around to the beginning of the cache line.
        for (int j = 0; j < ENTRIES_PER_BUCKET; ++j) {
            volatile entry_t *e = bucket + ((ndx + j) & (ENTRIES_PER_BUCKET-1));

            uint64_t e_key = e->key;
            if (e_key == DOES_NOT_EXIST) {
                TRACE("h0", "hti_lookup: empty entry %p found on probe %d", e, i*ENTRIES_PER_BUCKET+j+1);
                // record that the entry is empty so the caller can avoid an expensive ht_key_equals()
                *is_empty = 1;
                return e;
            }

            if (ht_key_equals(e_key, key_hash, key_val, key_len)) {
                TRACE("h0", "hti_lookup: entry %p found on probe %d", e, i*ENTRIES_PER_BUCKET+j+1);
                TRACE("h0", "hti_lookup: with key \"%s\" value %p", GET_PTR(e_key)->val, e->value);
                return e;
            }
        }

        ndx = get_next_ndx(ndx, key_hash, hti->scale);
    }

    // maximum number of probes exceeded
    TRACE("h0", "hti_lookup: maximum number of probes exceeded returning 0x0", 0, 0);
    return NULL;
}
// Allocate and initialize a hash_table_i_t with 2^<scale> entries.
static hash_table_i_t *hti_alloc (hash_table_t *parent, int scale) {
    // Include enough slop to align the actual table on a cache line boundary
    size_t n = sizeof(hash_table_i_t)
             + sizeof(entry_t) * (1 << scale)
             + (CACHE_LINE_SIZE - 1);
    hash_table_i_t *hti = (hash_table_i_t *)calloc(n, 1);

    // Align the table of hash entries on a cache line boundary.
    hti->table = (entry_t *)(((uint64_t)hti + sizeof(hash_table_i_t) + (CACHE_LINE_SIZE-1))
                          & ~(CACHE_LINE_SIZE-1));
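    // Worked example of the alignment arithmetic (assuming CACHE_LINE_SIZE == 64): if <hti> lands
    // at 0x1008 and sizeof(hash_table_i_t) == 0x38, then 0x1008 + 0x38 + 0x3F == 0x107F, and
    // masking with ~0x3F rounds down to 0x1040, the first cache-line boundary past the header.
    // The (CACHE_LINE_SIZE - 1) slop in <n> guarantees this stays within the allocation.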
    hti->scale = scale;

    // When searching for a key, probe at most 1/4 of the buckets, up to MAX_BUCKETS_TO_PROBE buckets.
    hti->max_probe = ((1 << (hti->scale - 2)) / ENTRIES_PER_BUCKET) + 2;
    if (hti->max_probe > MAX_BUCKETS_TO_PROBE) {
        hti->max_probe = MAX_BUCKETS_TO_PROBE;
    }
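    // Worked example (assuming 64-byte cache lines and 16-byte entries, so ENTRIES_PER_BUCKET == 4):
    // with scale == 10 the table has 1024 entries and max_probe == ((1 << 8) / 4) + 2 == 66 buckets,
    // i.e. at most 264 entries examined, roughly 1/4 of the table as the comment above says.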
    hti->ht = parent;

    assert(hti->scale >= MIN_SCALE && hti->scale < 63); // size must be a power of 2
    assert(sizeof(entry_t) * ENTRIES_PER_BUCKET % CACHE_LINE_SIZE == 0); // divisible into cache lines
    assert((size_t)hti->table % CACHE_LINE_SIZE == 0); // cache aligned

    return hti;
}
// Called when <hti> runs out of room for new keys.
//
// Initiates a copy by creating a larger hash_table_i_t and installing it in <hti->next>.
static void hti_start_copy (hash_table_i_t *hti) {
    TRACE("h0", "hti_start_copy(hti %p hti->next %p)", hti, hti->next);
    if (hti->next != NULL)
        return; // another thread beat us to it

    // heuristics to determine the size of the new table
    uint64_t count = ht_count(hti->ht);
    unsigned int new_scale = hti->scale;
    new_scale += (count > (1 << (new_scale - 2))); // double size if more than 1/4 full
    new_scale += (count > (1 << (new_scale - 2))); // double size again if more than 1/2 full
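    // Worked example: with hti->scale == 10 (1024 entries) and count == 600, the first test
    // (600 > 256) bumps new_scale to 11 and the second (600 > 512) bumps it to 12, so a table
    // that is more than half full quadruples instead of merely doubling.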
    // Allocate the new table and attempt to install it.
    hash_table_i_t *next = hti_alloc(hti->ht, new_scale);
    hash_table_i_t *old_next = SYNC_CAS(&hti->next, NULL, next);
    if (old_next != NULL) {
        TRACE("h0", "hti_start_copy: lost race to install new hti; found %p", old_next, 0);
        // Another thread beat us to it.
        free(next); // hti_alloc() allocated <next> with calloc()
        return;
    }
    TRACE("h0", "hti_start_copy: new hti is %p", next, 0);
}
// Copy the key and value stored in <ht1_e> (which must be an entry in <ht1>) to <ht2>.
//
// Return 1 unless <ht1_e> is already copied (then return 0), so the caller can account for the
// total number of entries left to copy.
static int hti_copy_entry (hash_table_i_t *ht1, volatile entry_t *ht1_e, uint32_t key_hash,
                           hash_table_i_t *ht2) {
    TRACE("h0", "hti_copy_entry(copy entry from %p to %p)", ht1, ht2);
    assert(ht1_e >= ht1->table && ht1_e < ht1->table + (1 << ht1->scale));
    assert(key_hash == 0 || (key_hash >> 16) == (ht1_e->key >> 48));
    uint64_t ht1_e_value = ht1_e->value;
    TRACE("h0", "hti_copy_entry: entry %p current value %p", ht1_e, ht1_e_value);
    if (EXPECT_FALSE(ht1_e_value == COPIED_VALUE))
        return FALSE; // already copied

    // Kill empty entries.
    if (EXPECT_FALSE(ht1_e_value == DOES_NOT_EXIST)) {
        ht1_e_value = SYNC_CAS(&ht1_e->value, DOES_NOT_EXIST, COPIED_VALUE);
        if (ht1_e_value == DOES_NOT_EXIST) {
            TRACE("h0", "hti_copy_entry: old entry killed", 0, 0);
            return TRUE;
        }
        if (ht1_e_value == COPIED_VALUE) {
            TRACE("h0", "hti_copy_entry: lost race to kill empty entry in old hti", 0, 0);
            return FALSE; // another thread beat us to it
        }
        TRACE("h0", "hti_copy_entry: lost race to kill empty entry in old hti; "
                    "the entry is now being used", 0, 0);
    }
    // Tag the value in the old entry to indicate a copy is in progress.
    ht1_e_value = SYNC_FETCH_AND_OR(&ht1_e->value, TAG_VALUE(0));
    TRACE("h0", "hti_copy_entry: tagged the value %p in old entry %p", ht1_e_value, ht1_e);
    if (ht1_e_value == COPIED_VALUE)
        return FALSE; // <value> was already copied by another thread.
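    // Worked example of the tagging step (assuming the tag is the most significant bit, as above):
    // if the entry holds 0x2A, the fetch-and-or atomically changes it to 0x800000000000002A and
    // hands this thread the untagged 0x2A to install in the new table. Writers that see a tagged
    // value know a copy is in progress and must help complete it before updating.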
    // Deleted entries don't need to be installed into the new table, but their keys do need to
    // be freed.
    assert(COPIED_VALUE == TAG_VALUE(TOMBSTONE));
    if (ht1_e_value == TOMBSTONE) {
        nbd_defer_free(GET_PTR(ht1_e->key));
        return TRUE;
    }
    // Install the key in the new table.
    uint64_t key = ht1_e->key;
    string_t *key_string = GET_PTR(key);
    uint64_t value = STRIP_TAG(ht1_e_value);
    TRACE("h0", "hti_copy_entry: key %p is %s", key, key_string->val);

    // We use 0 to indicate that <key_hash> isn't initialized. Occasionally the <key_hash> will
    // really be 0 and we will waste time recomputing it. That is rare enough that it is OK.
    if (key_hash == 0) {
        key_hash = murmur32(key_string->val, key_string->len);
    }
    int is_empty;
    volatile entry_t *ht2_e = hti_lookup(ht2, key_hash, key_string->val, key_string->len, &is_empty);

    // It is possible that there is no room in the new table either.
    if (EXPECT_FALSE(ht2_e == NULL)) {
        hti_start_copy(ht2); // initiate nested copy, if not already started
        return hti_copy_entry(ht1, ht1_e, key_hash, ht2->next); // recursive tail-call
    }
    // An empty entry returned from hti_lookup() means the key is not in the new table yet;
    // otherwise the entry already holds a matching key. Only install the key if the entry is empty.
    if (is_empty) {
        uint64_t old_ht2_e_key = SYNC_CAS(&ht2_e->key, DOES_NOT_EXIST, key);
        if (old_ht2_e_key != DOES_NOT_EXIST) {
            TRACE("h0", "hti_copy_entry: lost race to CAS key %p into new entry; found %p",
                  key, old_ht2_e_key);
            return hti_copy_entry(ht1, ht1_e, key_hash, ht2); // recursive tail-call
        }
    }
    assert(ht_key_equals(ht2_e->key, key_hash, key_string->val, key_string->len));
    TRACE("h0", "hti_copy_entry: key %p installed in new hti %p", key_string->val, ht2);
    // Copy the value to the entry in the new table.
    uint64_t old_ht2_e_value = SYNC_CAS(&ht2_e->value, DOES_NOT_EXIST, value);

    // If there is a nested copy in progress, we might have installed the key into a dead entry.
    if (old_ht2_e_value == COPIED_VALUE)
        return hti_copy_entry(ht1, ht1_e, key_hash, ht2->next); // recursive tail-call

    // Mark the old entry as dead.
    ht1_e->value = COPIED_VALUE;

    // Update the counts if we were the thread that completed the copy.
    if (old_ht2_e_value == DOES_NOT_EXIST) {
        TRACE("h0", "hti_copy_entry: value %p installed in new hti %p", value, ht2);
        SYNC_ADD(&ht1->count, -1);
        SYNC_ADD(&ht2->count, 1);
        return TRUE;
    }
    TRACE("h0", "hti_copy_entry: lost race to CAS value %p in new hti; found %p",
          value, old_ht2_e_value);
    return FALSE; // another thread completed the copy
}
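// A summary of the value-state machine hti_copy_entry() walks (derived from the code above; the
// TAGGED(v) spelling is informal):
//
//   DOES_NOT_EXIST --CAS--------> COPIED_VALUE                       (empty entry killed)
//   TOMBSTONE      --fetch-or---> COPIED_VALUE                       (deleted key, key storage freed)
//   v              --fetch-or---> TAGGED(v) --store--> COPIED_VALUE  (live value moved to <ht2>)
//
// Every path moves monotonically toward COPIED_VALUE, which is why concurrent copiers can race
// freely: at worst a thread discovers the transition already happened and returns FALSE.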
// Compare <expected> with the existing value associated with <key>. If the values match then
// replace the existing value with <new>. If <new> is TOMBSTONE, delete the value associated with
// the key by replacing it with a TOMBSTONE.
//
// Return the previous value associated with <key>, or DOES_NOT_EXIST if <key> is not in the table
// or is associated with a TOMBSTONE. If a copy is in progress and <key> has been copied to the
// next table then return COPIED_VALUE.
//
// NOTE: the returned value matches <expected> iff the set succeeds
//
// Certain values of <expected> have special meaning. If <expected> is HT_EXPECT_EXISTS then any
// real value matches (i.e. not a TOMBSTONE or DOES_NOT_EXIST) as long as <key> is in the table. If
// <expected> is HT_EXPECT_WHATEVER then skip the test entirely.
static uint64_t hti_compare_and_set (hash_table_i_t *hti, uint32_t key_hash, const char *key_val,
                                     uint32_t key_len, uint64_t expected, uint64_t new) {
    TRACE("h0", "hti_compare_and_set(hti %p key \"%s\")", hti, key_val);
    TRACE("h0", "hti_compare_and_set(new value %p; caller expects value %p)", new, expected);

    assert(new != DOES_NOT_EXIST && !IS_TAGGED(new));
    int is_empty;
    volatile entry_t *e = hti_lookup(hti, key_hash, key_val, key_len, &is_empty);

    // There is no room for <key>, grow the table and try again.
    if (EXPECT_FALSE(e == NULL)) {
        hti_start_copy(hti);
        return COPIED_VALUE; // tell the caller to retry in the next table
    }
    // Install <key> in the table if it doesn't exist.
    if (is_empty) {
        TRACE("h0", "hti_compare_and_set: entry %p is empty", e, 0);
        if (expected != HT_EXPECT_WHATEVER && expected != HT_EXPECT_NOT_EXISTS)
            return DOES_NOT_EXIST;

        // No need to do anything, <key> is already deleted.
        if (new == TOMBSTONE)
            return DOES_NOT_EXIST;

        // Allocate <key>.
        string_t *key = nbd_malloc(sizeof(uint32_t) + key_len);
        key->len = key_len;
        memcpy(key->val, key_val, key_len);

        // CAS <key> into the table.
        uint64_t e_key = SYNC_CAS(&e->key, DOES_NOT_EXIST, ((uint64_t)(key_hash >> 16) << 48) | (uint64_t)key);

        // Retry if another thread stole the entry out from under us.
        if (e_key != DOES_NOT_EXIST) {
            TRACE("h0", "hti_compare_and_set: key in entry %p is \"%s\"", e, GET_PTR(e_key)->val);
            TRACE("h0", "hti_compare_and_set: lost race to install key \"%s\" in %p", key->val, e);
            nbd_free(key);
            return hti_compare_and_set(hti, key_hash, key_val, key_len, expected, new); // tail-call
        }
        TRACE("h0", "hti_compare_and_set: installed key \"%s\" in entry %p", key_val, e);
    }
    // If the entry is in the middle of a copy, the copy must be completed first.
    uint64_t e_value = e->value;
    TRACE("h0", "hti_compare_and_set: value in entry %p is %p", e, e_value);
    if (EXPECT_FALSE(IS_TAGGED(e_value))) {
        int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hash_table_i_t *)hti)->next);
        if (did_copy) {
            SYNC_ADD(&hti->num_entries_copied, 1);
        }
        return COPIED_VALUE; // tell the caller to retry in the next table
    }
    // Fail if the old value is not consistent with the caller's expectation.
    int old_existed = (e_value != TOMBSTONE && e_value != DOES_NOT_EXIST);
    if (EXPECT_FALSE(expected != HT_EXPECT_WHATEVER && expected != e_value)) {
        if (EXPECT_FALSE(expected != (old_existed ? HT_EXPECT_EXISTS : HT_EXPECT_NOT_EXISTS))) {
            TRACE("h0", "hti_compare_and_set: value expected by caller for key \"%s\" not found; "
                        "found value %p", key_val, e_value);
            return e_value;
        }
    }
    // CAS the value into the entry. Retry if it fails.
    uint64_t v = SYNC_CAS(&e->value, e_value, new);
    if (EXPECT_FALSE(v != e_value)) {
        TRACE("h0", "hti_compare_and_set: value CAS failed; expected %p found %p", e_value, v);
        return hti_compare_and_set(hti, key_hash, key_val, key_len, expected, new); // recursive tail-call
    }
    // The set succeeded. Adjust the value count.
    if (old_existed && new == TOMBSTONE) {
        SYNC_ADD(&hti->count, -1);
    } else if (!old_existed && new != TOMBSTONE) {
        SYNC_ADD(&hti->count, 1);
    }

    // Return the previous value.
    TRACE("h0", "hti_compare_and_set: CAS succeeded; old value %p new value %p", e_value, new);
    return e_value;
}
static uint64_t hti_get (hash_table_i_t *hti, uint32_t key_hash, const char *key_val, uint32_t key_len) {
    int is_empty;
    volatile entry_t *e = hti_lookup(hti, key_hash, key_val, key_len, &is_empty);

    // When hti_lookup() returns NULL it means we hit the reprobe limit while
    // searching the table. In that case, if a copy is in progress the key
    // might exist in the copy.
    if (EXPECT_FALSE(e == NULL)) {
        if (((volatile hash_table_i_t *)hti)->next != NULL)
            return hti_get(hti->next, key_hash, key_val, key_len); // recursive tail-call
        return DOES_NOT_EXIST;
    }

    if (is_empty)
        return DOES_NOT_EXIST;
    // If the entry is being copied, finish the copy and retry on the next table.
    uint64_t e_value = e->value;
    if (EXPECT_FALSE(IS_TAGGED(e_value))) {
        if (EXPECT_FALSE(e_value != COPIED_VALUE)) {
            int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hash_table_i_t *)hti)->next);
            if (did_copy) {
                SYNC_ADD(&hti->num_entries_copied, 1);
            }
        }
        return hti_get(((volatile hash_table_i_t *)hti)->next, key_hash, key_val, key_len); // tail-call
    }

    return (e_value == TOMBSTONE) ? DOES_NOT_EXIST : e_value;
}
// Get the value associated with <key_val> in <ht>, or DOES_NOT_EXIST if there is none.
uint64_t ht_get (hash_table_t *ht, const char *key_val, uint32_t key_len) {
    return hti_get(*ht, murmur32(key_val, key_len), key_val, key_len);
}
uint64_t ht_compare_and_set (hash_table_t *ht, const char *key_val, uint32_t key_len,
                             uint64_t expected_val, uint64_t new_val) {
    assert(!IS_TAGGED(new_val) && new_val != DOES_NOT_EXIST);

    hash_table_i_t *hti = *ht;

    // Help with an ongoing copy.
    if (EXPECT_FALSE(hti->next != NULL)) {
        volatile entry_t *e;
        int limit;           // locals for the copy loop (types assumed from usage below)
        int num_copied = 0;
        int x = hti->scan;
        // Panic if we've been around the array twice and still haven't finished the copy.
        int panic = (x >= (1 << (hti->scale + 1)));
        if (!panic) {
            limit = ENTRIES_PER_COPY_CHUNK;
            // Reserve some entries for this thread to copy. There is a race condition here because
            // the fetch and add isn't atomic, but that is ok.
            hti->scan = x + ENTRIES_PER_COPY_CHUNK;
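            // Example of the benign race: two threads can both read scan == x and both reserve
            // the chunk [x, x + ENTRIES_PER_COPY_CHUNK). They will copy the same entries, but
            // hti_copy_entry() is idempotent; the loser of each per-entry race just gets FALSE
            // back, so no entry is counted twice in <num_entries_copied>.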
            // <hti->scan> might be larger than the size of the table, if some thread stalls while
            // copying. In that case we just wrap around to the beginning and make another pass
            // through the table.
            e = hti->table + (x & MASK(hti->scale));
        } else {
            // scan the whole table
            limit = (1 << hti->scale);
            e = hti->table;
        }
        // Copy the entries
        for (int i = 0; i < limit; ++i) {
            num_copied += hti_copy_entry(hti, e++, 0, hti->next);
            assert(e <= hti->table + (1 << hti->scale));
        }
        if (num_copied != 0) {
            SYNC_ADD(&hti->num_entries_copied, num_copied);
        }
        // Dispose of fully copied tables.
        if (hti->num_entries_copied == (1 << hti->scale) || panic) {
            if (SYNC_CAS(ht, hti, hti->next) == hti) {
                nbd_defer_free(hti); // other threads may still hold references, so defer the free
            }
        }
    }
    uint64_t old_val;
    uint32_t key_hash = murmur32(key_val, key_len);
    while ((old_val = hti_compare_and_set(hti, key_hash, key_val, key_len, expected_val, new_val))
           == COPIED_VALUE) {
        assert(hti->next);
        hti = hti->next;
    }

    return old_val == TOMBSTONE ? DOES_NOT_EXIST : old_val;
}
// Remove the value in <ht> associated with <key_val>. Returns the value removed, or
// DOES_NOT_EXIST if there was no value for that key.
uint64_t ht_remove (hash_table_t *ht, const char *key_val, uint32_t key_len) {
    hash_table_i_t *hti = *ht;
    uint64_t val;
    uint32_t key_hash = murmur32(key_val, key_len);
    do {
        val = hti_compare_and_set(hti, key_hash, key_val, key_len, HT_EXPECT_WHATEVER, TOMBSTONE);
        if (val != COPIED_VALUE)
            return val == TOMBSTONE ? DOES_NOT_EXIST : val;

        // <key_val> was copied to the next table; retry there.
        assert(hti->next);
        hti = hti->next;
    } while (1);
}
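// Usage sketch of the public API (a hypothetical example; "apple" and 42 are arbitrary, and the
// key length of 5 assumes lengths exclude the terminating NUL):
//
//   hash_table_t *ht = ht_alloc();
//   uint64_t old = ht_compare_and_set(ht, "apple", 5, HT_EXPECT_NOT_EXISTS, 42);
//   assert(old == DOES_NOT_EXIST);            // the insert succeeded: nothing was there before
//   assert(ht_get(ht, "apple", 5) == 42);
//   assert(ht_remove(ht, "apple", 5) == 42);  // returns the removed value
//   ht_free(ht);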
// Returns the number of key-value pairs in <ht>
uint64_t ht_count (hash_table_t *ht) {
    hash_table_i_t *hti = *ht;
    uint64_t count = 0;
    while (hti) {
        count += hti->count;
        hti = hti->next;
    }
    return count;
}
// Allocate and initialize a new hash table.
hash_table_t *ht_alloc (void) {
    hash_table_t *ht = nbd_malloc(sizeof(hash_table_t));
    *ht = (hash_table_i_t *)hti_alloc(ht, MIN_SCALE);
    return ht;
}
// Free <ht> and its internal structures.
void ht_free (hash_table_t *ht) {
    hash_table_i_t *hti = *ht;
    do {
        for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
            assert(hti->table[i].value == COPIED_VALUE || !IS_TAGGED(hti->table[i].value));
            if (hti->table[i].key != DOES_NOT_EXIST) {
                nbd_free(GET_PTR(hti->table[i].key));
            }
        }
        hash_table_i_t *next = hti->next;
        free(hti); // <hti> was allocated with calloc() in hti_alloc()
        hti = next;
    } while (hti);
    nbd_free(ht);
}