/*
 * Written by Josh Dybnis and released to the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 *
 * C implementation of Cliff Click's lock-free hash table from
 * http://www.azulsystems.com/events/javaone_2008/2008_CodingNonBlock.pdf
 * http://sourceforge.net/projects/high-scale-lib
 *
 * Note: This code uses synchronous atomic operations because that is all that x86 provides.
 * Every atomic operation is also an implicit full memory barrier. The upshot is that it simplifies
 * the code a bit, but it won't be as fast as it could be on platforms that provide weaker
 * operations like unfenced CAS which would still do the job.
 *
 * 11Feb09 - Bug fix in ht_iter_next() from Rui Ueyama
 */
#include <stdio.h>
#include "common.h"
#include "murmur.h"
#include "mem.h"
#include "rcu.h"
#include "hashtable.h"
#ifndef NBD32
#define GET_PTR(x) ((void *)((x) & MASK(48))) // the low-order 48 bits are a pointer to a nstring_t
#else
#define GET_PTR(x) ((void *)(x))
#endif
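// Sketch of the 64-bit key word used below (see hti_lookup and hti_cas): the high-order 16 bits
// hold bits taken from the key's hash and act as a cheap filter, while the low-order 48 bits hold
// the actual pointer.
//
//    63            48 47                                             0
//   +----------------+-----------------------------------------------+
//   | key_hash >> 16 |           pointer to the key's data           |
//   +----------------+-----------------------------------------------+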
typedef struct entry {
    map_key_t key;
    map_val_t val;
} entry_t;

typedef struct hti {
    volatile entry_t *table;
    hashtable_t *ht; // parent ht
    struct hti *next;
#ifdef USE_SYSTEM_MALLOC
    void *unaligned_table_ptr; // system malloc doesn't guarantee cache-line alignment
#endif
    size_t count; // TODO: make these counters distributed
    size_t key_count;
    size_t copy_scan;
    size_t num_entries_copied;
    int probe;
    int ref_count;
    uint8_t scale;
} hti_t;

struct ht {
    hti_t *hti; // current head of the list of tables
    const datatype_t *key_type;
    uint32_t hti_copies;
    double density;
    int probe;
};
static const map_val_t COPIED_VALUE  = TAG_VALUE(DOES_NOT_EXIST, TAG1);
static const map_val_t TOMBSTONE     = STRIP_TAG(-1, TAG1);

static const unsigned ENTRIES_PER_BUCKET     = CACHE_LINE_SIZE/sizeof(entry_t);
static const unsigned ENTRIES_PER_COPY_CHUNK = CACHE_LINE_SIZE/sizeof(entry_t)*2;
static const unsigned MIN_SCALE              = 4; // min 16 entries (4 buckets)
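// For illustration only (these numbers are not asserted here): with a 64-byte cache line and a
// 16-byte entry_t (8-byte key + 8-byte value), ENTRIES_PER_BUCKET is 4 and ENTRIES_PER_COPY_CHUNK
// is 8, so MIN_SCALE = 4 gives 16 entries spread over 4 single-cache-line buckets.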
static int hti_copy_entry (hti_t *ht1, volatile entry_t *ent, uint32_t ent_key_hash, hti_t *ht2);
// Choose the next bucket to probe using the high-order bits of <key_hash>.
static inline int get_next_ndx(int old_ndx, uint32_t key_hash, int ht_scale) {
#if 1
    int incr = (key_hash >> (32 - ht_scale));
    if (incr < ENTRIES_PER_BUCKET) { incr += ENTRIES_PER_BUCKET; }
    return (old_ndx + incr) & MASK(ht_scale);
#else
    return (old_ndx + ENTRIES_PER_BUCKET) & MASK(ht_scale);
#endif
}
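// Worked example (illustrative values): with ht_scale = 10 and 4 entries per bucket,
// key_hash = 0x87654321 gives incr = 0x87654321 >> 22 = 541, which is >= 4 so it is used as-is;
// from old_ndx = 7 the next probe lands at (7 + 541) & 0x3FF = 548. Deriving the increment from
// the hash's high bits makes the probe sequences of colliding keys diverge instead of marching
// through the same linear chain.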
// Lookup <key> in <hti>.
//
// Return the entry that <key> is in, or if <key> isn't in <hti> return the entry that it would be
// in if it were inserted into <hti>. If there is no room for <key> in <hti> then return NULL, to
// indicate that the caller should look in <hti->next>.
//
// Record if the entry being returned is empty. Otherwise the caller will have to waste time
// re-comparing the keys to confirm that it did not lose a race to fill an empty entry.
static volatile entry_t *hti_lookup (hti_t *hti, map_key_t key, uint32_t key_hash, int *is_empty) {
    TRACE("h2", "hti_lookup(key %p in hti %p)", key, hti);
    *is_empty = 0;

    // Probe one cache line at a time
    int ndx = key_hash & MASK(hti->scale); // the first entry to search
    for (int i = 0; i < hti->probe; ++i) {

        // The start of the bucket is the first entry in the cache line.
        volatile entry_t *bucket = hti->table + (ndx & ~(ENTRIES_PER_BUCKET-1));

        // Start searching at the indexed entry. Then loop around to the beginning of the cache line.
        for (int j = 0; j < ENTRIES_PER_BUCKET; ++j) {
            volatile entry_t *ent = bucket + ((ndx + j) & (ENTRIES_PER_BUCKET-1));

            map_key_t ent_key = ent->key;
            if (ent_key == DOES_NOT_EXIST) {
                TRACE("h1", "hti_lookup: entry %p for key %p is empty", ent,
                            (hti->ht->key_type == NULL) ? (void *)key : GET_PTR(key));
                *is_empty = 1; // indicate an empty entry so the caller avoids an expensive key compare
                return ent;
            }
            // Compare <key> with the key in the entry.
            if (EXPECT_TRUE(hti->ht->key_type == NULL)) {
                // fast path for integer keys
                if (ent_key == key) {
                    TRACE("h1", "hti_lookup: found entry %p with key %p", ent, ent_key);
                    return ent;
                }
            } else {
                // The key in <ent> is made up of two parts. The 48 low-order bits are a pointer. The
                // high-order 16 bits are taken from the hash. The bits from the hash are used as a
                // quick check to rule out non-equal keys without doing a complete compare.
                if ((key_hash >> 16) == (ent_key >> 48)) {
                    if (hti->ht->key_type->cmp(GET_PTR(ent_key), (void *)key) == 0) {
                        TRACE("h1", "hti_lookup: found entry %p with key %p", ent, GET_PTR(ent_key));
                        return ent;
                    }
                }
            }
        }
        ndx = get_next_ndx(ndx, key_hash, hti->scale);
    }

    // maximum number of probes exceeded
    TRACE("h1", "hti_lookup: maximum number of probes exceeded returning 0x0", 0, 0);
    return NULL;
}
// Allocate and initialize a hti_t with 2^<scale> entries.
static hti_t *hti_alloc (hashtable_t *parent, int scale) {
    hti_t *hti = (hti_t *)nbd_malloc(sizeof(hti_t));
    memset(hti, 0, sizeof(hti_t));
    hti->scale = scale;

    size_t sz = sizeof(entry_t) * (1ULL << scale);
#ifdef USE_SYSTEM_MALLOC
    hti->unaligned_table_ptr = nbd_malloc(sz + CACHE_LINE_SIZE - 1);
    hti->table = (void *)(((size_t)hti->unaligned_table_ptr + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1));
#else
    hti->table = nbd_malloc(sz);
#endif
    memset((void *)hti->table, 0, sz);
    hti->probe = (int)(hti->scale * 1.5) + 2;
    int quarter = (1ULL << (hti->scale - 2)) / ENTRIES_PER_BUCKET;
    if (hti->probe > quarter && quarter > 4) {
        // When searching for a key, probe at most a quarter of the buckets before giving up.
        hti->probe = quarter;
    }
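    // Worked example (illustrative): at scale 10 with 4 entries per bucket the default limit is
    // (int)(10 * 1.5) + 2 = 17 probes, and quarter = (1 << 8) / 4 = 64, so the default stands.
    // At scale 4, quarter = (1 << 2) / 4 = 1 and the "quarter > 4" guard keeps the default of 8
    // probes instead of clamping to a uselessly small limit.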
    hti->ref_count = 1; // one for the parent

    assert(hti->scale >= MIN_SCALE && hti->scale < 63); // size is 2^scale; scale must be in range
    assert(sizeof(entry_t) * ENTRIES_PER_BUCKET % CACHE_LINE_SIZE == 0); // buckets evenly fill cache lines
    assert((size_t)hti->table % CACHE_LINE_SIZE == 0); // cache aligned

    hti->ht = parent;
    return hti;
}
// Called when <hti> runs out of room for new keys.
//
// Initiates a copy by creating a larger hti_t and installing it in <hti->next>.
static void hti_start_copy (hti_t *hti) {
    TRACE("h0", "hti_start_copy(hti %p scale %llu)", hti, hti->scale);
    // heuristics to determine the size of the new table
    size_t count = ht_count(hti->ht);
    unsigned int new_scale = hti->scale;
    // Double the size if the table is more than 1/2 full, or if the installed keys (live + dead)
    // exceed 3/8 of the capacity.
    new_scale += (count > (1ULL << (hti->scale - 1))) ||
                 (hti->key_count > (1ULL << (hti->scale - 2)) + (1ULL << (hti->scale - 3)));
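    // Worked example (illustrative): at scale 10 the table holds 1024 entries, so the copy
    // doubles to scale 11 when count > 512 live values or when key_count > 256 + 128 = 384
    // installed keys; otherwise the new table keeps the same scale and the copy just clears
    // out tombstones.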
    // Allocate the new table and attempt to install it.
    hti_t *next = hti_alloc(hti->ht, new_scale);
    hti_t *old_next = SYNC_CAS(&hti->next, NULL, next);
    if (old_next != NULL) {
        // Another thread beat us to it.
        TRACE("h0", "hti_start_copy: lost race to install new hti; found %p", old_next, 0);
#ifdef USE_SYSTEM_MALLOC
        nbd_free(next->unaligned_table_ptr);
#else
        nbd_free((void *)next->table);
#endif
        nbd_free(next);
        return;
    }
    TRACE("h0", "hti_start_copy: new hti %p scale %llu", next, next->scale);
    SYNC_ADD(&hti->ht->hti_copies, 1);
    hti->ht->density = (double)hti->key_count / (1ULL << hti->scale) * 100;
    hti->ht->probe = hti->probe;
}
// Copy the key and value stored in <ht1_ent> (which must be an entry in <ht1>) to <ht2>.
//
// Return 1 unless <ht1_ent> is already copied (then return 0), so the caller can account for the
// total number of entries left to copy.
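//
// The copy of a single entry proceeds in individually atomic steps, so any thread can pick up
// where another left off: tag the old entry's value with TAG1 so no new value can be installed
// in it, install the key in <ht2>, CAS the untagged value into the new entry, then overwrite the
// old value with COPIED_VALUE to mark the old entry dead.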
static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_hash, hti_t *ht2) {
    TRACE("h2", "hti_copy_entry: entry %p to table %p", ht1_ent, ht2);
    assert(ht1_ent >= ht1->table && ht1_ent < ht1->table + (1ULL << ht1->scale));
#ifndef NBD32
    assert(key_hash == 0 || ht1->ht->key_type == NULL || (key_hash >> 16) == (ht1_ent->key >> 48));
#endif

    map_val_t ht1_ent_val = ht1_ent->val;
    if (EXPECT_FALSE(ht1_ent_val == COPIED_VALUE || ht1_ent_val == TAG_VALUE(TOMBSTONE, TAG1))) {
        TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_ent, ht2);
        return FALSE; // already copied
    }

    // Kill empty entries.
    if (EXPECT_FALSE(ht1_ent_val == DOES_NOT_EXIST)) {
        ht1_ent_val = SYNC_CAS(&ht1_ent->val, DOES_NOT_EXIST, COPIED_VALUE);
        if (ht1_ent_val == DOES_NOT_EXIST) {
            TRACE("h1", "hti_copy_entry: empty entry %p killed", ht1_ent, 0);
            return TRUE;
        }
        TRACE("h0", "hti_copy_entry: lost race to kill empty entry %p; the entry is not empty", ht1_ent, 0);
    }
    // Tag the value in the old entry to indicate a copy is in progress.
    ht1_ent_val = SYNC_FETCH_AND_OR(&ht1_ent->val, TAG_VALUE(0, TAG1));
    TRACE("h2", "hti_copy_entry: tagged the value %p in old entry %p", ht1_ent_val, ht1_ent);
    if (ht1_ent_val == COPIED_VALUE || ht1_ent_val == TAG_VALUE(TOMBSTONE, TAG1)) {
        TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_ent, ht2);
        return FALSE; // <value> was already copied by another thread.
    }

    // The old table's dead entries don't need to be copied to the new table
    if (ht1_ent_val == TOMBSTONE)
        return TRUE;
    // Install the key in the new table.
    map_key_t ht1_ent_key = ht1_ent->key;
    map_key_t key = (ht1->ht->key_type == NULL) ? (map_key_t)ht1_ent_key : (map_key_t)GET_PTR(ht1_ent_key);

    // We use 0 to indicate that <key_hash> is uninitialized. Occasionally the key's hash really will
    // be 0 and we waste time recomputing it. It is rare enough that it won't hurt performance.
    if (key_hash == 0) {
#ifdef NBD32
        key_hash = (ht1->ht->key_type == NULL) ? murmur32_4b(ht1_ent_key) : ht1->ht->key_type->hash((void *)key);
#else
        key_hash = (ht1->ht->key_type == NULL) ? murmur32_8b(ht1_ent_key) : ht1->ht->key_type->hash((void *)key);
#endif
    }
    int ht2_ent_is_empty;
    volatile entry_t *ht2_ent = hti_lookup(ht2, key, key_hash, &ht2_ent_is_empty);
    TRACE("h0", "hti_copy_entry: copy entry %p to entry %p", ht1_ent, ht2_ent);

    // It is possible that there isn't any room in the new table either.
    if (EXPECT_FALSE(ht2_ent == NULL)) {
        TRACE("h0", "hti_copy_entry: no room in table %p copy to next table %p", ht2, ht2->next);
        if (ht2->next == NULL) {
            hti_start_copy(ht2); // initiate nested copy, if not already started
        }
        return hti_copy_entry(ht1, ht1_ent, key_hash, ht2->next); // recursive tail-call
    }
    if (ht2_ent_is_empty) {
        map_key_t old_ht2_ent_key = SYNC_CAS(&ht2_ent->key, DOES_NOT_EXIST, ht1_ent_key);
        if (old_ht2_ent_key != DOES_NOT_EXIST) {
            TRACE("h0", "hti_copy_entry: lost race to CAS key %p into new entry; found %p",
                        ht1_ent_key, old_ht2_ent_key);
            return hti_copy_entry(ht1, ht1_ent, key_hash, ht2); // recursive tail-call
        }
        SYNC_ADD(&ht2->key_count, 1);
    }
    // Copy the value to the entry in the new table.
    ht1_ent_val = STRIP_TAG(ht1_ent_val, TAG1);
    map_val_t old_ht2_ent_val = SYNC_CAS(&ht2_ent->val, DOES_NOT_EXIST, ht1_ent_val);

    // If there is a nested copy in progress, we might have installed the key into a dead entry.
    if (old_ht2_ent_val == COPIED_VALUE) {
        TRACE("h0", "hti_copy_entry: nested copy in progress; copy %p to next table %p", ht2_ent, ht2->next);
        return hti_copy_entry(ht1, ht1_ent, key_hash, ht2->next); // recursive tail-call
    }
    // Mark the old entry as dead.
    ht1_ent->val = COPIED_VALUE;

    // Update the count if we were the one that completed the copy.
    if (old_ht2_ent_val == DOES_NOT_EXIST) {
        TRACE("h0", "hti_copy_entry: key %p value %p copied to new entry", key, ht1_ent_val);
        (void)SYNC_ADD(&ht1->count, -1);
        (void)SYNC_ADD(&ht2->count, 1);
        return TRUE;
    }

    TRACE("h0", "hti_copy_entry: lost race to install value %p in new entry; found value %p",
                ht1_ent_val, old_ht2_ent_val);
    return FALSE; // another thread completed the copy
}
// Compare <expected> with the existing value associated with <key>. If the values match then
// replace the existing value with <new>. If <new> is DOES_NOT_EXIST, delete the value associated
// with the key by replacing it with a TOMBSTONE.
//
// Return the previous value associated with <key>, or DOES_NOT_EXIST if <key> is not in the table
// or is associated with a TOMBSTONE. If a copy is in progress and <key> has been copied to the
// next table then return COPIED_VALUE.
//
// NOTE: the returned value matches <expected> iff the set succeeds
//
// Certain values of <expected> have special meaning. If <expected> is CAS_EXPECT_EXISTS then any
// real value matches (i.e. not a TOMBSTONE or DOES_NOT_EXIST) as long as <key> is in the table. If
// <expected> is CAS_EXPECT_WHATEVER then skip the test entirely.
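//
// Usage sketch (illustrative; integer keys, so key_type == NULL and <h> comes from murmur32 as
// in ht_cas below):
//
//   hti_cas(hti, k, h, CAS_EXPECT_DOES_NOT_EXIST, v);        // insert <v> iff <k> is absent
//   hti_cas(hti, k, h, CAS_EXPECT_EXISTS, v2);               // update iff <k> is present
//   hti_cas(hti, k, h, CAS_EXPECT_WHATEVER, DOES_NOT_EXIST); // delete, as ht_remove() does
//
// A COPIED_VALUE return means the entry now lives in <hti->next> and the caller must retry there.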
static map_val_t hti_cas (hti_t *hti, map_key_t key, uint32_t key_hash, map_val_t expected, map_val_t new) {
    TRACE("h1", "hti_cas: hti %p key %p", hti, key);
    TRACE("h1", "hti_cas: value %p expect %p", new, expected);
    assert(!IS_TAGGED(new, TAG1));

    int is_empty;
    volatile entry_t *ent = hti_lookup(hti, key, key_hash, &is_empty);

    // If there is no room for <key>, trigger a copy into a bigger table and return COPIED_VALUE
    // so the caller retries on the new table.
    if (EXPECT_FALSE(ent == NULL)) {
        if (hti->next == NULL) {
            hti_start_copy(hti);
        }
        return COPIED_VALUE;
    }
    // Install <key> in the table if it doesn't exist.
    if (is_empty) {
        TRACE("h0", "hti_cas: entry %p is empty", ent, 0);
        if (expected != CAS_EXPECT_WHATEVER && expected != CAS_EXPECT_DOES_NOT_EXIST)
            return DOES_NOT_EXIST;

        // No need to do anything, <key> is already deleted.
        if (new == DOES_NOT_EXIST)
            return DOES_NOT_EXIST;
        // Allocate <new_key>.
        map_key_t new_key = (hti->ht->key_type == NULL)
                          ? (map_key_t)key
                          : (map_key_t)hti->ht->key_type->clone((void *)key);
#ifndef NBD32
        if (EXPECT_FALSE(hti->ht->key_type != NULL)) {
            // Combine the <new_key> pointer with bits from its hash
            new_key = ((uint64_t)(key_hash >> 16) << 48) | new_key;
        }
#endif
        // CAS the key into the table.
        map_key_t old_ent_key = SYNC_CAS(&ent->key, DOES_NOT_EXIST, new_key);

        // Retry if another thread stole the entry out from under us.
        if (old_ent_key != DOES_NOT_EXIST) {
            TRACE("h0", "hti_cas: lost race to install key %p in entry %p", new_key, ent);
            TRACE("h0", "hti_cas: found %p instead of NULL",
                        (hti->ht->key_type == NULL) ? (void *)old_ent_key : GET_PTR(old_ent_key), 0);
            if (hti->ht->key_type != NULL) {
                nbd_free(GET_PTR(new_key));
            }
            return hti_cas(hti, key, key_hash, expected, new); // tail-call
        }
        TRACE("h2", "hti_cas: installed key %p in entry %p", new_key, ent);
        SYNC_ADD(&hti->key_count, 1);
    }
    TRACE("h0", "hti_cas: entry for key %p is %p",
                (hti->ht->key_type == NULL) ? (void *)ent->key : GET_PTR(ent->key), ent);
    // If the entry is in the middle of a copy, the copy must be completed first.
    map_val_t ent_val = ent->val;
    if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
        if (ent_val != COPIED_VALUE && ent_val != TAG_VALUE(TOMBSTONE, TAG1)) {
            int did_copy = hti_copy_entry(hti, ent, key_hash, VOLATILE_DEREF(hti).next);
            if (did_copy) {
                (void)SYNC_ADD(&hti->num_entries_copied, 1);
            }
            TRACE("h0", "hti_cas: value in the middle of a copy, copy completed by %s",
                        (did_copy ? "self" : "other"), 0);
        }
        TRACE("h0", "hti_cas: value copied to next table, retry on next table", 0, 0);
        return COPIED_VALUE;
    }
    // Fail if the old value is not consistent with the caller's expectation.
    int old_existed = (ent_val != TOMBSTONE && ent_val != DOES_NOT_EXIST);
    if (EXPECT_FALSE(expected != CAS_EXPECT_WHATEVER && expected != ent_val)) {
        if (EXPECT_FALSE(expected != (old_existed ? CAS_EXPECT_EXISTS : CAS_EXPECT_DOES_NOT_EXIST))) {
            TRACE("h1", "hti_cas: value %p expected by caller not found; found value %p",
                        expected, ent_val);
            return ent_val;
        }
    }

    // No need to update if value is unchanged.
    if ((new == DOES_NOT_EXIST && !old_existed) || ent_val == new) {
        TRACE("h1", "hti_cas: old value and new value were the same", 0, 0);
        return ent_val;
    }
    // CAS the value into the entry. Retry if it fails.
    map_val_t v = SYNC_CAS(&ent->val, ent_val, new == DOES_NOT_EXIST ? TOMBSTONE : new);
    if (EXPECT_FALSE(v != ent_val)) {
        TRACE("h0", "hti_cas: value CAS failed; expected %p found %p", ent_val, v);
        return hti_cas(hti, key, key_hash, expected, new); // recursive tail-call
    }

    // The set succeeded. Adjust the value count.
    if (old_existed && new == DOES_NOT_EXIST) {
        (void)SYNC_ADD(&hti->count, -1);
    } else if (!old_existed && new != DOES_NOT_EXIST) {
        (void)SYNC_ADD(&hti->count, 1);
    }

    // Return the previous value.
    TRACE("h0", "hti_cas: CAS succeeded; old value %p new value %p", ent_val, new);
    return ent_val;
}
static map_val_t hti_get (hti_t *hti, map_key_t key, uint32_t key_hash) {
    int is_empty;
    volatile entry_t *ent = hti_lookup(hti, key, key_hash, &is_empty);

    // When hti_lookup() returns NULL it means we hit the reprobe limit while
    // searching the table. In that case, if a copy is in progress the key
    // might exist in the copy.
    if (EXPECT_FALSE(ent == NULL)) {
        if (VOLATILE_DEREF(hti).next != NULL)
            return hti_get(hti->next, key, key_hash); // recursive tail-call
        return DOES_NOT_EXIST;
    }

    if (is_empty)
        return DOES_NOT_EXIST;
    // If the entry is being copied, finish the copy and retry on the next table.
    map_val_t ent_val = ent->val;
    if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
        if (EXPECT_FALSE(ent_val != COPIED_VALUE && ent_val != TAG_VALUE(TOMBSTONE, TAG1))) {
            int did_copy = hti_copy_entry(hti, ent, key_hash, VOLATILE_DEREF(hti).next);
            if (did_copy) {
                (void)SYNC_ADD(&hti->num_entries_copied, 1);
            }
        }
        return hti_get(VOLATILE_DEREF(hti).next, key, key_hash); // tail-call
    }

    return (ent_val == TOMBSTONE) ? DOES_NOT_EXIST : ent_val;
}
map_val_t ht_get (hashtable_t *ht, map_key_t key) {
#ifdef NBD32
    uint32_t hash = (ht->key_type == NULL) ? murmur32_4b((uint64_t)key) : ht->key_type->hash((void *)key);
#else
    uint32_t hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash((void *)key);
#endif
    return hti_get(ht->hti, key, hash);
}
// returns TRUE if copy is done
static int hti_help_copy (hti_t *hti) {
    volatile entry_t *ent;
    size_t limit;
    size_t total_copied = hti->num_entries_copied;
    size_t num_copied = 0;
    size_t x = hti->copy_scan;

    TRACE("h1", "ht_cas: help copy. scan is %llu, size is %llu", x, 1ULL << hti->scale);
    if (total_copied != (1ULL << hti->scale)) {
        // Panic if we've been around the array twice and still haven't finished the copy.
        int panic = (x >= (1ULL << (hti->scale + 1)));
        if (!panic) {
            limit = ENTRIES_PER_COPY_CHUNK;

            // Reserve some entries for this thread to copy. There is a race condition here because
            // the fetch and the add aren't one atomic operation, but that is ok.
            hti->copy_scan = x + ENTRIES_PER_COPY_CHUNK;

            // <copy_scan> might be larger than the size of the table, if some thread stalls while
            // copying. In that case we just wrap around to the beginning and make another pass
            // through the table.
            ent = hti->table + (x & MASK(hti->scale));
        } else {
            TRACE("h1", "ht_cas: help copy panic", 0, 0);
            // scan the whole table
            ent = hti->table;
            limit = (1ULL << hti->scale);
        }
        // Copy the entries
        for (size_t i = 0; i < limit; ++i) {
            num_copied += hti_copy_entry(hti, ent++, 0, hti->next);
            assert(ent <= hti->table + (1ULL << hti->scale));
        }
        if (num_copied != 0) {
            total_copied = SYNC_ADD(&hti->num_entries_copied, num_copied);
        }
    }

    return (total_copied == (1ULL << hti->scale));
}
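// For illustration: with ENTRIES_PER_COPY_CHUNK = 8 (per the sketch near the constants above),
// each helping thread claims 8 consecutive entries per call via <copy_scan>, so helpers spread
// out across the table instead of contending on the same entries; the panic path trades that
// partitioning for a guaranteed full sweep when the copy has stalled.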
static void hti_defer_free (hti_t *hti) {
    assert(hti->ref_count == 0);

    for (size_t i = 0; i < (1ULL << hti->scale); ++i) {
        map_key_t key = hti->table[i].key;
        map_val_t val = hti->table[i].val;
        if (val == COPIED_VALUE)
            continue;
        assert(!IS_TAGGED(val, TAG1) || val == TAG_VALUE(TOMBSTONE, TAG1)); // copy not in progress
        if (hti->ht->key_type != NULL && key != DOES_NOT_EXIST) {
            rcu_defer_free(GET_PTR(key));
        }
    }

#ifdef USE_SYSTEM_MALLOC
    rcu_defer_free(hti->unaligned_table_ptr);
#else
    rcu_defer_free((void *)hti->table);
#endif
    rcu_defer_free(hti);
}
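// Note: entries are handed to rcu_defer_free() rather than freed immediately because a reader
// that obtained this hti (e.g. through ht_get or an iterator) may still be probing the table;
// reclamation is deferred until those readers have passed a quiescent point.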
static void hti_release (hti_t *hti) {
    assert(hti->ref_count > 0);
    int ref_count = SYNC_ADD(&hti->ref_count, -1);
    if (ref_count == 0) {
        hti_defer_free(hti);
    }
}
map_val_t ht_cas (hashtable_t *ht, map_key_t key, map_val_t expected_val, map_val_t new_val) {
    TRACE("h2", "ht_cas: key %p ht %p", key, ht);
    TRACE("h2", "ht_cas: expected val %p new val %p", expected_val, new_val);
    assert(key != DOES_NOT_EXIST);
    assert(!IS_TAGGED(new_val, TAG1) && new_val != DOES_NOT_EXIST && new_val != TOMBSTONE);

    hti_t *hti = ht->hti;

    // Help with an ongoing copy.
    if (EXPECT_FALSE(hti->next != NULL)) {
        int done = hti_help_copy(hti);

        // Unlink fully copied tables.
        if (done) {
            assert(hti->next);
            if (SYNC_CAS(&ht->hti, hti, hti->next) == hti) {
                hti_release(hti);
            }
        }
    }

    map_val_t old_val;
#ifdef NBD32
    uint32_t key_hash = (ht->key_type == NULL) ? murmur32_4b((uint64_t)key) : ht->key_type->hash((void *)key);
#else
    uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash((void *)key);
#endif
    while ((old_val = hti_cas(hti, key, key_hash, expected_val, new_val)) == COPIED_VALUE) {
        assert(hti->next);
        hti = hti->next;
    }

    return old_val == TOMBSTONE ? DOES_NOT_EXIST : old_val;
}
// Remove the value in <ht> associated with <key>. Returns the value removed, or DOES_NOT_EXIST if
// there was no value for that key.
map_val_t ht_remove (hashtable_t *ht, map_key_t key) {
    hti_t *hti = ht->hti;
    map_val_t val;
#ifdef NBD32
    uint32_t key_hash = (ht->key_type == NULL) ? murmur32_4b((uint64_t)key) : ht->key_type->hash((void *)key);
#else
    uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash((void *)key);
#endif
    do {
        val = hti_cas(hti, key, key_hash, CAS_EXPECT_WHATEVER, DOES_NOT_EXIST);
        if (val != COPIED_VALUE)
            return val == TOMBSTONE ? DOES_NOT_EXIST : val;
        assert(hti->next);
        hti = hti->next;
    } while (1);
}
// Returns the number of key-value pairs in <ht>
size_t ht_count (hashtable_t *ht) {
    hti_t *hti = ht->hti;
    size_t count = 0;
    while (hti) {
        count += hti->count;
        hti = hti->next;
    }
    return count;
}
// Allocate and initialize a new hash table.
hashtable_t *ht_alloc (const datatype_t *key_type) {
    hashtable_t *ht = nbd_malloc(sizeof(hashtable_t));
    ht->key_type = key_type;
    ht->hti = (hti_t *)hti_alloc(ht, MIN_SCALE);
    ht->hti_copies = 0;
    ht->density = 0.0;
    return ht;
}
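// Usage sketch for the public API (illustrative; integer keys, so key_type is NULL and values
// are cast to map_val_t):
//
//   hashtable_t *ht = ht_alloc(NULL);
//   ht_cas(ht, 42, CAS_EXPECT_DOES_NOT_EXIST, 1001); // insert
//   map_val_t v = ht_get(ht, 42);                    // v == 1001
//   ht_remove(ht, 42);
//   assert(ht_count(ht) == 0);
//   ht_free(ht);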
// Free <ht> and its internal structures.
void ht_free (hashtable_t *ht) {
    hti_t *hti = ht->hti;
    do {
        hti_t *next = hti->next;
        assert(hti->ref_count == 1);
        hti_release(hti);
        hti = next;
    } while (hti);
    nbd_free(ht);
}
void ht_print (hashtable_t *ht, int verbose) {
    printf("probe:%-2d density:%.1f%% count:%-8llu ", ht->probe, ht->density, (uint64_t)ht_count(ht));
    hti_t *hti = ht->hti;
    while (hti) {
        if (verbose) {
            for (size_t i = 0; i < (1ULL << hti->scale); ++i) {
                volatile entry_t *ent = hti->table + i;
                printf("[0x%zx] 0x%llx:0x%llx\n", i, (uint64_t)ent->key, (uint64_t)ent->val);
            }
        }
        int scale = hti->scale;
        printf("hti count:%llu scale:%d key density:%.1f%% value density:%.1f%% probe:%d\n",
               (uint64_t)hti->count, scale, (double)hti->key_count / (1ULL << scale) * 100,
               (double)hti->count / (1ULL << scale) * 100, hti->probe);
        hti = hti->next;
    }
}
ht_iter_t *ht_iter_begin (hashtable_t *ht, map_key_t key) {
    hti_t *hti;
    int ref_count;
    do {
        hti = ht->hti;
        while (hti->next != NULL) {
            do { } while (hti_help_copy(hti) != TRUE);
            hti = hti->next;
        }
        do {
            ref_count = hti->ref_count;
            if (ref_count == 0)
                break; // this hti is being freed; start over from <ht->hti>
        } while (ref_count != SYNC_CAS(&hti->ref_count, ref_count, ref_count + 1));
    } while (ref_count == 0);

    ht_iter_t *iter = nbd_malloc(sizeof(ht_iter_t));
    iter->hti = hti;
    iter->idx = -1;

    return iter;
}
map_val_t ht_iter_next (ht_iter_t *iter, map_key_t *key_ptr) {
    volatile entry_t *ent;
    map_key_t key;
    map_val_t val;
    size_t table_size = (1ULL << iter->hti->scale);
    do {
        iter->idx++;
        if (iter->idx == table_size) {
            return DOES_NOT_EXIST;
        }
        ent = &iter->hti->table[iter->idx];
        key = (iter->hti->ht->key_type == NULL) ? (map_key_t)ent->key : (map_key_t)GET_PTR(ent->key);
        val = ent->val;

    } while (key == DOES_NOT_EXIST || val == DOES_NOT_EXIST || val == TOMBSTONE);
    if (val == COPIED_VALUE) {
        const datatype_t *key_type = iter->hti->ht->key_type;
#ifdef NBD32
        uint32_t hash = (key_type == NULL) ? murmur32_4b((uint64_t)key) : key_type->hash((void *)key);
#else
        uint32_t hash = (key_type == NULL) ? murmur32_8b((uint64_t)key) : key_type->hash((void *)key);
#endif
        val = hti_get(iter->hti->next, (map_key_t)ent->key, hash);

        // Go to the next entry if key is already deleted.
        if (val == DOES_NOT_EXIST)
            return ht_iter_next(iter, key_ptr); // recursive tail-call
    }

    if (key_ptr) {
        *key_ptr = key;
    }
    return val;
}
void ht_iter_free (ht_iter_t *iter) {
    hti_release(iter->hti);
    nbd_free(iter);
}
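// Usage sketch (illustrative): walk every live key/value pair, then drop the iterator's
// reference on the table.
//
//   map_key_t k;
//   map_val_t v;
//   ht_iter_t *it = ht_iter_begin(ht, 0);
//   while ((v = ht_iter_next(it, &k)) != DOES_NOT_EXIST) {
//       // ... use <k> and <v> ...
//   }
//   ht_iter_free(it);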