/*
 * Written by Josh Dybnis and released to the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 *
 * C implementation of Cliff Click's lock-free hash table from
 * http://www.azulsystems.com/events/javaone_2008/2008_CodingNonBlock.pdf
 * http://sourceforge.net/projects/high-scale-lib
 *
 * Note: This code uses synchronous atomic operations because that is all that x86 provides.
 * Every atomic operation is also an implicit full memory barrier. The upshot is that it simplifies
 * the code a bit, but it won't be as fast as it could be on platforms that provide weaker
 * operations, like an unfenced CAS, which would still do the job.
 */
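
/*
 * For illustration only (these macros are defined in the platform headers, not
 * here): on x86 with GCC the SYNC_* primitives used below map directly onto the
 * __sync builtins, each of which acts as a full memory barrier:
 *
 *     #define SYNC_CAS(addr, old, new)    __sync_val_compare_and_swap(addr, old, new)
 *     #define SYNC_ADD(addr, n)           __sync_add_and_fetch(addr, n)
 *     #define SYNC_FETCH_AND_OR(addr, x)  __sync_fetch_and_or(addr, x)
 */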

#include "hashtable.h"

#define GET_PTR(x) ((void *)(size_t)((x) & MASK(48))) // the low-order 48 bits are a pointer to an nstring_t

typedef struct entry {
    uint64_t key;
    map_val_t val;
} entry_t;

typedef struct hti {
    volatile entry_t *table;
    hashtable_t *ht; // parent ht
    struct hti *next;
    unsigned scale;
    int max_probe;
    int references;
    int count; // TODO: make these counters distributed
    int num_entries_copied;
    int copy_scan;
} hti_t;

struct ht_iter {
    hti_t *hti;
    int64_t idx;
};

struct ht {
    hti_t *hti; // head of the linked list of tables; copies grow down the list
    const datatype_t *key_type;
};

static const map_val_t COPIED_VALUE          = -1;
static const map_val_t TOMBSTONE             = STRIP_TAG(-1, TAG1);

static const unsigned ENTRIES_PER_BUCKET     = CACHE_LINE_SIZE/sizeof(entry_t);
static const unsigned ENTRIES_PER_COPY_CHUNK = CACHE_LINE_SIZE/sizeof(entry_t)*2;
static const unsigned MIN_SCALE              = 4; // min 16 entries (4 buckets)
static const unsigned MAX_BUCKETS_TO_PROBE   = 250;
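
// Note (illustrative, backed by the assert in hti_copy_entry): COPIED_VALUE (-1)
// is exactly TOMBSTONE with TAG1 set, i.e. COPIED_VALUE == TAG_VALUE(TOMBSTONE, TAG1),
// so a "tagged tombstone" and a fully copied entry are the same bit pattern.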

static int hti_copy_entry (hti_t *ht1, volatile entry_t *ent, uint32_t ent_key_hash, hti_t *ht2);

// Choose the next bucket to probe using the high-order bits of <key_hash>.
static inline int get_next_ndx(int old_ndx, uint32_t key_hash, int ht_scale) {
    int incr = (key_hash >> (32 - ht_scale));
    incr += !incr; // If the increment is 0, make it 1.
    return (old_ndx + incr) & MASK(ht_scale);
}
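
// Worked example (not in the original): with ht_scale == 10 and key_hash ==
// 0xA3000000, incr == key_hash >> 22 == 0x28C, so successive probes visit
// (old_ndx + k*0x28C) & 0x3FF -- a stride that varies from key to key.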

// Lookup <key> in <hti>.
//
// Return the entry that <key> is in, or if <key> isn't in <hti> return the entry that it would be
// in if it were inserted into <hti>. If there is no room for <key> in <hti> then return NULL, to
// indicate that the caller should look in <hti->next>.
//
// Record if the entry being returned is empty. Otherwise the caller will have to waste time
// re-comparing the keys to confirm that it did not lose a race to fill an empty entry.
static volatile entry_t *hti_lookup (hti_t *hti, map_key_t key, uint32_t key_hash, int *is_empty) {
    TRACE("h2", "hti_lookup(key %p in hti %p)", key, hti);
    *is_empty = 0;

    // Probe one cache line at a time
    int ndx = key_hash & MASK(hti->scale); // the first entry to search
    for (int i = 0; i < hti->max_probe; ++i) {

        // The start of the bucket is the first entry in the cache line.
        volatile entry_t *bucket = hti->table + (ndx & ~(ENTRIES_PER_BUCKET-1));

        // Start searching at the indexed entry. Then loop around to the beginning of the cache line.
        for (int j = 0; j < ENTRIES_PER_BUCKET; ++j) {
            volatile entry_t *ent = bucket + ((ndx + j) & (ENTRIES_PER_BUCKET-1));

            uint64_t ent_key = ent->key;
            if (ent_key == DOES_NOT_EXIST) {
                TRACE("h1", "hti_lookup: entry %p for key %p is empty", ent,
                            (hti->ht->key_type == NULL) ? (void *)ent_key : GET_PTR(ent_key));
                *is_empty = 1; // indicate an empty so the caller avoids an expensive key compare
                return ent;
            }

            // Compare <key> with the key in the entry.
            if (EXPECT_TRUE(hti->ht->key_type == NULL)) {
                // fast path for integer keys
                if (ent_key == (uint64_t)key) {
                    TRACE("h1", "hti_lookup: found entry %p with key %p", ent, ent_key);
                    return ent;
                }
            } else {
                // The key in <ent> is made up of two parts. The 48 low-order bits are a pointer. The
                // high-order 16 bits are taken from the hash. The bits from the hash are used as a
                // quick check to rule out non-equal keys without doing a complete compare.
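                // Illustration (not in the original): for key_hash == 0xDEADBEEF the
                // tag is key_hash >> 16 == 0xDEAD, so an entry holding that key looks
                // like ent_key == 0xdead7f8c01a23400, where 0x7f8c01a23400 is the
                // 48-bit nstring_t pointer recovered by GET_PTR().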
                if ((key_hash >> 16) == (ent_key >> 48)) {
                    if (hti->ht->key_type->cmp(GET_PTR(ent_key), (void *)(size_t)key) == 0) {
                        TRACE("h1", "hti_lookup: found entry %p with key %p", ent, GET_PTR(ent_key));
                        return ent;
                    }
                }
            }
        }

        ndx = get_next_ndx(ndx, key_hash, hti->scale);
    }

    // maximum number of probes exceeded
    TRACE("h1", "hti_lookup: maximum number of probes exceeded returning 0x0", 0, 0);
    return NULL;
}

// Allocate and initialize a hti_t with 2^<scale> entries.
static hti_t *hti_alloc (hashtable_t *parent, int scale) {
    hti_t *hti = (hti_t *)nbd_malloc(sizeof(hti_t));
    memset(hti, 0, sizeof(hti_t));

    size_t sz = sizeof(entry_t) * (1 << scale);
    entry_t *table = nbd_malloc(sz);
    memset(table, 0, sz);
    hti->table = table;
    hti->scale = scale;

    // When searching for a key, probe a maximum of 1/4 of the buckets, up to MAX_BUCKETS_TO_PROBE buckets.
    hti->max_probe = ((1 << (hti->scale - 2)) / ENTRIES_PER_BUCKET) + 4;
    if (hti->max_probe > MAX_BUCKETS_TO_PROBE) {
        hti->max_probe = MAX_BUCKETS_TO_PROBE;
    }

    hti->ht = parent;

    assert(hti->scale >= MIN_SCALE && hti->scale < 63); // scale must be in range; the table size is 2^scale
    assert(sizeof(entry_t) * ENTRIES_PER_BUCKET % CACHE_LINE_SIZE == 0); // buckets divide evenly into cache lines
    assert((size_t)hti->table % CACHE_LINE_SIZE == 0); // cache aligned

    return hti;
}

// Called when <hti> runs out of room for new keys.
//
// Initiates a copy by creating a larger hti_t and installing it in <hti->next>.
static void hti_start_copy (hti_t *hti) {
    TRACE("h0", "hti_start_copy(hti %p scale %llu)", hti, hti->scale);

    // heuristics to determine the size of the new table
    uint64_t count = ht_count(hti->ht);
    unsigned int new_scale = hti->scale;
    new_scale += (count > (1 << (new_scale - 2))); // double size if more than 1/4 full
    new_scale += (count > (1 << (new_scale - 2))); // double size again if more than 1/2 full
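
    // Worked example (not in the original): a scale-12 table (4096 entries)
    // holding count == 3000: 3000 > 1024 bumps the scale to 13, then
    // 3000 > 2048 bumps it to 14, so the new table has 16384 entries.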

    // Allocate the new table and attempt to install it.
    hti_t *next = hti_alloc(hti->ht, new_scale);
    hti_t *old_next = SYNC_CAS(&hti->next, NULL, next);
    if (old_next != NULL) {
        // Another thread beat us to it.
        TRACE("h0", "hti_start_copy: lost race to install new hti; found %p", old_next, 0);
        nbd_free((void *)next->table);
        nbd_free(next);
        return;
    }
    TRACE("h0", "hti_start_copy: new hti %p scale %llu", next, next->scale);
}

// Copy the key and value stored in <ht1_ent> (which must be an entry in <ht1>) to <ht2>.
//
// Return 1 unless <ht1_ent> is already copied (then return 0), so the caller can keep an accurate
// count of the number of entries left to copy.
static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_hash, hti_t *ht2) {
    TRACE("h2", "hti_copy_entry: entry %p to table %p", ht1_ent, ht2);
    assert(ht1);
    assert(ht1->next);
    assert(ht2);
    assert(ht1_ent >= ht1->table && ht1_ent < ht1->table + (1 << ht1->scale));
    assert(key_hash == 0 || ht1->ht->key_type == NULL || (key_hash >> 16) == (ht1_ent->key >> 48));

    map_val_t ht1_ent_val = ht1_ent->val;
    if (EXPECT_FALSE(ht1_ent_val == COPIED_VALUE)) {
        TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_ent, ht2);
        return FALSE; // already copied
    }

    // Kill empty entries.
    if (EXPECT_FALSE(ht1_ent_val == DOES_NOT_EXIST)) {
        map_val_t old_ht1_ent_val = SYNC_CAS(&ht1_ent->val, DOES_NOT_EXIST, COPIED_VALUE);
        if (old_ht1_ent_val == DOES_NOT_EXIST) {
            TRACE("h1", "hti_copy_entry: empty entry %p killed", ht1_ent, 0);
            return TRUE;
        }
        if (old_ht1_ent_val == COPIED_VALUE) {
            TRACE("h0", "hti_copy_entry: lost race to kill empty entry %p; the entry is already killed", ht1_ent, 0);
            return FALSE; // another thread beat us to it
        }
        TRACE("h0", "hti_copy_entry: lost race to kill empty entry %p; the entry is not empty", ht1_ent, 0);
    }

    // Tag the value in the old entry to indicate a copy is in progress.
    ht1_ent_val = SYNC_FETCH_AND_OR(&ht1_ent->val, TAG_VALUE(0, TAG1));
    TRACE("h2", "hti_copy_entry: tagged the value %p in old entry %p", ht1_ent_val, ht1_ent);
    if (ht1_ent_val == COPIED_VALUE) {
        TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_ent, ht2);
        return FALSE; // <value> was already copied by another thread.
    }

    // Install the key in the new table.
    uint64_t ht1_ent_key = ht1_ent->key;
    map_key_t key = (ht1->ht->key_type == NULL) ? (map_key_t)ht1_ent_key : (map_key_t)(size_t)GET_PTR(ht1_ent_key);

    // The old table's dead entries don't need to be copied to the new table, but their keys need to be freed.
    assert(COPIED_VALUE == TAG_VALUE(TOMBSTONE, TAG1));
    if (ht1_ent_val == TOMBSTONE) {
        TRACE("h1", "hti_copy_entry: entry %p old value was deleted, now freeing key %p", ht1_ent, key);
        if (EXPECT_FALSE(ht1->ht->key_type != NULL)) {
            nbd_defer_free((void *)(size_t)key);
        }
        return TRUE;
    }

    // We use 0 to indicate that <key_hash> is uninitialized. Occasionally a key's hash will really
    // be 0, and then we waste time recomputing it on every copy; that is rare enough (1 in 4 billion
    // for a 32-bit hash) that it won't hurt performance.
    if (key_hash == 0) {
        key_hash = (ht1->ht->key_type == NULL)
                 ? murmur32_8b(ht1_ent_key)
                 : ht1->ht->key_type->hash((void *)(size_t)key);
    }

    int ht2_ent_is_empty;
    volatile entry_t *ht2_ent = hti_lookup(ht2, key, key_hash, &ht2_ent_is_empty);
    TRACE("h0", "hti_copy_entry: copy entry %p to entry %p", ht1_ent, ht2_ent);

    // It is possible that there isn't any room in the new table either.
    if (EXPECT_FALSE(ht2_ent == NULL)) {
        TRACE("h0", "hti_copy_entry: no room in table %p copy to next table %p", ht2, ht2->next);
        if (ht2->next == NULL) {
            hti_start_copy(ht2); // initiate nested copy, if not already started
        }
        return hti_copy_entry(ht1, ht1_ent, key_hash, ht2->next); // recursive tail-call
    }

    if (ht2_ent_is_empty) {
        uint64_t old_ht2_ent_key = SYNC_CAS(&ht2_ent->key, DOES_NOT_EXIST, ht1_ent_key);
        if (old_ht2_ent_key != DOES_NOT_EXIST) {
            TRACE("h0", "hti_copy_entry: lost race to CAS key %p into new entry; found %p",
                        ht1_ent_key, old_ht2_ent_key);
            return hti_copy_entry(ht1, ht1_ent, key_hash, ht2); // recursive tail-call
        }
    }

    // Copy the value to the entry in the new table.
    ht1_ent_val = STRIP_TAG(ht1_ent_val, TAG1);
    map_val_t old_ht2_ent_val = SYNC_CAS(&ht2_ent->val, DOES_NOT_EXIST, ht1_ent_val);

    // If there is a nested copy in progress, we might have installed the key into a dead entry.
    if (old_ht2_ent_val == COPIED_VALUE) {
        TRACE("h0", "hti_copy_entry: nested copy in progress; copy %p to next table %p", ht2_ent, ht2->next);
        return hti_copy_entry(ht1, ht1_ent, key_hash, ht2->next); // recursive tail-call
    }

    // Mark the old entry as dead.
    ht1_ent->val = COPIED_VALUE;

    // Update the count if we were the one that completed the copy.
    if (old_ht2_ent_val == DOES_NOT_EXIST) {
        TRACE("h0", "hti_copy_entry: key %p value %p copied to new entry", key, ht1_ent_val);
        SYNC_ADD(&ht1->count, -1);
        SYNC_ADD(&ht2->count, 1);
        return TRUE;
    }

    TRACE("h0", "hti_copy_entry: lost race to install value %p in new entry; found value %p",
                ht1_ent_val, old_ht2_ent_val);
    return FALSE; // another thread completed the copy
}
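
/*
 * Summary of an entry's value states during a copy (derived from the code
 * above, not part of the original file):
 *
 *     v                    live value, not yet copied
 *     TAG_VALUE(v, TAG1)   copy in progress; writers must help finish it
 *     COPIED_VALUE         done; the canonical value now lives in the next table
 */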

// Compare <expected> with the existing value associated with <key>. If the values match then
// replace the existing value with <new>. If <new> is DOES_NOT_EXIST, delete the value associated with
// the key by replacing it with a TOMBSTONE.
//
// Return the previous value associated with <key>, or DOES_NOT_EXIST if <key> is not in the table
// or associated with a TOMBSTONE. If a copy is in progress and <key> has been copied to the next
// table then return COPIED_VALUE.
//
// NOTE: the returned value matches <expected> iff the set succeeds
//
// Certain values of <expected> have special meaning. If <expected> is CAS_EXPECT_EXISTS then any
// real value matches (i.e. not a TOMBSTONE or DOES_NOT_EXIST) as long as <key> is in the table. If
// <expected> is CAS_EXPECT_WHATEVER then skip the test entirely.
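//
// Illustrative call patterns (not from the original):
//
//     hti_cas(hti, k, h, CAS_EXPECT_DOES_NOT_EXIST, v);        // insert iff absent
//     hti_cas(hti, k, h, CAS_EXPECT_EXISTS, v);                // update iff present
//     hti_cas(hti, k, h, CAS_EXPECT_WHATEVER, DOES_NOT_EXIST); // unconditional delete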
static map_val_t hti_cas (hti_t *hti, map_key_t key, uint32_t key_hash, map_val_t expected, map_val_t new) {
    TRACE("h1", "hti_cas: hti %p key %p", hti, key);
    TRACE("h1", "hti_cas: value %p expect %p", new, expected);
    assert(!IS_TAGGED(new, TAG1));

    int is_empty;
    volatile entry_t *ent = hti_lookup(hti, key, key_hash, &is_empty);

    // There is no room for <key>, grow the table and try again.
    if (ent == NULL) {
        if (hti->next == NULL) {
            hti_start_copy(hti);
        }
        return COPIED_VALUE;
    }

    // Install <key> in the table if it doesn't exist.
    if (is_empty) {
        TRACE("h0", "hti_cas: entry %p is empty", ent, 0);
        if (expected != CAS_EXPECT_WHATEVER && expected != CAS_EXPECT_DOES_NOT_EXIST)
            return DOES_NOT_EXIST;

        // No need to do anything, <key> is already deleted.
        if (new == DOES_NOT_EXIST)
            return DOES_NOT_EXIST;

        // Allocate <new_key>.
        uint64_t new_key = (uint64_t)((hti->ht->key_type == NULL) ? key : (map_key_t)(size_t)hti->ht->key_type->clone((void *)(size_t)key));
        if (EXPECT_FALSE(hti->ht->key_type != NULL)) {
            // Combine the <new_key> pointer with bits from its hash
            new_key = ((uint64_t)(key_hash >> 16) << 48) | new_key;
        }

        // CAS the key into the table.
        uint64_t old_ent_key = SYNC_CAS(&ent->key, DOES_NOT_EXIST, new_key);

        // Retry if another thread stole the entry out from under us.
        if (old_ent_key != DOES_NOT_EXIST) {
            TRACE("h0", "hti_cas: lost race to install key %p in entry %p", new_key, ent);
            TRACE("h0", "hti_cas: found %p instead of NULL",
                        (hti->ht->key_type == NULL) ? (void *)old_ent_key : GET_PTR(old_ent_key), 0);
            if (hti->ht->key_type != NULL) {
                nbd_free(GET_PTR(new_key));
            }
            return hti_cas(hti, key, key_hash, expected, new); // tail-call
        }
        TRACE("h2", "hti_cas: installed key %p in entry %p", new_key, ent);
    }

    TRACE("h0", "hti_cas: entry for key %p is %p",
                (hti->ht->key_type == NULL) ? (void *)ent->key : GET_PTR(ent->key), ent);

    // If the entry is in the middle of a copy, the copy must be completed first.
    map_val_t ent_val = ent->val;
    if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
        if (ent_val != COPIED_VALUE) {
            int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
            if (did_copy) {
                SYNC_ADD(&hti->num_entries_copied, 1);
            }
            TRACE("h0", "hti_cas: value in the middle of a copy, copy completed by %s",
                        (did_copy ? "self" : "other"), 0);
        }
        TRACE("h0", "hti_cas: value copied to next table, retry on next table", 0, 0);
        return COPIED_VALUE;
    }

    // Fail if the old value is not consistent with the caller's expectation.
    int old_existed = (ent_val != TOMBSTONE && ent_val != DOES_NOT_EXIST);
    if (EXPECT_FALSE(expected != CAS_EXPECT_WHATEVER && expected != ent_val)) {
        if (EXPECT_FALSE(expected != (old_existed ? CAS_EXPECT_EXISTS : CAS_EXPECT_DOES_NOT_EXIST))) {
            TRACE("h1", "hti_cas: value %p expected by caller not found; found value %p",
                        expected, ent_val);
            return ent_val;
        }
    }

    // No need to update if the value is unchanged.
    if ((new == DOES_NOT_EXIST && !old_existed) || ent_val == new) {
        TRACE("h1", "hti_cas: old value and new value were the same", 0, 0);
        return ent_val;
    }

    // CAS the value into the entry. Retry if it fails.
    map_val_t v = SYNC_CAS(&ent->val, ent_val, new == DOES_NOT_EXIST ? TOMBSTONE : new);
    if (EXPECT_FALSE(v != ent_val)) {
        TRACE("h0", "hti_cas: value CAS failed; expected %p found %p", ent_val, v);
        return hti_cas(hti, key, key_hash, expected, new); // recursive tail-call
    }

    // The set succeeded. Adjust the value count.
    if (old_existed && new == DOES_NOT_EXIST) {
        SYNC_ADD(&hti->count, -1);
    } else if (!old_existed && new != DOES_NOT_EXIST) {
        SYNC_ADD(&hti->count, 1);
    }

    // Return the previous value.
    TRACE("h0", "hti_cas: CAS succeeded; old value %p new value %p", ent_val, new);
    return ent_val;
}

static map_val_t hti_get (hti_t *hti, map_key_t key, uint32_t key_hash) {
    int is_empty;
    volatile entry_t *ent = hti_lookup(hti, key, key_hash, &is_empty);

    // When hti_lookup() returns NULL it means we hit the reprobe limit while
    // searching the table. In that case, if a copy is in progress the key
    // might exist in the copy.
    if (EXPECT_FALSE(ent == NULL)) {
        if (((volatile hti_t *)hti)->next != NULL)
            return hti_get(hti->next, key, key_hash); // recursive tail-call
        return DOES_NOT_EXIST;
    }

    if (is_empty)
        return DOES_NOT_EXIST;

    // If the entry is being copied, finish the copy and retry on the next table.
    map_val_t ent_val = ent->val;
    if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
        if (EXPECT_FALSE(ent_val != COPIED_VALUE)) {
            int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
            if (did_copy) {
                SYNC_ADD(&hti->num_entries_copied, 1);
            }
        }
        return hti_get(((volatile hti_t *)hti)->next, key, key_hash); // tail-call
    }

    return (ent_val == TOMBSTONE) ? DOES_NOT_EXIST : ent_val;
}

map_val_t ht_get (hashtable_t *ht, map_key_t key) {
    uint32_t hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash((void *)(size_t)key);
    return hti_get(ht->hti, key, hash);
}

// returns TRUE if copy is done
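//
// Illustration (not in the original): with 64-byte cache lines and 16-byte
// entries, ENTRIES_PER_COPY_CHUNK is 4*2 == 8, so each call normally claims a
// chunk of 8 entries to copy before returning.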
int hti_help_copy (hti_t *hti) {
    volatile entry_t *ent;
    uint64_t limit;
    uint64_t total_copied = hti->num_entries_copied;
    uint64_t num_copied = 0;
    uint64_t x = hti->copy_scan;

    TRACE("h1", "ht_cas: help copy. scan is %llu, size is %llu", x, 1<<hti->scale);
    if (total_copied != (1 << hti->scale)) {
        // Panic if we've been around the array twice and still haven't finished the copy.
        int panic = (x >= (1 << (hti->scale + 1)));
        if (!panic) {
            limit = ENTRIES_PER_COPY_CHUNK;

            // Reserve some entries for this thread to copy. There is a race condition here because the
            // fetch and add isn't atomic, but that is ok.
            hti->copy_scan = x + ENTRIES_PER_COPY_CHUNK;

            // <copy_scan> might be larger than the size of the table, if some thread stalls while
            // copying. In that case we just wrap around to the beginning and make another pass
            // through the table.
            ent = hti->table + (x & MASK(hti->scale));
        } else {
            TRACE("h1", "ht_cas: help copy panic", 0, 0);
            // scan the whole table
            ent = hti->table;
            limit = (1 << hti->scale);
        }

        // Copy the entries
        for (int i = 0; i < limit; ++i) {
            num_copied += hti_copy_entry(hti, ent++, 0, hti->next);
            assert(ent <= hti->table + (1 << hti->scale));
        }
        if (num_copied != 0) {
            total_copied = SYNC_ADD(&hti->num_entries_copied, num_copied);
        }
    }

    return (total_copied == (1 << hti->scale));
}
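
// Illustrative usage of the public CAS interface (not part of the original
// file; assumes integer keys, i.e. a table created with ht_alloc(NULL)):
//
//     map_val_t prev = ht_cas(ht, 7, CAS_EXPECT_DOES_NOT_EXIST, 100); // insert 7 -> 100
//     prev = ht_cas(ht, 7, 100, 200); // update; fails unless the current value is 100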
map_val_t ht_cas (hashtable_t *ht, map_key_t key, map_val_t expected_val, map_val_t new_val) {

    TRACE("h2", "ht_cas: key %p ht %p", key, ht);
    TRACE("h2", "ht_cas: expected val %p new val %p", expected_val, new_val);
    assert(key != DOES_NOT_EXIST);
    assert(!IS_TAGGED(new_val, TAG1) && new_val != DOES_NOT_EXIST && new_val != TOMBSTONE);

    hti_t *hti = ht->hti;

    // Help with an ongoing copy.
    if (EXPECT_FALSE(hti->next != NULL)) {
        int done = hti_help_copy(hti);

        // Dispose of fully copied tables.
        if (done && hti->references == 0) {

            int r = SYNC_CAS(&hti->references, 0, -1);
            if (r == 0) {
                if (SYNC_CAS(&ht->hti, hti, hti->next) == hti) {
                    nbd_defer_free((void *)hti->table);
                    nbd_defer_free(hti);
                }
            }
        }
    }

    map_val_t old_val;
    uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash((void *)(size_t)key);
    while ((old_val = hti_cas(hti, key, key_hash, expected_val, new_val)) == COPIED_VALUE) {
        assert(hti->next);
        hti = hti->next;
    }

    return old_val == TOMBSTONE ? DOES_NOT_EXIST : old_val;
}

// Remove the value in <ht> associated with <key>. Returns the value removed, or DOES_NOT_EXIST if there was
// no value for that key.
map_val_t ht_remove (hashtable_t *ht, map_key_t key) {
    hti_t *hti = ht->hti;
    map_val_t val;
    uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash((void *)(size_t)key);
    do {
        val = hti_cas(hti, key, key_hash, CAS_EXPECT_WHATEVER, DOES_NOT_EXIST);
        if (val != COPIED_VALUE)
            return val == TOMBSTONE ? DOES_NOT_EXIST : val;
        assert(hti->next);
        hti = hti->next;
    } while (1);
}

// Returns the number of key-value pairs in <ht>
uint64_t ht_count (hashtable_t *ht) {
    hti_t *hti = ht->hti;
    uint64_t count = 0;
    while (hti) {
        count += hti->count;
        hti = hti->next;
    }
    return count;
}
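
// Note (observation, not in the original): an entry that is mid-copy is
// decremented from the old table's counter before the new table's counter is
// incremented, so the sum above can transiently undercount during a resize.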

// Allocate and initialize a new hash table.
hashtable_t *ht_alloc (const datatype_t *key_type) {
    hashtable_t *ht = nbd_malloc(sizeof(hashtable_t));
    ht->key_type = key_type;
    ht->hti = (hti_t *)hti_alloc(ht, MIN_SCALE);
    return ht;
}

// Free <ht> and its internal structures.
void ht_free (hashtable_t *ht) {
    hti_t *hti = ht->hti;
    do {
        for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
            assert(hti->table[i].val == COPIED_VALUE || !IS_TAGGED(hti->table[i].val, TAG1));
            if (ht->key_type != NULL && hti->table[i].key != DOES_NOT_EXIST) {
                nbd_free(GET_PTR(hti->table[i].key));
            }
        }
        hti_t *next = hti->next;
        nbd_free((void *)hti->table);
        nbd_free(hti);
        hti = next;
    } while (hti);
    nbd_free(ht);
}

void ht_print (hashtable_t *ht) {
    hti_t *hti = ht->hti;
    while (hti) {
        printf("hti:%p scale:%u count:%d copied:%d\n", hti, hti->scale, hti->count, hti->num_entries_copied);
        for (int i = 0; i < (1 << hti->scale); ++i) {
            volatile entry_t *ent = hti->table + i;
            printf("[0x%x] 0x%llx:0x%llx\n", i, (uint64_t)ent->key, (uint64_t)ent->val);
        }
        hti = hti->next;
    }
}
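
// The iterator pins its table by incrementing <references>; ht_cas retires a
// fully copied table only after it wins a CAS of <references> from 0 to -1, so
// a pinned table cannot be freed out from under a live iterator. (Summary
// derived from the code; not a comment in the original file.)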
ht_iter_t *ht_iter_begin (hashtable_t *ht, map_key_t key) {
    hti_t *hti = ht->hti;
    int rcount;
    do {
        // Iterate over the most recent table, waiting for any copy to finish.
        while (((volatile hti_t *)hti)->next != NULL) {
            do { } while (hti_help_copy(hti) != TRUE);
            hti = hti->next;
        }

        // Pin the table with a reference count; -1 means it is being retired.
        int old = hti->references;
        do {
            rcount = old;
            if (rcount != -1) {
                old = SYNC_CAS(&hti->references, rcount, rcount + 1);
            }
        } while (rcount != old);
    } while (rcount == -1);

    ht_iter_t *iter = nbd_malloc(sizeof(ht_iter_t));
    iter->hti = hti;
    iter->idx = -1;

    return iter;
}

map_val_t ht_iter_next (ht_iter_t *iter, map_key_t *key_ptr) {
    volatile entry_t *ent;
    map_key_t key;
    map_val_t val;
    uint64_t table_size = (1 << iter->hti->scale);
    do {
        iter->idx++;
        if (iter->idx == table_size) {
            return DOES_NOT_EXIST;
        }
        ent = &iter->hti->table[iter->idx];
        key = (iter->hti->ht->key_type == NULL) ? (map_key_t)ent->key : (map_key_t)(size_t)GET_PTR(ent->key);
        val = ent->val;

    } while (key == DOES_NOT_EXIST || val == DOES_NOT_EXIST || val == TOMBSTONE);

    if (key_ptr) {
        *key_ptr = key;
    }

    // If the entry was copied to the next table mid-iteration, fetch the
    // canonical value from there.
    if (val == COPIED_VALUE) {
        uint32_t hash = (iter->hti->ht->key_type == NULL)
                      ? murmur32_8b((uint64_t)key)
                      : iter->hti->ht->key_type->hash((void *)(size_t)key);
        val = hti_get(iter->hti->next, (map_key_t)ent->key, hash);
    }

    return val;
}

void ht_iter_free (ht_iter_t *iter) {
    SYNC_ADD(&iter->hti->references, -1);
    nbd_free(iter);
}
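
/*
 * Minimal end-to-end usage sketch (illustrative only, not part of the original
 * file). Assumes integer keys (ht_alloc(NULL)) and elides concurrency; the
 * HASHTABLE_EXAMPLE guard keeps it out of normal builds.
 */
#ifdef HASHTABLE_EXAMPLE
#include <stdio.h>

int main (void) {
    hashtable_t *ht = ht_alloc(NULL); // NULL key_type selects the integer-key fast path

    ht_cas(ht, 1, CAS_EXPECT_DOES_NOT_EXIST, 100); // insert 1 -> 100
    ht_cas(ht, 1, 100, 200);                       // update 100 -> 200
    printf("get(1) = %llu\n", (unsigned long long)ht_get(ht, 1));

    ht_iter_t *it = ht_iter_begin(ht, 0);
    map_key_t k;
    map_val_t v;
    while ((v = ht_iter_next(it, &k)) != DOES_NOT_EXIST) {
        printf("%llu -> %llu\n", (unsigned long long)k, (unsigned long long)v);
    }
    ht_iter_free(it);

    ht_remove(ht, 1);
    ht_free(ht);
    return 0;
}
#endif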