diff --git a/map/hashtable.c b/map/hashtable.c
index 88bf631..410d97b 100644
--- a/map/hashtable.c
+++ b/map/hashtable.c
@@ -1,15 +1,17 @@
-/*
+/*
  * Written by Josh Dybnis and released to the public domain, as explained at
  * http://creativecommons.org/licenses/publicdomain
- *
+ *
  * C implementation of Cliff Click's lock-free hash table from
  * http://www.azulsystems.com/events/javaone_2008/2008_CodingNonBlock.pdf
  * http://sourceforge.net/projects/high-scale-lib
  *
- * Note: This code uses synchronous atomic operations because that is all that x86 provides.
+ * Note: This code uses synchronous atomic operations because that is all that x86 provides.
  * Every atomic operation is also an implicit full memory barrier. The upshot is that it simplifies
- * the code a bit, but it won't be as fast as it could be on platforms that provide weaker
- * operations like and unfenced CAS which would still do the job.
+ * the code a bit, but it won't be as fast as it could be on platforms that provide weaker
+ * operations like unfenced CAS which would still do the job.
+ *
+ * 11Feb09 - Bug fix in ht_iter_next() from Rui Ueyama
 */
 
 #include <stdio.h>
@@ -34,12 +36,16 @@ typedef struct hti {
     volatile entry_t *table;
     hashtable_t *ht; // parent ht;
     struct hti *next;
-    unsigned scale;
-    int max_probe;
+#ifdef USE_SYSTEM_MALLOC
+    void *unaligned_table_ptr; // system malloc doesn't guarantee cache-line alignment
+#endif
+    size_t count; // TODO: make these counters distributed
+    size_t key_count;
+    size_t copy_scan;
+    size_t num_entries_copied;
+    int probe;
     int ref_count;
-    int count; // TODO: make these counters distributed
-    int num_entries_copied;
-    int copy_scan;
+    uint8_t scale;
 } hti_t;
 
 struct ht_iter {
@@ -50,32 +56,38 @@ struct ht {
     hti_t *hti;
     const datatype_t *key_type;
+    uint32_t hti_copies;
+    double density;
+    int probe;
 };
 
-static const map_val_t COPIED_VALUE = TAG_VALUE(DOES_NOT_EXIST, TAG1);
-static const map_val_t TOMBSTONE = STRIP_TAG(-1, TAG1);
+static const map_val_t COPIED_VALUE = TAG_VALUE(DOES_NOT_EXIST, TAG1);
+static const map_val_t TOMBSTONE = STRIP_TAG(-1, TAG1);
 
 static const unsigned ENTRIES_PER_BUCKET = CACHE_LINE_SIZE/sizeof(entry_t);
 static const unsigned ENTRIES_PER_COPY_CHUNK = CACHE_LINE_SIZE/sizeof(entry_t)*2;
 static const unsigned MIN_SCALE = 4; // min 16 entries (4 buckets)
-static const unsigned MAX_BUCKETS_TO_PROBE = 250;
 
 static int hti_copy_entry (hti_t *ht1, volatile entry_t *ent, uint32_t ent_key_hash, hti_t *ht2);
 
 // Choose the next bucket to probe using the high-order bits of <key_hash>.
 static inline int get_next_ndx(int old_ndx, uint32_t key_hash, int ht_scale) {
+#if 1
     int incr = (key_hash >> (32 - ht_scale));
-    incr += !incr; // If the increment is 0, make it 1.
+    if (incr < ENTRIES_PER_BUCKET) { incr += ENTRIES_PER_BUCKET; }
     return (old_ndx + incr) & MASK(ht_scale);
+#else
+    return (old_ndx + ENTRIES_PER_BUCKET) & MASK(ht_scale);
+#endif
 }
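The rewritten increment rule above replaces the old "make a zero increment 1" fix-up: any stride smaller than ENTRIES_PER_BUCKET could land the next probe in the cache line that was just searched, so small strides are bumped past the bucket instead. A minimal standalone sketch of the rule (the bucket size, scale, and hash value below are illustrative assumptions, not taken from the nbds headers):

    #include <stdio.h>
    #include <stdint.h>

    #define ENTRIES_PER_BUCKET 4                    // assumes 64-byte lines, 16-byte entries
    #define MASK(scale) ((1u << (scale)) - 1)

    static int get_next_ndx (int old_ndx, uint32_t key_hash, int ht_scale) {
        int incr = (key_hash >> (32 - ht_scale));   // stride from the hash's high bits
        if (incr < ENTRIES_PER_BUCKET) { incr += ENTRIES_PER_BUCKET; } // skip current bucket
        return (old_ndx + incr) & MASK(ht_scale);
    }

    int main (void) {
        uint32_t key_hash = 0x9e3779b9;             // arbitrary example hash
        int ndx = key_hash & MASK(10);              // scale 10: a 1024-entry table
        for (int i = 0; i < 4; ++i) {
            printf("probe %d -> entry %d (bucket %d)\n", i, ndx, ndx / ENTRIES_PER_BUCKET);
            ndx = get_next_ndx(ndx, key_hash, 10);
        }
        return 0;
    }

Because the stride is derived from hash bits the table index doesn't use, two keys that collide on their low bits still tend to follow different probe sequences.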
-// Lookup <key> in <hti>.
+// Lookup <key> in <hti>.
 //
-// Return the entry that <key> is in, or if <key> isn't in <hti> return the entry that it would be
-// in if it were inserted into <hti>. If there is no room for <key> in <hti> then return NULL, to
+// Return the entry that <key> is in, or if <key> isn't in <hti> return the entry that it would be
+// in if it were inserted into <hti>. If there is no room for <key> in <hti> then return NULL, to
 // indicate that the caller should look in <hti->next>.
 //
-// Record in <is_empty> if the entry being returned is empty. Otherwise the caller will have to waste time
+// Record in <is_empty> if the entry being returned is empty. Otherwise the caller will have to waste time
 // re-comparing the keys to confirm that it did not lose a race to fill an empty entry.
 static volatile entry_t *hti_lookup (hti_t *hti, map_key_t key, uint32_t key_hash, int *is_empty) {
     TRACE("h2", "hti_lookup(key %p in hti %p)", key, hti);
@@ -83,10 +95,10 @@ static volatile entry_t *hti_lookup (hti_t *hti, map_key_t key, uint32_t key_has
 
     // Probe one cache line at a time
     int ndx = key_hash & MASK(hti->scale); // the first entry to search
-    for (int i = 0; i < hti->max_probe; ++i) {
+    for (int i = 0; i < hti->probe; ++i) {
 
         // The start of the bucket is the first entry in the cache line.
-        volatile entry_t *bucket = hti->table + (ndx & ~(ENTRIES_PER_BUCKET-1));
+        volatile entry_t *bucket = hti->table + (ndx & ~(ENTRIES_PER_BUCKET-1));
 
         // Start searching at the indexed entry. Then loop around to the beginning of the cache line.
         for (int j = 0; j < ENTRIES_PER_BUCKET; ++j) {
@@ -94,13 +106,13 @@ static volatile entry_t *hti_lookup (hti_t *hti, map_key_t key, uint32_t key_has
             map_key_t ent_key = ent->key;
             if (ent_key == DOES_NOT_EXIST) {
-                TRACE("h1", "hti_lookup: entry %p for key %p is empty", ent,
+                TRACE("h1", "hti_lookup: entry %p for key %p is empty", ent,
                             (hti->ht->key_type == NULL) ? (void *)key : GET_PTR(key));
                 *is_empty = 1; // indicate an empty so the caller avoids an expensive key compare
                 return ent;
             }
 
-            // Compare <key> with the key in the entry.
+            // Compare <key> with the key in the entry.
             if (EXPECT_TRUE(hti->ht->key_type == NULL)) {
                 // fast path for integer keys
                 if (ent_key == key) {
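The two-level loop above is the heart of the cache-line probing: the outer loop hops from bucket to bucket, while the inner loop scans the ENTRIES_PER_BUCKET entries of one line, starting at the hashed-to entry and wrapping within the line. A self-contained sketch of the inner wrap-around (the bucket size is an assumed stand-in):

    #include <stdio.h>

    #define ENTRIES_PER_BUCKET 4   // assumed: one cache line worth of entries

    int main (void) {
        int ndx = 6;                                    // entry index chosen by the hash
        int bucket = ndx & ~(ENTRIES_PER_BUCKET - 1);   // first entry in the cache line
        for (int j = 0; j < ENTRIES_PER_BUCKET; ++j) {
            int ent = bucket + ((ndx + j) & (ENTRIES_PER_BUCKET - 1));
            printf("scan order %d: entry %d\n", j, ent); // visits 6, 7, 4, 5
        }
        return 0;
    }

Every probe step therefore touches exactly one cache line, which is the point of the scheme.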
@@ -136,20 +148,24 @@ static volatile entry_t *hti_lookup (hti_t *hti, map_key_t key, uint32_t key_has
 static hti_t *hti_alloc (hashtable_t *parent, int scale) {
     hti_t *hti = (hti_t *)nbd_malloc(sizeof(hti_t));
     memset(hti, 0, sizeof(hti_t));
-
-    size_t sz = sizeof(entry_t) * (1 << scale);
-    entry_t *table = nbd_malloc(sz);
-    memset(table, 0, sz);
-    hti->table = table;
-    hti->scale = scale;
-    // When searching for a key probe a maximum of 1/4 of the buckets up to 1000 buckets.
-    hti->max_probe = ((1 << (hti->scale - 2)) / ENTRIES_PER_BUCKET) + 4;
-    if (hti->max_probe > MAX_BUCKETS_TO_PROBE) {
-        hti->max_probe = MAX_BUCKETS_TO_PROBE;
-    }
+    hti->scale = scale;
+
+    size_t sz = sizeof(entry_t) * (1ULL << scale);
+#ifdef USE_SYSTEM_MALLOC
+    hti->unaligned_table_ptr = nbd_malloc(sz + CACHE_LINE_SIZE - 1);
+    hti->table = (void *)(((size_t)hti->unaligned_table_ptr + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1));
+#else
+    hti->table = nbd_malloc(sz);
+#endif
+    memset((void *)hti->table, 0, sz);
+
+    hti->probe = (int)(hti->scale * 1.5) + 2;
+    int quarter = (1ULL << (hti->scale - 2)) / ENTRIES_PER_BUCKET;
+    if (hti->probe > quarter && quarter > 4) {
+        // When searching for a key probe a maximum of 1/4 of the buckets
+        hti->probe = quarter;
+    }
+    ASSERT(hti->probe);
     hti->ht = parent;
     hti->ref_count = 1; // one for the parent
@@ -169,8 +185,7 @@ static void hti_start_copy (hti_t *hti) {
     // heuristics to determine the size of the new table
     size_t count = ht_count(hti->ht);
     unsigned int new_scale = hti->scale;
-    new_scale += (count > (1 << (new_scale - 2))); // double size if more than 1/4 full
-    new_scale += (count > (1 << (new_scale - 2))); // double size again if more than 1/2 full
+    new_scale += (count > (1ULL << (hti->scale - 1))) || (hti->key_count > (1ULL << (hti->scale - 2)) + (1ULL << (hti->scale - 3))); // double size if more than 1/2 full
 
     // Allocate the new table and attempt to install it.
     hti_t *next = hti_alloc(hti->ht, new_scale);
@@ -178,13 +193,20 @@ static void hti_start_copy (hti_t *hti) {
     if (old_next != NULL) {
         // Another thread beat us to it.
         TRACE("h0", "hti_start_copy: lost race to install new hti; found %p", old_next, 0);
-        nbd_free(next);
+#ifdef USE_SYSTEM_MALLOC
+        nbd_free(next->unaligned_table_ptr);
+#else
+        nbd_free((void *)next->table);
+#endif
         return;
     }
     TRACE("h0", "hti_start_copy: new hti %p scale %llu", next, next->scale);
+    SYNC_ADD(&hti->ht->hti_copies, 1);
+    hti->ht->density = (double)hti->key_count / (1ULL << hti->scale) * 100;
+    hti->ht->probe = hti->probe;
 }
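Two of the additions above lean on the same trick: when USE_SYSTEM_MALLOC is defined, the table is over-allocated by CACHE_LINE_SIZE-1 bytes and the pointer is rounded up to a line boundary, so the original unaligned pointer has to be remembered for freeing. A standalone sketch of the round-up (line size assumed; plain malloc/free stand in for nbd_malloc/nbd_free):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define CACHE_LINE_SIZE 64   // assumed line size

    int main (void) {
        size_t sz = 1024;
        void *unaligned = malloc(sz + CACHE_LINE_SIZE - 1);          // over-allocate
        void *table = (void *)(((uintptr_t)unaligned + CACHE_LINE_SIZE - 1)
                               & ~(uintptr_t)(CACHE_LINE_SIZE - 1)); // round up
        printf("unaligned %p -> aligned %p\n", unaligned, table);
        free(unaligned);   // must free the original pointer, never the aligned one
        return 0;
    }

This is why hti_t grows the unaligned_table_ptr field, and why the lost-race path above frees next->unaligned_table_ptr rather than next->table.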
-// Copy the key and value stored in <ht1_ent> (which must be an entry in <ht1>) to <ht2>.
+// Copy the key and value stored in <ht1_ent> (which must be an entry in <ht1>) to <ht2>.
 //
 // Return 1 unless <ht1_ent> is already copied (then return 0), so the caller can account for the total
 // number of entries left to copy.
@@ -193,7 +215,7 @@ static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_h
     assert(ht1);
     assert(ht1->next);
     assert(ht2);
-    assert(ht1_ent >= ht1->table && ht1_ent < ht1->table + (1 << ht1->scale));
+    assert(ht1_ent >= ht1->table && ht1_ent < ht1->table + (1ULL << ht1->scale));
 #ifndef NBD32
     assert(key_hash == 0 || ht1->ht->key_type == NULL || (key_hash >> 16) == (ht1_ent->key >> 48));
 #endif
@@ -224,15 +246,15 @@ static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_h
 
     // The old table's dead entries don't need to be copied to the new table
     if (ht1_ent_val == TOMBSTONE)
-        return TRUE;
+        return TRUE;
 
     // Install the key in the new table.
     map_key_t ht1_ent_key = ht1_ent->key;
     map_key_t key = (ht1->ht->key_type == NULL) ? (map_key_t)ht1_ent_key : (map_key_t)GET_PTR(ht1_ent_key);
 
     // We use 0 to indicate that <key_hash> is uninitialized. Occasionally the key's hash will really be 0 and we
-    // waste time recomputing it every time. It is rare enough that it won't hurt performance.
-    if (key_hash == 0) {
+    // waste time recomputing it every time. It is rare enough that it won't hurt performance.
+    if (key_hash == 0) {
 #ifdef NBD32
         key_hash = (ht1->ht->key_type == NULL) ? murmur32_4b(ht1_ent_key) : ht1->ht->key_type->hash((void *)key);
 #else
@@ -260,6 +282,7 @@ static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_h
                         ht1_ent_key, old_ht2_ent_key);
             return hti_copy_entry(ht1, ht1_ent, key_hash, ht2); // recursive tail-call
         }
+        SYNC_ADD(&ht2->key_count, 1);
     }
 
     // Copy the value to the entry in the new table.
@@ -278,27 +301,27 @@ static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_h
     // Update the count if we were the one that completed the copy.
     if (old_ht2_ent_val == DOES_NOT_EXIST) {
         TRACE("h0", "hti_copy_entry: key %p value %p copied to new entry", key, ht1_ent_val);
-        SYNC_ADD(&ht1->count, -1);
-        SYNC_ADD(&ht2->count, 1);
+        (void)SYNC_ADD(&ht1->count, -1);
+        (void)SYNC_ADD(&ht2->count, 1);
         return TRUE;
     }
 
-    TRACE("h0", "hti_copy_entry: lost race to install value %p in new entry; found value %p",
+    TRACE("h0", "hti_copy_entry: lost race to install value %p in new entry; found value %p",
           ht1_ent_val, old_ht2_ent_val);
     return FALSE; // another thread completed the copy
 }
 
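The copy protocol above hinges on tagged values: setting TAG1 on an entry's value marks it as moved (COPIED_VALUE is simply a tagged DOES_NOT_EXIST), so readers and writers know to chase the entry into the next table. A reimplemented sketch of the tagging arithmetic (the macros here are assumed stand-ins for the ones in the nbds headers):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t map_val_t;

    #define TAG1              ((map_val_t)1 << 63)   // assumed: tag lives in the top bit
    #define TAG_VALUE(v, tag) ((v) | (tag))
    #define IS_TAGGED(v, tag) (((v) & (tag)) != 0)
    #define STRIP_TAG(v, tag) ((v) & ~(tag))

    int main (void) {
        map_val_t val = 42;
        map_val_t copied = TAG_VALUE(val, TAG1);     // entry now reads as "moved"
        printf("tagged? %d  original value %llu\n",
               IS_TAGGED(copied, TAG1) ? 1 : 0,
               (unsigned long long)STRIP_TAG(copied, TAG1));
        return 0;
    }

Because the tag is a single bit of the value word, marking an entry as moved is one atomic CAS.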
-// Compare <expected> with the existing value associated with <key>. If the values match then
-// replace the existing value with <new>. If <new> is DOES_NOT_EXIST, delete the value associated with
+// Compare <expected> with the existing value associated with <key>. If the values match then
+// replace the existing value with <new>. If <new> is DOES_NOT_EXIST, delete the value associated with
 // the key by replacing it with a TOMBSTONE.
 //
 // Return the previous value associated with <key>, or DOES_NOT_EXIST if <key> is not in the table
-// or associated with a TOMBSTONE. If a copy is in progress and <key> has been copied to the next
-// table then return COPIED_VALUE.
+// or associated with a TOMBSTONE. If a copy is in progress and <key> has been copied to the next
+// table then return COPIED_VALUE.
 //
 // NOTE: the returned value matches <expected> iff the set succeeds
 //
-// Certain values of <expected> have special meaning. If <expected> is CAS_EXPECT_EXISTS then any
+// Certain values of <expected> have special meaning. If <expected> is CAS_EXPECT_EXISTS then any
 // real value matches (i.e. not a TOMBSTONE or DOES_NOT_EXIST) as long as <key> is in the table. If
 // <expected> is CAS_EXPECT_WHATEVER then skip the test entirely.
 //
@@ -331,13 +354,13 @@ static map_val_t hti_cas (hti_t *hti, map_key_t key, uint32_t key_hash, map_val_
         return DOES_NOT_EXIST;
 
     // Allocate <new_key>.
-    map_key_t new_key = (hti->ht->key_type == NULL)
-                      ? (map_key_t)key
+    map_key_t new_key = (hti->ht->key_type == NULL)
+                      ? (map_key_t)key
                       : (map_key_t)hti->ht->key_type->clone((void *)key);
 #ifndef NBD32
     if (EXPECT_FALSE(hti->ht->key_type != NULL)) {
-        // Combine pointer with bits from its hash
-        new_key = ((uint64_t)(key_hash >> 16) << 48) | new_key;
+        // Combine pointer with bits from its hash
+        new_key = ((uint64_t)(key_hash >> 16) << 48) | new_key;
     }
 #endif
@@ -347,7 +370,7 @@ static map_val_t hti_cas (hti_t *hti, map_key_t key, uint32_t key_hash, map_val_
 
     // Retry if another thread stole the entry out from under us.
     if (old_ent_key != DOES_NOT_EXIST) {
         TRACE("h0", "hti_cas: lost race to install key %p in entry %p", new_key, ent);
-        TRACE("h0", "hti_cas: found %p instead of NULL",
+        TRACE("h0", "hti_cas: found %p instead of NULL",
               (hti->ht->key_type == NULL) ? (void *)old_ent_key : GET_PTR(old_ent_key), 0);
         if (hti->ht->key_type != NULL) {
             nbd_free(GET_PTR(new_key));
@@ -355,20 +378,21 @@ static map_val_t hti_cas (hti_t *hti, map_key_t key, uint32_t key_hash, map_val_
         return hti_cas(hti, key, key_hash, expected, new); // tail-call
     }
     TRACE("h2", "hti_cas: installed key %p in entry %p", new_key, ent);
+    SYNC_ADD(&hti->key_count, 1);
 }
 
-    TRACE("h0", "hti_cas: entry for key %p is %p",
+    TRACE("h0", "hti_cas: entry for key %p is %p",
          (hti->ht->key_type == NULL) ? (void *)ent->key : GET_PTR(ent->key), ent);
 
     // If the entry is in the middle of a copy, the copy must be completed first.
     map_val_t ent_val = ent->val;
     if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
         if (ent_val != COPIED_VALUE && ent_val != TAG_VALUE(TOMBSTONE, TAG1)) {
-            int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
+            int did_copy = hti_copy_entry(hti, ent, key_hash, VOLATILE_DEREF(hti).next);
             if (did_copy) {
-                SYNC_ADD(&hti->num_entries_copied, 1);
+                (void)SYNC_ADD(&hti->num_entries_copied, 1);
             }
-            TRACE("h0", "hti_cas: value in the middle of a copy, copy completed by %s",
+            TRACE("h0", "hti_cas: value in the middle of a copy, copy completed by %s",
                   (did_copy ? "self" : "other"), 0);
         }
         TRACE("h0", "hti_cas: value copied to next table, retry on next table", 0, 0);
@@ -400,9 +424,9 @@ static map_val_t hti_cas (hti_t *hti, map_key_t key, uint32_t key_hash, map_val_
 
     // The set succeeded. Adjust the value count.
     if (old_existed && new == DOES_NOT_EXIST) {
-        SYNC_ADD(&hti->count, -1);
+        (void)SYNC_ADD(&hti->count, -1);
     } else if (!old_existed && new != DOES_NOT_EXIST) {
-        SYNC_ADD(&hti->count, 1);
+        (void)SYNC_ADD(&hti->count, 1);
     }
 
     // Return the previous value.
@@ -416,10 +440,10 @@ static map_val_t hti_get (hti_t *hti, map_key_t key, uint32_t key_hash) {
     volatile entry_t *ent = hti_lookup(hti, key, key_hash, &is_empty);
 
     // When hti_lookup() returns NULL it means we hit the reprobe limit while
-    // searching the table. In that case, if a copy is in progress the key
+    // searching the table. In that case, if a copy is in progress the key
     // might exist in the copy.
     if (EXPECT_FALSE(ent == NULL)) {
-        if (((volatile hti_t *)hti)->next != NULL)
+        if (VOLATILE_DEREF(hti).next != NULL)
            return hti_get(hti->next, key, key_hash); // recursive tail-call
        return DOES_NOT_EXIST;
     }
@@ -431,12 +455,12 @@ static map_val_t hti_get (hti_t *hti, map_key_t key, uint32_t key_hash) {
     map_val_t ent_val = ent->val;
     if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
         if (EXPECT_FALSE(ent_val != COPIED_VALUE && ent_val != TAG_VALUE(TOMBSTONE, TAG1))) {
-            int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
+            int did_copy = hti_copy_entry(hti, ent, key_hash, VOLATILE_DEREF(hti).next);
             if (did_copy) {
-                SYNC_ADD(&hti->num_entries_copied, 1);
+                (void)SYNC_ADD(&hti->num_entries_copied, 1);
             }
         }
-        return hti_get(((volatile hti_t *)hti)->next, key, key_hash); // tail-call
+        return hti_get(VOLATILE_DEREF(hti).next, key, key_hash); // tail-call
     }
 
     return (ent_val == TOMBSTONE) ? DOES_NOT_EXIST : ent_val;
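Several paths above (stolen entries, values caught mid-copy) resolve by retrying, which is the lock-free idiom hti_cas is built around: read, attempt one CAS, and on interference re-read and try again. A generic sketch of that loop using the GCC/Clang __sync builtins that nbds wraps as SYNC_CAS on x86 (the slot variable is a stand-in for an entry's val field, not the nbds API):

    #include <stdio.h>
    #include <stdint.h>

    static volatile uint64_t slot = 0;     // stands in for entry_t.val

    // Install new_val and return the value it replaced, retrying on races.
    static uint64_t cas_update (uint64_t new_val) {
        for (;;) {
            uint64_t expected = slot;      // read the current value
            uint64_t old = __sync_val_compare_and_swap(&slot, expected, new_val);
            if (old == expected)
                return old;                // the return matches expected iff the set succeeded
            // else: another thread got there first; loop and retry
        }
    }

    int main (void) {
        printf("previous value: %llu\n", (unsigned long long)cas_update(7));
        printf("previous value: %llu\n", (unsigned long long)cas_update(9));
        return 0;
    }

hti_cas follows the same shape but retries by tail-calling itself, and treats CAS_EXPECT_EXISTS / CAS_EXPECT_WHATEVER as wildcards when comparing the old value.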
@@ -455,23 +479,23 @@ map_val_t ht_get (hashtable_t *ht, map_key_t key) {
 // returns TRUE if copy is done
 static int hti_help_copy (hti_t *hti) {
     volatile entry_t *ent;
-    size_t limit;
+    size_t limit;
     size_t total_copied = hti->num_entries_copied;
     size_t num_copied = 0;
-    size_t x = hti->copy_scan;
+    size_t x = hti->copy_scan;
 
     TRACE("h1", "ht_cas: help copy. scan is %llu, size is %llu", x, 1<<hti->scale);
-    if (total_copied != (1 << hti->scale)) {
+    if (total_copied != (1ULL << hti->scale)) {
         // Panic if we've been around the array twice and still haven't finished the copy.
-        int panic = (x >= (1 << (hti->scale + 1)));
+        int panic = (x >= (1ULL << (hti->scale + 1)));
         if (!panic) {
             limit = ENTRIES_PER_COPY_CHUNK;
 
             // Reserve some entries for this thread to copy. There is a race condition here because the
             // fetch and add isn't atomic, but that is ok.
-            hti->copy_scan = x + ENTRIES_PER_COPY_CHUNK;
+            hti->copy_scan = x + ENTRIES_PER_COPY_CHUNK;
 
-            // <x> might be larger than the size of the table, if some thread stalls while
+            // <x> might be larger than the size of the table, if some thread stalls while
             // copying. In that case we just wrap around to the beginning and make another pass through
             // the table.
             ent = hti->table + (x & MASK(hti->scale));
@@ -479,26 +503,26 @@ static int hti_help_copy (hti_t *hti) {
             TRACE("h1", "ht_cas: help copy panic", 0, 0);
             // scan the whole table
             ent = hti->table;
-            limit = (1 << hti->scale);
+            limit = (1ULL << hti->scale);
         }
 
         // Copy the entries
         for (int i = 0; i < limit; ++i) {
             num_copied += hti_copy_entry(hti, ent++, 0, hti->next);
-            assert(ent <= hti->table + (1 << hti->scale));
+            assert(ent <= hti->table + (1ULL << hti->scale));
         }
         if (num_copied != 0) {
             total_copied = SYNC_ADD(&hti->num_entries_copied, num_copied);
         }
     }
 
-    return (total_copied == (1 << hti->scale));
+    return (total_copied == (1ULL << hti->scale));
 }
 
 static void hti_defer_free (hti_t *hti) {
     assert(hti->ref_count == 0);
 
-    for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
+    for (uint32_t i = 0; i < (1ULL << hti->scale); ++i) {
         map_key_t key = hti->table[i].key;
         map_val_t val = hti->table[i].val;
         if (val == COPIED_VALUE)
@@ -508,7 +532,11 @@ static void hti_defer_free (hti_t *hti) {
             rcu_defer_free(GET_PTR(key));
         }
     }
+#ifdef USE_SYSTEM_MALLOC
+    rcu_defer_free(hti->unaligned_table_ptr);
+#else
     rcu_defer_free((void *)hti->table);
+#endif
     rcu_defer_free(hti);
 }
 
@@ -583,7 +611,7 @@ size_t ht_count (hashtable_t *ht) {
     size_t count = 0;
     while (hti) {
         count += hti->count;
-        hti = hti->next;
+        hti = hti->next;
     }
     return count;
 }
@@ -593,6 +621,8 @@ hashtable_t *ht_alloc (const datatype_t *key_type) {
     hashtable_t *ht = nbd_malloc(sizeof(hashtable_t));
     ht->key_type = key_type;
     ht->hti = (hti_t *)hti_alloc(ht, MIN_SCALE);
+    ht->hti_copies = 0;
+    ht->density = 0.0;
     return ht;
 }
 
@@ -608,18 +638,24 @@ void ht_free (hashtable_t *ht) {
     nbd_free(ht);
 }
 
-void ht_print (hashtable_t *ht) {
+void ht_print (hashtable_t *ht, int verbose) {
+    printf("probe:%-2d density:%.1f%% count:%-8lld ", ht->probe, ht->density, (uint64_t)ht_count(ht));
     hti_t *hti = ht->hti;
     while (hti) {
-        printf("hti:%p scale:%u count:%d copied:%d\n", hti, hti->scale, hti->count, hti->num_entries_copied);
-        for (int i = 0; i < (1 << hti->scale); ++i) {
-            volatile entry_t *ent = hti->table + i;
-            printf("[0x%x] 0x%llx:0x%llx\n", i, (uint64_t)ent->key, (uint64_t)ent->val);
-            if (i > 30) {
-                printf("...\n");
-                break;
+        if (verbose) {
+            for (int i = 0; i < (1ULL << hti->scale); ++i) {
+                volatile entry_t *ent = hti->table + i;
+                printf("[0x%x] 0x%llx:0x%llx\n", i, (uint64_t)ent->key, (uint64_t)ent->val);
+                if (i > 30) {
+                    printf("...\n");
+                    break;
+                }
             }
         }
+        int scale = hti->scale;
+        printf("hti count:%lld scale:%d key density:%.1f%% value density:%.1f%% probe:%d\n",
+               (uint64_t)hti->count, scale, (double)hti->key_count / (1ULL << scale) * 100,
+               (double)hti->count / (1ULL << scale) * 100, hti->probe);
         hti = hti->next;
     }
 }
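hti_help_copy's reservation scheme is worth seeing in isolation: every helper bumps the shared copy_scan cursor by ENTRIES_PER_COPY_CHUNK and copies the chunk it claimed, wrapping around the table with MASK. The bump is deliberately non-atomic; two helpers that collide merely copy some entries twice, and hti_copy_entry treats an already-copied entry as a no-op. A sketch of the cursor arithmetic (chunk size and scale assumed; single-threaded here purely to show the index math):

    #include <stdio.h>

    #define ENTRIES_PER_COPY_CHUNK 8            // assumed chunk size
    #define MASK(scale) ((1u << (scale)) - 1)

    int main (void) {
        unsigned scale = 5;                     // a 32-entry table
        size_t copy_scan = 0;                   // shared cursor in the real code
        for (int helper = 0; helper < 5; ++helper) {
            size_t x = copy_scan;               // racy read-then-write, as in nbds
            copy_scan = x + ENTRIES_PER_COPY_CHUNK;
            printf("helper %d copies entries %u..%u\n", helper,
                   (unsigned)(x & MASK(scale)),
                   (unsigned)((x + ENTRIES_PER_COPY_CHUNK - 1) & MASK(scale)));
        }
        return 0;
    }

The wrap-around is also why the panic path exists: if the cursor has lapped the table twice and the copy still isn't done, some thread stalled mid-chunk, and the caller falls back to scanning the whole table.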
@@ -651,7 +687,7 @@ map_val_t ht_iter_next (ht_iter_t *iter, map_key_t *key_ptr) {
     volatile entry_t *ent;
     map_key_t key;
     map_val_t val;
-    size_t table_size = (1 << iter->hti->scale);
+    size_t table_size = (1ULL << iter->hti->scale);
     do {
         iter->idx++;
         if (iter->idx == table_size) {
@@ -663,9 +699,6 @@ map_val_t ht_iter_next (ht_iter_t *iter, map_key_t *key_ptr) {
 
     } while (key == DOES_NOT_EXIST || val == DOES_NOT_EXIST || val == TOMBSTONE);
 
-    if (key_ptr) {
-        *key_ptr = key;
-    }
     if (val == COPIED_VALUE) {
         const datatype_t *key_type = iter->hti->ht->key_type;
 #ifdef NBD32
@@ -674,8 +707,15 @@ map_val_t ht_iter_next (ht_iter_t *iter, map_key_t *key_ptr) {
         uint32_t hash = (key_type == NULL) ? murmur32_8b((uint64_t)key) : key_type->hash((void *)key);
 #endif
         val = hti_get(iter->hti->next, (map_key_t)ent->key, hash);
-    }
+
+        // Go to the next entry if key is already deleted.
+        if (val == DOES_NOT_EXIST)
+            return ht_iter_next(iter, key_ptr); // recursive tail-call
+    }
+
+    if (key_ptr) {
+        *key_ptr = key;
+    }
 
     return val;
 }
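The reordering in ht_iter_next is the Rui Ueyama fix mentioned in the header comment: the old code published *key_ptr before resolving COPIED_VALUE, so a key deleted during a table copy could be handed to the caller alongside DOES_NOT_EXIST. A distilled sketch of the fixed control flow (stub fetch function, not the nbds API; the real code advances to the next entry rather than returning):

    #include <stdio.h>
    #include <stdint.h>

    #define DOES_NOT_EXIST 0
    #define COPIED_VALUE   UINT64_MAX   // stand-in tag for "moved to the next table"

    // Stub for hti_get() on the next table: pretend the key was deleted there.
    static uint64_t next_table_get (uint64_t key) { (void)key; return DOES_NOT_EXIST; }

    static uint64_t iter_next (uint64_t key, uint64_t val, uint64_t *key_ptr) {
        if (val == COPIED_VALUE) {
            val = next_table_get(key);          // re-read from the next table
            if (val == DOES_NOT_EXIST)
                return DOES_NOT_EXIST;          // skip: the key is gone
        }
        if (key_ptr) { *key_ptr = key; }        // publish the key only after the re-check
        return val;
    }

    int main (void) {
        uint64_t key_out = 0;
        uint64_t v = iter_next(5, COPIED_VALUE, &key_out);
        printf("val %llu, key_out %llu (left untouched)\n",
               (unsigned long long)v, (unsigned long long)key_out);
        return 0;
    }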