#define MIN_SCALE (__builtin_ctz(ENTRIES_PER_BUCKET) + 2) // min 4 buckets
#define MAX_BUCKETS_TO_PROBE 250
+#define GET_PTR(x) (string_t *)((x) & MASK(48)) // low-order 48 bits is a pointer to a string_t
+
typedef struct ht_entry {
uint64_t key;
uint64_t value;
// Compare the packed entry word <a> against a candidate key given by its hash, bytes, and length.
// An entry word packs the high 16 bits of the key's hash into bits 48-63 and a string_t pointer
// into the low 48 bits (see GET_PTR). Returns TRUE only on a full byte-wise match.
static inline int ht_key_equals (uint64_t a, uint32_t b_hash, const char *b_value, uint32_t b_len) {
// Fast reject: compare the 16 cached hash bits before dereferencing the key pointer.
if ((b_hash >> 16) != (a >> 48)) // high-order 16 bits are from the hash value
return FALSE;
- const string_t *a_key = (string_t *)(a & MASK(48)); // low-order 48 bits is a pointer
+ const string_t *a_key = GET_PTR(a);
assert(a_key);
// Hash bits matched; confirm with an exact length and content comparison.
return a_key->len == b_len && memcmp(a_key->val, b_value, b_len) == 0;
}
if (ht_key_equals(e_key, key_hash, key_val, key_len)) {
TRACE("h0", "hti_lookup: entry %p found on probe %d", e, i*ENTRIES_PER_BUCKET+j+1);
- TRACE("h0", "hti_lookup: with key \"%s\" value %p",
- ((string_t *)(e_key & MASK(48)))->val, e->value);
+ TRACE("h0", "hti_lookup: with key \"%s\" value %p", GET_PTR(e_key)->val, e->value);
return e;
}
}
// be freed.
assert(COPIED_VALUE == TAG_VALUE(TOMBSTONE));
if (ht1_e_value == TOMBSTONE) {
- nbd_defer_free((string_t *)(ht1_e->key & MASK(48)));
+ nbd_defer_free(GET_PTR(ht1_e->key));
return TRUE;
}
// Install the key in the new table.
uint64_t key = ht1_e->key;
- string_t *key_string = (string_t *)(key & MASK(48));
+ string_t *key_string = GET_PTR(key);
uint64_t value = STRIP_TAG(ht1_e_value);
TRACE("h0", "hti_copy_entry: key %p is %s", key, key_string->val);
// Retry if another thread stole the entry out from under us.
if (e_key != DOES_NOT_EXIST) {
- TRACE("h0", "hti_compare_and_set: key in entry %p is \"%s\"", e, e_key & MASK(48));
+ TRACE("h0", "hti_compare_and_set: key in entry %p is \"%s\"", e, GET_PTR(e_key)->val);
TRACE("h0", "hti_compare_and_set: lost race to install key \"%s\" in %p", key->val, e);
nbd_free(key);
return hti_compare_and_set(hti, key_hash, key_val, key_len, expected, new); // tail-call
// Help with an ongoing copy.
if (EXPECT_FALSE(hti->next != NULL)) {
-
- // Reserve some entries for this thread to copy. There is a race condition here because the
- // fetch and add isn't atomic, but that is ok.
+ volatile entry_t *e;
+ uint64_t limit;
+ int num_copied = 0;
int x = hti->scan;
- hti->scan = x + ENTRIES_PER_COPY_CHUNK;
- // <hti->scan> might be larger than the size of the table, if some thread stalls while
- // copying. In that case we just wrap around to the begining and make another pass through
- // the table.
- volatile entry_t *e = hti->table + (x & MASK(hti->scale));
+ // Panic if we've been around the array twice and still haven't finished the copy.
+ int panic = (x >= (1 << (hti->scale + 1)));
+ if (!panic) {
+ limit = ENTRIES_PER_COPY_CHUNK;
+
+ // Reserve some entries for this thread to copy. There is a race condition here because the
+ // fetch and add isn't atomic, but that is ok.
+ hti->scan = x + ENTRIES_PER_COPY_CHUNK;
+
+ // <hti->scan> might be larger than the size of the table, if some thread stalls while
+ // copying. In that case we just wrap around to the beginning and make another pass through
+ // the table.
+ e = hti->table + (x & MASK(hti->scale));
+ } else {
+ // scan the whole table
+ limit = (1 << hti->scale);
+ e = hti->table;
+ }
// Copy the entries
- int num_copied = 0, i;
- for (i = 0; i < ENTRIES_PER_COPY_CHUNK; ++i) {
+ for (int i = 0; i < limit; ++i) {
num_copied += hti_copy_entry(hti, e++, 0, hti->next);
assert(e <= hti->table + (1 << hti->scale));
}
if (num_copied != 0) {
SYNC_ADD(&hti->num_entries_copied, num_copied);
}
- }
- // Dispose of fully copied tables.
- while (hti->num_entries_copied == (1 << hti->scale)) {
- assert(hti->next);
- if (SYNC_CAS(ht, hti, hti->next) == hti) {
- nbd_defer_free(hti);
+ // Dispose of fully copied tables.
+ if (hti->num_entries_copied == (1 << hti->scale) || panic) {
+ assert(hti->next);
+ if (SYNC_CAS(ht, hti, hti->next) == hti) {
+ nbd_defer_free(hti);
+ }
}
- hti = *ht;
}
uint64_t old_val;
void ht_free (hash_table_t *ht) {
hash_table_i_t *hti = *ht;
do {
+ for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
+ assert(hti->table[i].value == COPIED_VALUE || !IS_TAGGED(hti->table[i].value));
+ if (hti->table[i].key != DOES_NOT_EXIST) {
+ nbd_free(GET_PTR(hti->table[i].key));
+ }
+ }
hash_table_i_t *next = hti->next;
nbd_free(hti);
hti = next;