#include "common.h"
#include "murmur.h"
#include "mem.h"
-#include "mlocal.h"
+#include "hashtable.h"
#define GET_PTR(x) ((void *)((x) & MASK(48))) // low-order 48 bits is a pointer to a nstring_t
} hti_t;
struct ht {
- hti_t *hti;
- cmp_fun_t cmp_fun;
- hash_fun_t hash_fun;
- clone_fun_t clone_fun;
+ hti_t *hti;
+ const datatype_t *key_type;
};
-static const map_impl_t ht_map_impl = {
- (map_alloc_t)ht_alloc, (map_cas_t)ht_cas, (map_get_t)ht_get, (map_remove_t)ht_remove,
- (map_count_t)ht_count, (map_print_t)ht_print, (map_free_t)ht_free
-};
-
-const map_impl_t *MAP_TYPE_HASHTABLE = &ht_map_impl;
-
static const uint64_t COPIED_VALUE = -1;
-static const uint64_t TOMBSTONE = STRIP_TAG(-1);
+static const uint64_t TOMBSTONE = STRIP_TAG(-1, TAG1);
static const unsigned ENTRIES_PER_BUCKET = CACHE_LINE_SIZE/sizeof(entry_t);
static const unsigned ENTRIES_PER_COPY_CHUNK = CACHE_LINE_SIZE/sizeof(entry_t)*2;
uint64_t ent_key = ent->key;
if (ent_key == DOES_NOT_EXIST) {
TRACE("h1", "hti_lookup: entry %p for key %p is empty", ent,
- (hti->ht->clone_fun == NULL) ? (void *)ent_key : GET_PTR(ent_key));
+ (hti->ht->key_type == NULL) ? (void *)ent_key : GET_PTR(ent_key));
*is_empty = 1; // indicate an empty so the caller avoids an expensive key compare
return ent;
}
// Compare <key> with the key in the entry.
- if (EXPECT_TRUE(hti->ht->cmp_fun == NULL)) {
+ if (EXPECT_TRUE(hti->ht->key_type == NULL)) {
// fast path for integer keys
if (ent_key == (uint64_t)key) {
TRACE("h1", "hti_lookup: found entry %p with key %p", ent, ent_key);
// The key in <ent> is made up of two parts. The 48 low-order bits are a pointer. The
// high-order 16 bits are taken from the hash. The bits from the hash are used as a
// quick check to rule out non-equal keys without doing a complete compare.
- if ((key_hash >> 16) == (ent_key >> 48) && hti->ht->cmp_fun(GET_PTR(ent_key), key) == 0) {
+ if ((key_hash >> 16) == (ent_key >> 48) && hti->ht->key_type->cmp(GET_PTR(ent_key), key) == 0) {
TRACE("h1", "hti_lookup: found entry %p with key %p", ent, GET_PTR(ent_key));
return ent;
}
assert(ht1->next);
assert(ht2);
assert(ht1_ent >= ht1->table && ht1_ent < ht1->table + (1 << ht1->scale));
- assert(key_hash == 0 || ht1->ht->hash_fun == NULL || (key_hash >> 16) == (ht1_ent->key >> 48));
+ assert(key_hash == 0 || ht1->ht->key_type == NULL || (key_hash >> 16) == (ht1_ent->key >> 48));
uint64_t ht1_ent_val = ht1_ent->val;
if (EXPECT_FALSE(ht1_ent_val == COPIED_VALUE)) {
}
// Tag the value in the old entry to indicate a copy is in progress.
- ht1_ent_val = SYNC_FETCH_AND_OR(&ht1_ent->val, TAG_VALUE(0));
+ ht1_ent_val = SYNC_FETCH_AND_OR(&ht1_ent->val, TAG_VALUE(0, TAG1));
TRACE("h2", "hti_copy_entry: tagged the value %p in old entry %p", ht1_ent_val, ht1_ent);
if (ht1_ent_val == COPIED_VALUE) {
TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_ent, ht2);
// Install the key in the new table.
uint64_t ht1_ent_key = ht1_ent->key;
- void *key = (ht1->ht->hash_fun == NULL) ? (void *)ht1_ent_key : GET_PTR(ht1_ent_key);
+ void *key = (ht1->ht->key_type == NULL) ? (void *)ht1_ent_key : GET_PTR(ht1_ent_key);
// The old table's dead entries don't need to be copied to the new table, but their keys need to be freed.
- assert(COPIED_VALUE == TAG_VALUE(TOMBSTONE));
+ assert(COPIED_VALUE == TAG_VALUE(TOMBSTONE, TAG1));
if (ht1_ent_val == TOMBSTONE) {
TRACE("h1", "hti_copy_entry: entry %p old value was deleted, now freeing key %p", ht1_ent, key);
- if (EXPECT_FALSE(ht1->ht->clone_fun != NULL)) {
+ if (EXPECT_FALSE(ht1->ht->key_type != NULL)) {
nbd_defer_free(key);
}
return TRUE;
-// We use 0 to indicate that <key_hash> is uninitiallized. Occasionally the key's hash will really be 0 and we
+// We use 0 to indicate that <key_hash> is uninitialized. Occasionally the key's hash will really be 0 and we
// waste time recomputing it every time. It is rare enough (1 in 65k) that it won't hurt performance.
if (key_hash == 0) {
- key_hash = (ht1->ht->hash_fun == NULL) ? murmur32_8b(ht1_ent_key) : ht1->ht->hash_fun(key);
+ key_hash = (ht1->ht->key_type == NULL) ? murmur32_8b(ht1_ent_key) : ht1->ht->key_type->hash(key);
}
int ht2_ent_is_empty;
}
// Copy the value to the entry in the new table.
- ht1_ent_val = STRIP_TAG(ht1_ent_val);
+ ht1_ent_val = STRIP_TAG(ht1_ent_val, TAG1);
uint64_t old_ht2_ent_val = SYNC_CAS(&ht2_ent->val, DOES_NOT_EXIST, ht1_ent_val);
// If there is a nested copy in progress, we might have installed the key into a dead entry.
}
// Compare <expected> with the existing value associated with <key>. If the values match then
-// replace the existing value with <new>. If <new> is TOMBSTONE, delete the value associated with
+// replace the existing value with <new>. If <new> is DOES_NOT_EXIST, delete the value associated with
// the key by replacing it with a TOMBSTONE.
//
// Return the previous value associated with <key>, or DOES_NOT_EXIST if <key> is not in the table
TRACE("h1", "hti_cas: hti %p key %p", hti, key);
TRACE("h1", "hti_cas: value %p expect %p", new, expected);
assert(hti);
- assert(new != DOES_NOT_EXIST && !IS_TAGGED(new));
+ assert(!IS_TAGGED(new, TAG1));
assert(key);
int is_empty;
return DOES_NOT_EXIST;
// No need to do anything, <key> is already deleted.
- if (new == TOMBSTONE)
+ if (new == DOES_NOT_EXIST)
return DOES_NOT_EXIST;
// Allocate <new_key>.
- uint64_t new_key = (uint64_t)((hti->ht->clone_fun == NULL) ? key : hti->ht->clone_fun(key));
- if (EXPECT_FALSE(hti->ht->hash_fun != NULL)) {
+ uint64_t new_key = (uint64_t)((hti->ht->key_type == NULL) ? key : hti->ht->key_type->clone(key));
+ if (EXPECT_FALSE(hti->ht->key_type != NULL)) {
// Combine <new_key> pointer with bits from its hash
new_key = ((uint64_t)(key_hash >> 16) << 48) | new_key;
}
if (old_ent_key != DOES_NOT_EXIST) {
TRACE("h0", "hti_cas: lost race to install key %p in entry %p", new_key, ent);
TRACE("h0", "hti_cas: found %p instead of NULL",
- (hti->ht->clone_fun != NULL) ? (void *)old_ent_key : GET_PTR(old_ent_key), 0);
- if (hti->ht->clone_fun != NULL) {
+ (hti->ht->key_type == NULL) ? (void *)old_ent_key : GET_PTR(old_ent_key), 0);
+ if (hti->ht->key_type != NULL) {
nbd_free(GET_PTR(new_key));
}
return hti_cas(hti, key, key_hash, expected, new); // tail-call
}
TRACE("h0", "hti_cas: entry for key %p is %p",
- (hti->ht->clone_fun != NULL) ? (void *)ent->key : GET_PTR(ent->key), ent);
+ (hti->ht->key_type == NULL) ? (void *)ent->key : GET_PTR(ent->key), ent);
// If the entry is in the middle of a copy, the copy must be completed first.
uint64_t ent_val = ent->val;
- if (EXPECT_FALSE(IS_TAGGED(ent_val))) {
+ if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
if (ent_val != COPIED_VALUE) {
int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
if (did_copy) {
}
// No need to update if value is unchanged.
- if ((new == TOMBSTONE && !old_existed) || ent_val == new) {
+ if ((new == DOES_NOT_EXIST && !old_existed) || ent_val == new) {
TRACE("h1", "hti_cas: old value and new value were the same", 0, 0);
return ent_val;
}
// CAS the value into the entry. Retry if it fails.
- uint64_t v = SYNC_CAS(&ent->val, ent_val, new);
+ uint64_t v = SYNC_CAS(&ent->val, ent_val, new == DOES_NOT_EXIST ? TOMBSTONE : new);
if (EXPECT_FALSE(v != ent_val)) {
TRACE("h0", "hti_cas: value CAS failed; expected %p found %p", ent_val, v);
return hti_cas(hti, key, key_hash, expected, new); // recursive tail-call
}
// The set succeeded. Adjust the value count.
- if (old_existed && new == TOMBSTONE) {
+ if (old_existed && new == DOES_NOT_EXIST) {
SYNC_ADD(&hti->count, -1);
- } else if (!old_existed && new != TOMBSTONE) {
+ } else if (!old_existed && new != DOES_NOT_EXIST) {
SYNC_ADD(&hti->count, 1);
}
// If the entry is being copied, finish the copy and retry on the next table.
uint64_t ent_val = ent->val;
- if (EXPECT_FALSE(IS_TAGGED(ent_val))) {
+ if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
if (EXPECT_FALSE(ent_val != COPIED_VALUE)) {
int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
if (did_copy) {
//
uint64_t ht_get (hashtable_t *ht, void *key) {
- uint32_t hash = (ht->hash_fun == NULL) ? murmur32_8b((uint64_t)key) : ht->hash_fun(key);
+ uint32_t hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash(key);
return hti_get(ht->hti, key, hash);
}
TRACE("h2", "ht_cas: key %p ht %p", key, ht);
TRACE("h2", "ht_cas: expected val %p new val %p", expected_val, new_val);
assert(key != DOES_NOT_EXIST);
- assert(!IS_TAGGED(new_val) && new_val != DOES_NOT_EXIST && new_val != TOMBSTONE);
+ assert(!IS_TAGGED(new_val, TAG1) && new_val != DOES_NOT_EXIST && new_val != TOMBSTONE);
hti_t *hti = ht->hti;
}
uint64_t old_val;
- uint32_t key_hash = (ht->hash_fun == NULL) ? murmur32_8b((uint64_t)key) : ht->hash_fun(key);
+ uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash(key);
while ((old_val = hti_cas(hti, key, key_hash, expected_val, new_val)) == COPIED_VALUE) {
assert(hti->next);
hti = hti->next;
uint64_t ht_remove (hashtable_t *ht, void *key) {
hti_t *hti = ht->hti;
uint64_t val;
- uint32_t key_hash = (ht->hash_fun == NULL) ? murmur32_8b((uint64_t)key) : ht->hash_fun(key);
+ uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash(key);
do {
- val = hti_cas(hti, key, key_hash, CAS_EXPECT_WHATEVER, TOMBSTONE);
+ val = hti_cas(hti, key, key_hash, CAS_EXPECT_WHATEVER, DOES_NOT_EXIST);
if (val != COPIED_VALUE)
return val == TOMBSTONE ? DOES_NOT_EXIST : val;
assert(hti->next);
}
// Allocate and initialize a new hash table.
-hashtable_t *ht_alloc (cmp_fun_t cmp_fun, hash_fun_t hash_fun, clone_fun_t clone_fun) {
+hashtable_t *ht_alloc (const datatype_t *key_type) {
hashtable_t *ht = nbd_malloc(sizeof(hashtable_t));
- ht->cmp_fun = cmp_fun;
- ht->hash_fun = hash_fun;
- ht->clone_fun = clone_fun;
+ ht->key_type = key_type;
ht->hti = (hti_t *)hti_alloc(ht, MIN_SCALE);
return ht;
}
hti_t *hti = ht->hti;
do {
for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
- assert(hti->table[i].val == COPIED_VALUE || !IS_TAGGED(hti->table[i].val));
- if (ht->clone_fun != NULL && hti->table[i].key != DOES_NOT_EXIST) {
+ assert(hti->table[i].val == COPIED_VALUE || !IS_TAGGED(hti->table[i].val, TAG1));
+ if (ht->key_type != NULL && hti->table[i].key != DOES_NOT_EXIST) {
nbd_free(GET_PTR(hti->table[i].key));
}
}