* Every atomic operation is also an implicit full memory barrier. The upshot is that it simplifies
* the code a bit, but it won't be as fast as it could be on platforms that provide weaker
* operations, like an unfenced CAS, which would still do the job.
+ *
+ * 11Feb09 - Bug fix in ht_iter_next() from Rui Ueyama
*/
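/*
 * A minimal sketch of the tradeoff described above, assuming SYNC_ADD wraps
 * GCC's __sync_add_and_fetch (a full barrier). A plain statistics counter such
 * as hti->count needs no ordering, so a relaxed __atomic increment would still
 * do the job and lets weakly ordered platforms (ARM, POWER) skip the fences.
 */
static inline int counter_inc_fenced (volatile int *p) {
    return __sync_add_and_fetch(p, 1);                 // implicit full memory barrier
}
static inline int counter_inc_relaxed (volatile int *p) {
    return __atomic_add_fetch(p, 1, __ATOMIC_RELAXED); // no ordering implied
}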
#include <stdio.h>
#include "common.h"
#include "murmur.h"
#include "mem.h"
+#include "rcu.h"
#include "hashtable.h"
#ifndef NBD32
volatile entry_t *table;
hashtable_t *ht; // parent ht
struct hti *next;
+#ifdef USE_SYSTEM_MALLOC
+ void *unaligned_table_ptr; // system malloc doesn't guarantee cache-line alignment
+#endif
unsigned scale;
int max_probe;
int ref_count;
map_key_t ent_key = ent->key;
if (ent_key == DOES_NOT_EXIST) {
TRACE("h1", "hti_lookup: entry %p for key %p is empty", ent,
- (hti->ht->key_type == NULL) ? (void *)ent_key : GET_PTR(ent_key));
+ (hti->ht->key_type == NULL) ? (void *)key : GET_PTR(key));
*is_empty = 1; // indicate an empty so the caller avoids an expensive key compare
return ent;
}
static hti_t *hti_alloc (hashtable_t *parent, int scale) {
hti_t *hti = (hti_t *)nbd_malloc(sizeof(hti_t));
memset(hti, 0, sizeof(hti_t));
+ hti->scale = scale;
size_t sz = sizeof(entry_t) * ((size_t)1 << scale); // (size_t)1: avoid signed overflow when scale == 31
- entry_t *table = nbd_malloc(sz);
- memset(table, 0, sz);
- hti->table = table;
-
- hti->scale = scale;
+#ifdef USE_SYSTEM_MALLOC
+ hti->unaligned_table_ptr = nbd_malloc(sz + CACHE_LINE_SIZE - 1);
+ hti->table = (void *)(((size_t)hti->unaligned_table_ptr + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1));
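+ // E.g. with CACHE_LINE_SIZE == 64, an unaligned pointer 0x1007 rounds up to
+ // (0x1007 + 63) & ~63 == 0x1040, the next 64-byte boundary. This assumes
+ // CACHE_LINE_SIZE is a power of 2; <unaligned_table_ptr> is kept so it can
+ // be handed back to nbd_free() later.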
+#else
+ hti->table = nbd_malloc(sz);
+#endif
+ memset((void *)hti->table, 0, sz);
// When searching for a key, probe a maximum of 1/4 of the buckets, up to 1000 buckets.
hti->max_probe = ((1 << (hti->scale - 2)) / ENTRIES_PER_BUCKET) + 4;
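// E.g. (assuming ENTRIES_PER_BUCKET == 4, one 64-byte cache line of 16-byte
// entries): at scale 14 the table has 2^14 entries in 4096 buckets, so
// max_probe == (1 << 12) / 4 + 4 == 1028.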
if (old_next != NULL) {
// Another thread beat us to it.
TRACE("h0", "hti_start_copy: lost race to install new hti; found %p", old_next, 0);
- nbd_free(next);
+#ifdef USE_SYSTEM_MALLOC
+ nbd_free(next->unaligned_table_ptr);
+#else
+ nbd_free((void *)next->table);
+#endif
return;
}
TRACE("h0", "hti_start_copy: new hti %p scale %llu", next, next->scale);
// We use 0 to indicate that <key_hash> is uninitialized. Occasionally a key's hash really will be 0;
// then we waste time recomputing it on every call, but that is rare enough that it won't hurt performance.
if (key_hash == 0) {
- key_hash = (ht1->ht->key_type == NULL)
- ? murmur32_8b(ht1_ent_key)
- : ht1->ht->key_type->hash((void *)key);
+#ifdef NBD32
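+ // 32-bit build: keys are 4 bytes wide, so hash with the 4-byte murmur variant.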
+ key_hash = (ht1->ht->key_type == NULL) ? murmur32_4b(ht1_ent_key) : ht1->ht->key_type->hash((void *)key);
+#else
+ key_hash = (ht1->ht->key_type == NULL) ? murmur32_8b(ht1_ent_key) : ht1->ht->key_type->hash((void *)key);
+#endif
}
int ht2_ent_is_empty;
// Update the count if we were the one that completed the copy.
if (old_ht2_ent_val == DOES_NOT_EXIST) {
TRACE("h0", "hti_copy_entry: key %p value %p copied to new entry", key, ht1_ent_val);
- SYNC_ADD(&ht1->count, -1);
- SYNC_ADD(&ht2->count, 1);
+ (void)SYNC_ADD(&ht1->count, -1);
+ (void)SYNC_ADD(&ht2->count, 1);
return TRUE;
}
map_val_t ent_val = ent->val;
if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
if (ent_val != COPIED_VALUE && ent_val != TAG_VALUE(TOMBSTONE, TAG1)) {
- int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
+ int did_copy = hti_copy_entry(hti, ent, key_hash, VOLATILE_DEREF(hti).next);
if (did_copy) {
- SYNC_ADD(&hti->num_entries_copied, 1);
+ (void)SYNC_ADD(&hti->num_entries_copied, 1);
}
TRACE("h0", "hti_cas: value in the middle of a copy, copy completed by %s",
(did_copy ? "self" : "other"), 0);
// The set succeeded. Adjust the value count.
if (old_existed && new == DOES_NOT_EXIST) {
- SYNC_ADD(&hti->count, -1);
+ (void)SYNC_ADD(&hti->count, -1);
} else if (!old_existed && new != DOES_NOT_EXIST) {
- SYNC_ADD(&hti->count, 1);
+ (void)SYNC_ADD(&hti->count, 1);
}
// Return the previous value.
// searching the table. In that case, if a copy is in progress the key
// might exist in the copy.
if (EXPECT_FALSE(ent == NULL)) {
- if (((volatile hti_t *)hti)->next != NULL)
+ if (VOLATILE_DEREF(hti).next != NULL)
return hti_get(hti->next, key, key_hash); // recursive tail-call
return DOES_NOT_EXIST;
}
map_val_t ent_val = ent->val;
if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
if (EXPECT_FALSE(ent_val != COPIED_VALUE && ent_val != TAG_VALUE(TOMBSTONE, TAG1))) {
- int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
+ int did_copy = hti_copy_entry(hti, ent, key_hash, VOLATILE_DEREF(hti).next);
if (did_copy) {
- SYNC_ADD(&hti->num_entries_copied, 1);
+ (void)SYNC_ADD(&hti->num_entries_copied, 1);
}
}
- return hti_get(((volatile hti_t *)hti)->next, key, key_hash); // tail-call
+ return hti_get(VOLATILE_DEREF(hti).next, key, key_hash); // tail-call
}
return (ent_val == TOMBSTONE) ? DOES_NOT_EXIST : ent_val;
//
map_val_t ht_get (hashtable_t *ht, map_key_t key) {
+#ifdef NBD32
+ uint32_t hash = (ht->key_type == NULL) ? murmur32_4b((uint64_t)key) : ht->key_type->hash((void *)key);
+#else
uint32_t hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash((void *)key);
+#endif
return hti_get(ht->hti, key, hash);
}
continue;
assert(!IS_TAGGED(val, TAG1) || val == TAG_VALUE(TOMBSTONE, TAG1)); // copy not in progress
if (hti->ht->key_type != NULL && key != DOES_NOT_EXIST) {
- nbd_defer_free(GET_PTR(key));
+ rcu_defer_free(GET_PTR(key));
}
}
- nbd_defer_free((void *)hti->table);
- nbd_defer_free(hti);
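+ // Freeing is deferred through RCU because concurrent readers may still hold
+ // references to <hti> and its table; rcu_defer_free() delays the actual
+ // free until no reader can still be using the memory.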
+#ifdef USE_SYSTEM_MALLOC
+ rcu_defer_free(hti->unaligned_table_ptr);
+#else
+ rcu_defer_free((void *)hti->table);
+#endif
+ rcu_defer_free(hti);
}
static void hti_release (hti_t *hti) {
}
map_val_t old_val;
+#ifdef NBD32
+ uint32_t key_hash = (ht->key_type == NULL) ? murmur32_4b((uint64_t)key) : ht->key_type->hash((void *)key);
+#else
uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash((void *)key);
+#endif
while ((old_val = hti_cas(hti, key, key_hash, expected_val, new_val)) == COPIED_VALUE) {
assert(hti->next);
hti = hti->next;
map_val_t ht_remove (hashtable_t *ht, map_key_t key) {
hti_t *hti = ht->hti;
map_val_t val;
+#ifdef NBD32
+ uint32_t key_hash = (ht->key_type == NULL) ? murmur32_4b((uint64_t)key) : ht->key_type->hash((void *)key);
+#else
uint32_t key_hash = (ht->key_type == NULL) ? murmur32_8b((uint64_t)key) : ht->key_type->hash((void *)key);
+#endif
do {
val = hti_cas(hti, key, key_hash, CAS_EXPECT_WHATEVER, DOES_NOT_EXIST);
if (val != COPIED_VALUE)
} while (key == DOES_NOT_EXIST || val == DOES_NOT_EXIST || val == TOMBSTONE);
- if (key_ptr) {
- *key_ptr = key;
- }
if (val == COPIED_VALUE) {
- uint32_t hash = (iter->hti->ht->key_type == NULL)
- ? murmur32_8b((uint64_t)key)
- : iter->hti->ht->key_type->hash((void *)key);
+ const datatype_t *key_type = iter->hti->ht->key_type;
+#ifdef NBD32
+ uint32_t hash = (key_type == NULL) ? murmur32_4b((uint64_t)key) : key_type->hash((void *)key);
+#else
+ uint32_t hash = (key_type == NULL) ? murmur32_8b((uint64_t)key) : key_type->hash((void *)key);
+#endif
val = hti_get(iter->hti->next, (map_key_t)ent->key, hash);
+
+ // Go to the next entry if key is already deleted.
+ if (val == DOES_NOT_EXIST)
+ return ht_iter_next(iter, key_ptr); // recursive tail-call
}
+ if (key_ptr) {
+ *key_ptr = key;
+ }
return val;
}