typedef const struct map_impl *map_type_t;
+typedef int (*cmp_fun_t) (void *, void *);
+typedef void * (*clone_fun_t) (void *);
+typedef uint32_t (*hash_fun_t) (void *);
+
extern map_type_t MAP_TYPE_HASHTABLE;
extern map_type_t MAP_TYPE_SKIPLIST;
extern map_type_t MAP_TYPE_LIST;
-map_t * map_alloc (map_type_t map_type);
-uint64_t map_get (map_t *map, const void *key_data, uint32_t key_len);
-uint64_t map_set (map_t *map, const void *key_data, uint32_t key_len, uint64_t new_val);
-uint64_t map_add (map_t *map, const void *key_data, uint32_t key_len, uint64_t new_val);
-uint64_t map_cas (map_t *map, const void *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val);
-uint64_t map_replace(map_t *map, const void *key_data, uint32_t key_len, uint64_t new_val);
-uint64_t map_remove (map_t *map, const void *key_data, uint32_t key_len);
+map_t * map_alloc (map_type_t map_type, cmp_fun_t cmp_fun, hash_fun_t hash_fun, clone_fun_t clone_fun);
+uint64_t map_get (map_t *map, void *key);
+uint64_t map_set (map_t *map, void *key, uint64_t new_val);
+uint64_t map_add (map_t *map, void *key, uint64_t new_val);
+uint64_t map_cas (map_t *map, void *key, uint64_t expected_val, uint64_t new_val);
+uint64_t map_replace(map_t *map, void *key, uint64_t new_val);
+uint64_t map_remove (map_t *map, void *key);
uint64_t map_count (map_t *map);
void map_print (map_t *map);
void map_free (map_t *map);
return h;
}
+
+static inline unsigned int murmur32_8b (uint64_t key)
+{
+ return murmur32((char *)&key, 8);
+}
--- /dev/null
+#ifndef NSTRING_H
+#define NSTRING_H
+
+typedef struct nstring {
+ uint32_t len;
+ char data[];
+} nstring_t;
+
+nstring_t * ns_alloc (uint32_t len);
+int ns_cmp (const nstring_t *ns1, const nstring_t *ns2);
+uint32_t ns_hash (const nstring_t *ns);
+nstring_t * ns_dup (const nstring_t *ns);
+
+#endif//NSTRING_H
void txn_abort (txn_t *txn);
txn_state_e txn_commit (txn_t *txn);
-uint64_t tm_get (txn_t *txn, const char *key, uint32_t key_len);
-void tm_set (txn_t *txn, const char *key, uint32_t key_len, uint64_t value);
+uint64_t tm_get (txn_t *txn, void *key);
+void tm_set (txn_t *txn, void *key, uint64_t value);
#endif//TXN_H
TESTS := output/map_test1 output/map_test2 output/rcu_test output/txn_test
EXES := $(TESTS)
-RUNTIME_SRCS := runtime/runtime.c runtime/rcu.c runtime/lwt.c runtime/mem.c
-MAP_SRCS := map/map.c map/nstring.c map/list.c map/skiplist.c map/hashtable.c
+RUNTIME_SRCS := runtime/runtime.c runtime/rcu.c runtime/lwt.c runtime/mem.c runtime/nstring.c
+MAP_SRCS := map/map.c map/list.c map/skiplist.c map/hashtable.c
rcu_test_SRCS := $(RUNTIME_SRCS) test/rcu_test.c
txn_test_SRCS := $(RUNTIME_SRCS) $(MAP_SRCS) test/txn_test.c test/CuTest.c txn/txn.c
* weaker operations which would still do the job.
*/
+#include <stdio.h>
#include "common.h"
#include "murmur.h"
#include "mem.h"
#include "mlocal.h"
-#include "nstring.h"
-#define GET_PTR(x) ((nstring_t *)((x) & MASK(48))) // low-order 48 bits is a pointer to a nstring_t
+#define GET_PTR(x) ((void *)((x) & MASK(48))) // low-order 48 bits of the key is a pointer to the key's data
typedef struct ht_entry {
- uint64_t key; // ptr to nstring_t
- uint64_t value;
+ uint64_t key;
+ uint64_t val;
} entry_t;
typedef struct hti {
volatile entry_t *table;
hashtable_t *ht; // parent ht;
struct hti *next;
- struct hti *next_free;
unsigned int scale;
int max_probe;
int count; // TODO: make these counters distributed
int num_entries_copied;
int scan;
-} hashtable_i_t;
+} hti_t;
struct ht {
- hashtable_i_t *hti;
+ hti_t *hti;
+ cmp_fun_t cmp_fun;
+ hash_fun_t hash_fun;
+ clone_fun_t clone_fun;
};
static const map_impl_t ht_map_impl = {
static const unsigned MIN_SCALE = 4; // min 16 entries (4 buckets)
static const unsigned MAX_BUCKETS_TO_PROBE = 250;
-static int hti_copy_entry (hashtable_i_t *ht1, volatile entry_t *e, uint32_t e_key_hash, hashtable_i_t *ht2);
+static int hti_copy_entry (hti_t *ht1, volatile entry_t *ent, uint32_t ent_key_hash, hti_t *ht2);
// Choose the next bucket to probe using the high-order bits of <key_hash>.
static inline int get_next_ndx(int old_ndx, uint32_t key_hash, int ht_scale) {
return (old_ndx + incr) & MASK(ht_scale);
}
-// Compare two keys.
-//
-// A key is made up of two parts. The 48 low-order bits are a pointer to a null terminated string.
-// The high-order 16 bits are taken from the hash of that string. The bits from the hash are used
-// as a quick check to rule out non-equal keys without doing a complete string compare.
-static inline int ht_key_equals (uint64_t a, uint32_t b_hash, const char *b_value, uint32_t b_len) {
- if ((b_hash >> 16) != (a >> 48)) // high-order 16 bits are from the hash value
- return FALSE;
- return ns_cmp_raw(GET_PTR(a), b_value, b_len) == 0;
-}
-
// Lookup <key> in <hti>.
//
// Return the entry that <key> is in, or if <key> isn't in <hti> return the entry that it would be
// in if it were inserted into <hti>. If there is no room for <key> in <hti> then return NULL, to
// indicate that the caller should look in <hti->next>.
//
-// Record if the entry being returned is empty. Otherwise the caller will have to waste time with
-// ht_key_equals() to confirm that it did not lose a race to fill an empty entry.
-static volatile entry_t *hti_lookup (hashtable_i_t *hti, uint32_t key_hash, const char *key_data, uint32_t key_len, int *is_empty) {
- TRACE("h2", "hti_lookup(key %p in hti %p)", key_data, hti);
+// Record if the entry being returned is empty. Otherwise the caller will have to waste time
+// re-comparing the keys to confirm that it did not lose a race to fill an empty entry.
+static volatile entry_t *hti_lookup (hti_t *hti, void *key, uint32_t key_hash, int *is_empty) {
+ TRACE("h2", "hti_lookup(key %p in hti %p)", key, hti);
*is_empty = 0;
// Probe one cache line at a time
// Start searching at the indexed entry. Then loop around to the begining of the cache line.
for (int j = 0; j < ENTRIES_PER_BUCKET; ++j) {
- volatile entry_t *e = bucket + ((ndx + j) & (ENTRIES_PER_BUCKET-1));
-
- uint64_t e_key = e->key;
- if (e_key == DOES_NOT_EXIST) {
- TRACE("h1", "hti_lookup: entry %p for key \"%s\" is empty", e, GET_PTR(e_key)->data);
- *is_empty = 1; // indicate an empty so the caller avoids an expensive ht_key_equals
- return e;
+ volatile entry_t *ent = bucket + ((ndx + j) & (ENTRIES_PER_BUCKET-1));
+
+ uint64_t ent_key = ent->key;
+ if (ent_key == DOES_NOT_EXIST) {
+ TRACE("h1", "hti_lookup: entry %p for key %p is empty", ent,
+ (hti->ht->clone_fun == NULL) ? (void *)ent_key : GET_PTR(ent_key));
+ *is_empty = 1; // indicate an empty so the caller avoids an expensive key compare
+ return ent;
}
- if (ht_key_equals(e_key, key_hash, key_data, key_len)) {
- TRACE("h1", "hti_lookup: entry %p key \"%s\"", e, GET_PTR(e_key)->data);
- TRACE("h2", "hti_lookup: entry key len %llu, value %p", GET_PTR(e_key)->len, e->value);
- return e;
+ // Compare <key> with the key in the entry.
+ if (EXPECT_TRUE(hti->ht->cmp_fun == NULL)) {
+ // fast path for integer keys
+ if (ent_key == (uint64_t)key) {
+ TRACE("h1", "hti_lookup: found entry %p with key %p", ent, ent_key);
+ return ent;
+ }
+ } else {
+ // The key in <ent> is made up of two parts. The 48 low-order bits are a pointer. The
+ // high-order 16 bits are taken from the hash. The bits from the hash are used as a
+ // quick check to rule out non-equal keys without doing a complete compare.
+ if ((key_hash >> 16) == (ent_key >> 48) && hti->ht->cmp_fun(GET_PTR(ent_key), key) == 0) {
+ TRACE("h1", "hti_lookup: found entry %p with key %p", ent, GET_PTR(ent_key));
+ return ent;
+ }
}
}
return NULL;
}
-// Allocate and initialize a hashtable_i_t with 2^<scale> entries.
-static hashtable_i_t *hti_alloc (hashtable_t *parent, int scale) {
+// Allocate and initialize a hti_t with 2^<scale> entries.
+static hti_t *hti_alloc (hashtable_t *parent, int scale) {
// Include enough slop to align the actual table on a cache line boundry
- size_t n = sizeof(hashtable_i_t)
+ size_t n = sizeof(hti_t)
+ sizeof(entry_t) * (1 << scale)
+ (CACHE_LINE_SIZE - 1);
- hashtable_i_t *hti = (hashtable_i_t *)calloc(n, 1);
+ hti_t *hti = (hti_t *)calloc(n, 1);
// Align the table of hash entries on a cache line boundry.
- hti->table = (entry_t *)(((uint64_t)hti + sizeof(hashtable_i_t) + (CACHE_LINE_SIZE-1))
+ hti->table = (entry_t *)(((uint64_t)hti + sizeof(hti_t) + (CACHE_LINE_SIZE-1))
& ~(CACHE_LINE_SIZE-1));
hti->scale = scale;
// When searching for a key probe a maximum of 1/4 of the buckets up to 1000 buckets.
- hti->max_probe = ((1 << (hti->scale - 2)) / ENTRIES_PER_BUCKET) + 2;
+ hti->max_probe = ((1 << (hti->scale - 2)) / ENTRIES_PER_BUCKET) + 4;
if (hti->max_probe > MAX_BUCKETS_TO_PROBE) {
hti->max_probe = MAX_BUCKETS_TO_PROBE;
}
// Called when <hti> runs out of room for new keys.
//
-// Initiates a copy by creating a larger hashtable_i_t and installing it in <hti->next>.
-static void hti_start_copy (hashtable_i_t *hti) {
+// Initiates a copy by creating a larger hti_t and installing it in <hti->next>.
+static void hti_start_copy (hti_t *hti) {
TRACE("h0", "hti_start_copy(hti %p scale %llu)", hti, hti->scale);
// heuristics to determine the size of the new table
new_scale += (count > (1 << (new_scale - 2))); // double size again if more than 1/2 full
// Allocate the new table and attempt to install it.
- hashtable_i_t *next = hti_alloc(hti->ht, new_scale);
- hashtable_i_t *old_next = SYNC_CAS(&hti->next, NULL, next);
+ hti_t *next = hti_alloc(hti->ht, new_scale);
+ hti_t *old_next = SYNC_CAS(&hti->next, NULL, next);
if (old_next != NULL) {
// Another thread beat us to it.
TRACE("h0", "hti_start_copy: lost race to install new hti; found %p", old_next, 0);
TRACE("h0", "hti_start_copy: new hti %p scale %llu", next, next->scale);
}
-// Copy the key and value stored in <ht1_e> (which must be an entry in <ht1>) to <ht2>.
+// Copy the key and value stored in <ht1_ent> (which must be an entry in <ht1>) to <ht2>.
//
-// Return 1 unless <ht1_e> is already copied (then return 0), so the caller can account for the total
+// Return 1 unless <ht1_ent> is already copied (then return 0), so the caller can account for the total
// number of entries left to copy.
-static int hti_copy_entry (hashtable_i_t *ht1, volatile entry_t *ht1_e, uint32_t key_hash,
- hashtable_i_t *ht2) {
- TRACE("h2", "hti_copy_entry: entry %p to table %p", ht1_e, ht2);
+static int hti_copy_entry (hti_t *ht1, volatile entry_t *ht1_ent, uint32_t key_hash, hti_t *ht2) {
+ TRACE("h2", "hti_copy_entry: entry %p to table %p", ht1_ent, ht2);
assert(ht1);
assert(ht1->next);
assert(ht2);
- assert(ht1_e >= ht1->table && ht1_e < ht1->table + (1 << ht1->scale));
- assert(key_hash == 0 || (key_hash >> 16) == (ht1_e->key >> 48));
+ assert(ht1_ent >= ht1->table && ht1_ent < ht1->table + (1 << ht1->scale));
+ assert(key_hash == 0 || ht1->ht->hash_fun == NULL || (key_hash >> 16) == (ht1_ent->key >> 48));
- uint64_t ht1_e_value = ht1_e->value;
- if (EXPECT_FALSE(ht1_e_value == COPIED_VALUE)) {
- TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_e, ht2);
+ uint64_t ht1_ent_val = ht1_ent->val;
+ if (EXPECT_FALSE(ht1_ent_val == COPIED_VALUE)) {
+ TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_ent, ht2);
return FALSE; // already copied
}
// Kill empty entries.
- if (EXPECT_FALSE(ht1_e_value == DOES_NOT_EXIST)) {
- uint64_t ht1_e_value = SYNC_CAS(&ht1_e->value, DOES_NOT_EXIST, COPIED_VALUE);
- if (ht1_e_value == DOES_NOT_EXIST) {
- TRACE("h1", "hti_copy_entry: empty entry %p killed", ht1_e, 0);
+ if (EXPECT_FALSE(ht1_ent_val == DOES_NOT_EXIST)) {
+ uint64_t ht1_ent_val = SYNC_CAS(&ht1_ent->val, DOES_NOT_EXIST, COPIED_VALUE);
+ if (ht1_ent_val == DOES_NOT_EXIST) {
+ TRACE("h1", "hti_copy_entry: empty entry %p killed", ht1_ent, 0);
return TRUE;
}
- if (ht1_e_value == COPIED_VALUE) {
- TRACE("h0", "hti_copy_entry: lost race to kill empty entry %p", ht1_e, 0);
+ if (ht1_ent_val == COPIED_VALUE) {
+ TRACE("h0", "hti_copy_entry: lost race to kill empty entry %p; the entry is already killed", ht1_ent, 0);
return FALSE; // another thread beat us to it
}
- TRACE("h0", "hti_copy_entry: lost race to kill empty entry %p; the entry is now"
- "in use and should be copied", ht1_e, 0);
+ TRACE("h0", "hti_copy_entry: lost race to kill empty entry %p; the entry is not empty", ht1_ent, 0);
}
// Tag the value in the old entry to indicate a copy is in progress.
- ht1_e_value = SYNC_FETCH_AND_OR(&ht1_e->value, TAG_VALUE(0));
- TRACE("h2", "hti_copy_entry: tagged the value %p in old entry %p", ht1_e_value, ht1_e);
- if (ht1_e_value == COPIED_VALUE) {
- TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_e, ht2);
+ ht1_ent_val = SYNC_FETCH_AND_OR(&ht1_ent->val, TAG_VALUE(0));
+ TRACE("h2", "hti_copy_entry: tagged the value %p in old entry %p", ht1_ent_val, ht1_ent);
+ if (ht1_ent_val == COPIED_VALUE) {
+ TRACE("h1", "hti_copy_entry: entry %p already copied to table %p", ht1_ent, ht2);
return FALSE; // <value> was already copied by another thread.
}
- // The old table's deleted entries don't need to be copied to the new table, but their keys need
- // to be freed.
+ // Install the key in the new table.
+ uint64_t ht1_ent_key = ht1_ent->key;
+ void *key = (ht1->ht->hash_fun == NULL) ? (void *)ht1_ent_key : GET_PTR(ht1_ent_key);
+
+ // The old table's dead entries don't need to be copied to the new table, but their keys need to be freed.
assert(COPIED_VALUE == TAG_VALUE(TOMBSTONE));
- if (ht1_e_value == TOMBSTONE) {
- TRACE("h1", "hti_copy_entry: entry %p old value was deleted, now freeing key %p", ht1_e, GET_PTR(ht1_e->key));
- nbd_defer_free(GET_PTR(ht1_e->key));
+ if (ht1_ent_val == TOMBSTONE) {
+ TRACE("h1", "hti_copy_entry: entry %p old value was deleted, now freeing key %p", ht1_ent, key);
+ if (EXPECT_FALSE(ht1->ht->clone_fun != NULL)) {
+ nbd_defer_free(key);
+ }
return TRUE;
}
- // Install the key in the new table.
- uint64_t key = ht1_e->key;
- nstring_t *key_string = GET_PTR(key);
- uint64_t value = STRIP_TAG(ht1_e_value);
-
- // We use 0 to indicate that <key_hash> isn't initiallized. Occasionally the <key_hash> will
- // really be 0 and we will waste time recomputing it. That is rare enough that it is OK.
+ // We use 0 to indicate that <key_hash> is uninitiallized. Occasionally the key's hash will really be 0 and we
+ // waste time recomputing it every time. It is rare enough (1 in 65k) that it won't hurt performance.
if (key_hash == 0) {
- key_hash = murmur32(key_string->data, key_string->len);
+ key_hash = (ht1->ht->hash_fun == NULL) ? murmur32_8b(ht1_ent_key) : ht1->ht->hash_fun(key);
}
- int is_empty;
- volatile entry_t *ht2_e = hti_lookup(ht2, key_hash, key_string->data, key_string->len, &is_empty);
- TRACE("h0", "hti_copy_entry: copy entry %p to entry %p", ht1_e, ht2_e);
+ int ht2_ent_is_empty;
+ volatile entry_t *ht2_ent = hti_lookup(ht2, key, key_hash, &ht2_ent_is_empty);
+ TRACE("h0", "hti_copy_entry: copy entry %p to entry %p", ht1_ent, ht2_ent);
- // it is possible that there is not any room in the new table either
- if (EXPECT_FALSE(ht2_e == NULL)) {
+ // It is possible that there isn't any room in the new table either.
+ if (EXPECT_FALSE(ht2_ent == NULL)) {
TRACE("h0", "hti_copy_entry: no room in table %p copy to next table %p", ht2, ht2->next);
if (ht2->next == NULL) {
hti_start_copy(ht2); // initiate nested copy, if not already started
}
- return hti_copy_entry(ht1, ht1_e, key_hash, ht2->next); // recursive tail-call
+ return hti_copy_entry(ht1, ht1_ent, key_hash, ht2->next); // recursive tail-call
}
- // a tagged entry returned from hti_lookup() means it is either empty or has a new key
- if (is_empty) {
- uint64_t old_ht2_e_key = SYNC_CAS(&ht2_e->key, DOES_NOT_EXIST, key);
- if (old_ht2_e_key != DOES_NOT_EXIST) {
+ if (ht2_ent_is_empty) {
+ uint64_t old_ht2_ent_key = SYNC_CAS(&ht2_ent->key, DOES_NOT_EXIST, ht1_ent_key);
+ if (old_ht2_ent_key != DOES_NOT_EXIST) {
TRACE("h0", "hti_copy_entry: lost race to CAS key %p into new entry; found %p",
- key, old_ht2_e_key);
- return hti_copy_entry(ht1, ht1_e, key_hash, ht2); // recursive tail-call
+ ht1_ent_key, old_ht2_ent_key);
+ return hti_copy_entry(ht1, ht1_ent, key_hash, ht2); // recursive tail-call
}
}
// Copy the value to the entry in the new table.
- uint64_t old_ht2_e_value = SYNC_CAS(&ht2_e->value, DOES_NOT_EXIST, value);
+ ht1_ent_val = STRIP_TAG(ht1_ent_val);
+ uint64_t old_ht2_ent_val = SYNC_CAS(&ht2_ent->val, DOES_NOT_EXIST, ht1_ent_val);
// If there is a nested copy in progress, we might have installed the key into a dead entry.
- if (old_ht2_e_value == COPIED_VALUE) {
- TRACE("h0", "hti_copy_entry: nested copy in progress; copy %p to next table %p", ht2_e, ht2->next);
- return hti_copy_entry(ht1, ht1_e, key_hash, ht2->next); // recursive tail-call
+ if (old_ht2_ent_val == COPIED_VALUE) {
+ TRACE("h0", "hti_copy_entry: nested copy in progress; copy %p to next table %p", ht2_ent, ht2->next);
+ return hti_copy_entry(ht1, ht1_ent, key_hash, ht2->next); // recursive tail-call
}
// Mark the old entry as dead.
- ht1_e->value = COPIED_VALUE;
+ ht1_ent->val = COPIED_VALUE;
// Update the count if we were the one that completed the copy.
- if (old_ht2_e_value == DOES_NOT_EXIST) {
- TRACE("h0", "hti_copy_entry: key \"%s\" value %p copied to new entry", key_string->data, value);
+ if (old_ht2_ent_val == DOES_NOT_EXIST) {
+ TRACE("h0", "hti_copy_entry: key %p value %p copied to new entry", key, ht1_ent_val);
SYNC_ADD(&ht1->count, -1);
SYNC_ADD(&ht2->count, 1);
return TRUE;
}
TRACE("h0", "hti_copy_entry: lost race to install value %p in new entry; found value %p",
- value, old_ht2_e_value);
+ ht1_ent_val, old_ht2_ent_val);
return FALSE; // another thread completed the copy
}
// NOTE: the returned value matches <expected> iff the set succeeds
//
// Certain values of <expected> have special meaning. If <expected> is CAS_EXPECT_EXISTS then any
-// real value matches (i.e. not a TOMBSTONE or DOES_NOT_EXIST) as long as <key> is in the table. If
+// real value matches (i.e. not a TOMBSTONE or DOES_NOT_EXIST) as long as <key> is in the table. If
// <expected> is CAS_EXPECT_WHATEVER then skip the test entirely.
//
-static uint64_t hti_cas (hashtable_i_t *hti, uint32_t key_hash, const char *key_data, uint32_t key_len,
- uint64_t expected, uint64_t new) {
- TRACE("h1", "hti_cas: hti %p key %p", hti, key_data);
+static uint64_t hti_cas (hti_t *hti, void *key, uint32_t key_hash, uint64_t expected, uint64_t new) {
+ TRACE("h1", "hti_cas: hti %p key %p", hti, key);
TRACE("h1", "hti_cas: value %p expect %p", new, expected);
assert(hti);
assert(new != DOES_NOT_EXIST && !IS_TAGGED(new));
- assert(key_data);
+ assert(key);
int is_empty;
- volatile entry_t *e = hti_lookup(hti, key_hash, key_data, key_len, &is_empty);
+ volatile entry_t *ent = hti_lookup(hti, key, key_hash, &is_empty);
// There is no room for <key>, grow the table and try again.
- if (e == NULL) {
+ if (ent == NULL) {
if (hti->next == NULL) {
hti_start_copy(hti);
}
// Install <key> in the table if it doesn't exist.
if (is_empty) {
- TRACE("h0", "hti_cas: entry %p is empty", e, 0);
+ TRACE("h0", "hti_cas: entry %p is empty", ent, 0);
if (expected != CAS_EXPECT_WHATEVER && expected != CAS_EXPECT_DOES_NOT_EXIST)
return DOES_NOT_EXIST;
if (new == TOMBSTONE)
return DOES_NOT_EXIST;
- // Allocate <key>.
- nstring_t *key = ns_alloc(key_data, key_len);
+ // Allocate <new_key>.
+ uint64_t new_key = (uint64_t)((hti->ht->clone_fun == NULL) ? key : hti->ht->clone_fun(key));
+ if (EXPECT_FALSE(hti->ht->hash_fun != NULL)) {
+ // Combine <new_key> pointer with bits from its hash
+ new_key = ((uint64_t)(key_hash >> 16) << 48) | new_key;
+ }
- // Combine <key> pointer with bits from its hash, CAS it into the table.
- uint64_t temp = ((uint64_t)(key_hash >> 16) << 48) | (uint64_t)key;
- uint64_t e_key = SYNC_CAS(&e->key, DOES_NOT_EXIST, temp);
+ // CAS the key into the table.
+ uint64_t old_ent_key = SYNC_CAS(&ent->key, DOES_NOT_EXIST, new_key);
// Retry if another thread stole the entry out from under us.
- if (e_key != DOES_NOT_EXIST) {
- TRACE("h0", "hti_cas: lost race to install key %p in entry %p", key, e);
- TRACE("h0", "hti_cas: found %p instead of NULL", GET_PTR(e_key), 0);
- nbd_free(key);
- return hti_cas(hti, key_hash, key_data, key_len, expected, new); // tail-call
+ if (old_ent_key != DOES_NOT_EXIST) {
+ TRACE("h0", "hti_cas: lost race to install key %p in entry %p", new_key, ent);
+            TRACE("h0", "hti_cas: found %p instead of NULL",
+                    (hti->ht->clone_fun == NULL) ? (void *)old_ent_key : GET_PTR(old_ent_key), 0);
+ if (hti->ht->clone_fun != NULL) {
+ nbd_free(GET_PTR(new_key));
+ }
+ return hti_cas(hti, key, key_hash, expected, new); // tail-call
}
- TRACE("h2", "hti_cas: installed key %p in entry %p", key, e);
+ TRACE("h2", "hti_cas: installed key %p in entry %p", new_key, ent);
}
- TRACE("h0", "hti_cas: entry for key \"%s\" is %p", GET_PTR(e->key)->data, e);
+    TRACE("h0", "hti_cas: entry for key %p is %p",
+            (hti->ht->clone_fun == NULL) ? (void *)ent->key : GET_PTR(ent->key), ent);
// If the entry is in the middle of a copy, the copy must be completed first.
- uint64_t e_value = e->value;
- if (EXPECT_FALSE(IS_TAGGED(e_value))) {
- if (e_value != COPIED_VALUE) {
- int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hashtable_i_t *)hti)->next);
+ uint64_t ent_val = ent->val;
+ if (EXPECT_FALSE(IS_TAGGED(ent_val))) {
+ if (ent_val != COPIED_VALUE) {
+ int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
if (did_copy) {
SYNC_ADD(&hti->num_entries_copied, 1);
}
}
// Fail if the old value is not consistent with the caller's expectation.
- int old_existed = (e_value != TOMBSTONE && e_value != DOES_NOT_EXIST);
- if (EXPECT_FALSE(expected != CAS_EXPECT_WHATEVER && expected != e_value)) {
+ int old_existed = (ent_val != TOMBSTONE && ent_val != DOES_NOT_EXIST);
+ if (EXPECT_FALSE(expected != CAS_EXPECT_WHATEVER && expected != ent_val)) {
if (EXPECT_FALSE(expected != (old_existed ? CAS_EXPECT_EXISTS : CAS_EXPECT_DOES_NOT_EXIST))) {
TRACE("h1", "hti_cas: value %p expected by caller not found; found value %p",
- expected, e_value);
- return e_value;
+ expected, ent_val);
+ return ent_val;
}
}
// No need to update if value is unchanged.
- if ((new == TOMBSTONE && !old_existed) || e_value == new) {
+ if ((new == TOMBSTONE && !old_existed) || ent_val == new) {
TRACE("h1", "hti_cas: old value and new value were the same", 0, 0);
- return e_value;
+ return ent_val;
}
// CAS the value into the entry. Retry if it fails.
- uint64_t v = SYNC_CAS(&e->value, e_value, new);
- if (EXPECT_FALSE(v != e_value)) {
- TRACE("h0", "hti_cas: value CAS failed; expected %p found %p", e_value, v);
- return hti_cas(hti, key_hash, key_data, key_len, expected, new); // recursive tail-call
+ uint64_t v = SYNC_CAS(&ent->val, ent_val, new);
+ if (EXPECT_FALSE(v != ent_val)) {
+ TRACE("h0", "hti_cas: value CAS failed; expected %p found %p", ent_val, v);
+ return hti_cas(hti, key, key_hash, expected, new); // recursive tail-call
}
// The set succeeded. Adjust the value count.
}
// Return the previous value.
- TRACE("h0", "hti_cas: CAS succeeded; old value %p new value %p", e_value, new);
- return e_value;
+ TRACE("h0", "hti_cas: CAS succeeded; old value %p new value %p", ent_val, new);
+ return ent_val;
}
//
-static uint64_t hti_get (hashtable_i_t *hti, uint32_t key_hash, const char *key_data, uint32_t key_len) {
- assert(key_data);
-
+static uint64_t hti_get (hti_t *hti, void *key, uint32_t key_hash) {
int is_empty;
- volatile entry_t *e = hti_lookup(hti, key_hash, key_data, key_len, &is_empty);
+ volatile entry_t *ent = hti_lookup(hti, key, key_hash, &is_empty);
// When hti_lookup() returns NULL it means we hit the reprobe limit while
// searching the table. In that case, if a copy is in progress the key
// might exist in the copy.
- if (EXPECT_FALSE(e == NULL)) {
- if (((volatile hashtable_i_t *)hti)->next != NULL)
- return hti_get(hti->next, key_hash, key_data, key_len); // recursive tail-call
+ if (EXPECT_FALSE(ent == NULL)) {
+ if (((volatile hti_t *)hti)->next != NULL)
+ return hti_get(hti->next, key, key_hash); // recursive tail-call
return DOES_NOT_EXIST;
}
return DOES_NOT_EXIST;
// If the entry is being copied, finish the copy and retry on the next table.
- uint64_t e_value = e->value;
- if (EXPECT_FALSE(IS_TAGGED(e_value))) {
- if (EXPECT_FALSE(e_value != COPIED_VALUE)) {
- int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hashtable_i_t *)hti)->next);
+ uint64_t ent_val = ent->val;
+ if (EXPECT_FALSE(IS_TAGGED(ent_val))) {
+ if (EXPECT_FALSE(ent_val != COPIED_VALUE)) {
+ int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
if (did_copy) {
SYNC_ADD(&hti->num_entries_copied, 1);
}
}
- return hti_get(((volatile hashtable_i_t *)hti)->next, key_hash, key_data, key_len); // tail-call
+ return hti_get(((volatile hti_t *)hti)->next, key, key_hash); // tail-call
}
- return (e_value == TOMBSTONE) ? DOES_NOT_EXIST : e_value;
+ return (ent_val == TOMBSTONE) ? DOES_NOT_EXIST : ent_val;
}
//
-uint64_t ht_get (hashtable_t *ht, const char *key_data, uint32_t key_len) {
- return hti_get(ht->hti, murmur32(key_data, key_len), key_data, key_len);
+uint64_t ht_get (hashtable_t *ht, void *key) {
+ uint32_t hash = (ht->hash_fun == NULL) ? murmur32_8b((uint64_t)key) : ht->hash_fun(key);
+ return hti_get(ht->hti, key, hash);
}
//
-uint64_t ht_cas (hashtable_t *ht, const char *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val) {
+uint64_t ht_cas (hashtable_t *ht, void *key, uint64_t expected_val, uint64_t new_val) {
- TRACE("h2", "ht_cas: key %p len %u", key_data, key_len);
+ TRACE("h2", "ht_cas: key %p ht %p", key, ht);
TRACE("h2", "ht_cas: expected val %p new val %p", expected_val, new_val);
- assert(key_data);
- assert(!IS_TAGGED(new_val) && new_val != DOES_NOT_EXIST);
+ assert(key != DOES_NOT_EXIST);
+ assert(!IS_TAGGED(new_val) && new_val != DOES_NOT_EXIST && new_val != TOMBSTONE);
- hashtable_i_t *hti = ht->hti;
+ hti_t *hti = ht->hti;
// Help with an ongoing copy.
if (EXPECT_FALSE(hti->next != NULL)) {
- volatile entry_t *e;
+ volatile entry_t *ent;
uint64_t limit;
int num_copied = 0;
int x = hti->scan;
// <hti->scan> might be larger than the size of the table, if some thread stalls while
// copying. In that case we just wrap around to the begining and make another pass through
// the table.
- e = hti->table + (x & MASK(hti->scale));
+ ent = hti->table + (x & MASK(hti->scale));
} else {
TRACE("h1", "ht_cas: help copy panic", 0, 0);
// scan the whole table
limit = (1 << hti->scale);
- e = hti->table;
+ ent = hti->table;
}
// Copy the entries
for (int i = 0; i < limit; ++i) {
- num_copied += hti_copy_entry(hti, e++, 0, hti->next);
- assert(e <= hti->table + (1 << hti->scale));
+ num_copied += hti_copy_entry(hti, ent++, 0, hti->next);
+ assert(ent <= hti->table + (1 << hti->scale));
}
if (num_copied != 0) {
SYNC_ADD(&hti->num_entries_copied, num_copied);
}
uint64_t old_val;
- uint32_t key_hash = murmur32(key_data, key_len);
- while ((old_val = hti_cas(hti, key_hash, key_data, key_len, expected_val, new_val))
- == COPIED_VALUE) {
+ uint32_t key_hash = (ht->hash_fun == NULL) ? murmur32_8b((uint64_t)key) : ht->hash_fun(key);
+ while ((old_val = hti_cas(hti, key, key_hash, expected_val, new_val)) == COPIED_VALUE) {
assert(hti->next);
hti = hti->next;
}
return old_val == TOMBSTONE ? DOES_NOT_EXIST : old_val;
}
-// Remove the value in <ht> associated with <key_data>. Returns the value removed, or
-// DOES_NOT_EXIST if there was no value for that key.
-uint64_t ht_remove (hashtable_t *ht, const char *key_data, uint32_t key_len) {
- hashtable_i_t *hti = ht->hti;
+// Remove the value in <ht> associated with <key>. Returns the value removed, or DOES_NOT_EXIST if there was
+// no value for that key.
+uint64_t ht_remove (hashtable_t *ht, void *key) {
+ hti_t *hti = ht->hti;
uint64_t val;
- uint32_t key_hash = murmur32(key_data, key_len);
+ uint32_t key_hash = (ht->hash_fun == NULL) ? murmur32_8b((uint64_t)key) : ht->hash_fun(key);
do {
- val = hti_cas(hti, key_hash, key_data, key_len, CAS_EXPECT_WHATEVER, TOMBSTONE);
+ val = hti_cas(hti, key, key_hash, CAS_EXPECT_WHATEVER, TOMBSTONE);
if (val != COPIED_VALUE)
return val == TOMBSTONE ? DOES_NOT_EXIST : val;
assert(hti->next);
// Returns the number of key-values pairs in <ht>
uint64_t ht_count (hashtable_t *ht) {
- hashtable_i_t *hti = ht->hti;
+ hti_t *hti = ht->hti;
uint64_t count = 0;
while (hti) {
count += hti->count;
}
// Allocate and initialize a new hash table.
-hashtable_t *ht_alloc (void) {
+hashtable_t *ht_alloc (cmp_fun_t cmp_fun, hash_fun_t hash_fun, clone_fun_t clone_fun) {
hashtable_t *ht = nbd_malloc(sizeof(hashtable_t));
- ht->hti = (hashtable_i_t *)hti_alloc(ht, MIN_SCALE);
+ ht->cmp_fun = cmp_fun;
+ ht->hash_fun = hash_fun;
+ ht->clone_fun = clone_fun;
+ ht->hti = (hti_t *)hti_alloc(ht, MIN_SCALE);
return ht;
}
// Free <ht> and its internal structures.
void ht_free (hashtable_t *ht) {
- hashtable_i_t *hti = ht->hti;
+ hti_t *hti = ht->hti;
do {
for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
- assert(hti->table[i].value == COPIED_VALUE || !IS_TAGGED(hti->table[i].value));
- if (hti->table[i].key != DOES_NOT_EXIST) {
+ assert(hti->table[i].val == COPIED_VALUE || !IS_TAGGED(hti->table[i].val));
+ if (ht->clone_fun != NULL && hti->table[i].key != DOES_NOT_EXIST) {
nbd_free(GET_PTR(hti->table[i].key));
}
}
- hashtable_i_t *next = hti->next;
+ hti_t *next = hti->next;
nbd_free(hti);
hti = next;
} while (hti);
}
void ht_print (hashtable_t *ht) {
+ hti_t *hti = ht->hti;
+ while (hti) {
+ printf("hti:%p scale:%u count:%d copied:%d\n", hti, hti->scale, hti->count, hti->num_entries_copied);
+ for (int i = 0; i < (1 << hti->scale); ++i) {
+ volatile entry_t *ent = hti->table + i;
+ printf("[0x%x] %p:%p\n", i, (void *)ent->key, (void *)ent->val);
+ if (i > 30) {
+ printf("...\n");
+ break;
+ }
+ }
+ hti = hti->next;
+ }
}
#include "common.h"
#include "mlocal.h"
-#include "nstring.h"
#include "mem.h"
typedef struct node {
- nstring_t *key;
+ void *key;
uint64_t val;
struct node *next;
} node_t;
struct ll {
node_t *head;
+ cmp_fun_t cmp_fun;
+ clone_fun_t clone_fun;
};
static const map_impl_t ll_map_impl = {
const map_impl_t *MAP_TYPE_LIST = &ll_map_impl;
-static node_t *node_alloc (const void *key_data, uint32_t key_len, uint64_t val) {
+static node_t *node_alloc (void *key, uint64_t val) {
node_t *item = (node_t *)nbd_malloc(sizeof(node_t));
- memset(item, 0, sizeof(node_t));
- // If <key_len> is -1 it indicates <key_data> is an integer and not a pointer
- item->key = (key_len == (unsigned)-1)
- ? (void *)TAG_VALUE(key_data)
- : ns_alloc(key_data, key_len);
+ item->key = key;
item->val = val;
return item;
}
-static void node_free (node_t *item) {
- if (!IS_TAGGED(item->key)) {
- nbd_free(item->key);
- }
- nbd_free(item);
-}
-
-static void node_defer_free (node_t *item) {
- if (!IS_TAGGED(item->key)) {
- nbd_defer_free(item->key);
- }
- nbd_defer_free(item);
-}
-
-list_t *ll_alloc (void) {
+list_t *ll_alloc (cmp_fun_t cmp_fun, hash_fun_t hash_fun, clone_fun_t clone_fun) {
list_t *ll = (list_t *)nbd_malloc(sizeof(list_t));
- ll->head = node_alloc(" ", 0, 0);
+ ll->cmp_fun = cmp_fun;
+ ll->clone_fun = clone_fun;
+ ll->head = node_alloc(NULL, 0);
ll->head->next = NULL;
return ll;
}
return count;
}
-static int find_pred (node_t **pred_ptr, node_t **item_ptr, list_t *ll, const void *key_data, uint32_t key_len, int help_remove) {
+static int find_pred (node_t **pred_ptr, node_t **item_ptr, list_t *ll, void *key, int help_remove) {
node_t *pred = ll->head;
node_t *item = pred->next;
- TRACE("l2", "find_pred: searching for key %p in ll (head is %p)", key_data, pred);
+ TRACE("l2", "find_pred: searching for key %p in list (head is %p)", key, pred);
while (item != NULL) {
node_t *next = item->next;
TRACE("l3", "find_pred: now current item is %p next is %p", item, next);
// The thread that completes the unlink should free the memory.
- node_defer_free(other);
+ if (ll->clone_fun != NULL) {
+ nbd_defer_free((void*)other->key);
+ }
+ nbd_defer_free(other);
} else {
TRACE("l2", "find_pred: lost a race to unlink item %p from pred %p", item, pred);
TRACE("l2", "find_pred: pred's link changed to %p", other, 0);
if (IS_TAGGED(other))
- return find_pred(pred_ptr, item_ptr, ll, key_data, key_len, help_remove); // retry
+ return find_pred(pred_ptr, item_ptr, ll, key, help_remove); // retry
item = other;
if (EXPECT_FALSE(item == NULL))
break;
break;
TRACE("l3", "find_pred: visiting item %p (next is %p)", item, next);
- TRACE("l4", "find_pred: key %p val %p", STRIP_TAG(item->key), item->val);
+ TRACE("l4", "find_pred: key %p val %p", item->key, item->val);
- // A tagged key is an integer, otherwise it is a pointer to a string
int d;
- if (IS_TAGGED(item->key)) {
- d = (STRIP_TAG(item->key) - (uint64_t)key_data);
+ if (EXPECT_TRUE(ll->cmp_fun == NULL)) {
+ d = (uint64_t)item->key - (uint64_t)key;
} else {
- int item_key_len = item->key->len;
- int len = (key_len < item_key_len) ? key_len : item_key_len;
- d = memcmp(item->key->data, key_data, len);
- if (d == 0) { d = item_key_len - key_len; }
+ d = ll->cmp_fun(item->key, key);
}
// If we reached the key (or passed where it should be), we found the right predesssor
TRACE("l2", "find_pred: found matching item %p in list, pred is %p", item, pred);
return TRUE;
}
- TRACE("l2", "find_pred: found proper place for key %p in list, pred is %p", key_data, pred);
+ TRACE("l2", "find_pred: found proper place for key %p in list, pred is %p", key, pred);
return FALSE;
}
}
// Fast find. Do not help unlink partially removed nodes and do not return the found item's predecessor.
-uint64_t ll_lookup (list_t *ll, const void *key_data, uint32_t key_len) {
- TRACE("l1", "ll_lookup: searching for key %p in list %p", key_data, ll);
+uint64_t ll_lookup (list_t *ll, void *key) {
+ TRACE("l1", "ll_lookup: searching for key %p in list %p", key, ll);
node_t *item;
- int found = find_pred(NULL, &item, ll, key_data, key_len, FALSE);
+ int found = find_pred(NULL, &item, ll, key, FALSE);
// If we found an <item> matching the key return its value.
if (found) {
return DOES_NOT_EXIST;
}
-uint64_t ll_cas (list_t *ll, const void *key_data, uint32_t key_len, uint64_t expectation, uint64_t new_val) {
- TRACE("l1", "ll_cas: key %p list %p", key_data, ll);
+uint64_t ll_cas (list_t *ll, void *key, uint64_t expectation, uint64_t new_val) {
+ TRACE("l1", "ll_cas: key %p list %p", key, ll);
TRACE("l1", "ll_cas: expectation %p new value %p", expectation, new_val);
ASSERT((int64_t)new_val > 0);
do {
node_t *pred, *old_item;
- if (!find_pred(&pred, &old_item, ll, key_data, key_len, TRUE)) {
+ int found = find_pred(&pred, &old_item, ll, key, TRUE);
+ if (!found) {
// There was not an item in the list that matches the key.
if (EXPECT_FALSE((int64_t)expectation > 0 || expectation == CAS_EXPECT_EXISTS)) {
// Create a new item and insert it into the list.
TRACE("l2", "ll_cas: attempting to insert item between %p and %p", pred, pred->next);
- node_t *new_item = node_alloc(key_data, key_len, new_val);
+ void *new_key = (ll->clone_fun == NULL) ? key : ll->clone_fun(key);
+ node_t *new_item = node_alloc(new_key, new_val);
node_t *next = new_item->next = old_item;
node_t *other = SYNC_CAS(&pred->next, next, new_item);
if (other == next) {
// Lost a race. Failed to insert the new item into the list.
TRACE("l1", "ll_cas: lost a race. CAS failed. expected pred's link to be %p but found %p", next, other);
- node_free(new_item);
+ if (ll->clone_fun != NULL) {
+ nbd_free(new_key);
+ }
+ nbd_free(new_item);
continue; // retry
}
} while (1);
}
-uint64_t ll_remove (list_t *ll, const void *key_data, uint32_t key_len) {
- TRACE("l1", "ll_remove: removing item with key %p from list %p", key_data, ll);
+uint64_t ll_remove (list_t *ll, void *key) {
+ TRACE("l1", "ll_remove: removing item with key %p from list %p", key, ll);
node_t *pred;
node_t *item;
- int found = find_pred(&pred, &item, ll, key_data, key_len, TRUE);
+ int found = find_pred(&pred, &item, ll, key, TRUE);
if (!found) {
TRACE("l1", "ll_remove: remove failed, an item with a matching key does not exist in the list", 0, 0);
return DOES_NOT_EXIST;
}
// The thread that completes the unlink should free the memory.
- node_defer_free(item);
+ if (ll->clone_fun != NULL) {
+ nbd_defer_free(item->key);
+ }
+ nbd_defer_free(item);
TRACE("l1", "ll_remove: successfully unlinked item %p from the list", item, 0);
return val;
}
+// Debug dump of the list: prints up to ~30 nodes as <node>:<key>, with a
+// leading "*" before a tagged (removal-marked) item pointer.
void ll_print (list_t *ll) {
node_t *item;
item = ll->head->next;
+ int i = 0;
while (item) {
node_t *next = item->next;
if (IS_TAGGED(item)) {
printf("*");
}
- printf("%p:", item);
- if (IS_TAGGED(item->key)) {
- printf("0x%llx ", STRIP_TAG(item->key));
- } else {
- printf("%s ", (char *)item->key->data);
- }
+ printf("%p:%p ", item, item->key);
fflush(stdout);
item = (node_t *)STRIP_TAG(next);
+ if (i++ > 30) { // cap output so huge (or cyclic, if corrupted) lists stay printable
+ printf("...");
+ break;
+ }
}
printf("\n");
}
void *data;
};
-map_t *map_alloc (map_type_t map_type) {
+// Create a map backed by <map_type> (list, skiplist, or hashtable).  All three
+// callbacks may be NULL, in which case keys are treated as raw integer values.
+map_t *map_alloc (map_type_t map_type, cmp_fun_t cmp_fun, hash_fun_t hash_fun, clone_fun_t clone_fun) {
const map_impl_t *map_impl = map_type;
map_t *map = nbd_malloc(sizeof(map_t));
map->impl = map_impl;
- map->data = map->impl->alloc();
+ map->data = map->impl->alloc(cmp_fun, hash_fun, clone_fun);
return map;
}
return map->impl->count(map->data);
}
-uint64_t map_get (map_t *map, const void *key_data, uint32_t key_len) {
- return map->impl->get(map->data, key_data, key_len);
+// Look up <key>; forwards to the backing implementation.
+uint64_t map_get (map_t *map, void *key) {
+ return map->impl->get(map->data, key);
}
-uint64_t map_set (map_t *map, const void *key_data, uint32_t key_len, uint64_t new_val) {
- return map->impl->cas(map->data, key_data, key_len, CAS_EXPECT_WHATEVER, new_val);
+// Unconditionally bind <key> to <new_val>; returns the previous value.
+uint64_t map_set (map_t *map, void *key, uint64_t new_val) {
+ return map->impl->cas(map->data, key, CAS_EXPECT_WHATEVER, new_val);
}
-uint64_t map_add (map_t *map, const void *key_data, uint32_t key_len, uint64_t new_val) {
- return map->impl->cas(map->data, key_data, key_len, CAS_EXPECT_DOES_NOT_EXIST, new_val);
+// Insert <key> only if it is not already present in the map.
+uint64_t map_add (map_t *map, void *key, uint64_t new_val) {
+ return map->impl->cas(map->data, key, CAS_EXPECT_DOES_NOT_EXIST, new_val);
}
-uint64_t map_cas (map_t *map, const void *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val) {
- return map->impl->cas(map->data, key_data, key_len, expected_val, new_val);
+// Compare-and-swap: bind <key> to <new_val> only if its value is <expected_val>.
+uint64_t map_cas (map_t *map, void *key, uint64_t expected_val, uint64_t new_val) {
+ return map->impl->cas(map->data, key, expected_val, new_val);
}
-uint64_t map_replace(map_t *map, const void *key_data, uint32_t key_len, uint64_t new_val) {
- return map->impl->cas(map->data, key_data, key_len, CAS_EXPECT_EXISTS, new_val);
+// Update <key> only if it already exists in the map.
+uint64_t map_replace(map_t *map, void *key, uint64_t new_val) {
+ return map->impl->cas(map->data, key, CAS_EXPECT_EXISTS, new_val);
}
-uint64_t map_remove (map_t *map, const void *key_data, uint32_t key_len) {
- return map->impl->remove(map->data, key_data, key_len);
+// Remove <key>; returns the removed value, or DOES_NOT_EXIST if absent.
+uint64_t map_remove (map_t *map, void *key) {
+ return map->impl->remove(map->data, key);
}
#ifndef MLOCAL_H
#define MLOCAL_H
+#include "map.h"
+
#define CAS_EXPECT_DOES_NOT_EXIST ( 0)
#define CAS_EXPECT_EXISTS (-1)
#define CAS_EXPECT_WHATEVER (-2)
-typedef void * (*map_alloc_t) (void);
-typedef uint64_t (*map_cas_t) (void *, const void *, uint32_t, uint64_t, uint64_t);
-typedef uint64_t (*map_get_t) (void *, const void *, uint32_t);
-typedef uint64_t (*map_remove_t) (void *, const void *, uint32_t);
+typedef void * (*map_alloc_t) (cmp_fun_t, hash_fun_t, clone_fun_t);
+typedef uint64_t (*map_cas_t) (void *, void *, uint64_t, uint64_t);
+typedef uint64_t (*map_get_t) (void *, void *);
+typedef uint64_t (*map_remove_t) (void *, void *);
typedef uint64_t (*map_count_t) (void *);
typedef void (*map_print_t) (void *);
typedef void (*map_free_t) (void *);
typedef struct sl skiplist_t;
typedef struct ll list_t;
-hashtable_t * ht_alloc (void);
-skiplist_t * sl_alloc (void);
-list_t * ll_alloc (void);
+hashtable_t * ht_alloc (cmp_fun_t cmp_fun, hash_fun_t hash_fun, clone_fun_t clone_fun);
+skiplist_t * sl_alloc (cmp_fun_t cmp_fun, hash_fun_t hash_fun, clone_fun_t clone_fun);
+list_t * ll_alloc (cmp_fun_t cmp_fun, hash_fun_t hash_fun, clone_fun_t clone_fun);
-uint64_t ht_cas (hashtable_t *ht, const char *key, uint32_t key_len, uint64_t expected_val, uint64_t val);
-uint64_t ht_get (hashtable_t *ht, const char *key, uint32_t len);
-uint64_t ht_remove (hashtable_t *ht, const char *key, uint32_t len);
+uint64_t ht_cas (hashtable_t *ht, void *key, uint64_t expected_val, uint64_t val);
+uint64_t ht_get (hashtable_t *ht, void *key);
+uint64_t ht_remove (hashtable_t *ht, void *key);
uint64_t ht_count (hashtable_t *ht);
void ht_print (hashtable_t *ht);
void ht_free (hashtable_t *ht);
-uint64_t sl_cas (skiplist_t *sl, const void *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val);
-uint64_t sl_lookup (skiplist_t *sl, const void *key_data, uint32_t key_len);
-uint64_t sl_remove (skiplist_t *sl, const void *key_data, uint32_t key_len);
+uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expected_val, uint64_t new_val);
+uint64_t sl_lookup (skiplist_t *sl, void *key);
+uint64_t sl_remove (skiplist_t *sl, void *key);
uint64_t sl_count (skiplist_t *sl);
void sl_print (skiplist_t *sl);
void sl_free (skiplist_t *sl);
-uint64_t ll_cas (list_t *ll, const void *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val);
-uint64_t ll_lookup (list_t *ll, const void *key_data, uint32_t key_len);
-uint64_t ll_remove (list_t *ll, const void *key_data, uint32_t key_len);
+uint64_t ll_cas (list_t *ll, void *key, uint64_t expected_val, uint64_t new_val);
+uint64_t ll_lookup (list_t *ll, void *key);
+uint64_t ll_remove (list_t *ll, void *key);
uint64_t ll_count (list_t *ll);
void ll_print (list_t *ll);
void ll_free (list_t *ll);
+++ /dev/null
-#include "common.h"
-#include "nstring.h"
-#include "mem.h"
-
-nstring_t *ns_alloc (const void *data, uint32_t len) {
- nstring_t *ns = nbd_malloc(sizeof(nstring_t) + len);
- ns->len = len;
- memcpy(ns->data, data, len);
- return ns;
-}
-
-int ns_cmp_raw (nstring_t *ns, const void *data, uint32_t len) {
- int d = memcmp(ns->data, data, (len < ns->len) ? len : ns->len);
- return (d == 0) ? ns->len - len : d;
-}
+++ /dev/null
-#ifndef NSTRING_H
-#define NSTRING_H
-
-typedef struct nstring {
- uint32_t len;
- char data[];
-} nstring_t;
-
-nstring_t * ns_alloc (const void *data, uint32_t len);
-int ns_cmp_raw (nstring_t *ns, const void *data, uint32_t len);
-
-#endif//NSTRING_H
#include <stdio.h>
#include <string.h>
+#define ENABLE_TRACE
+
#include "common.h"
#include "runtime.h"
#include "mlocal.h"
-#include "nstring.h"
#include "mem.h"
#include "tls.h"
#define MAX_LEVEL 31
typedef struct node {
- nstring_t *key;
+ void *key;
uint64_t val;
int top_level;
struct node *next[];
struct sl {
node_t *head;
+ cmp_fun_t cmp_fun;
+ clone_fun_t clone_fun;
};
static const map_impl_t sl_map_impl = {
return n;
}
-static node_t *node_alloc (int level, const void *key_data, uint32_t key_len, uint64_t val) {
+// Allocate a skiplist node with <level>+1 next links.  The memset() below
+// zeroes the whole node, including the flexible link array.
+static node_t *node_alloc (int level, void *key, uint64_t val) {
assert(level >= 0 && level <= MAX_LEVEL);
size_t sz = sizeof(node_t) + (level + 1) * sizeof(node_t *);
node_t *item = (node_t *)nbd_malloc(sz);
memset(item, 0, sz);
- // If <key_len> is -1 it indicates <key_data> is an integer and not a pointer
- item->key = (key_len == (unsigned)-1)
- ? (void *)TAG_VALUE(key_data)
- : ns_alloc(key_data, key_len);
+ item->key = key;
item->val = val;
item->top_level = level;
return item;
}
-static void node_free (node_t *item) {
- if (!IS_TAGGED(item->key)) {
- nbd_free(item->key);
- }
- nbd_free(item);
-}
-
-static void node_defer_free (node_t *item) {
- if (!IS_TAGGED(item->key)) {
- nbd_defer_free(item->key);
- }
- nbd_defer_free(item);
-}
-
-skiplist_t *sl_alloc (void) {
+// Allocate an empty skiplist map.  A NULL <cmp_fun> means keys compare as raw
+// integer values; a NULL <clone_fun> means keys are stored by reference.
+// <hash_fun> is accepted for interface symmetry but not stored.
+skiplist_t *sl_alloc (cmp_fun_t cmp_fun, hash_fun_t hash_fun, clone_fun_t clone_fun) {
skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
- sl->head = node_alloc(MAX_LEVEL, " ", 0, 0);
+ sl->cmp_fun = cmp_fun;
+ sl->clone_fun = clone_fun;
+ sl->head = node_alloc(MAX_LEVEL, NULL, 0);
+ // NOTE(review): the memset size uses sizeof(skiplist_t *) where sizeof(node_t *)
+ // is meant -- same size, and node_alloc() already zeroed the links, so harmless.
memset(sl->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
return sl;
}
uint64_t count = 0;
node_t *item = sl->head->next[0];
while (item) {
- count++;
+ if (!IS_TAGGED(item->next[0])) {
+ count++;
+ }
item = (node_t *)STRIP_TAG(item->next[0]);
}
return count;
}
-static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, const void *key_data, uint32_t key_len, int help_remove) {
+static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, void *key, int help_remove) {
node_t *pred = sl->head;
node_t *item = NULL;
- TRACE("s2", "find_preds: searching for key %p in sl (head is %p)", key_data, pred);
+ TRACE("s2", "find_preds: searching for key %p in skiplist (head is %p)", key, pred);
int d;
int start_level = MAX_LEVEL;
#if MAX_LEVEL > 2
item = pred->next[level];
if (EXPECT_FALSE(IS_TAGGED(item))) {
TRACE("s2", "find_preds: pred %p is marked for removal (item %p); retry", pred, item);
- return find_preds(preds, succs, n, sl, key_data, key_len, help_remove); // retry
+ return find_preds(preds, succs, n, sl, key, help_remove); // retry
}
while (item != NULL) {
node_t *next = item->next[level];
TRACE("s3", "find_preds: now the current item is %p next is %p", item, next);
// The thread that completes the unlink should free the memory.
- if (level == 0) { node_defer_free(other); }
+ if (level == 0) {
+ if (sl->clone_fun != NULL) {
+ nbd_defer_free((void*)other->key);
+ }
+ nbd_defer_free(other);
+ }
} else {
- TRACE("s2", "find_preds: lost race to unlink item %p from pred %p", item, pred);
- TRACE("s2", "find_preds: pred's link changed to %p", other, 0);
+ TRACE("s3", "find_preds: lost race to unlink item %p from pred %p", item, pred);
+ TRACE("s3", "find_preds: pred's link changed to %p", other, 0);
if (IS_TAGGED(other))
- return find_preds(preds, succs, n, sl, key_data, key_len, help_remove); // retry
+ return find_preds(preds, succs, n, sl, key, help_remove); // retry
item = other;
if (EXPECT_FALSE(item == NULL))
break;
if (EXPECT_FALSE(item == NULL))
break;
- TRACE("s3", "find_preds: visiting item %p (next is %p)", item, next);
+ TRACE("s4", "find_preds: visiting item %p (next is %p)", item, next);
TRACE("s4", "find_preds: key %p val %p", STRIP_TAG(item->key), item->val);
- // A tagged key is an integer, otherwise it is a pointer to a string
- if (IS_TAGGED(item->key)) {
- d = (STRIP_TAG(item->key) - (uint64_t)key_data);
+ if (EXPECT_TRUE(sl->cmp_fun == NULL)) {
+ d = (uint64_t)item->key - (uint64_t)key;
} else {
- int item_key_len = item->key->len;
- int len = (key_len < item_key_len) ? key_len : item_key_len;
- d = memcmp(item->key->data, key_data, len);
- if (d == 0) { d = item_key_len - key_len; }
+ d = sl->cmp_fun(item->key, key);
}
if (d >= 0) {
- TRACE("s2", "find_preds: found pred %p item %p", pred, item);
+ TRACE("s4", "find_preds: found pred %p item %p", pred, item);
break;
}
}
}
- // fill in empty levels
- if (n == -1 && item != NULL) {
- for (int level = start_level + 1; level <= item->top_level; ++level) {
- preds[level] = sl->head;
- }
- }
-
+ // fill in empty levels
+ if (n == -1 && item != NULL) {
+ for (int level = start_level + 1; level <= item->top_level; ++level) {
+ preds[level] = sl->head;
+ }
+ }
+
if (d == 0) {
TRACE("s2", "find_preds: found matching item %p in skiplist, pred is %p", item, pred);
return item;
}
- TRACE("s2", "find_preds: found proper place for key %p in skiplist, pred is %p. returning null", key_data, pred);
+ TRACE("s2", "find_preds: found proper place for key %p in skiplist, pred is %p. returning null", key, pred);
return NULL;
}
// Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
-uint64_t sl_lookup (skiplist_t *sl, const void *key_data, uint32_t key_len) {
- TRACE("s1", "sl_lookup: searching for key %p in skiplist %p", key_data, sl);
- node_t *item = find_preds(NULL, NULL, 0, sl, key_data, key_len, FALSE);
+uint64_t sl_lookup (skiplist_t *sl, void *key) {
+ TRACE("s1", "sl_lookup: searching for key %p in skiplist %p", key, sl);
+ node_t *item = find_preds(NULL, NULL, 0, sl, key, FALSE);
// If we found an <item> matching the <key> return its value.
if (item != NULL) {
return DOES_NOT_EXIST;
}
-uint64_t sl_cas (skiplist_t *sl, const void *key_data, uint32_t key_len, uint64_t expectation, uint64_t new_val) {
- TRACE("s1", "sl_cas: key %p skiplist %p", key_data, sl);
+uint64_t sl_cas (skiplist_t *sl, void *key, uint64_t expectation, uint64_t new_val) {
+ TRACE("s1", "sl_cas: key %p skiplist %p", key, sl);
TRACE("s1", "sl_cas: expectation %p new value %p", expectation, new_val);
ASSERT((int64_t)new_val > 0);
node_t *new_item = NULL;
int n = random_level();
do {
- node_t *old_item = find_preds(preds, nexts, n, sl, key_data, key_len, TRUE);
+ node_t *old_item = find_preds(preds, nexts, n, sl, key, TRUE);
if (old_item == NULL) {
// There was not an item in the skiplist that matches the key.
// First insert <new_item> into the bottom level.
TRACE("s3", "sl_cas: attempting to insert item between %p and %p", preds[0], nexts[0]);
- new_item = node_alloc(n, key_data, key_len, new_val);
+ void *new_key = (sl->clone_fun == NULL) ? key : sl->clone_fun(key);
+ new_item = node_alloc(n, new_key, new_val);
node_t *pred = preds[0];
node_t *next = new_item->next[0] = nexts[0];
for (int level = 1; level <= new_item->top_level; ++level) {
break; // success
}
TRACE("s3", "sl_cas: failed to change pred's link: expected %p found %p", next, other);
- node_free(new_item);
+ if (sl->clone_fun != NULL) {
+ nbd_free(new_key);
+ }
+ nbd_free(new_item);
continue;
}
break; // success
}
TRACE("s3", "sl_cas: failed to change pred's link: expected %p found %p", next, other);
- find_preds(preds, nexts, new_item->top_level, sl, key_data, key_len, TRUE);
+ find_preds(preds, nexts, new_item->top_level, sl, key, TRUE);
pred = preds[level];
next = nexts[level];
return DOES_NOT_EXIST; // success
}
-uint64_t sl_remove (skiplist_t *sl, const void *key_data, uint32_t key_len) {
- TRACE("s1", "sl_remove: removing item with key %p from skiplist %p", key_data, sl);
+uint64_t sl_remove (skiplist_t *sl, void *key) {
+ TRACE("s1", "sl_remove: removing item with key %p from skiplist %p", key, sl);
node_t *preds[MAX_LEVEL+1];
- node_t *item = find_preds(preds, NULL, -1, sl, key_data, key_len, TRUE);
+ node_t *item = find_preds(preds, NULL, -1, sl, key, TRUE);
if (item == NULL) {
TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the skiplist", 0, 0);
return DOES_NOT_EXIST;
}
- // Mark <item> removed at each level of <sl> from the top down. This must be atomic. If multiple threads
- // try to remove the same item only one of them should succeed. Marking the bottom level establishes which of
- // them succeeds.
- for (int level = item->top_level; level >= 0; --level) {
- if (EXPECT_FALSE(IS_TAGGED(item->next[level]))) {
- TRACE("s3", "sl_remove: %p is already marked for removal by another thread", item, 0);
- if (level == 0)
- return DOES_NOT_EXIST;
- continue;
- }
+ // Mark and unlink <item> at each level of <sl> from the top down. If multiple threads try to concurrently remove
+ // the same item only one of them should succeed. Marking the bottom level establishes which of them succeeds.
+ for (int level = item->top_level; level > 0; --level) {
node_t *next;
node_t *old_next = item->next[level];
do {
next = old_next;
old_next = SYNC_CAS(&item->next[level], next, TAG_VALUE(next));
if (IS_TAGGED(old_next)) {
- TRACE("s2", "sl_remove: lost race -- %p is already marked for removal by another thread", item, 0);
- if (level == 0)
- return DOES_NOT_EXIST;
+ TRACE("s2", "sl_remove: %p is already marked for removal by another thread at level %llu", item, level);
break;
}
} while (next != old_next);
- }
-
- // This has to be an atomic swap in case another thread is updating the item while we are removing it.
- uint64_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST);
- TRACE("s2", "sl_remove: replaced item's val %p with DOES_NOT_EXIT", val, 0);
- // Unlink <item> from <sl>. If we lose a race to another thread just back off. It is safe to leave the
- // item partially unlinked for a later call (or some other thread) to physically unlink. By marking the
- // item earlier, we logically removed it.
- int level = item->top_level;
- while (level >= 0) {
node_t *pred = preds[level];
- node_t *next = item->next[level];
- TRACE("s2", "sl_remove: unlink the item by linking its pred %p to it's successor %p", pred, STRIP_TAG(next));
+ TRACE("s2", "sl_remove: linking the item's pred %p to the item's successor %p", pred, STRIP_TAG(next));
node_t *other = NULL;
if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) != item) {
TRACE("s1", "sl_remove: unlink failed; pred's link changed from %p to %p", item, other);
- return val;
+ // If our former predecessor now points past us we know another thread unlinked us. Otherwise, we need
+ // to search for a new set of preds.
+ if (other == NULL)
+ continue; // <pred> points past <item> to the end of the list; go on to the next level.
+
+ int d = -1;
+ if (!IS_TAGGED(other)) {
+ if (EXPECT_TRUE(sl->cmp_fun == NULL)) {
+ d = (uint64_t)item->key - (uint64_t)other->key;
+ } else {
+ d = sl->cmp_fun(item->key, other->key);
+ }
+ }
+ if (d > 0) {
+ node_t *temp = find_preds(preds, NULL, level, sl, key, TRUE);
+ if (temp != item)
+ return DOES_NOT_EXIST; // Another thread removed the item we were targeting.
+ level++; // Redo this level.
+ }
}
- --level;
}
- // The thread that completes the unlink should free the memory.
- TRACE("s1", "sl_remove: successfully unlinked item %p from the skiplist", item, 0);
- node_defer_free(item);
+ node_t *next;
+ node_t *old_next = item->next[0];
+ do {
+ next = old_next;
+ old_next = SYNC_CAS(&item->next[0], next, TAG_VALUE(next));
+ if (IS_TAGGED(old_next)) {
+ TRACE("s2", "sl_remove: %p is already marked for removal by another thread at level 0", item, 0);
+ return DOES_NOT_EXIST;
+ }
+ } while (next != old_next);
+ TRACE("s1", "sl_remove: marked item %p removed at level 0", item, 0);
+
+ // Atomically swap out the item's value in case another thread is updating the item while we are
+ // removing it. This establishes which one occurs first, the update or the remove.
+ uint64_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST);
+ TRACE("s2", "sl_remove: replaced item %p's value with DOES_NOT_EXIT", item, 0);
+
+ node_t *pred = preds[0];
+ TRACE("s2", "sl_remove: linking the item's pred %p to the item's successor %p", pred, STRIP_TAG(next));
+ if (SYNC_CAS(&pred->next[0], item, STRIP_TAG(next))) {
+ TRACE("s2", "sl_remove: unlinked item %p from the skiplist at level 0", item, 0);
+ // The thread that completes the unlink should free the memory.
+ if (sl->clone_fun != NULL) {
+ nbd_defer_free(item->key);
+ }
+ nbd_defer_free(item);
+ }
return val;
}
if (item->next[level] == NULL)
continue;
printf("(%d) ", level);
+ int i = 0;
while (item) {
node_t *next = item->next[level];
printf("%s%p ", IS_TAGGED(next) ? "*" : "", item);
item = (node_t *)STRIP_TAG(next);
+ if (i++ > 30) {
+ printf("...");
+ break;
+ }
}
printf("\n");
fflush(stdout);
}
- printf("\n");
node_t *item = sl->head;
+ int i = 0;
while (item) {
int is_marked = IS_TAGGED(item->next[0]);
-
- if (IS_TAGGED(item->key)) {
- printf("%s%p:%llx ", is_marked ? "*" : "", item, STRIP_TAG(item->key));
- } else {
- printf("%s%p:%s ", is_marked ? "*" : "", item, (char *)item->key->data);
- }
+ printf("%s%p:%p ", is_marked ? "*" : "", item, item->key);
if (item != sl->head) {
printf("[%d]", item->top_level);
} else {
- printf("[*]");
+ printf("[HEAD]");
}
for (int level = 1; level <= item->top_level; ++level) {
node_t *next = (node_t *)STRIP_TAG(item->next[level]);
printf("\n");
fflush(stdout);
item = (node_t *)STRIP_TAG(item->next[0]);
+ if (i++ > 30) {
+ printf("...\n");
+ break;
+ }
}
}
--- /dev/null
+#include "common.h"
+#include "nstring.h"
+#include "murmur.h"
+#include "mem.h"
+
+// Allocate an nstring with room for <len> bytes of data.  The <data> bytes are
+// left uninitialized; the caller is expected to fill them (see ns_dup).
+nstring_t *ns_alloc (uint32_t len) {
+ nstring_t *ns = nbd_malloc(sizeof(nstring_t) + len);
+ ns->len = len;
+ return ns;
+}
+
+// Three-way compare (memcmp/strcmp convention): compare the bytes of the
+// common prefix first; if those are equal, the shorter string sorts first.
+int ns_cmp (const nstring_t *ns1, const nstring_t *ns2) {
+ // BUGFIX: the min-length ternary previously yielded ns1->len on BOTH branches
+ // ("? ns1->len : ns1->len"), so memcmp could read past the end of a shorter
+ // ns2->data (heap over-read) and return wrong orderings.
+ int d = memcmp(ns1->data, ns2->data, (ns1->len < ns2->len) ? ns1->len : ns2->len);
+ return (d == 0) ? ns1->len - ns2->len : d;
+}
+
+// Hash the string's bytes with murmur32 (from murmur.h); suitable as the
+// hash_fun_t callback for nstring keys.
+uint32_t ns_hash (const nstring_t *ns) {
+ return murmur32(ns->data, ns->len);
+}
+
+// Deep-copy <ns1> into a freshly allocated nstring; suitable as the
+// clone_fun_t callback for nstring keys.  The caller owns (and frees) the copy.
+nstring_t *ns_dup (const nstring_t *ns1) {
+ nstring_t *ns2 = ns_alloc(ns1->len);
+ memcpy(ns2->data, ns1->data, ns1->len);
+ return ns2;
+}
tc->failed = 1;
tc->message = string->buffer;
-#ifdef ENABLE_TRACE
+ extern void lwt_halt(void);
extern void lwt_dump(const char *);
lwt_dump(tc->name);
-#endif
if (tc->jumpBuf != 0) longjmp(*(tc->jumpBuf), 0);
}
#include <sys/time.h>
#include "common.h"
+#include "nstring.h"
#include "runtime.h"
#include "map.h"
#define NUM_ITERATIONS 10000000
+//#define TEST_STRING_KEYS
+
static volatile int wait_;
static long num_threads_;
static map_t *map_;
SYNC_ADD(&wait_, -1);
do {} while (wait_);
+#ifdef TEST_STRING_KEYS
+ nstring_t *key_str = ns_alloc(10);
+#endif
+
for (int i = 0; i < NUM_ITERATIONS/num_threads_; ++i) {
unsigned r = nbd_rand();
uint64_t key = r & 0xF;
-#if 1
- char key_str[10];
- sprintf(key_str, "%llX", key);
+#ifdef TEST_STRING_KEYS
+ key_str->len = sprintf(key_str->data, "%llX", key) + 1;
+ assert(key_str->len <= 10);
if (r & (1 << 8)) {
- map_set(map_, key_str, strlen(key_str) + 1, 1);
+ map_set(map_, key_str, 1);
} else {
- map_remove(map_, key_str, strlen(key_str) + 1);
+ map_remove(map_, key_str);
}
#else
if (r & (1 << 8)) {
- map_set(map_, (void *)key, -1, 1);
+ map_set(map_, (void *)(key + 1), 1);
} else {
- map_remove(map_, (void *)key, -1);
+ map_remove(map_, (void *)(key + 1));
}
#endif
int main (int argc, char **argv) {
nbd_init();
- //lwt_set_trace_level("l3");
+ lwt_set_trace_level("l3");
char* program_name = argv[0];
pthread_t thread[MAX_NUM_THREADS];
map_type_t map_types[] = { MAP_TYPE_LIST, MAP_TYPE_SKIPLIST, MAP_TYPE_HASHTABLE };
for (int i = 0; i < sizeof(map_types)/sizeof(*map_types); ++i) {
- map_ = map_alloc(map_types[i]);
+#ifdef TEST_STRING_KEYS
+ map_ = map_alloc(map_types[i], (cmp_fun_t)ns_cmp, (hash_fun_t)ns_hash, (clone_fun_t)ns_dup);
+#else
+ map_ = map_alloc(map_types[i], NULL, NULL, NULL);
+#endif
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
gettimeofday(&tv2, NULL);
int ms = (int)(1000000*(tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec - tv1.tv_usec) / 1000;
map_print(map_);
- printf("Th:%ld Time:%dms\n", num_threads_, ms);
+ printf("Th:%ld Time:%dms\n\n", num_threads_, ms);
fflush(stdout);
}
*/
#include <stdio.h>
#include <pthread.h>
+#include <sys/time.h>
#include "CuTest.h"
int id;
CuTest *tc;
map_t *map;
- int *wait;
+ volatile int *wait;
} worker_data_t;
static map_type_t map_type_;
// Test some basic stuff; add a few keys, remove a few keys
-void basic_test (CuTest* tc) {
+void simple (CuTest* tc) {
- map_t *map = map_alloc(map_type_);
+ map_t *map = map_alloc(map_type_, NULL, NULL, NULL);
ASSERT_EQUAL( 0, map_count(map) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_add(map,"a",2,10) );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_add(map, (void *)'a',10) );
ASSERT_EQUAL( 1, map_count(map) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_add(map,"b",2,20) );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_add(map, (void *)'b',20) );
ASSERT_EQUAL( 2, map_count(map) );
- ASSERT_EQUAL( 20, map_get(map,"b",2) );
- ASSERT_EQUAL( 10, map_set(map,"a",2,11) );
- ASSERT_EQUAL( 20, map_set(map,"b",2,21) );
+ ASSERT_EQUAL( 20, map_get(map, (void *)'b') );
+ ASSERT_EQUAL( 10, map_set(map, (void *)'a',11) );
+ ASSERT_EQUAL( 20, map_set(map, (void *)'b',21) );
ASSERT_EQUAL( 2, map_count(map) );
- ASSERT_EQUAL( 21, map_add(map,"b",2,22) );
- ASSERT_EQUAL( 11, map_remove(map,"a",2) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map,"a",2) );
+ ASSERT_EQUAL( 21, map_add(map, (void *)'b',22) );
+ ASSERT_EQUAL( 11, map_remove(map, (void *)'a') );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map, (void *)'a') );
ASSERT_EQUAL( 1, map_count(map) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_remove(map,"a",2) );
- ASSERT_EQUAL( 21, map_remove(map,"b",2) );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_remove(map, (void *)'a') );
+ ASSERT_EQUAL( 21, map_remove(map, (void *)'b') );
ASSERT_EQUAL( 0, map_count(map) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_remove(map,"b",2) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_remove(map,"c",2) );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_remove(map, (void *)'b') );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_remove(map, (void *)'c') );
ASSERT_EQUAL( 0, map_count(map) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_add(map,"d",2,40) );
- ASSERT_EQUAL( 40, map_get(map,"d",2) );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_add(map, (void *)'d',40) );
+ ASSERT_EQUAL( 40, map_get(map, (void *)'d') );
ASSERT_EQUAL( 1, map_count(map) );
- ASSERT_EQUAL( 40, map_remove(map,"d",2) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map,"d",2) );
+ ASSERT_EQUAL( 40, map_remove(map, (void *)'d') );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map, (void *)'d') );
ASSERT_EQUAL( 0, map_count(map) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_replace(map,"d",2,10) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map,"d",2) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_set(map,"d",2,40) );
- ASSERT_EQUAL( 40, map_replace(map,"d",2,41) );
- ASSERT_EQUAL( 41, map_get(map,"d",2) );
- ASSERT_EQUAL( 41, map_remove(map,"d",2) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map,"d",2) );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_replace(map, (void *)'d',10) );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map, (void *)'d') );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_set(map, (void *)'d',40) );
+ ASSERT_EQUAL( 40, map_replace(map, (void *)'d',41) );
+ ASSERT_EQUAL( 41, map_get(map, (void *)'d') );
+ ASSERT_EQUAL( 41, map_remove(map, (void *)'d') );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map, (void *)'d') );
ASSERT_EQUAL( 0, map_count(map) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_replace(map,"b",2,20) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map,"b",2) );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_replace(map, (void *)'b',20) );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map, (void *)'b') );
// In the end, all entries should be removed
- ASSERT_EQUAL( DOES_NOT_EXIST, map_set(map,"b",2,20) );
- ASSERT_EQUAL( 20, map_replace(map,"b",2,21) );
- ASSERT_EQUAL( 21, map_get(map,"b",2) );
- ASSERT_EQUAL( 21, map_remove(map,"b",2) );
- ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map,"b",2) );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_set(map, (void *)'b',20) );
+ ASSERT_EQUAL( 20, map_replace(map, (void *)'b',21) );
+ ASSERT_EQUAL( 21, map_get(map, (void *)'b') );
+ ASSERT_EQUAL( 21, map_remove(map, (void *)'b') );
+ ASSERT_EQUAL( DOES_NOT_EXIST, map_get(map, (void *)'b') );
ASSERT_EQUAL( 0, map_count(map) );
map_free(map);
rcu_update();
}
-void *simple_worker (void *arg) {
+void *add_remove_worker (void *arg) {
worker_data_t *wd = (worker_data_t *)arg;
map_t *map = wd->map;
CuTest* tc = wd->tc;
uint64_t d = wd->id;
- int iters = map_type_ == MAP_TYPE_LIST ? 100 : 1000000;
+ int iters = (map_type_ == MAP_TYPE_LIST) ? 5000 : 500000;
SYNC_ADD(wd->wait, -1);
- do { } while (*((volatile worker_data_t *)wd)->wait); // wait for all workers to be ready
+ do { } while (*wd->wait); // wait for all workers to be ready
for (int j = 0; j < 10; ++j) {
- for (int i = d; i < iters; i+=2) {
- char key[10];
- sprintf(key, "k%u", i);
+ for (uint64_t i = d+1; i < iters; i+=2) {
TRACE("t0", "test map_add() iteration (%llu, %llu)", j, i);
- CuAssertIntEquals_Msg(tc, key, DOES_NOT_EXIST, map_add(map, key, strlen(key)+1, d+1) );
+ CuAssertIntEquals_Msg(tc, (void *)i, DOES_NOT_EXIST, map_add(map, (void *)i, d+1) );
rcu_update();
}
- for (int i = d; i < iters; i+=2) {
- char key[10];
- sprintf(key, "k%u", i);
+ for (uint64_t i = d+1; i < iters; i+=2) {
TRACE("t0", "test map_remove() iteration (%llu, %llu)", j, i);
- CuAssertIntEquals_Msg(tc, key, d+1, map_remove(map, key, strlen(key)+1) );
+ CuAssertIntEquals_Msg(tc, (void *)i, d+1, map_remove(map, (void *)i) );
rcu_update();
}
}
}
// Do some simple concurrent testing
+// Concurrency test: two workers add and remove disjoint key sets in the same
+// map; afterwards the map must be empty. Also reports wall-clock duration.
-void simple_add_remove (CuTest* tc) {
+void add_remove (CuTest* tc) {
    pthread_t thread[2];
    worker_data_t wd[2];
-    int wait = 2;
-    map_t *map = map_alloc(map_type_);
+    // Countdown latch: each worker does SYNC_ADD(wait, -1) then spins until
+    // it reaches 0; volatile so the workers' re-reads are not optimized out.
+    volatile int wait = 2;
+    // NULL cmp/hash/clone funs -- presumably keys are compared as raw
+    // pointer/integer values; verify against map_alloc's contract.
+    map_t *map = map_alloc(map_type_, NULL, NULL, NULL);
+
+    struct timeval tv1, tv2;
+    gettimeofday(&tv1, NULL);
    // In 2 threads, add & remove even & odd elements concurrently
    int i;
        wd[i].tc = tc;
        wd[i].map = map;
+        // NOTE(review): &wait is volatile int*; confirm worker_data_t.wait is
+        // declared with a compatible (volatile-qualified) pointer type.
        wd[i].wait = &wait;
-        int rc = nbd_thread_create(thread + i, i, simple_worker, wd + i);
+        int rc = nbd_thread_create(thread + i, i, add_remove_worker, wd + i);
        if (rc != 0) { perror("nbd_thread_create"); return; }
    }
+
    for (i = 0; i < 2; ++i) {
        pthread_join(thread[i], NULL);
    }
+    gettimeofday(&tv2, NULL);
+    int ms = (int)(1000000*(tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec - tv1.tv_usec) / 1000;
+    map_print(map);
+    printf("Th:%d Time:%dms\n", 2, ms);
+    fflush(stdout);
+
    // In the end, all members should be removed
    ASSERT_EQUAL( 0, map_count(map) );
    map_free(map);
}
-
-
void *inserter_worker (void *arg) {
//pthread_t thread[NUM_THREADS];
int main (void) {
nbd_init();
- //lwt_set_trace_level("h0");
+ lwt_set_trace_level("h3");
map_type_t map_types[] = { MAP_TYPE_LIST, MAP_TYPE_SKIPLIST, MAP_TYPE_HASHTABLE };
for (int i = 0; i < sizeof(map_types)/sizeof(*map_types); ++i) {
CuString *output = CuStringNew();
CuSuite* suite = CuSuiteNew();
- SUITE_ADD_TEST(suite, basic_test);
- SUITE_ADD_TEST(suite, simple_add_remove);
+ SUITE_ADD_TEST(suite, simple);
+ SUITE_ADD_TEST(suite, add_remove);
CuSuiteRun(suite);
CuSuiteDetails(suite, output);
#define ASSERT_EQUAL(x, y) CuAssertIntEquals(tc, x, y)
+// Transaction isolation test: two repeatable-read transactions update the
+// same key; t2 commits first, so t1's commit must fail validation.
void test1 (CuTest* tc) {
-    map_t *map = map_alloc(MAP_TYPE_LIST);
+    map_t *map = map_alloc(MAP_TYPE_LIST, NULL, NULL, NULL);
    txn_t *t1 = txn_begin(TXN_READ_WRITE, TXN_REPEATABLE_READ, map);
    txn_t *t2 = txn_begin(TXN_READ_WRITE, TXN_REPEATABLE_READ, map);
-    tm_set(t1, "abc", 4, 2);
-    tm_set(t1, "abc", 4, 3);
-    ASSERT_EQUAL( DOES_NOT_EXIST, tm_get(t2, "abc", 4) );
-    tm_set(t2, "abc", 4, 4);
-    ASSERT_EQUAL( 3, tm_get(t1, "abc", 4) );
-    ASSERT_EQUAL( 4, tm_get(t2, "abc", 4) );
+    // With NULL cmp/hash funs the map presumably compares keys by pointer
+    // value, and identical string literals are NOT guaranteed to share one
+    // address (C11 6.4.5p7) -- use a single object so every call sees the
+    // same key.
+    void *key = "abc";
+    tm_set(t1, key, 2);
+    tm_set(t1, key, 3);
+    ASSERT_EQUAL( DOES_NOT_EXIST, tm_get(t2, key) );
+    tm_set(t2, key, 4);
+    ASSERT_EQUAL( 3, tm_get(t1, key) );
+    ASSERT_EQUAL( 4, tm_get(t2, key) );
    ASSERT_EQUAL( TXN_VALIDATED, txn_commit(t2));
    ASSERT_EQUAL( TXN_ABORTED, txn_commit(t1));
}
+ optimize integer keys
- ht_print()
- iterators
+- characterize performance of data structures
+- experiment with the performance impact of not passing the hash between functions
+- experiment with embedding keys in the list/skiplist nodes
};
+// One entry in a transaction's write set: the key written and its most
+// recent update record, kept for commit-time validation.
typedef struct write_rec {
-    const char *key;
+    void *key;  // opaque key passed through to the map; lifetime/ownership handled by the map's clone_fun -- TODO confirm
    update_rec_t *rec;
} write_rec_t;
// Validate the updates for <key>. Validation fails for a key we have written to if there is a
// write committed newer than our read version.
-static txn_state_e tm_validate_key (txn_t *txn, const char *key, uint32_t key_len) {
+static txn_state_e tm_validate_key (txn_t *txn, void *key) {
- update_rec_t *update = (update_rec_t *) map_get(txn->map, key, key_len);
+ update_rec_t *update = (update_rec_t *) map_get(txn->map, key);
for (; update != NULL; update = update->prev) {
uint64_t writer_version = update->version;
if (writer_version <= txn->rv)
}
for (i = 0; i < txn->writes_count; ++i) {
- txn_state_e s = tm_validate_key(txn, txn->writes[i].key, strlen(txn->writes[i].key));
+ txn_state_e s = tm_validate_key(txn, txn->writes[i].key);
if (s == TXN_ABORTED) {
txn->state = TXN_ABORTED;
break;
}
// Get most recent committed version prior to our read version.
-uint64_t tm_get (txn_t *txn, const char *key, uint32_t key_len) {
+uint64_t tm_get (txn_t *txn, void *key) {
// Iterate through update records associated with <key> to find the latest committed version.
// We can use the first matching version. Older updates always come later in the list.
- update_rec_t *update = (update_rec_t *) map_get(txn->map, key, key_len);
+ update_rec_t *update = (update_rec_t *) map_get(txn->map, key);
for (; update != NULL; update = update->prev) {
uint64_t writer_version = update->version;
if (writer_version < txn->rv)
return DOES_NOT_EXIST;
}
-void tm_set (txn_t *txn, const char *key, uint32_t key_len, uint64_t value) {
+void tm_set (txn_t *txn, void *key, uint64_t value) {
// create a new update record
update_rec_t *update = alloc_update_rec();
// push the new update record onto <key>'s update list
uint64_t update_prev;
do {
- update->prev = (update_rec_t *) map_get(txn->map, key, key_len);
+ update->prev = (update_rec_t *) map_get(txn->map, key);
update_prev = (uint64_t)update->prev;
- } while (map_cas(txn->map, key, key_len, update_prev, (uint64_t)update) != update_prev);
+ } while (map_cas(txn->map, key, update_prev, (uint64_t)update) != update_prev);
// add <key> to the write set for commit-time validation
if (txn->writes_count == txn->writes_size) {