#define DOES_NOT_EXIST 0
-#define HT_EXPECT_NOT_EXISTS ( 0)
-#define HT_EXPECT_EXISTS (-1)
-#define HT_EXPECT_WHATEVER (-2)
+#define EXPECT_DOES_NOT_EXIST ( 0)
+#define EXPECT_EXISTS (-1)
+#define EXPECT_WHATEVER (-2)
-typedef struct hash_table_i *hash_table_t;
+typedef struct hti *hashtable_t;
-hash_table_t *ht_alloc (void);
-void ht_free (hash_table_t *ht);
-uint64_t ht_get (hash_table_t *ht, const char *key, uint32_t len);
-uint64_t ht_compare_and_set (hash_table_t *ht, const char *key, uint32_t key_len, uint64_t expected_val, uint64_t val);
-uint64_t ht_remove (hash_table_t *ht, const char *key, uint32_t len);
-uint64_t ht_count (hash_table_t *ht);
+hashtable_t *ht_alloc (void);
+void ht_free (hashtable_t *ht);
+uint64_t ht_get (hashtable_t *ht, const char *key, uint32_t len);
+uint64_t ht_compare_and_set (hashtable_t *ht, const char *key, uint32_t key_len, uint64_t expected_val, uint64_t val);
+uint64_t ht_remove (hashtable_t *ht, const char *key, uint32_t len);
+uint64_t ht_count (hashtable_t *ht);
#endif//STRUCT_H
typedef struct txn txn_t;
-txn_t *txn_begin (txn_access_t access, txn_isolation_t isolation, hash_table_t *ht);
+txn_t *txn_begin (txn_access_t access, txn_isolation_t isolation, hashtable_t *ht);
#endif//TXN_H
char val[];
} string_t;
-typedef struct hash_table_i {
+typedef struct hti {
volatile entry_t *table;
- hash_table_t *ht; // parent ht;
- struct hash_table_i *next;
- struct hash_table_i *next_free;
+ hashtable_t *ht; // parent ht;
+ struct hti *next;
+ struct hti *next_free;
unsigned int scale;
int max_probe;
int count; // TODO: make these counters distributed
int num_entries_copied;
int scan;
-} hash_table_i_t;
+} hashtable_i_t;
static const uint64_t COPIED_VALUE = -1;
static const uint64_t TOMBSTONE = STRIP_TAG(-1);
static const unsigned MAX_BUCKETS_TO_PROBE = 250;
static int hti_copy_entry
- (hash_table_i_t *ht1, volatile entry_t *e, uint32_t e_key_hash, hash_table_i_t *ht2);
+ (hashtable_i_t *ht1, volatile entry_t *e, uint32_t e_key_hash, hashtable_i_t *ht2);
// Choose the next bucket to probe using the high-order bits of <key_hash>.
static inline int get_next_ndx(int old_ndx, uint32_t key_hash, int ht_scale) {
//
// Record if the entry being returned is empty. Otherwise the caller will have to waste time with
// ht_key_equals() to confirm that it did not lose a race to fill an empty entry.
-static volatile entry_t *hti_lookup (hash_table_i_t *hti, uint32_t key_hash, const char *key_val, uint32_t key_len, int *is_empty) {
+static volatile entry_t *hti_lookup (hashtable_i_t *hti, uint32_t key_hash, const char *key_val, uint32_t key_len, int *is_empty) {
TRACE("h2", "hti_lookup(key %p in hti %p)", key_val, hti);
*is_empty = 0;
return NULL;
}
-// Allocate and initialize a hash_table_i_t with 2^<scale> entries.
-static hash_table_i_t *hti_alloc (hash_table_t *parent, int scale) {
+// Allocate and initialize a hashtable_i_t with 2^<scale> entries.
+static hashtable_i_t *hti_alloc (hashtable_t *parent, int scale) {
// Include enough slop to align the actual table on a cache line boundry
- size_t n = sizeof(hash_table_i_t)
+ size_t n = sizeof(hashtable_i_t)
+ sizeof(entry_t) * (1 << scale)
+ (CACHE_LINE_SIZE - 1);
- hash_table_i_t *hti = (hash_table_i_t *)calloc(n, 1);
+ hashtable_i_t *hti = (hashtable_i_t *)calloc(n, 1);
// Align the table of hash entries on a cache line boundry.
- hti->table = (entry_t *)(((uint64_t)hti + sizeof(hash_table_i_t) + (CACHE_LINE_SIZE-1))
+ hti->table = (entry_t *)(((uint64_t)hti + sizeof(hashtable_i_t) + (CACHE_LINE_SIZE-1))
& ~(CACHE_LINE_SIZE-1));
hti->scale = scale;
// Called when <hti> runs out of room for new keys.
//
-// Initiates a copy by creating a larger hash_table_i_t and installing it in <hti->next>.
-static void hti_start_copy (hash_table_i_t *hti) {
+// Initiates a copy by creating a larger hashtable_i_t and installing it in <hti->next>.
+static void hti_start_copy (hashtable_i_t *hti) {
TRACE("h0", "hti_start_copy(hti %p scale %llu)", hti, hti->scale);
// heuristics to determine the size of the new table
new_scale += (count > (1 << (new_scale - 2))); // double size again if more than 1/2 full
// Allocate the new table and attempt to install it.
- hash_table_i_t *next = hti_alloc(hti->ht, new_scale);
- hash_table_i_t *old_next = SYNC_CAS(&hti->next, NULL, next);
+ hashtable_i_t *next = hti_alloc(hti->ht, new_scale);
+ hashtable_i_t *old_next = SYNC_CAS(&hti->next, NULL, next);
if (old_next != NULL) {
// Another thread beat us to it.
TRACE("h0", "hti_start_copy: lost race to install new hti; found %p", old_next, 0);
//
// Return 1 unless <ht1_e> is already copied (then return 0), so the caller can account for the total
// number of entries left to copy.
-static int hti_copy_entry (hash_table_i_t *ht1, volatile entry_t *ht1_e, uint32_t key_hash,
- hash_table_i_t *ht2) {
+static int hti_copy_entry (hashtable_i_t *ht1, volatile entry_t *ht1_e, uint32_t key_hash,
+ hashtable_i_t *ht2) {
TRACE("h2", "hti_copy_entry: entry %p to table %p", ht1_e, ht2);
assert(ht1);
assert(ht1->next);
//
// NOTE: the returned value matches <expected> iff the set succeeds
//
-// Certain values of <expected> have special meaning. If <expected> is HT_EXPECT_EXISTS then any
+// Certain values of <expected> have special meaning. If <expected> is EXPECT_EXISTS then any
// real value matches (i.e. not a TOMBSTONE or DOES_NOT_EXIST) as long as <key> is in the table. If
-// <expected> is HT_EXPECT_WHATEVER then skip the test entirely.
+// <expected> is EXPECT_WHATEVER then skip the test entirely.
//
-static uint64_t hti_compare_and_set (hash_table_i_t *hti, uint32_t key_hash, const char *key_val,
+static uint64_t hti_compare_and_set (hashtable_i_t *hti, uint32_t key_hash, const char *key_val,
uint32_t key_len, uint64_t expected, uint64_t new) {
TRACE("h1", "hti_compare_and_set: hti %p key %p", hti, key_val);
TRACE("h1", "hti_compare_and_set: value %p expect %p", new, expected);
// Install <key> in the table if it doesn't exist.
if (is_empty) {
TRACE("h0", "hti_compare_and_set: entry %p is empty", e, 0);
- if (expected != HT_EXPECT_WHATEVER && expected != HT_EXPECT_NOT_EXISTS)
+ if (expected != EXPECT_WHATEVER && expected != EXPECT_DOES_NOT_EXIST)
return DOES_NOT_EXIST;
// No need to do anything, <key> is already deleted.
uint64_t e_value = e->value;
if (EXPECT_FALSE(IS_TAGGED(e_value))) {
if (e_value != COPIED_VALUE) {
- int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hash_table_i_t *)hti)->next);
+ int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hashtable_i_t *)hti)->next);
if (did_copy) {
SYNC_ADD(&hti->num_entries_copied, 1);
}
// Fail if the old value is not consistent with the caller's expectation.
int old_existed = (e_value != TOMBSTONE && e_value != DOES_NOT_EXIST);
- if (EXPECT_FALSE(expected != HT_EXPECT_WHATEVER && expected != e_value)) {
- if (EXPECT_FALSE(expected != (old_existed ? HT_EXPECT_EXISTS : HT_EXPECT_NOT_EXISTS))) {
+ if (EXPECT_FALSE(expected != EXPECT_WHATEVER && expected != e_value)) {
+ if (EXPECT_FALSE(expected != (old_existed ? EXPECT_EXISTS : EXPECT_DOES_NOT_EXIST))) {
TRACE("h1", "hti_compare_and_set: value %p expected by caller not found; found value %p",
expected, e_value);
return e_value;
}
//
-static uint64_t hti_get (hash_table_i_t *hti, uint32_t key_hash, const char *key_val, uint32_t key_len) {
+static uint64_t hti_get (hashtable_i_t *hti, uint32_t key_hash, const char *key_val, uint32_t key_len) {
assert(key_val);
int is_empty;
// searching the table. In that case, if a copy is in progress the key
// might exist in the copy.
if (EXPECT_FALSE(e == NULL)) {
- if (((volatile hash_table_i_t *)hti)->next != NULL)
+ if (((volatile hashtable_i_t *)hti)->next != NULL)
return hti_get(hti->next, key_hash, key_val, key_len); // recursive tail-call
return DOES_NOT_EXIST;
}
uint64_t e_value = e->value;
if (EXPECT_FALSE(IS_TAGGED(e_value))) {
if (EXPECT_FALSE(e_value != COPIED_VALUE)) {
- int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hash_table_i_t *)hti)->next);
+ int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hashtable_i_t *)hti)->next);
if (did_copy) {
SYNC_ADD(&hti->num_entries_copied, 1);
}
}
- return hti_get(((volatile hash_table_i_t *)hti)->next, key_hash, key_val, key_len); // tail-call
+ return hti_get(((volatile hashtable_i_t *)hti)->next, key_hash, key_val, key_len); // tail-call
}
return (e_value == TOMBSTONE) ? DOES_NOT_EXIST : e_value;
}
//
-uint64_t ht_get (hash_table_t *ht, const char *key_val, uint32_t key_len) {
+uint64_t ht_get (hashtable_t *ht, const char *key_val, uint32_t key_len) {
return hti_get(*ht, murmur32(key_val, key_len), key_val, key_len);
}
//
-uint64_t ht_compare_and_set (hash_table_t *ht, const char *key_val, uint32_t key_len,
+uint64_t ht_compare_and_set (hashtable_t *ht, const char *key_val, uint32_t key_len,
uint64_t expected_val, uint64_t new_val) {
TRACE("h2", "ht_compare_and_set: key %p len %u", key_val, key_len);
assert(key_val);
assert(!IS_TAGGED(new_val) && new_val != DOES_NOT_EXIST);
- hash_table_i_t *hti = *ht;
+ hashtable_i_t *hti = *ht;
// Help with an ongoing copy.
if (EXPECT_FALSE(hti->next != NULL)) {
// Remove the value in <ht> associated with <key_val>. Returns the value removed, or
// DOES_NOT_EXIST if there was no value for that key.
-uint64_t ht_remove (hash_table_t *ht, const char *key_val, uint32_t key_len) {
- hash_table_i_t *hti = *ht;
+uint64_t ht_remove (hashtable_t *ht, const char *key_val, uint32_t key_len) {
+ hashtable_i_t *hti = *ht;
uint64_t val;
uint32_t key_hash = murmur32(key_val, key_len);
do {
- val = hti_compare_and_set(hti, key_hash, key_val, key_len, HT_EXPECT_WHATEVER, TOMBSTONE);
+ val = hti_compare_and_set(hti, key_hash, key_val, key_len, EXPECT_WHATEVER, TOMBSTONE);
if (val != COPIED_VALUE)
return val == TOMBSTONE ? DOES_NOT_EXIST : val;
assert(hti->next);
}
// Returns the number of key-values pairs in <ht>
-uint64_t ht_count (hash_table_t *ht) {
- hash_table_i_t *hti = *ht;
+uint64_t ht_count (hashtable_t *ht) {
+ hashtable_i_t *hti = *ht;
uint64_t count = 0;
while (hti) {
count += hti->count;
}
// Allocate and initialize a new hash table.
-hash_table_t *ht_alloc (void) {
- hash_table_t *ht = nbd_malloc(sizeof(hash_table_t));
- *ht = (hash_table_i_t *)hti_alloc(ht, MIN_SCALE);
+hashtable_t *ht_alloc (void) {
+ hashtable_t *ht = nbd_malloc(sizeof(hashtable_t));
+ *ht = (hashtable_i_t *)hti_alloc(ht, MIN_SCALE);
return ht;
}
// Free <ht> and its internal structures.
-void ht_free (hash_table_t *ht) {
- hash_table_i_t *hti = *ht;
+void ht_free (hashtable_t *ht) {
+ hashtable_i_t *hti = *ht;
do {
for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
assert(hti->table[i].value == COPIED_VALUE || !IS_TAGGED(hti->table[i].value));
nbd_free(GET_PTR(hti->table[i].key));
}
}
- hash_table_i_t *next = hti->next;
+ hashtable_i_t *next = hti->next;
nbd_free(hti);
hti = next;
} while (hti);
struct node *next;
} node_t;
-typedef struct list {
+typedef struct ll {
node_t *head;
} list_t;
}
list_t *ll_alloc (void) {
- list_t *list = (list_t *)nbd_malloc(sizeof(list_t));
- list->head = node_alloc((uint64_t)-1, 0);
- list->head->next = NULL;
- return list;
+ list_t *ll = (list_t *)nbd_malloc(sizeof(list_t));
+ ll->head = node_alloc((uint64_t)-1, 0);
+ ll->head->next = NULL;
+ return ll;
}
-static node_t *find_pred (node_t **pred_ptr, list_t *list, uint64_t key, int help_remove) {
- node_t *pred = list->head;
+static node_t *find_pred (node_t **pred_ptr, list_t *ll, uint64_t key, int help_remove) {
+ node_t *pred = ll->head;
node_t *item = pred->next;
- TRACE("l3", "find_pred: searching for key %p in list (head is %p)", key, pred);
+ TRACE("l3", "find_pred: searching for key %p in ll (head is %p)", key, pred);
while (item != NULL) {
node_t *next = item->next;
} else {
TRACE("l3", "find_pred: lost race to unlink from pred %p; its link changed to %p", pred, other);
if (IS_TAGGED(other))
- return find_pred(pred_ptr, list, key, help_remove); // retry
+ return find_pred(pred_ptr, ll, key, help_remove); // retry
item = other;
if (EXPECT_FALSE(item == NULL))
break;
}
- // <key> is not in the list.
+ // <key> is not in <ll>.
if (pred_ptr != NULL) {
*pred_ptr = pred;
}
}
// Fast find. Do not help unlink partially removed nodes and do not return the found item's predecessor.
-uint64_t ll_lookup (list_t *list, uint64_t key) {
- TRACE("l3", "ll_lookup: searching for key %p in list %p", key, list);
- node_t *item = find_pred(NULL, list, key, FALSE);
+uint64_t ll_lookup (list_t *ll, uint64_t key) {
+ TRACE("l3", "ll_lookup: searching for key %p in ll %p", key, ll);
+ node_t *item = find_pred(NULL, ll, key, FALSE);
// If we found an <item> matching the <key> return its value.
return (item && item->key == key) ? item->value : DOES_NOT_EXIST;
}
-// Insert the <key>, if it doesn't already exist in the <list>
-uint64_t ll_add (list_t *list, uint64_t key, uint64_t value) {
+// Insert the <key>, if it doesn't already exist in <ll>
+uint64_t ll_add (list_t *ll, uint64_t key, uint64_t value) {
TRACE("l3", "ll_add: inserting key %p value %p", key, value);
node_t *pred;
node_t *item = NULL;
do {
- node_t *next = find_pred(&pred, list, key, TRUE);
+ node_t *next = find_pred(&pred, ll, key, TRUE);
- // If a node matching <key> already exists in the list, return its value.
+ // If a node matching <key> already exists in <ll>, return its value.
if (next != NULL && next->key == key) {
TRACE("l3", "ll_add: there is already an item %p (value %p) with the same key", next, next->value);
if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
} while (1);
}
-uint64_t ll_remove (list_t *list, uint64_t key) {
- TRACE("l3", "ll_remove: removing item with key %p from list %p", key, list);
+uint64_t ll_remove (list_t *ll, uint64_t key) {
+ TRACE("l3", "ll_remove: removing item with key %p from ll %p", key, ll);
node_t *pred;
- node_t *item = find_pred(&pred, list, key, TRUE);
+ node_t *item = find_pred(&pred, ll, key, TRUE);
if (item == NULL || item->key != key) {
- TRACE("l3", "ll_remove: remove failed, an item with a matching key does not exist in the list", 0, 0);
+ TRACE("l3", "ll_remove: remove failed, an item with a matching key does not exist in the ll", 0, 0);
return DOES_NOT_EXIST;
}
uint64_t value = item->value;
- // Unlink <item> from the list.
+ // Unlink <item> from <ll>.
TRACE("l3", "ll_remove: link item's pred %p to it's successor %p", pred, next);
node_t *other;
if ((other = SYNC_CAS(&pred->next, item, next)) != item) {
TRACE("l3", "ll_remove: unlink failed; pred's link changed from %p to %p", item, other);
// By marking the item earlier, we logically removed it. It is safe to leave the item.
- // Another thread will finish physically removing it from the list.
+ // Another thread will finish physically removing it from the ll.
return value;
}
return value;
}
-void ll_print (list_t *list) {
+void ll_print (list_t *ll) {
node_t *item;
- item = list->head->next;
+ item = ll->head->next;
while (item) {
printf("0x%llx ", item->key);
fflush(stdout);
for (int i = 0; i < NUM_ITERATIONS/num_threads_; ++i) {
unsigned r = nbd_rand();
- int key = (r & 0xF);
+ int key = r & 0xF;
if (r & (1 << 8)) {
ll_add(ll_, key, 1);
} else {
struct node *next[];
} node_t;
-typedef struct skiplist {
+typedef struct sl {
node_t *head;
} skiplist_t;
}
skiplist_t *sl_alloc (void) {
- skiplist_t *skiplist = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
- skiplist->head = node_alloc(MAX_LEVEL, 0, 0);
- memset(skiplist->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
- return skiplist;
+ skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
+ sl->head = node_alloc(MAX_LEVEL, 0, 0);
+ memset(sl->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
+ return sl;
}
-static node_t *find_preds (node_t *preds[MAX_LEVEL+1], int n, skiplist_t *skiplist, uint64_t key, int help_remove) {
- node_t *pred = skiplist->head;
+static node_t *find_preds (node_t *preds[MAX_LEVEL+1], int n, skiplist_t *sl, uint64_t key, int help_remove) {
+ node_t *pred = sl->head;
node_t *item = NULL;
- TRACE("s3", "find_preds: searching for key %p in skiplist (head is %p)", key, pred);
+ TRACE("s3", "find_preds: searching for key %p in sl (head is %p)", key, pred);
+ int start_level = MAX_LEVEL;
+#if MAX_LEVEL > 2
// Optimization for small lists. No need to traverse empty higher levels.
- assert(MAX_LEVEL > 2);
- int start_level = 2;
+ start_level = 2;
while (pred->next[start_level+1] != NULL) {
start_level += start_level - 1;
if (EXPECT_FALSE(start_level >= MAX_LEVEL)) {
if (EXPECT_FALSE(start_level < n)) {
start_level = n;
}
+#endif
- // Traverse the levels of the skiplist from the top level to the bottom
+ // Traverse the levels of <sl> from the top level to the bottom
for (int level = start_level; level >= 0; --level) {
TRACE("s3", "find_preds: level %llu", level, 0);
item = pred->next[level];
if (EXPECT_FALSE(IS_TAGGED(item))) {
TRACE("s3", "find_preds: pred %p is marked for removal (item %p); retry", pred, item);
- return find_preds(preds, n, skiplist, key, help_remove); // retry
+ return find_preds(preds, n, sl, key, help_remove); // retry
}
while (item != NULL) {
node_t *next = item->next[level];
} else {
TRACE("s3", "find_preds: lost race to unlink from pred %p; its link changed to %p", pred, other);
if (IS_TAGGED(other))
- return find_preds(preds, n, skiplist, key, help_remove); // retry
+ return find_preds(preds, n, sl, key, help_remove); // retry
item = other;
if (EXPECT_FALSE(item == NULL))
break;
if (n == -1 && item != NULL) {
assert(preds != NULL);
for (int level = start_level + 1; level <= item->top_level; ++level) {
- preds[level] = skiplist->head;
+ preds[level] = sl->head;
}
}
return item;
}
// Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
-uint64_t sl_lookup (skiplist_t *skiplist, uint64_t key) {
- TRACE("s3", "sl_lookup: searching for key %p in skiplist %p", key, skiplist);
- node_t *item = find_preds(NULL, 0, skiplist, key, FALSE);
+uint64_t sl_lookup (skiplist_t *sl, uint64_t key) {
+ TRACE("s3", "sl_lookup: searching for key %p in sl %p", key, sl);
+ node_t *item = find_preds(NULL, 0, sl, key, FALSE);
// If we found an <item> matching the <key> return its value.
return (item && item->key == key) ? item->value : DOES_NOT_EXIST;
}
-// Insert the <key> if it doesn't already exist in the <skiplist>
-uint64_t sl_add (skiplist_t *skiplist, uint64_t key, uint64_t value) {
+// Insert the <key> if it doesn't already exist in <sl>
+uint64_t sl_add (skiplist_t *sl, uint64_t key, uint64_t value) {
TRACE("s3", "sl_add: inserting key %p value %p", key, value);
node_t *preds[MAX_LEVEL+1];
node_t *item = NULL;
do {
int n = random_level();
- node_t *next = find_preds(preds, n, skiplist, key, TRUE);
+ node_t *next = find_preds(preds, n, sl, key, TRUE);
- // If a node matching <key> already exists in the skiplist, return its value.
+ // If a node matching <key> already exists in <sl>, return its value.
if (next != NULL && next->key == key) {
TRACE("s3", "sl_add: there is already an item %p (value %p) with the same key", next, next->value);
if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
} while (1);
- // Insert <item> into the skiplist from the bottom level up.
+ // Insert <item> into <sl> from the bottom level up.
for (int level = 1; level <= item->top_level; ++level) {
do {
node_t *pred;
break;
if (!IS_TAGGED(next) && next->key > key) // pred's link changed
break;
- find_preds(preds, item->top_level, skiplist, key, TRUE);
+ find_preds(preds, item->top_level, sl, key, TRUE);
} while (1);
do {
return value;
}
-uint64_t sl_remove (skiplist_t *skiplist, uint64_t key) {
- TRACE("s3", "sl_remove: removing item with key %p from skiplist %p", key, skiplist);
+uint64_t sl_remove (skiplist_t *sl, uint64_t key) {
+ TRACE("s3", "sl_remove: removing item with key %p from sl %p", key, sl);
node_t *preds[MAX_LEVEL+1];
- node_t *item = find_preds(preds, -1, skiplist, key, TRUE);
+ node_t *item = find_preds(preds, -1, sl, key, TRUE);
if (item == NULL || item->key != key) {
- TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the skiplist", 0, 0);
+ TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the sl", 0, 0);
return DOES_NOT_EXIST;
}
- // Mark <item> removed at each level of the skiplist from the top down. This must be atomic. If multiple threads
+ // Mark <item> removed at each level of <sl> from the top down. This must be atomic. If multiple threads
// try to remove the same item only one of them should succeed. Marking the bottom level establishes which of
// them succeeds.
for (int level = item->top_level; level >= 0; --level) {
if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) != item) {
TRACE("s3", "sl_remove: unlink failed; pred's link changed from %p to %p", item, other);
// By marking the item earlier, we logically removed it. It is safe to leave the item partially
- // unlinked. Another thread will finish physically removing it from the skiplist.
+ // unlinked. Another thread will finish physically removing it from <sl>.
return value;
}
--level;
return value;
}
-void sl_print (skiplist_t *skiplist) {
+void sl_print (skiplist_t *sl) {
for (int level = MAX_LEVEL; level >= 0; --level) {
- node_t *item = skiplist->head;
+ node_t *item = sl->head;
if (item->next[level] == NULL)
continue;
printf("(%d) ", level);
}
printf("\n");
- node_t *item = skiplist->head;
+ node_t *item = sl->head;
while (item) {
int is_marked = IS_TAGGED(item->next[0]);
- printf("%s%p:0x%llx [%d]", is_marked ? "*" : "", item, item->key, item->top_level);
+ printf("%s%p:0x%llx ", is_marked ? "*" : "", item, item->key);
+ if (item != sl->head) {
+ printf("[%d]", item->top_level);
+ } else {
+ printf("[*]");
+ }
for (int level = 1; level <= item->top_level; ++level) {
node_t *next = (node_t *)STRIP_TAG(item->next[level]);
is_marked = IS_TAGGED(item->next[0]);
printf(" %p%s", next, is_marked ? "*" : "");
- if (item == skiplist->head && item->next[level] == NULL)
+ if (item == sl->head && item->next[level] == NULL)
break;
}
printf("\n");
for (int i = 0; i < NUM_ITERATIONS/num_threads_; ++i) {
unsigned r = nbd_rand();
- int key = (r & 0xF) + 1;
+ int key = r & 0xF;
if (r & (1 << 8)) {
sl_add(sl_, key, 1);
} else {
typedef struct worker_data {
int id;
CuTest *tc;
- hash_table_t *ht;
+ hashtable_t *ht;
int *wait;
} worker_data_t;
-int64_t ht_set (hash_table_t *ht, const char *key, uint32_t key_len, int64_t val) {
- return ht_compare_and_set(ht, key, key_len, HT_EXPECT_WHATEVER, val);
+int64_t ht_set (hashtable_t *ht, const char *key, uint32_t key_len, int64_t val) {
+ return ht_compare_and_set(ht, key, key_len, EXPECT_WHATEVER, val);
}
-int64_t ht_add (hash_table_t *ht, const char *key, uint32_t key_len, int64_t val) {
- return ht_compare_and_set(ht, key, key_len, HT_EXPECT_NOT_EXISTS, val);
+int64_t ht_add (hashtable_t *ht, const char *key, uint32_t key_len, int64_t val) {
+ return ht_compare_and_set(ht, key, key_len, EXPECT_DOES_NOT_EXIST, val);
}
-int64_t ht_replace (hash_table_t *ht, const char *key, uint32_t key_len, int64_t val) {
- return ht_compare_and_set(ht, key, key_len, HT_EXPECT_EXISTS, val);
+int64_t ht_replace (hashtable_t *ht, const char *key, uint32_t key_len, int64_t val) {
+ return ht_compare_and_set(ht, key, key_len, EXPECT_EXISTS, val);
}
// Test some basic stuff; add a few keys, remove a few keys
void basic_test (CuTest* tc) {
- hash_table_t *ht = ht_alloc();
+ hashtable_t *ht = ht_alloc();
ASSERT_EQUAL( 0, ht_count(ht) );
ASSERT_EQUAL( DOES_NOT_EXIST, ht_add(ht,"a",2,10) );
void *simple_worker (void *arg) {
worker_data_t *wd = (worker_data_t *)arg;
- hash_table_t *ht = wd->ht;
+ hashtable_t *ht = wd->ht;
CuTest* tc = wd->tc;
uint64_t d = wd->id;
int iters = 1000000;
pthread_t thread[2];
worker_data_t wd[2];
int wait = 2;
- hash_table_t *ht = ht_alloc();
+ hashtable_t *ht = ht_alloc();
// In 2 threads, add & remove even & odd elements concurrently
int i;
void *inserter_worker (void *arg) {
//pthread_t thread[NUM_THREADS];
- //hash_table_t *ht = ht_alloc();
+ //hashtable_t *ht = ht_alloc();
return NULL;
}
+ optimize tracing code, still too much overhead
+ use NULL instead of a sentinel node in skiplist and list
- make interfaces for all data structures consistent
+- make list and skiplist use string keys
struct txn {
uint64_t rv;
uint64_t wv;
- hash_table_t *ht;
+ hashtable_t *ht;
write_rec_t *writes;
uint32_t writes_size;
uint32_t writes_count;
return u;
}
-txn_t *txn_begin (txn_access_t access, txn_isolation_t isolation, hash_table_t *ht) {
+txn_t *txn_begin (txn_access_t access, txn_isolation_t isolation, hashtable_t *ht) {
txn_t *txn = (txn_t *)nbd_malloc(sizeof(txn_t));
memset(txn, 0, sizeof(txn_t));
txn->access = access;