From a1fae129c758d7ea83dfdbb5f14ec1df12f0aa34 Mon Sep 17 00:00:00 2001
From: jdybnis
Date: Tue, 25 Nov 2008 09:11:15 +0000
Subject: [PATCH] change the names of some variables and types for consistency across APIs

---
 include/struct.h   | 20 +++++------
 include/txn.h      |  2 +-
 struct/hashtable.c | 86 +++++++++++++++++++++++-----------------------
 struct/list.c      | 52 ++++++++++++++--------------
 struct/skiplist.c  | 77 ++++++++++++++++++++++-------------------
 test/ht_test.c     | 22 ++++++------
 todo               |  1 +
 txn/txn.c          |  4 +--
 8 files changed, 136 insertions(+), 128 deletions(-)

diff --git a/include/struct.h b/include/struct.h
index bb5125a..5894c0b 100644
--- a/include/struct.h
+++ b/include/struct.h
@@ -3,17 +3,17 @@
 
 #define DOES_NOT_EXIST 0
 
-#define HT_EXPECT_NOT_EXISTS ( 0)
-#define HT_EXPECT_EXISTS     (-1)
-#define HT_EXPECT_WHATEVER   (-2)
+#define EXPECT_DOES_NOT_EXIST ( 0)
+#define EXPECT_EXISTS         (-1)
+#define EXPECT_WHATEVER       (-2)
 
-typedef struct hash_table_i *hash_table_t;
+typedef struct hti *hashtable_t;
 
-hash_table_t *ht_alloc (void);
-void ht_free (hash_table_t *ht);
-uint64_t ht_get (hash_table_t *ht, const char *key, uint32_t len);
-uint64_t ht_compare_and_set (hash_table_t *ht, const char *key, uint32_t key_len, uint64_t expected_val, uint64_t val);
-uint64_t ht_remove (hash_table_t *ht, const char *key, uint32_t len);
-uint64_t ht_count (hash_table_t *ht);
+hashtable_t *ht_alloc (void);
+void ht_free (hashtable_t *ht);
+uint64_t ht_get (hashtable_t *ht, const char *key, uint32_t len);
+uint64_t ht_compare_and_set (hashtable_t *ht, const char *key, uint32_t key_len, uint64_t expected_val, uint64_t val);
+uint64_t ht_remove (hashtable_t *ht, const char *key, uint32_t len);
+uint64_t ht_count (hashtable_t *ht);
 
 #endif//STRUCT_H
diff --git a/include/txn.h b/include/txn.h
index b000074..54928cb 100644
--- a/include/txn.h
+++ b/include/txn.h
@@ -11,5 +11,5 @@ typedef enum { TXN_DIRTY_READ, TXN_READ_COMMITTED, TXN_REPEATABLE_READ } txn_iso
 
 typedef struct txn txn_t;
 
-txn_t *txn_begin (txn_access_t access, txn_isolation_t isolation, hash_table_t *ht);
+txn_t *txn_begin (txn_access_t access, txn_isolation_t isolation, hashtable_t *ht);
 
 #endif//TXN_H
diff --git a/struct/hashtable.c b/struct/hashtable.c
index 45c9ffc..523a29a 100644
--- a/struct/hashtable.c
+++ b/struct/hashtable.c
@@ -29,17 +29,17 @@ typedef struct string {
     char val[];
 } string_t;
 
-typedef struct hash_table_i {
+typedef struct hti {
     volatile entry_t *table;
-    hash_table_t *ht; // parent ht;
-    struct hash_table_i *next;
-    struct hash_table_i *next_free;
+    hashtable_t *ht; // parent ht;
+    struct hti *next;
+    struct hti *next_free;
     unsigned int scale;
     int max_probe;
     int count; // TODO: make these counters distributed
     int num_entries_copied;
     int scan;
-} hash_table_i_t;
+} hashtable_i_t;
 
 static const uint64_t COPIED_VALUE = -1;
 static const uint64_t TOMBSTONE    = STRIP_TAG(-1);
@@ -50,7 +50,7 @@ static const unsigned MIN_SCALE = 4; // min 16 entries (4 buckets)
 static const unsigned MAX_BUCKETS_TO_PROBE = 250;
 
 static int hti_copy_entry 
-    (hash_table_i_t *ht1, volatile entry_t *e, uint32_t e_key_hash, hash_table_i_t *ht2);
+    (hashtable_i_t *ht1, volatile entry_t *e, uint32_t e_key_hash, hashtable_i_t *ht2);
 
 // Choose the next bucket to probe using the high-order bits of <key_hash>.
 static inline int get_next_ndx(int old_ndx, uint32_t key_hash, int ht_scale) {
@@ -80,7 +80,7 @@ static inline int ht_key_equals (uint64_t a, uint32_t b_hash, const char *b_valu
 //
 // Record if the entry being returned is empty. Otherwise the caller will have to waste time with
 // ht_key_equals() to confirm that it did not lose a race to fill an empty entry.
-static volatile entry_t *hti_lookup (hash_table_i_t *hti, uint32_t key_hash, const char *key_val, uint32_t key_len, int *is_empty) {
+static volatile entry_t *hti_lookup (hashtable_i_t *hti, uint32_t key_hash, const char *key_val, uint32_t key_len, int *is_empty) {
     TRACE("h2", "hti_lookup(key %p in hti %p)", key_val, hti);
     *is_empty = 0;
@@ -117,16 +117,16 @@ static volatile entry_t *hti_lookup (hash_table_i_t *hti, uint32_t key_hash, con
     return NULL;
 }
 
-// Allocate and initialize a hash_table_i_t with 2^<scale> entries.
-static hash_table_i_t *hti_alloc (hash_table_t *parent, int scale) {
+// Allocate and initialize a hashtable_i_t with 2^<scale> entries.
+static hashtable_i_t *hti_alloc (hashtable_t *parent, int scale) {
     // Include enough slop to align the actual table on a cache line boundary
-    size_t n = sizeof(hash_table_i_t) 
+    size_t n = sizeof(hashtable_i_t) 
              + sizeof(entry_t) * (1 << scale) 
              + (CACHE_LINE_SIZE - 1);
-    hash_table_i_t *hti = (hash_table_i_t *)calloc(n, 1);
+    hashtable_i_t *hti = (hashtable_i_t *)calloc(n, 1);
 
     // Align the table of hash entries on a cache line boundary.
-    hti->table = (entry_t *)(((uint64_t)hti + sizeof(hash_table_i_t) + (CACHE_LINE_SIZE-1)) 
+    hti->table = (entry_t *)(((uint64_t)hti + sizeof(hashtable_i_t) + (CACHE_LINE_SIZE-1)) 
                             & ~(CACHE_LINE_SIZE-1));
 
     hti->scale = scale;
@@ -148,8 +148,8 @@ static hash_table_i_t *hti_alloc (hash_table_t *parent, int scale) {
 
 // Called when <hti> runs out of room for new keys.
 //
-// Initiates a copy by creating a larger hash_table_i_t and installing it in <hti->next>.
-static void hti_start_copy (hash_table_i_t *hti) {
+// Initiates a copy by creating a larger hashtable_i_t and installing it in <hti->next>.
+static void hti_start_copy (hashtable_i_t *hti) {
     TRACE("h0", "hti_start_copy(hti %p scale %llu)", hti, hti->scale);
 
     // heuristics to determine the size of the new table
@@ -159,8 +159,8 @@ static void hti_start_copy (hash_table_i_t *hti) {
     new_scale += (count > (1 << (new_scale - 2))); // double size again if more than 1/2 full
 
     // Allocate the new table and attempt to install it.
-    hash_table_i_t *next = hti_alloc(hti->ht, new_scale);
-    hash_table_i_t *old_next = SYNC_CAS(&hti->next, NULL, next);
+    hashtable_i_t *next = hti_alloc(hti->ht, new_scale);
+    hashtable_i_t *old_next = SYNC_CAS(&hti->next, NULL, next);
     if (old_next != NULL) {
         // Another thread beat us to it.
         TRACE("h0", "hti_start_copy: lost race to install new hti; found %p", old_next, 0);
@@ -174,8 +174,8 @@ static void hti_start_copy (hash_table_i_t *hti) {
 //
 // Return 1 unless <ht1_e> is already copied (then return 0), so the caller can account for the total
 // number of entries left to copy.
-static int hti_copy_entry (hash_table_i_t *ht1, volatile entry_t *ht1_e, uint32_t key_hash,
-                           hash_table_i_t *ht2) {
+static int hti_copy_entry (hashtable_i_t *ht1, volatile entry_t *ht1_e, uint32_t key_hash,
+                           hashtable_i_t *ht2) {
     TRACE("h2", "hti_copy_entry: entry %p to table %p", ht1_e, ht2);
     assert(ht1);
     assert(ht1->next);
@@ -291,11 +291,11 @@ static int hti_copy_entry (hash_table_i_t *ht1, volatile entry_t *ht1_e, uint32_
 //
 // NOTE: the returned value matches <expected> iff the set succeeds
 //
-// Certain values of <expected> have special meaning. If <expected> is HT_EXPECT_EXISTS then any
+// Certain values of <expected> have special meaning. If <expected> is EXPECT_EXISTS then any
 // real value matches (i.e. not a TOMBSTONE or DOES_NOT_EXIST) as long as <key> is in the table. If
-// <expected> is HT_EXPECT_WHATEVER then skip the test entirely.
+// <expected> is EXPECT_WHATEVER then skip the test entirely.
 //
-static uint64_t hti_compare_and_set (hash_table_i_t *hti, uint32_t key_hash, const char *key_val,
+static uint64_t hti_compare_and_set (hashtable_i_t *hti, uint32_t key_hash, const char *key_val,
                                     uint32_t key_len, uint64_t expected, uint64_t new) {
     TRACE("h1", "hti_compare_and_set: hti %p key %p", hti, key_val);
     TRACE("h1", "hti_compare_and_set: value %p expect %p", new, expected);
@@ -317,7 +317,7 @@ static uint64_t hti_compare_and_set (hash_table_i_t *hti, uint32_t key_hash, con
     // Install <key> in the table if it doesn't exist.
     if (is_empty) {
         TRACE("h0", "hti_compare_and_set: entry %p is empty", e, 0);
-        if (expected != HT_EXPECT_WHATEVER && expected != HT_EXPECT_NOT_EXISTS)
+        if (expected != EXPECT_WHATEVER && expected != EXPECT_DOES_NOT_EXIST)
             return DOES_NOT_EXIST;
 
         // No need to do anything, <key> is already deleted.
@@ -349,7 +349,7 @@ static uint64_t hti_compare_and_set (hash_table_i_t *hti, uint32_t key_hash, con
     uint64_t e_value = e->value;
     if (EXPECT_FALSE(IS_TAGGED(e_value))) {
         if (e_value != COPIED_VALUE) {
-            int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hash_table_i_t *)hti)->next);
+            int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hashtable_i_t *)hti)->next);
             if (did_copy) {
                 SYNC_ADD(&hti->num_entries_copied, 1);
             }
@@ -362,8 +362,8 @@ static uint64_t hti_compare_and_set (hash_table_i_t *hti, uint32_t key_hash, con
 
     // Fail if the old value is not consistent with the caller's expectation.
     int old_existed = (e_value != TOMBSTONE && e_value != DOES_NOT_EXIST);
-    if (EXPECT_FALSE(expected != HT_EXPECT_WHATEVER && expected != e_value)) {
-        if (EXPECT_FALSE(expected != (old_existed ? HT_EXPECT_EXISTS : HT_EXPECT_NOT_EXISTS))) {
+    if (EXPECT_FALSE(expected != EXPECT_WHATEVER && expected != e_value)) {
+        if (EXPECT_FALSE(expected != (old_existed ? EXPECT_EXISTS : EXPECT_DOES_NOT_EXIST))) {
             TRACE("h1", "hti_compare_and_set: value %p expected by caller not found; found value %p",
                   expected, e_value);
             return e_value;
@@ -396,7 +396,7 @@ static uint64_t hti_compare_and_set (hash_table_i_t *hti, uint32_t key_hash, con
 }
 
 //
-static uint64_t hti_get (hash_table_i_t *hti, uint32_t key_hash, const char *key_val, uint32_t key_len) {
+static uint64_t hti_get (hashtable_i_t *hti, uint32_t key_hash, const char *key_val, uint32_t key_len) {
     assert(key_val);
 
     int is_empty;
@@ -406,7 +406,7 @@ static uint64_t hti_get (hash_table_i_t *hti, uint32_t key_hash, const char *key
     // searching the table. In that case, if a copy is in progress the key
     // might exist in the copy.
     if (EXPECT_FALSE(e == NULL)) {
-        if (((volatile hash_table_i_t *)hti)->next != NULL)
+        if (((volatile hashtable_i_t *)hti)->next != NULL)
             return hti_get(hti->next, key_hash, key_val, key_len); // recursive tail-call
         return DOES_NOT_EXIST;
     }
@@ -418,24 +418,24 @@ static uint64_t hti_get (hash_table_i_t *hti, uint32_t key_hash, const char *key
     uint64_t e_value = e->value;
     if (EXPECT_FALSE(IS_TAGGED(e_value))) {
         if (EXPECT_FALSE(e_value != COPIED_VALUE)) {
-            int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hash_table_i_t *)hti)->next);
+            int did_copy = hti_copy_entry(hti, e, key_hash, ((volatile hashtable_i_t *)hti)->next);
             if (did_copy) {
                 SYNC_ADD(&hti->num_entries_copied, 1);
             }
         }
-        return hti_get(((volatile hash_table_i_t *)hti)->next, key_hash, key_val, key_len); // tail-call
+        return hti_get(((volatile hashtable_i_t *)hti)->next, key_hash, key_val, key_len); // tail-call
     }
 
     return (e_value == TOMBSTONE) ? DOES_NOT_EXIST : e_value;
 }
 
 //
-uint64_t ht_get (hash_table_t *ht, const char *key_val, uint32_t key_len) {
+uint64_t ht_get (hashtable_t *ht, const char *key_val, uint32_t key_len) {
     return hti_get(*ht, murmur32(key_val, key_len), key_val, key_len);
 }
 
 //
-uint64_t ht_compare_and_set (hash_table_t *ht, const char *key_val, uint32_t key_len,
+uint64_t ht_compare_and_set (hashtable_t *ht, const char *key_val, uint32_t key_len,
                             uint64_t expected_val, uint64_t new_val) {
 
     TRACE("h2", "ht_compare_and_set: key %p len %u", key_val, key_len);
@@ -443,7 +443,7 @@ uint64_t ht_compare_and_set (hash_table_t *ht, const char *key_val, uint32_t key
     assert(key_val);
     assert(!IS_TAGGED(new_val) && new_val != DOES_NOT_EXIST);
 
-    hash_table_i_t *hti = *ht;
+    hashtable_i_t *hti = *ht;
 
     // Help with an ongoing copy.
     if (EXPECT_FALSE(hti->next != NULL)) {
@@ -504,12 +504,12 @@ uint64_t ht_compare_and_set (hash_table_t *ht, const char *key_val, uint32_t key
 
 // Remove the value in <ht> associated with <key>. Returns the value removed, or
 // DOES_NOT_EXIST if there was no value for that key.
-uint64_t ht_remove (hash_table_t *ht, const char *key_val, uint32_t key_len) {
-    hash_table_i_t *hti = *ht;
+uint64_t ht_remove (hashtable_t *ht, const char *key_val, uint32_t key_len) {
+    hashtable_i_t *hti = *ht;
     uint64_t val;
     uint32_t key_hash = murmur32(key_val, key_len);
     do {
-        val = hti_compare_and_set(hti, key_hash, key_val, key_len, HT_EXPECT_WHATEVER, TOMBSTONE);
+        val = hti_compare_and_set(hti, key_hash, key_val, key_len, EXPECT_WHATEVER, TOMBSTONE);
         if (val != COPIED_VALUE)
             return val == TOMBSTONE ? DOES_NOT_EXIST : val;
         assert(hti->next);
@@ -519,8 +519,8 @@ uint64_t ht_remove (hash_table_t *ht, const char *key_val, uint32_t key_len) {
 }
 
 // Returns the number of key-value pairs in <ht>
-uint64_t ht_count (hash_table_t *ht) {
-    hash_table_i_t *hti = *ht;
+uint64_t ht_count (hashtable_t *ht) {
+    hashtable_i_t *hti = *ht;
     uint64_t count = 0;
     while (hti) {
         count += hti->count;
@@ -530,15 +530,15 @@ uint64_t ht_count (hash_table_t *ht) {
 }
 
 // Allocate and initialize a new hash table.
-hash_table_t *ht_alloc (void) {
-    hash_table_t *ht = nbd_malloc(sizeof(hash_table_t));
-    *ht = (hash_table_i_t *)hti_alloc(ht, MIN_SCALE);
+hashtable_t *ht_alloc (void) {
+    hashtable_t *ht = nbd_malloc(sizeof(hashtable_t));
+    *ht = (hashtable_i_t *)hti_alloc(ht, MIN_SCALE);
     return ht;
 }
 
 // Free <ht> and its internal structures.
-void ht_free (hash_table_t *ht) {
-    hash_table_i_t *hti = *ht;
+void ht_free (hashtable_t *ht) {
+    hashtable_i_t *hti = *ht;
     do {
         for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
             assert(hti->table[i].value == COPIED_VALUE || !IS_TAGGED(hti->table[i].value));
@@ -546,7 +546,7 @@ void ht_free (hash_table_t *ht) {
             if (hti->table[i].key != DOES_NOT_EXIST) {
                 nbd_free(GET_PTR(hti->table[i].key));
             }
         }
-        hash_table_i_t *next = hti->next;
+        hashtable_i_t *next = hti->next;
         nbd_free(hti);
         hti = next;
     } while (hti);
diff --git a/struct/list.c b/struct/list.c
index 124043d..c6c1780 100644
--- a/struct/list.c
+++ b/struct/list.c
@@ -18,7 +18,7 @@ typedef struct node {
     struct node *next;
 } node_t;
 
-typedef struct list {
+typedef struct ll {
     node_t *head;
 } list_t;
 
@@ -31,16 +31,16 @@ node_t *node_alloc (uint64_t key, uint64_t value) {
 }
 
 list_t *ll_alloc (void) {
-    list_t *list = (list_t *)nbd_malloc(sizeof(list_t));
-    list->head = node_alloc((uint64_t)-1, 0);
-    list->head->next = NULL;
-    return list;
+    list_t *ll = (list_t *)nbd_malloc(sizeof(list_t));
+    ll->head = node_alloc((uint64_t)-1, 0);
+    ll->head->next = NULL;
+    return ll;
 }
 
-static node_t *find_pred (node_t **pred_ptr, list_t *list, uint64_t key, int help_remove) {
-    node_t *pred = list->head;
+static node_t *find_pred (node_t **pred_ptr, list_t *ll, uint64_t key, int help_remove) {
+    node_t *pred = ll->head;
     node_t *item = pred->next;
-    TRACE("l3", "find_pred: searching for key %p in list (head is %p)", key, pred);
+    TRACE("l3", "find_pred: searching for key %p in ll (head is %p)", key, pred);
     while (item != NULL) {
         node_t *next = item->next;
 
@@ -74,7 +74,7 @@ static node_t *find_pred (node_t **pred_ptr, list_t *list, uint64_t key, int hel
         } else {
             TRACE("l3", "find_pred: lost race to unlink from pred %p; its link changed to %p", pred, other);
             if (IS_TAGGED(other))
-                return find_pred(pred_ptr, list, key, help_remove); // retry
+                return find_pred(pred_ptr, ll, key, help_remove); // retry
             item = other;
             if (EXPECT_FALSE(item == NULL))
                 break;
@@ -99,7 +99,7 @@ static node_t *find_pred (node_t **pred_ptr, list_t *list, uint64_t key, int hel
         }
     }
 
-    // <key> is not in the list.
+    // <key> is not in <ll>.
     if (pred_ptr != NULL) {
         *pred_ptr = pred;
     }
@@ -107,23 +107,23 @@ static node_t *find_pred (node_t **pred_ptr, list_t *list, uint64_t key, int hel
 }
 
 // Fast find. Do not help unlink partially removed nodes and do not return the found item's predecessor.
-uint64_t ll_lookup (list_t *list, uint64_t key) {
-    TRACE("l3", "ll_lookup: searching for key %p in list %p", key, list);
-    node_t *item = find_pred(NULL, list, key, FALSE);
+uint64_t ll_lookup (list_t *ll, uint64_t key) {
+    TRACE("l3", "ll_lookup: searching for key %p in ll %p", key, ll);
+    node_t *item = find_pred(NULL, ll, key, FALSE);
 
     // If we found an <item> matching the <key> return its value.
     return (item && item->key == key) ? item->value : DOES_NOT_EXIST;
 }
 
-// Insert the <key>, if it doesn't already exist in the <list>
-uint64_t ll_add (list_t *list, uint64_t key, uint64_t value) {
+// Insert the <key>, if it doesn't already exist in <ll>
+uint64_t ll_add (list_t *ll, uint64_t key, uint64_t value) {
     TRACE("l3", "ll_add: inserting key %p value %p", key, value);
     node_t *pred;
     node_t *item = NULL;
     do {
-        node_t *next = find_pred(&pred, list, key, TRUE);
+        node_t *next = find_pred(&pred, ll, key, TRUE);
 
-        // If a node matching <key> already exists in the list, return its value.
+        // If a node matching <key> already exists in <ll>, return its value.
         if (next != NULL && next->key == key) {
             TRACE("l3", "ll_add: there is already an item %p (value %p) with the same key", next, next->value);
             if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
@@ -143,12 +143,12 @@ uint64_t ll_add (list_t *list, uint64_t key, uint64_t value) {
     } while (1);
 }
 
-uint64_t ll_remove (list_t *list, uint64_t key) {
-    TRACE("l3", "ll_remove: removing item with key %p from list %p", key, list);
+uint64_t ll_remove (list_t *ll, uint64_t key) {
+    TRACE("l3", "ll_remove: removing item with key %p from ll %p", key, ll);
     node_t *pred;
-    node_t *item = find_pred(&pred, list, key, TRUE);
+    node_t *item = find_pred(&pred, ll, key, TRUE);
     if (item == NULL || item->key != key) {
-        TRACE("l3", "ll_remove: remove failed, an item with a matching key does not exist in the list", 0, 0);
+        TRACE("l3", "ll_remove: remove failed, an item with a matching key does not exist in the ll", 0, 0);
         return DOES_NOT_EXIST;
     }
 
@@ -166,13 +166,13 @@ uint64_t ll_remove (list_t *list, uint64_t key) {
 
     uint64_t value = item->value;
 
-    // Unlink <item> from the list.
+    // Unlink <item> from <ll>.
     TRACE("l3", "ll_remove: link item's pred %p to its successor %p", pred, next);
     node_t *other;
     if ((other = SYNC_CAS(&pred->next, item, next)) != item) {
         TRACE("l3", "ll_remove: unlink failed; pred's link changed from %p to %p", item, other);
         // By marking the item earlier, we logically removed it. It is safe to leave the item.
-        // Another thread will finish physically removing it from the list.
+        // Another thread will finish physically removing it from the ll.
         return value;
     }
 
@@ -181,9 +181,9 @@ uint64_t ll_remove (list_t *list, uint64_t key) {
     return value;
 }
 
-void ll_print (list_t *list) {
+void ll_print (list_t *ll) {
     node_t *item;
-    item = list->head->next;
+    item = ll->head->next;
     while (item) {
         printf("0x%llx ", item->key);
         fflush(stdout);
@@ -213,7 +213,7 @@ void *worker (void *arg) {
 
     for (int i = 0; i < NUM_ITERATIONS/num_threads_; ++i) {
         unsigned r = nbd_rand();
-        int key = (r & 0xF);
+        int key = r & 0xF;
         if (r & (1 << 8)) {
             ll_add(ll_, key, 1);
         } else {
diff --git a/struct/skiplist.c b/struct/skiplist.c
index 5e9066c..1d65efa 100644
--- a/struct/skiplist.c
+++ b/struct/skiplist.c
@@ -33,7 +33,7 @@ typedef struct node {
     struct node *next[];
 } node_t;
 
-typedef struct skiplist {
+typedef struct sl {
     node_t *head;
 } skiplist_t;
 
@@ -62,20 +62,21 @@ node_t *node_alloc (int level, uint64_t key, uint64_t value) {
 }
 
 skiplist_t *sl_alloc (void) {
-    skiplist_t *skiplist = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
-    skiplist->head = node_alloc(MAX_LEVEL, 0, 0);
-    memset(skiplist->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
-    return skiplist;
+    skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
+    sl->head = node_alloc(MAX_LEVEL, 0, 0);
+    memset(sl->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
+    return sl;
 }
 
-static node_t *find_preds (node_t *preds[MAX_LEVEL+1], int n, skiplist_t *skiplist, uint64_t key, int help_remove) {
-    node_t *pred = skiplist->head;
+static node_t *find_preds (node_t *preds[MAX_LEVEL+1], int n, skiplist_t *sl, uint64_t key, int help_remove) {
+    node_t *pred = sl->head;
     node_t *item = NULL;
-    TRACE("s3", "find_preds: searching for key %p in skiplist (head is %p)", key, pred);
+    TRACE("s3", "find_preds: searching for key %p in sl (head is %p)", key, pred);
+    int start_level = MAX_LEVEL;
+#if MAX_LEVEL > 2
     // Optimization for small lists. No need to traverse empty higher levels.
-    assert(MAX_LEVEL > 2);
-    int start_level = 2;
+    start_level = 2;
     while (pred->next[start_level+1] != NULL) {
         start_level += start_level - 1;
         if (EXPECT_FALSE(start_level >= MAX_LEVEL)) {
@@ -86,14 +87,15 @@ static node_t *find_preds (node_t *preds[MAX_LEVEL+1], int n, skiplist_t *skipli
     if (EXPECT_FALSE(start_level < n)) {
         start_level = n;
     }
+#endif
 
-    // Traverse the levels of the skiplist from the top level to the bottom
+    // Traverse the levels of <sl> from the top level to the bottom
     for (int level = start_level; level >= 0; --level) {
         TRACE("s3", "find_preds: level %llu", level, 0);
         item = pred->next[level];
         if (EXPECT_FALSE(IS_TAGGED(item))) {
             TRACE("s3", "find_preds: pred %p is marked for removal (item %p); retry", pred, item);
-            return find_preds(preds, n, skiplist, key, help_remove); // retry
+            return find_preds(preds, n, sl, key, help_remove); // retry
         }
         while (item != NULL) {
             node_t *next = item->next[level];
@@ -127,7 +129,7 @@ static node_t *find_preds (node_t *preds[MAX_LEVEL+1], int n, skiplist_t *skipli
             } else {
                 TRACE("s3", "find_preds: lost race to unlink from pred %p; its link changed to %p", pred, other);
                 if (IS_TAGGED(other))
-                    return find_preds(preds, n, skiplist, key, help_remove); // retry
+                    return find_preds(preds, n, sl, key, help_remove); // retry
                 item = other;
                 if (EXPECT_FALSE(item == NULL))
                     break;
@@ -154,31 +156,31 @@ static node_t *find_preds (node_t *preds[MAX_LEVEL+1], int n, skiplist_t *skipli
     if (n == -1 && item != NULL) {
         assert(preds != NULL);
         for (int level = start_level + 1; level <= item->top_level; ++level) {
-            preds[level] = skiplist->head;
+            preds[level] = sl->head;
         }
     }
     return item;
 }
 
 // Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
-uint64_t sl_lookup (skiplist_t *skiplist, uint64_t key) {
-    TRACE("s3", "sl_lookup: searching for key %p in skiplist %p", key, skiplist);
-    node_t *item = find_preds(NULL, 0, skiplist, key, FALSE);
+uint64_t sl_lookup (skiplist_t *sl, uint64_t key) {
+    TRACE("s3", "sl_lookup: searching for key %p in sl %p", key, sl);
+    node_t *item = find_preds(NULL, 0, sl, key, FALSE);
 
     // If we found an <item> matching the <key> return its value.
     return (item && item->key == key) ? item->value : DOES_NOT_EXIST;
 }
 
-// Insert the <key> if it doesn't already exist in the <skiplist>
-uint64_t sl_add (skiplist_t *skiplist, uint64_t key, uint64_t value) {
+// Insert the <key> if it doesn't already exist in <sl>
+uint64_t sl_add (skiplist_t *sl, uint64_t key, uint64_t value) {
     TRACE("s3", "sl_add: inserting key %p value %p", key, value);
     node_t *preds[MAX_LEVEL+1];
     node_t *item = NULL;
     do {
         int n = random_level();
-        node_t *next = find_preds(preds, n, skiplist, key, TRUE);
+        node_t *next = find_preds(preds, n, sl, key, TRUE);
 
-        // If a node matching <key> already exists in the skiplist, return its value.
+        // If a node matching <key> already exists in <sl>, return its value.
         if (next != NULL && next->key == key) {
             TRACE("s3", "sl_add: there is already an item %p (value %p) with the same key", next, next->value);
             if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
@@ -203,7 +205,7 @@ uint64_t sl_add (skiplist_t *skiplist, uint64_t key, uint64_t value) {
 
     } while (1);
 
-    // Insert <item> into the skiplist from the bottom level up.
+    // Insert <item> into <sl> from the bottom level up.
     for (int level = 1; level <= item->top_level; ++level) {
         do {
             node_t *pred;
@@ -215,7 +217,7 @@ uint64_t sl_add (skiplist_t *skiplist, uint64_t key, uint64_t value) {
                 break;
             if (!IS_TAGGED(next) && next->key > key) // pred's link changed
                 break;
-            find_preds(preds, item->top_level, skiplist, key, TRUE);
+            find_preds(preds, item->top_level, sl, key, TRUE);
         } while (1);
 
         do {
@@ -242,16 +244,16 @@ uint64_t sl_add (skiplist_t *skiplist, uint64_t key, uint64_t value) {
     return value;
 }
 
-uint64_t sl_remove (skiplist_t *skiplist, uint64_t key) {
-    TRACE("s3", "sl_remove: removing item with key %p from skiplist %p", key, skiplist);
+uint64_t sl_remove (skiplist_t *sl, uint64_t key) {
+    TRACE("s3", "sl_remove: removing item with key %p from sl %p", key, sl);
     node_t *preds[MAX_LEVEL+1];
-    node_t *item = find_preds(preds, -1, skiplist, key, TRUE);
+    node_t *item = find_preds(preds, -1, sl, key, TRUE);
     if (item == NULL || item->key != key) {
-        TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the skiplist", 0, 0);
+        TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the sl", 0, 0);
         return DOES_NOT_EXIST;
     }
 
-    // Mark <item> removed at each level of the skiplist from the top down. This must be atomic. If multiple threads
+    // Mark <item> removed at each level of <sl> from the top down. This must be atomic. If multiple threads
     // try to remove the same item only one of them should succeed. Marking the bottom level establishes which of
     // them succeeds.
     for (int level = item->top_level; level >= 0; --level) {
@@ -282,7 +284,7 @@ uint64_t sl_remove (skiplist_t *skiplist, uint64_t key) {
         if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) != item) {
             TRACE("s3", "sl_remove: unlink failed; pred's link changed from %p to %p", item, other);
             // By marking the item earlier, we logically removed it. It is safe to leave the item partially
-            // unlinked. Another thread will finish physically removing it from the skiplist.
+            // unlinked. Another thread will finish physically removing it from <sl>.
             return value;
         }
         --level;
@@ -293,9 +295,9 @@ uint64_t sl_remove (skiplist_t *skiplist, uint64_t key) {
     return value;
 }
 
-void sl_print (skiplist_t *skiplist) {
+void sl_print (skiplist_t *sl) {
     for (int level = MAX_LEVEL; level >= 0; --level) {
-        node_t *item = skiplist->head;
+        node_t *item = sl->head;
         if (item->next[level] == NULL)
             continue;
         printf("(%d) ", level);
@@ -309,15 +311,20 @@ void sl_print (skiplist_t *skiplist) {
     }
     printf("\n");
 
-    node_t *item = skiplist->head;
+    node_t *item = sl->head;
     while (item) {
         int is_marked = IS_TAGGED(item->next[0]);
-        printf("%s%p:0x%llx [%d]", is_marked ? "*" : "", item, item->key, item->top_level);
+        printf("%s%p:0x%llx ", is_marked ? "*" : "", item, item->key);
+        if (item != sl->head) {
+            printf("[%d]", item->top_level);
+        } else {
+            printf("[*]");
+        }
         for (int level = 1; level <= item->top_level; ++level) {
             node_t *next = (node_t *)STRIP_TAG(item->next[level]);
             is_marked = IS_TAGGED(item->next[0]);
             printf(" %p%s", next, is_marked ? "*" : "");
-            if (item == skiplist->head && item->next[level] == NULL)
+            if (item == sl->head && item->next[level] == NULL)
                 break;
         }
         printf("\n");
@@ -347,7 +354,7 @@ void *worker (void *arg) {
 
     for (int i = 0; i < NUM_ITERATIONS/num_threads_; ++i) {
         unsigned r = nbd_rand();
-        int key = (r & 0xF) + 1;
+        int key = r & 0xF;
         if (r & (1 << 8)) {
             sl_add(sl_, key, 1);
         } else {
diff --git a/test/ht_test.c b/test/ht_test.c
index 6d4a53b..a334db2 100644
--- a/test/ht_test.c
+++ b/test/ht_test.c
@@ -16,26 +16,26 @@
 typedef struct worker_data {
     int id;
     CuTest *tc;
-    hash_table_t *ht;
+    hashtable_t *ht;
     int *wait;
 } worker_data_t;
 
-int64_t ht_set (hash_table_t *ht, const char *key, uint32_t key_len, int64_t val) {
-    return ht_compare_and_set(ht, key, key_len, HT_EXPECT_WHATEVER, val);
+int64_t ht_set (hashtable_t *ht, const char *key, uint32_t key_len, int64_t val) {
+    return ht_compare_and_set(ht, key, key_len, EXPECT_WHATEVER, val);
 }
 
-int64_t ht_add (hash_table_t *ht, const char *key, uint32_t key_len, int64_t val) {
-    return ht_compare_and_set(ht, key, key_len, HT_EXPECT_NOT_EXISTS, val);
+int64_t ht_add (hashtable_t *ht, const char *key, uint32_t key_len, int64_t val) {
+    return ht_compare_and_set(ht, key, key_len, EXPECT_DOES_NOT_EXIST, val);
 }
 
-int64_t ht_replace (hash_table_t *ht, const char *key, uint32_t key_len, int64_t val) {
-    return ht_compare_and_set(ht, key, key_len, HT_EXPECT_EXISTS, val);
+int64_t ht_replace (hashtable_t *ht, const char *key, uint32_t key_len, int64_t val) {
+    return ht_compare_and_set(ht, key, key_len, EXPECT_EXISTS, val);
 }
 
 // Test some basic stuff; add a few keys, remove a few keys
 void basic_test (CuTest* tc) {
 
-    hash_table_t *ht = ht_alloc();
+    hashtable_t *ht = ht_alloc();
 
     ASSERT_EQUAL( 0, ht_count(ht) );
     ASSERT_EQUAL( DOES_NOT_EXIST, ht_add(ht,"a",2,10) );
@@ -91,7 +91,7 @@ void basic_test (CuTest* tc) {
 
 void *simple_worker (void *arg) {
     worker_data_t *wd = (worker_data_t *)arg;
-    hash_table_t *ht = wd->ht;
+    hashtable_t *ht = wd->ht;
     CuTest* tc = wd->tc;
     uint64_t d = wd->id;
     int iters = 1000000;
@@ -123,7 +123,7 @@ void simple_add_remove (CuTest* tc) {
     pthread_t thread[2];
     worker_data_t wd[2];
     int wait = 2;
-    hash_table_t *ht = ht_alloc();
+    hashtable_t *ht = ht_alloc();
 
     // In 2 threads, add & remove even & odd elements concurrently
     int i;
@@ -150,7 +150,7 @@ void simple_add_remove (CuTest* tc) {
 
 void *inserter_worker (void *arg) {
     //pthread_t thread[NUM_THREADS];
-    //hash_table_t *ht = ht_alloc();
+    //hashtable_t *ht = ht_alloc();
     return NULL;
 }
 
diff --git a/todo b/todo
index 805a9a5..4640dd6 100644
--- a/todo
+++ b/todo
@@ -5,3 +5,4 @@
 + optimize tracing code, still too much overhead
 + use NULL instead of a sentinel node in skiplist and list
 - make interfaces for all data structures consistent
+- make list and skiplist use string keys
diff --git a/txn/txn.c b/txn/txn.c
index 013388e..8f54370 100644
--- a/txn/txn.c
+++ b/txn/txn.c
@@ -29,7 +29,7 @@ typedef struct write_rec {
 struct txn {
     uint64_t rv;
     uint64_t wv;
-    hash_table_t *ht;
+    hashtable_t *ht;
     write_rec_t *writes;
     uint32_t writes_size;
     uint32_t writes_count;
@@ -50,7 +50,7 @@ update_rec_t *alloc_update_rec (void) {
     return u;
 }
 
-txn_t *txn_begin (txn_access_t access, txn_isolation_t isolation, hash_table_t *ht) {
+txn_t *txn_begin (txn_access_t access, txn_isolation_t isolation, hashtable_t *ht) {
    txn_t *txn = (txn_t *)nbd_malloc(sizeof(txn_t));
    memset(txn, 0, sizeof(txn_t));
    txn->access = access;
-- 
2.40.0