From 9b3e566281f7e2ac0683205042796958bfd8939f Mon Sep 17 00:00:00 2001
From: jdybnis
Date: Thu, 27 Nov 2008 08:20:57 +0000
Subject: [PATCH] code cleanup

---
 include/common.h   |   6 ---
 include/struct.h   |   4 +-
 makefile           |  21 +++++-----
 struct/hashtable.c |  18 ++++----
 struct/list.c      |  77 +++++++++++++++++++---------------
 struct/skiplist.c  | 100 ++++++++++++++++++++++++++-------------------
 test/ll_test.c     |   6 +--
 7 files changed, 129 insertions(+), 103 deletions(-)

diff --git a/include/common.h b/include/common.h
index d638fc9..205ea34 100644
--- a/include/common.h
+++ b/include/common.h
@@ -18,12 +18,6 @@
 
 #define CACHE_LINE_SIZE 64
 
-#define CAT(x,y) x##y
-#define ON_EXIT_SCOPE_I(x,i) \
-    inline void CAT(scope_cleanup_function_, i) (int *CAT(scope_cleanup_dummy_argument_, i)) { x; }; \
-    int CAT(scope_cleanup_dummy_variable_, i) __attribute__((cleanup(CAT(scope_cleanup_function_, i))));
-#define ON_EXIT_SCOPE(x) ON_EXIT_SCOPE_I(x,__LINE__)
-
 #define EXPECT_TRUE(x) __builtin_expect(x, 1)
 #define EXPECT_FALSE(x) __builtin_expect(x, 0)
 
diff --git a/include/struct.h b/include/struct.h
index 513c126..3bb724b 100644
--- a/include/struct.h
+++ b/include/struct.h
@@ -7,7 +7,7 @@
 #define EXPECT_EXISTS (-1)
 #define EXPECT_WHATEVER (-2)
 
-typedef struct hti *hashtable_t;
+typedef struct ht hashtable_t;
 
 hashtable_t *ht_alloc (void);
 void ht_free (hashtable_t *ht);
@@ -20,7 +20,7 @@ typedef struct ll list_t;
 
 list_t * ll_alloc (void);
 uint64_t ll_lookup (list_t *ll, const void *key_data, uint32_t key_len);
-uint64_t ll_add (list_t *ll, const void *key_data, uint32_t key_len, uint64_t value);
+uint64_t ll_cas (list_t *ll, const void *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val);
 uint64_t ll_remove (list_t *ll, const void *key_data, uint32_t key_len);
 void ll_print (list_t *ll);
 
diff --git a/makefile b/makefile
index f2d1691..901a363 100644
--- a/makefile
+++ b/makefile
@@ -6,7 +6,7 @@
 # Makefile for building programs with whole-program interfile optimization
 ###################################################################################################
 OPT := -fwhole-program -combine -O3# -DNDEBUG
-CFLAGS := -g -Wall -Werror -std=c99 -m64 -fnested-functions $(OPT)# -DENABLE_TRACE
+CFLAGS := -g -Wall -Werror -std=c99 -m64 $(OPT) #-DENABLE_TRACE
 INCS := $(addprefix -I, include)
 TESTS := output/ll_test output/sl_test output/ht_test output/rcu_test
 EXES := $(TESTS)
@@ -33,11 +33,15 @@ $(addsuffix .log, $(TESTS)) : %.log : %
 ###################################################################################################
 # Rebuild an executable if any of its source files need to be recompiled
 #
-# Note: Calculating dependancies as a side-effect of compilation is disabled. There is a bug in
+# Note: Calculating dependencies as a side-effect of compilation is disabled. There is a bug in
 # gcc. Compilation fails when -MM -MF is used and there is more than one source file.
-# -MM -MT $@.d -MF $@.d
+# Otherwise "-MM -MT $@.d -MF $@.d" should be part of the command line for the compile.
+#
+# Also, when calculating dependencies -combine is removed from CFLAGS because of another bug
+# in gcc. It chokes when -MM is used with -combine.
 ###################################################################################################
 $(EXES): output/% : output/%.d makefile
+	gcc $(CFLAGS:-combine:) $(INCS) -MM -MT $@ $($*_SRCS) > $@.d
 	gcc $(CFLAGS) $(INCS) -o $@ $($*_SRCS)
 
 ###################################################################################################
@@ -54,13 +58,10 @@ clean:
 
 .PHONY: clean test tags
 
+-include $(addsuffix .d, $(EXES))
+
 ###################################################################################################
-# Generate the dependencies lists for each executable
-#
-# Note: -combine is removed from CFLAGS because of a bug in gcc. The compiler chokes when
-# -MM is used with -combine.
+# Dummy rule for bootstrapping dependency files
 ###################################################################################################
 $(addsuffix .d, $(EXES)) : output/%.d :
-	gcc $(CFLAGS:-combine:) $(INCS) -MM -MT $@ $($*_SRCS) > $@
-
--include $(addsuffix .d, $(EXES))
+	touch $@
diff --git a/struct/hashtable.c b/struct/hashtable.c
index 0e9c57e..a332819 100644
--- a/struct/hashtable.c
+++ b/struct/hashtable.c
@@ -37,6 +37,10 @@ typedef struct hti {
     int scan;
 } hashtable_i_t;
 
+struct ht {
+    hashtable_i_t *hti;
+};
+
 static const uint64_t COPIED_VALUE = -1;
 
 static const uint64_t TOMBSTONE = STRIP_TAG(-1);
@@ -421,7 +425,7 @@ static uint64_t hti_get (hashtable_i_t *hti, uint32_t key_hash, const char *key_
 
 //
 uint64_t ht_get (hashtable_t *ht, const char *key_data, uint32_t key_len) {
-    return hti_get(*ht, murmur32(key_data, key_len), key_data, key_len);
+    return hti_get(ht->hti, murmur32(key_data, key_len), key_data, key_len);
 }
 
 //
@@ -433,7 +437,7 @@ uint64_t ht_compare_and_set (hashtable_t *ht, const char *key_data, uint32_t key
     assert(key_data);
     assert(!IS_TAGGED(new_val) && new_val != DOES_NOT_EXIST);
 
-    hashtable_i_t *hti = *ht;
+    hashtable_i_t *hti = ht->hti;
 
     // Help with an ongoing copy.
     if (EXPECT_FALSE(hti->next != NULL)) {
@@ -475,7 +479,7 @@ uint64_t ht_compare_and_set (hashtable_t *ht, const char *key_data, uint32_t key
         // Dispose of fully copied tables.
         if (hti->num_entries_copied == (1 << hti->scale) || panic) {
             assert(hti->next);
-            if (SYNC_CAS(ht, hti, hti->next) == hti) {
+            if (SYNC_CAS(&ht->hti, hti, hti->next) == hti) {
                 nbd_defer_free(hti);
             }
         }
@@ -495,7 +499,7 @@ uint64_t ht_compare_and_set (hashtable_t *ht, const char *key_data, uint32_t key
 // Remove the value in <ht> associated with <key_data>. Returns the value removed, or
 // DOES_NOT_EXIST if there was no value for that key.
 uint64_t ht_remove (hashtable_t *ht, const char *key_data, uint32_t key_len) {
-    hashtable_i_t *hti = *ht;
+    hashtable_i_t *hti = ht->hti;
     uint64_t val;
     uint32_t key_hash = murmur32(key_data, key_len);
     do {
@@ -510,7 +514,7 @@ uint64_t ht_remove (hashtable_t *ht, const char *key_data, uint32_t key_len) {
 
 // Returns the number of key-value pairs in <ht>
 uint64_t ht_count (hashtable_t *ht) {
-    hashtable_i_t *hti = *ht;
+    hashtable_i_t *hti = ht->hti;
     uint64_t count = 0;
     while (hti) {
         count += hti->count;
@@ -522,13 +526,13 @@ uint64_t ht_count (hashtable_t *ht) {
 // Allocate and initialize a new hash table.
 hashtable_t *ht_alloc (void) {
     hashtable_t *ht = nbd_malloc(sizeof(hashtable_t));
-    *ht = (hashtable_i_t *)hti_alloc(ht, MIN_SCALE);
+    ht->hti = (hashtable_i_t *)hti_alloc(ht, MIN_SCALE);
     return ht;
 }
 
 // Free <ht> and its internal structures.
 void ht_free (hashtable_t *ht) {
-    hashtable_i_t *hti = *ht;
+    hashtable_i_t *hti = ht->hti;
     do {
         for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
             assert(hti->table[i].value == COPIED_VALUE || !IS_TAGGED(hti->table[i].value));
diff --git a/struct/list.c b/struct/list.c
index 94822fc..1920e60 100644
--- a/struct/list.c
+++ b/struct/list.c
@@ -15,7 +15,7 @@
 
 typedef struct node {
     nstring_t *key;
-    uint64_t value;
+    uint64_t val;
     struct node *next;
 } node_t;
 
@@ -23,17 +23,31 @@ struct ll {
     node_t *head;
 };
 
-static node_t *node_alloc (const void *key_data, uint32_t key_len, uint64_t value) {
+static node_t *node_alloc (const void *key_data, uint32_t key_len, uint64_t val) {
     node_t *item = (node_t *)nbd_malloc(sizeof(node_t));
     memset(item, 0, sizeof(node_t));
     // If <key_len> is -1 it indicates <key_data> is an integer and not a pointer
     item->key = (key_len == (unsigned)-1)
               ? (void *)TAG_VALUE(key_data)
              : ns_alloc(key_data, key_len);
-    item->value = value;
+    item->val = val;
     return item;
 }
 
+static void node_free (node_t *item) {
+    if (!IS_TAGGED(item->key)) {
+        nbd_free(item->key);
+    }
+    nbd_free(item);
+}
+
+static void node_defer_free (node_t *item) {
+    if (!IS_TAGGED(item->key)) {
+        nbd_defer_free(item->key);
+    }
+    nbd_defer_free(item);
+}
+
 list_t *ll_alloc (void) {
     list_t *ll = (list_t *)nbd_malloc(sizeof(list_t));
     ll->head = node_alloc(" ", 0, 0);
@@ -49,7 +63,7 @@ static node_t *find_pred (node_t **pred_ptr, list_t *ll, const void *key_data, u
     while (item != NULL) {
         node_t *next = item->next;
         TRACE("l3", "find_pred: visiting item %p (next %p)", item, next);
-        TRACE("l3", "find_pred: key %p", STRIP_TAG(item->key), item->value);
+        TRACE("l3", "find_pred: key %p", STRIP_TAG(item->key), item->val);
 
         // A tag means an item is logically removed but not physically unlinked yet.
         while (EXPECT_FALSE(IS_TAGGED(next))) {
@@ -74,7 +88,7 @@ static node_t *find_pred (node_t **pred_ptr, list_t *ll, const void *key_data, u
                 TRACE("l3", "find_pred: now item is %p next is %p", item, next);
 
                 // The thread that completes the unlink should free the memory.
-                nbd_defer_free(other);
+                node_defer_free(other);
             } else {
                 TRACE("l3", "find_pred: lost race to unlink from pred %p; its link changed to %p", pred, other);
                 if (IS_TAGGED(other))
@@ -115,48 +129,47 @@ static node_t *find_pred (node_t **pred_ptr, list_t *ll, const void *key_data, u
 
 // Fast find. Do not help unlink partially removed nodes and do not return the found item's predecessor.
 uint64_t ll_lookup (list_t *ll, const void *key_data, uint32_t key_len) {
-    TRACE("l3", "ll_lookup: searching for key %p in ll %p", key_data, ll);
+    TRACE("l3", "ll_lookup: searching for key %p in list %p", key_data, ll);
     node_t *item = find_pred(NULL, ll, key_data, key_len, FALSE);
 
     // If we found an <item> matching the key return its value.
-    return item != NULL ? item->value : DOES_NOT_EXIST;
+    return item != NULL ? item->val : DOES_NOT_EXIST;
 }
 
 // Insert a new item if a matching key doesn't already exist in <ll>
-uint64_t ll_add (list_t *ll, const void *key_data, uint32_t key_len, uint64_t value) {
-    TRACE("l3", "ll_add: inserting key %p value %p", key_data, value);
-    node_t *pred;
-    node_t *item = NULL;
+uint64_t ll_cas (list_t *ll, const void *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val) {
+    assert(new_val != DOES_NOT_EXIST);
+    TRACE("l3", "ll_cas: inserting key %p val %p", key_data, new_val);
     do {
-        node_t *next = find_pred(&pred, ll, key_data, key_len, TRUE);
+        node_t *pred;
+        node_t *old_item = find_pred(&pred, ll, key_data, key_len, TRUE);
 
         // If a node matching the key already exists in <ll> return its value.
-        if (next != NULL) {
-            TRACE("l3", "ll_add: there is already an item %p (value %p) with the same key", next, next->value);
-            if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
-            return next->value;
+        if (old_item != NULL) {
+            TRACE("l3", "ll_cas: there is already an item %p (value %p) with the same key", old_item, old_item->val);
+            return old_item->val;
         }
 
-        next = pred->next;
-        TRACE("l3", "ll_add: attempting to insert item between %p and %p", pred, next);
-        if (EXPECT_TRUE(item == NULL)) { item = node_alloc(key_data, key_len, value); }
-        item->next = next;
-        node_t *other = SYNC_CAS(&pred->next, next, item);
+        TRACE("l3", "ll_cas: attempting to insert item between %p and %p", pred, pred->next);
+        node_t *new_item = node_alloc(key_data, key_len, new_val);
+        node_t *next = new_item->next = pred->next;
+        node_t *other = SYNC_CAS(&pred->next, next, new_item);
         if (other == next) {
-            TRACE("l3", "ll_add: successfully inserted item %p", item, 0);
+            TRACE("l3", "ll_cas: successfully inserted item %p", new_item, 0);
             return DOES_NOT_EXIST; // success
         }
-        TRACE("l3", "ll_add: failed to change pred's link: expected %p found %p", next, other);
+        TRACE("l3", "ll_cas: failed to change pred's link: expected %p found %p", next, other);
+        node_free(new_item);
     } while (1);
 }
 
 uint64_t ll_remove (list_t *ll, const void *key_data, uint32_t key_len) {
-    TRACE("l3", "ll_remove: removing item with key %p from ll %p", key_data, ll);
+    TRACE("l3", "ll_remove: removing item with key %p from list %p", key_data, ll);
     node_t *pred;
     node_t *item = find_pred(&pred, ll, key_data, key_len, TRUE);
     if (item == NULL) {
-        TRACE("l3", "ll_remove: remove failed, an item with a matching key does not exist in the ll", 0, 0);
+        TRACE("l3", "ll_remove: remove failed, an item with a matching key does not exist in the list", 0, 0);
         return DOES_NOT_EXIST;
     }
 
@@ -172,21 +185,21 @@ uint64_t ll_remove (list_t *ll, const void *key_data, uint32_t key_len) {
         return DOES_NOT_EXIST;
     }
 
-    uint64_t value = item->value;
+    uint64_t val = item->val;
 
-    // Unlink <item> from <ll>.
+    // Unlink <item> from <ll>. If we lose a race to another thread just back off. It is safe to leave the
+    // item logically removed for a later call (or some other thread) to physically unlink. By marking the
+    // item earlier, we logically removed it.
     TRACE("l3", "ll_remove: link item's pred %p to it's successor %p", pred, next);
     node_t *other;
     if ((other = SYNC_CAS(&pred->next, item, next)) != item) {
         TRACE("l3", "ll_remove: unlink failed; pred's link changed from %p to %p", item, other);
-        // By marking the item earlier, we logically removed it. It is safe to leave the item.
-        // Another thread will finish physically removing it from the ll.
-        return value;
+        return val;
     }
 
     // The thread that completes the unlink should free the memory.
-    nbd_defer_free(item);
-    return value;
+    node_defer_free(item);
+    return val;
 }
 
 void ll_print (list_t *ll) {
diff --git a/struct/skiplist.c b/struct/skiplist.c
index aa283fb..a3fab39 100644
--- a/struct/skiplist.c
+++ b/struct/skiplist.c
@@ -29,7 +29,7 @@
 
 typedef struct node {
     nstring_t *key;
-    uint64_t value;
+    uint64_t val;
     int top_level;
     struct node *next[];
 } node_t;
@@ -50,7 +50,7 @@ static int random_level (void) {
     return n;
 }
 
-node_t *node_alloc (int level, const void *key_data, uint32_t key_len, uint64_t value) {
+node_t *node_alloc (int level, const void *key_data, uint32_t key_len, uint64_t val) {
     assert(level >= 0 && level <= MAX_LEVEL);
     size_t sz = sizeof(node_t) + (level + 1) * sizeof(node_t *);
     node_t *item = (node_t *)nbd_malloc(sz);
@@ -59,11 +59,25 @@ node_t *node_alloc (int level, const void *key_data, uint32_t key_len, uint64_t
     item->key = (key_len == (unsigned)-1)
               ? (void *)TAG_VALUE(key_data)
              : ns_alloc(key_data, key_len);
-    item->value = value;
+    item->val = val;
     item->top_level = level;
     return item;
 }
 
+static void node_free (node_t *item) {
+    if (!IS_TAGGED(item->key)) {
+        nbd_free(item->key);
+    }
+    nbd_free(item);
+}
+
+static void node_defer_free (node_t *item) {
+    if (!IS_TAGGED(item->key)) {
+        nbd_defer_free(item->key);
+    }
+    nbd_defer_free(item);
+}
+
 skiplist_t *sl_alloc (void) {
     skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
     sl->head = node_alloc(MAX_LEVEL, " ", 0, 0);
@@ -103,7 +117,7 @@ static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl
         while (item != NULL) {
             node_t *next = item->next[level];
             TRACE("s3", "find_preds: visiting item %p (next %p)", item, next);
-            TRACE("s3", "find_preds: key %p", STRIP_TAG(item->key), item->value);
+            TRACE("s3", "find_preds: key %p", STRIP_TAG(item->key), item->val);
 
             // A tag means an item is logically removed but not physically unlinked yet.
             while (EXPECT_FALSE(IS_TAGGED(next))) {
@@ -128,7 +142,7 @@ static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl
                     TRACE("s3", "find_preds: now item is %p next is %p", item, next);
 
                     // The thread that completes the unlink should free the memory.
-                    if (level == 0) { nbd_defer_free(other); }
+                    if (level == 0) { node_defer_free(other); }
                 } else {
                     TRACE("s3", "find_preds: lost race to unlink from pred %p; its link changed to %p", pred, other);
                     if (IS_TAGGED(other))
@@ -176,85 +190,85 @@ static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl
 
 // Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
 uint64_t sl_lookup (skiplist_t *sl, const void *key_data, uint32_t key_len) {
-    TRACE("s3", "sl_lookup: searching for key %p in sl %p", key, sl);
+    TRACE("s3", "sl_lookup: searching for key %p in skiplist %p", key, sl);
     node_t *item = find_preds(NULL, NULL, 0, sl, key_data, key_len, FALSE);
 
     // If we found an <item> matching the <key> return its value.
-    return item != NULL ? item->value : DOES_NOT_EXIST;
+    return item != NULL ? item->val : DOES_NOT_EXIST;
 }
 
 // Insert the <key> if it doesn't already exist in <sl>
-uint64_t sl_add (skiplist_t *sl, const void *key_data, uint32_t key_len, uint64_t value) {
-    TRACE("s3", "sl_add: inserting key %p value %p", key_data, value);
+uint64_t sl_add (skiplist_t *sl, const void *key_data, uint32_t key_len, uint64_t val) {
+    TRACE("s3", "sl_add: inserting key %p val %p", key_data, val);
     node_t *preds[MAX_LEVEL+1];
     node_t *nexts[MAX_LEVEL+1];
-    node_t *item = NULL;
+    node_t *new_item = NULL;
     int n = random_level();
     do {
-        node_t *next = find_preds(preds, nexts, n, sl, key_data, key_len, TRUE);
+        node_t *old_item = find_preds(preds, nexts, n, sl, key_data, key_len, TRUE);
 
        // If a node matching <key> already exists in <sl>, return its value.
-        if (next != NULL) {
-            TRACE("s3", "sl_add: there is already an item %p (value %p) with the same key", nexts[0], nexts[0]->value);
-            if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
-            return nexts[0]->value;
+        if (old_item != NULL) {
+            TRACE("s3", "sl_add: there is already an item %p (value %p) with the same key", nexts[0], nexts[0]->val);
+            return nexts[0]->val;
        }
 
-        // First insert <item> into the bottom level.
-        if (EXPECT_TRUE(item == NULL)) { item = node_alloc(n, key_data, key_len, value); }
+        // First insert <new_item> into the bottom level.
+        TRACE("s3", "sl_add: attempting to insert item between %p and %p", preds[0], nexts[0]);
+        new_item = node_alloc(n, key_data, key_len, val);
         node_t *pred = preds[0];
-        item->next[0] = next = nexts[0];
-        TRACE("s3", "sl_add: attempting to insert item between %p and %p", pred, next);
-        for (int level = 1; level <= item->top_level; ++level) {
-            item->next[level] = nexts[level];
+        node_t *next = new_item->next[0] = nexts[0];
+        for (int level = 1; level <= new_item->top_level; ++level) {
+            new_item->next[level] = nexts[level];
        }
-        node_t *other = SYNC_CAS(&pred->next[0], next, item);
+        node_t *other = SYNC_CAS(&pred->next[0], next, new_item);
        if (other == next) {
-            TRACE("s3", "sl_add: successfully inserted item %p at level 0", item, 0);
+            TRACE("s3", "sl_add: successfully inserted item %p at level 0", new_item, 0);
            break; // success
        }
        TRACE("s3", "sl_add: failed to change pred's link: expected %p found %p", next, other);
+        node_free(new_item);
     } while (1);
 
-    // Insert <item> into <sl> from the bottom level up.
-    for (int level = 1; level <= item->top_level; ++level) {
+    // Insert <new_item> into <sl> from the bottom level up.
+    for (int level = 1; level <= new_item->top_level; ++level) {
        node_t *pred = preds[level];
        node_t *next = nexts[level];
        do {
            TRACE("s3", "sl_add: attempting to insert item between %p and %p", pred, next);
-            node_t *other = SYNC_CAS(&pred->next[level], next, item);
+            node_t *other = SYNC_CAS(&pred->next[level], next, new_item);
            if (other == next) {
-                TRACE("s3", "sl_add: successfully inserted item %p at level %llu", item, level);
+                TRACE("s3", "sl_add: successfully inserted item %p at level %llu", new_item, level);
                break; // success
            }
            TRACE("s3", "sl_add: failed to change pred's link: expected %p found %p", next, other);
-            find_preds(preds, nexts, item->top_level, sl, key_data, key_len, TRUE);
+            find_preds(preds, nexts, new_item->top_level, sl, key_data, key_len, TRUE);
            pred = preds[level];
            next = nexts[level];
 
-            // Update <item>'s next pointer
+            // Update <new_item>'s next pointer
            do {
                // There is no need to continue linking in the item if another thread removed it.
-                node_t *old_next = ((volatile node_t *)item)->next[level];
+                node_t *old_next = ((volatile node_t *)new_item)->next[level];
                 if (IS_TAGGED(old_next))
-                    return value;
+                    return val;
 
-                // Use a CAS so we to not inadvertantly stomp on a mark another thread placed on the item.
-                if (old_next == next || SYNC_CAS(&item->next[level], old_next, next) == old_next)
+                // Use a CAS so we do not inadvertantly stomp on a mark another thread placed on the item.
+                if (old_next == next || SYNC_CAS(&new_item->next[level], old_next, next) == old_next)
                     break;
             } while (1);
         } while (1);
     }
-    return value;
+    return val;
 }
 
 uint64_t sl_remove (skiplist_t *sl, const void *key_data, uint32_t key_len) {
-    TRACE("s3", "sl_remove: removing item with key %p from sl %p", key_data, sl);
+    TRACE("s3", "sl_remove: removing item with key %p from skiplist %p", key_data, sl);
     node_t *preds[MAX_LEVEL+1];
     node_t *item = find_preds(preds, NULL, -1, sl, key_data, key_len, TRUE);
     if (item == NULL) {
-        TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the sl", 0, 0);
+        TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the skiplist", 0, 0);
         return DOES_NOT_EXIST;
     }
 
@@ -277,9 +291,11 @@ uint64_t sl_remove (skiplist_t *sl, const void *key_data, uint32_t key_len) {
         }
     }
 
-    uint64_t value = item->value;
+    uint64_t val = item->val;
 
-    // Unlink <item> from the top down.
+    // Unlink <item> from <sl>. If we lose a race to another thread just back off. It is safe to leave the
+    // item partially unlinked for a later call (or some other thread) to physically unlink. By marking the
+    // item earlier, we logically removed it.
     int level = item->top_level;
     while (level >= 0) {
         node_t *pred = preds[level];
@@ -288,16 +304,14 @@ uint64_t sl_remove (skiplist_t *sl, const void *key_data, uint32_t key_len) {
         node_t *other = NULL;
         if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) != item) {
             TRACE("s3", "sl_remove: unlink failed; pred's link changed from %p to %p", item, other);
-            // By marking the item earlier, we logically removed it. It is safe to leave the item partially
-            // unlinked. Another thread will finish physically removing it from <sl>.
-            return value;
+            return val;
         }
         --level;
     }
 
     // The thread that completes the unlink should free the memory.
-    nbd_defer_free(item);
-    return value;
+    node_defer_free(item);
+    return val;
 }
 
 void sl_print (skiplist_t *sl) {
diff --git a/test/ll_test.c b/test/ll_test.c
index b907381..4f2e236 100644
--- a/test/ll_test.c
+++ b/test/ll_test.c
@@ -26,13 +26,13 @@ void *worker (void *arg) {
         char key_str[10];
         sprintf(key_str, "%llX", key);
         if (r & (1 << 8)) {
-            ll_add(ll_, key_str, strlen(key_str) + 1, 1);
+            ll_cas(ll_, key_str, strlen(key_str) + 1, EXPECT_WHATEVER, 1);
         } else {
             ll_remove(ll_, key_str, strlen(key_str) + 1);
         }
 #else
         if (r & (1 << 8)) {
-            ll_add(ll_, (void *)key, -1, 1);
+            ll_cas(ll_, (void *)key, -1, EXPECT_WHATEVER, 1);
         } else {
             ll_remove(ll_, (void *)key, -1);
         }
@@ -46,7 +46,7 @@ void *worker (void *arg) {
 
 int main (int argc, char **argv) {
     nbd_init();
-    //lwt_set_trace_level("m0l0");
+    lwt_set_trace_level("l3");
 
     char* program_name = argv[0];
     pthread_t thread[MAX_NUM_THREADS];
-- 
2.40.0
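
A note on the list interface this patch changes: ll_add() is replaced by ll_cas(), which takes an expected value ahead of the new value and returns whatever the key previously mapped to (DOES_NOT_EXIST after a fresh insert). The sketch below mirrors how test/ll_test.c drives the new call. It is a minimal illustration resting on a few assumptions: the declarations in include/struct.h above, DOES_NOT_EXIST as the "no value" sentinel used by the list code, and the runtime's nbd_init() having been called first. The helper add_if_absent() is hypothetical, not part of the repository.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "struct.h"

// Hypothetical helper: insert key_str with value 1 if no matching key is present.
// EXPECT_WHATEVER means "do not condition the call on the current value", which is
// exactly how test/ll_test.c exercises the new interface. The return value is
// whatever the list held before the call.
static uint64_t add_if_absent (list_t *ll, const char *key_str) {
    return ll_cas(ll, key_str, strlen(key_str) + 1, EXPECT_WHATEVER, 1);
}

int main (void) {
    // NOTE: the real tests call nbd_init() before touching any structure; that setup
    // (and its header) is omitted here for brevity.
    list_t *ll = ll_alloc();
    uint64_t old = add_if_absent(ll, "example");                     // expected: DOES_NOT_EXIST
    printf("previous value: %llu\n", (unsigned long long)old);
    uint64_t removed = ll_remove(ll, "example", strlen("example") + 1);
    printf("removed value: %llu\n", (unsigned long long)removed);    // expected: 1
    return 0;
}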
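
The comments added to ll_remove() and sl_remove() describe the two-phase removal that all of the lock-free structures here share: tag the victim's next pointer (logical removal), then make one attempt to swing the predecessor's link past it (physical unlink), and simply back off if that CAS loses a race, since a later traversal will finish the job. The stand-alone sketch below reduces the idea to a single-level list and uses GCC's __sync_val_compare_and_swap directly where the repository has its SYNC_CAS, TAG_VALUE, IS_TAGGED, and STRIP_TAG macros; it is only an illustration of the idea under those assumptions, not the repository's implementation.

#include <stdint.h>
#include <stdio.h>

typedef struct lnode {
    int val;
    struct lnode *next;
} lnode_t;

// Low-bit tagging of next pointers, standing in for the repo's tagging macros.
#define TAG(p)    ((lnode_t *)((uintptr_t)(p) | 1))
#define STRIP(p)  ((lnode_t *)((uintptr_t)(p) & ~(uintptr_t)1))
#define IS_TAG(p) ((uintptr_t)(p) & 1)

// Logically remove item, then try once to physically unlink it from pred.
// Losing the second CAS is fine: the mark already made the removal visible and
// whoever traverses the list next can complete the unlink (and free the node).
static int remove_after (lnode_t *pred, lnode_t *item) {
    lnode_t *next;
    do {                                   // phase 1: mark item->next
        next = item->next;
        if (IS_TAG(next))
            return 0;                      // another thread already removed it
    } while (__sync_val_compare_and_swap(&item->next, next, TAG(next)) != next);
    // phase 2: best-effort physical unlink
    __sync_val_compare_and_swap(&pred->next, item, STRIP(next));
    return 1;
}

int main (void) {
    lnode_t c = { 3, NULL };
    lnode_t b = { 2, &c };
    lnode_t a = { 1, &b };
    printf("removed b: %d\n", remove_after(&a, &b));   // 1
    printf("a skips to c: %d\n", a.next == &c);        // 1
    return 0;
}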
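
Finally, hashtable_t stops being a typedef for a bare hti pointer and becomes a one-field wrapper struct, so the published table lives in ht->hti and retiring a fully copied table becomes a CAS on &ht->hti rather than on the handle itself. The generic sketch below shows that publish-once pattern; the type and function names are illustrative only, and SYNC_CAS is assumed to behave like the GCC builtin __sync_val_compare_and_swap used here.

#include <stdio.h>

typedef struct { int scale; } table_t;          // stand-in for hashtable_i_t
typedef struct { table_t *internal; } handle_t; // stand-in for struct ht

// Swing h->internal from old_tab to new_tab exactly once, even if several threads
// that helped finish a copy race to retire the old table. Only the winner should
// reclaim old_tab (nbd_defer_free in the patch).
static int publish_table (handle_t *h, table_t *old_tab, table_t *new_tab) {
    return __sync_val_compare_and_swap(&h->internal, old_tab, new_tab) == old_tab;
}

int main (void) {
    table_t small = { .scale = 4 };
    table_t big   = { .scale = 5 };
    handle_t h    = { .internal = &small };
    printf("first publish:  %d\n", publish_table(&h, &small, &big)); // 1: we won
    printf("second publish: %d\n", publish_table(&h, &small, &big)); // 0: already done
    return 0;
}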