From: jdybnis Date: Tue, 13 Jan 2009 02:21:20 +0000 (+0000) Subject: port to Ubuntu 8.10 x86-64 w/ gcc 4.3.2 X-Git-Url: https://pd.if.org/git/?p=nbds;a=commitdiff_plain;h=2b107655a1df8ae7703b44ef8cf1430a7250a5c3

port to Ubuntu 8.10 x86-64 w/ gcc 4.3.2

use system malloc by default until portability issues are worked out
fix unlink bug in skiplist
---
diff --git a/include/common.h b/include/common.h
index c644bb5..8988ed0 100644
--- a/include/common.h
+++ b/include/common.h
@@ -40,10 +40,12 @@ #define STRIP_TAG(v, tag) ((v) & ~tag)
#define DOES_NOT_EXIST 0
-#define ERROR_INVALID_OPTION (-1)
-#define ERROR_INVALID_ARGUMENT (-2)
+#define ERROR_INVALID_OPTION (-1)
+#define ERROR_INVALID_ARGUMENT (-2)
#define ERROR_UNSUPPORTED_FEATURE (-3)
-#define ERROR_TXN_NOT_RUNNING (-4)
+#define ERROR_TXN_NOT_RUNNING (-4)
+
+#define VOLATILE(x) *((volatile typeof(x) *)&x)
typedef unsigned long long uint64_t;
typedef unsigned int uint32_t;
diff --git a/include/lwt.h b/include/lwt.h
index d31c9eb..5c0b6a4 100644
--- a/include/lwt.h
+++ b/include/lwt.h
@@ -13,10 +13,10 @@ #define TRACE(flag, format, v1, v2) lwt_trace(flag, format, (size_t)(v1), (size_t)(v2))
#endif
-#ifdef NDEBUG
-#define ASSERT(x)
+#ifndef NDEBUG
+#define ASSERT(x) do { if (!(x)) { lwt_halt(); assert(!#x); } } while (0)
#else
-#define ASSERT(x) if (!(x)) { lwt_halt(); assert(!#x); }
+#define ASSERT(x) do { } while (0)
#endif

// Dump trace records to <file>. The file should be post-processed with "sort" before viewing.
diff --git a/include/murmur.h b/include/murmur.h
index 6bc3694..cf54201 100644
--- a/include/murmur.h
+++ b/include/murmur.h
@@ -73,10 +73,8 @@ static inline uint32_t murmur32_8b (uint64_t key)
// Initialize the hash to a 'random' value
uint32_t h = 8;
-    const unsigned char *data = (const unsigned char *)&key;
-
-    uint32_t k1 = *(uint32_t *)data;
-    uint32_t k2 = *(uint32_t *)(data + 4);
+    uint32_t k1 = (uint32_t)(key >> 32);
+    uint32_t k2 = (uint32_t)key;
k1 *= m;
k1 ^= k1 >> r;
diff --git a/makefile b/makefile
index 8b0738b..b7649ed 100644
--- a/makefile
+++ b/makefile
@@ -4,13 +4,15 @@
###################################################################################################
# Makefile for building programs with whole-program interfile optimization
###################################################################################################
-OPT := -O3 #-DNDEBUG #-fwhole-program -combine
-CFLAGS := -g -Wall -Werror -std=c99 $(OPT) -lpthread #-DNBD32 -DENABLE_TRACE #-DLIST_USE_HAZARD_POINTER #-DTEST_STRING_KEYS #
-INCS := $(addprefix -I, include)
-TESTS := output/map_test2 output/map_test1 output/txn_test output/rcu_test output/haz_test
-EXES := $(TESTS)
+CFLAGS0 := -g -Wall -Werror -std=c99 -lpthread
+CFLAGS1 := $(CFLAGS0) -O3 #-DNDEBUG #-DENABLE_TRACE #-fwhole-program -combine
+CFLAGS := $(CFLAGS1) -DUSE_SYSTEM_MALLOC #-DLIST_USE_HAZARD_POINTER #-DTEST_STRING_KEYS #-DNBD32
+INCS := $(addprefix -I, include)
+TESTS := output/rcu_test output/haz_test output/map_test2 output/map_test1 output/txn_test
+EXES := $(TESTS)
-RUNTIME_SRCS := runtime/runtime.c runtime/rcu.c runtime/lwt.c runtime/mem.c datatype/nstring.c runtime/hazard.c
+RUNTIME_SRCS := runtime/runtime.c runtime/rcu.c runtime/lwt.c runtime/mem.c datatype/nstring.c \
+                runtime/hazard.c
MAP_SRCS := map/map.c map/list.c map/skiplist.c map/hashtable.c
haz_test_SRCS := $(RUNTIME_SRCS) test/haz_test.c
@@ -48,7 +50,7 @@ asm: $(addsuffix .s, $(EXES))
$(addsuffix .s, $(EXES)): output/%.s : output/%.d makefile
	gcc $(CFLAGS:-combine:) $(INCS) -MM -MT $@ $($*_SRCS) >
output/$*.d
-	gcc $(CFLAGS) $(INCS) -S -o $@.temp $($*_SRCS)
+	gcc $(CFLAGS) $(INCS) -combine -S -o $@.temp $($*_SRCS)
	grep -v "^L[BFM]\|^LCF" $@.temp > $@
	rm $@.temp
diff --git a/map/hashtable.c b/map/hashtable.c
index 88bf631..231384c 100644
--- a/map/hashtable.c
+++ b/map/hashtable.c
@@ -34,6 +34,9 @@ typedef struct hti {
volatile entry_t *table;
hashtable_t *ht; // parent ht;
struct hti *next;
+#ifdef USE_SYSTEM_MALLOC
+    void *unaligned_table_ptr; // system malloc doesn't guarantee cache-line alignment
+#endif
unsigned scale;
int max_probe;
int ref_count;
@@ -136,13 +139,16 @@ static volatile entry_t *hti_lookup (hti_t *hti, map_key_t key, uint32_t key_has
static hti_t *hti_alloc (hashtable_t *parent, int scale) {
hti_t *hti = (hti_t *)nbd_malloc(sizeof(hti_t));
memset(hti, 0, sizeof(hti_t));
+    hti->scale = scale;
size_t sz = sizeof(entry_t) * (1 << scale);
-    entry_t *table = nbd_malloc(sz);
-    memset(table, 0, sz);
-    hti->table = table;
-
-    hti->scale = scale;
+#ifdef USE_SYSTEM_MALLOC
+    hti->unaligned_table_ptr = nbd_malloc(sz + CACHE_LINE_SIZE - 1);
+    hti->table = (void *)(((size_t)hti->unaligned_table_ptr + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1));
+#else
+    hti->table = nbd_malloc(sz);
+#endif
+    memset((void *)hti->table, 0, sz);
// When searching for a key probe a maximum of 1/4 of the buckets up to 1000 buckets.
hti->max_probe = ((1 << (hti->scale - 2)) / ENTRIES_PER_BUCKET) + 4;
@@ -178,7 +184,11 @@ static void hti_start_copy (hti_t *hti) {
if (old_next != NULL) {
// Another thread beat us to it.
TRACE("h0", "hti_start_copy: lost race to install new hti; found %p", old_next, 0);
-        nbd_free(next);
+#ifdef USE_SYSTEM_MALLOC
+        nbd_free(next->unaligned_table_ptr);
+#else
+        nbd_free((void *)next->table);
+#endif
return;
}
TRACE("h0", "hti_start_copy: new hti %p scale %llu", next, next->scale);
@@ -508,7 +518,11 @@ static void hti_defer_free (hti_t *hti) {
rcu_defer_free(GET_PTR(key));
}
}
+#ifdef USE_SYSTEM_MALLOC
+    rcu_defer_free(hti->unaligned_table_ptr);
+#else
rcu_defer_free((void *)hti->table);
+#endif
rcu_defer_free(hti);
}
diff --git a/map/skiplist.c b/map/skiplist.c
index 6e02e12..70cb6e5 100644
--- a/map/skiplist.c
+++ b/map/skiplist.c
@@ -33,7 +33,7 @@ typedef struct node {
map_key_t key;
map_val_t val;
int top_level;
-    markable_t next[];
+    markable_t next[1];
} node_t;

struct sl_iter {
@@ -43,6 +43,7 @@ struct sl {
node_t *head;
const datatype_t *key_type;
+    int high_water; // max level of any item in the list
};

// Marking the <next> field of a node logically removes it from the list
@@ -60,30 +61,27 @@ static inline node_t * STRIP_MARK(markable_t x) { return ((node_t *)STRIP_TAG(x,
static int random_level (void) {
unsigned r = nbd_rand();
-    if (r & 1)
-        return 0;
-#if MAX_LEVEL < 31
-    r |= 1 << (MAX_LEVEL+1);
-#endif
-    int n = __builtin_ctz(r)-1;
-    assert(n <= MAX_LEVEL);
+    int n = __builtin_ctz(r) / 2;
+    if (n > MAX_LEVEL) { n = MAX_LEVEL; }
return n;
}

static node_t *node_alloc (int level, map_key_t key, map_val_t val) {
assert(level >= 0 && level <= MAX_LEVEL);
-    size_t sz = sizeof(node_t) + (level + 1) * sizeof(node_t *);
+    size_t sz = sizeof(node_t) + level * sizeof(node_t *);
node_t *item = (node_t *)nbd_malloc(sz);
memset(item, 0, sz);
item->key = key;
item->val = val;
item->top_level = level;
+    TRACE("s2", "node_alloc: new node %p (%llu levels)", item, level);
return item;
}

skiplist_t *sl_alloc (const datatype_t *key_type) {
skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
sl->key_type = key_type;
+    sl->high_water = 0;
sl->head = node_alloc(MAX_LEVEL, 0, 0);
memset(sl->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
return sl;
@@ -118,28 +116,20 @@ static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl
node_t *item = NULL;
TRACE("s2", "find_preds: searching for key %p in skiplist (head is %p)", key, pred);
int d = 0;
-    int start_level = MAX_LEVEL;
-#if MAX_LEVEL > 2
-    // Optimization for small lists. No need to traverse empty higher levels.
-    start_level = 2;
-    while (pred->next[start_level+1] != DOES_NOT_EXIST) {
-        start_level += start_level - 1;
-        if (EXPECT_FALSE(start_level >= MAX_LEVEL)) {
-            start_level = MAX_LEVEL;
-            break;
-        }
-    }
+    int start_level = sl->high_water;
if (EXPECT_FALSE(start_level < n)) {
start_level = n;
}
-#endif

// Traverse the levels of <sl> from the top level to the bottom
for (int level = start_level; level >= 0; --level) {
-        TRACE("s3", "find_preds: level %llu", level, 0);
markable_t next = pred->next[level];
+        if (next == DOES_NOT_EXIST && level > n)
+            continue;
+        TRACE("s3", "find_preds: traversing level %p starting at %p", level, pred);
if (EXPECT_FALSE(HAS_MARK(next))) {
TRACE("s2", "find_preds: pred %p is marked for removal (next %p); retry", pred, next);
+            ASSERT(level == pred->top_level || HAS_MARK(pred->next[level+1]));
return find_preds(preds, succs, n, sl, key, help_remove); // retry
}
item = GET_NODE(next);
@@ -148,45 +138,35 @@ static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl

// A tag means an item is logically removed but not physically unlinked yet.
while (EXPECT_FALSE(HAS_MARK(next))) {
-
-            // Skip over logically removed items.
+            TRACE("s3", "find_preds: found marked item %p (next is %p)", item, next);
if (!help_remove) {
+
+                // Skip over logically removed items.
item = STRIP_MARK(next);
if (EXPECT_FALSE(item == NULL))
break;
next = item->next[level];
-                TRACE("s3", "find_preds: skipping marked item %p (next is 0x%llx)", item, next);
-                continue;
-            }
+            } else {

-            // Unlink logically removed items.
-            TRACE("s3", "find_preds: unlinking marked item %p; next is 0x%llx", item, next);
-            markable_t other = SYNC_CAS(&pred->next[level], item, STRIP_MARK(next));
-            if (other == (markable_t)item) {
-                item = STRIP_MARK(next);
-                next = (item != NULL) ? item->next[level] : DOES_NOT_EXIST;
-                TRACE("s3", "find_preds: now the current item is %p next is 0x%llx", item, next);
-
-                // The thread that completes the unlink should free the memory.
-                if (level == 0) {
-                    node_t *unlinked = GET_NODE(other);
-                    if (sl->key_type != NULL) {
-                        rcu_defer_free((void *)unlinked->key);
-                    }
-                    rcu_defer_free(unlinked);
+                // Unlink logically removed items.
+                markable_t other = SYNC_CAS(&pred->next[level], item, STRIP_MARK(next));
+                if (other == (markable_t)item) {
+                    TRACE("s3", "find_preds: unlinked item from pred %p", pred, 0);
+                    item = STRIP_MARK(next);
+                } else {
+                    TRACE("s3", "find_preds: lost race to unlink item, pred %p's link changed to %p", pred, other);
+                    if (HAS_MARK(other))
+                        return find_preds(preds, succs, n, sl, key, help_remove); // retry
+                    item = GET_NODE(other);
}
-            } else {
-                TRACE("s3", "find_preds: lost race to unlink item %p from pred %p", item, pred);
-                TRACE("s3", "find_preds: pred's link changed to %p", other, 0);
-                if (HAS_MARK(other))
-                    return find_preds(preds, succs, n, sl, key, help_remove); // retry
-                item = GET_NODE(other);
next = (item != NULL) ? item->next[level] : DOES_NOT_EXIST;
}
}

-        if (EXPECT_FALSE(item == NULL))
+        if (EXPECT_FALSE(item == NULL)) {
+            TRACE("s3", "find_preds: past the last item in the skiplist", 0, 0);
break;
+        }

TRACE("s4", "find_preds: visiting item %p (next is %p)", item, next);
TRACE("s4", "find_preds: key %p val %p", STRIP_MARK(item->key), item->val);
@@ -197,15 +177,15 @@ static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl
d = sl->key_type->cmp((void *)item->key, (void *)key);
}

-        if (d >= 0) {
-            TRACE("s4", "find_preds: found pred %p item %p", pred, item);
+        if (d >= 0)
break;
-        }

pred = item;
item = GET_NODE(next);
}

+    TRACE("s3", "find_preds: found pred %p next %p", pred, item);
+
// The cast to unsigned is for the case when n is -1.
if ((unsigned)level <= (unsigned)n) {
if (preds != NULL) {
@@ -217,14 +197,14 @@ static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl
}
}

-    // fill in empty levels
-    if (n == -1 && item != NULL) {
-        assert(item->top_level <= MAX_LEVEL);
-        for (int level = start_level + 1; level <= item->top_level; ++level) {
-            preds[level] = sl->head;
-        }
-    }
-
+    // fill in empty levels
+    if (n == -1 && item != NULL && preds != NULL) {
+        assert(item->top_level <= MAX_LEVEL);
+        for (int level = start_level + 1; level <= item->top_level; ++level) {
+            preds[level] = sl->head;
+        }
+    }
+
if (d == 0) {
TRACE("s2", "find_preds: found matching item %p in skiplist, pred is %p", item, pred);
return item;
@@ -233,6 +213,68 @@ static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl
return NULL;
}

+static void sl_unlink (skiplist_t *sl, map_key_t key) {
+    node_t *pred = sl->head;
+    node_t *item = NULL;
+    TRACE("s2", "sl_unlink: unlinking marked item with key %p", key, 0);
+    int d = 0;
+
+    // Traverse the levels of <sl> from the top level to the bottom
+    for (int level = sl->high_water; level >= 0; --level) {
+        markable_t next = pred->next[level];
+        if (next == DOES_NOT_EXIST)
+            continue;
+        TRACE("s3", "sl_unlink: traversing level %p starting at %p", level, pred);
+        if (EXPECT_FALSE(HAS_MARK(next))) {
+            TRACE("s2", "sl_unlink: lost a race; pred %p is marked for removal (next %p); retry", pred, next);
+            ASSERT(level == pred->top_level || HAS_MARK(pred->next[level+1]));
+            return sl_unlink(sl, key); // retry
+        }
+        item = GET_NODE(next);
+        while (item != NULL) {
+            next = item->next[level];
+
+            while (HAS_MARK(next)) {
+                TRACE("s3", "sl_unlink: found marked item %p (next is %p)", item, next);
+
+                markable_t other = SYNC_CAS(&pred->next[level], item, STRIP_MARK(next));
+                if (other == (markable_t)item) {
+                    TRACE("s3", "sl_unlink: unlinked item from pred %p", pred, 0);
+                    item = STRIP_MARK(next);
+                } else {
+                    TRACE("s3", "sl_unlink: lost race to unlink item, pred %p's link changed to %p", pred, other);
+                    if (HAS_MARK(other))
+                        return sl_unlink(sl, key); // retry
+                    item = GET_NODE(other);
+                }
+                next = (item != NULL) ?
item->next[level] : DOES_NOT_EXIST; + } + + if (EXPECT_FALSE(item == NULL)) { + TRACE("s3", "sl_unlink: past the last item in the skiplist", 0, 0); + break; + } + + TRACE("s4", "sl_unlink: visiting item %p (next is %p)", item, next); + TRACE("s4", "sl_unlink: key %p val %p", STRIP_MARK(item->key), item->val); + + if (EXPECT_TRUE(sl->key_type == NULL)) { + d = item->key - key; + } else { + d = sl->key_type->cmp((void *)item->key, (void *)key); + } + + if (d > 0) + break; + + pred = item; + item = GET_NODE(next); + } + + TRACE("s3", "sl_unlink: at pred %p next %p", pred, item); + } +} + // Fast find that does not help unlink partially removed nodes and does not return the node's predecessors. map_val_t sl_lookup (skiplist_t *sl, map_key_t key) { TRACE("s1", "sl_lookup: searching for key %p in skiplist %p", key, sl); @@ -262,6 +304,35 @@ map_key_t sl_min_key (skiplist_t *sl) { return DOES_NOT_EXIST; } +static map_val_t update_item (node_t *item, map_val_t expectation, map_val_t new_val) { + map_val_t old_val = item->val; + + // If the item's value is DOES_NOT_EXIST it means another thread removed the node out from under us. + if (EXPECT_FALSE(old_val == DOES_NOT_EXIST)) { + TRACE("s2", "update_item: lost a race to another thread removing the item. retry", 0, 0); + return DOES_NOT_EXIST; // retry + } + + if (EXPECT_FALSE(expectation == CAS_EXPECT_DOES_NOT_EXIST)) { + TRACE("s1", "update_item: found an item %p in the skiplist that matched the key. the expectation was " + "not met, the skiplist was not changed", item, old_val); + return old_val; // failure + } + + // Use a CAS and not a SWAP. If the CAS fails it means another thread removed the node or updated its + // value. If another thread removed the node but it is not unlinked yet and we used a SWAP, we could + // replace DOES_NOT_EXIST with our value. Then another thread that is updating the value could think it + // succeeded and return our value even though it should return DOES_NOT_EXIST. + if (old_val == SYNC_CAS(&item->val, old_val, new_val)) { + TRACE("s1", "update_item: the CAS succeeded. updated the value of the item", 0, 0); + return old_val; // success + } + TRACE("s2", "update_item: lost a race. the CAS failed. another thread changed the item's value", 0, 0); + + // retry + return update_item(item, expectation, new_val); // tail call +} + map_val_t sl_cas (skiplist_t *sl, map_key_t key, map_val_t expectation, map_val_t new_val) { TRACE("s1", "sl_cas: key %p skiplist %p", key, sl); TRACE("s1", "sl_cas: expectation %p new value %p", expectation, new_val); @@ -271,98 +342,101 @@ map_val_t sl_cas (skiplist_t *sl, map_key_t key, map_val_t expectation, map_val_ node_t *nexts[MAX_LEVEL+1]; node_t *new_item = NULL; int n = random_level(); - do { - node_t *old_item = find_preds(preds, nexts, n, sl, key, TRUE); - if (old_item == NULL) { - - // There was not an item in the skiplist that matches the key. - if (EXPECT_FALSE(expectation != CAS_EXPECT_DOES_NOT_EXIST && expectation != CAS_EXPECT_WHATEVER)) { - TRACE("l1", "sl_cas: the expectation was not met, the skiplist was not changed", 0, 0); - return DOES_NOT_EXIST; // failure - } + node_t *old_item = find_preds(preds, nexts, n, sl, key, TRUE); - // First insert into the bottom level. - TRACE("s3", "sl_cas: attempting to insert item between %p and %p", preds[0], nexts[0]); - map_key_t new_key = sl->key_type == NULL ? 
key : (map_key_t)sl->key_type->clone((void *)key);
-            new_item = node_alloc(n, new_key, new_val);
-            node_t *pred = preds[0];
-            markable_t next = new_item->next[0] = (markable_t)nexts[0];
-            for (int level = 1; level <= new_item->top_level; ++level) {
-                new_item->next[level] = (markable_t)nexts[level];
-            }
-            markable_t other = SYNC_CAS(&pred->next[0], next, new_item);
-            if (other == next) {
-                TRACE("s3", "sl_cas: successfully inserted item %p at level 0", new_item, 0);
-                break; // success
-            }
-            TRACE("s3", "sl_cas: failed to change pred's link: expected %p found %p", next, other);
-            if (sl->key_type != NULL) {
-                nbd_free((void *)new_key);
-            }
-            nbd_free(new_item);
-            continue;
-        }
+    // If there is already an item in the skiplist that matches the key just update its value.
+    if (old_item != NULL) {
+        map_val_t ret_val = update_item(old_item, expectation, new_val);
+        if (ret_val != DOES_NOT_EXIST)
+            return ret_val;

-        // Found an item in the skiplist that matches the key.
-        map_val_t old_item_val = old_item->val;
-        do {
-            // If the item's value is DOES_NOT_EXIST it means another thread removed the node out from under us.
-            if (EXPECT_FALSE(old_item_val == DOES_NOT_EXIST)) {
-                TRACE("s2", "sl_cas: lost a race, found an item but another thread removed it. retry", 0, 0);
-                break; // retry
-            }
+        // If we lose a race with a thread removing the item we tried to update then we have to retry.
+        return sl_cas(sl, key, expectation, new_val); // tail call
+    }

-            if (EXPECT_FALSE(expectation == CAS_EXPECT_DOES_NOT_EXIST)) {
-                TRACE("s1", "sl_cas: found an item %p in the skiplist that matched the key. the expectation was "
-                      "not met, the skiplist was not changed", old_item, old_item_val);
-                return old_item_val; // failure
-            }
+    if (EXPECT_FALSE(expectation != CAS_EXPECT_DOES_NOT_EXIST && expectation != CAS_EXPECT_WHATEVER)) {
+        TRACE("l1", "sl_cas: the expectation was not met, the skiplist was not changed", 0, 0);
+        return DOES_NOT_EXIST; // failure, the caller expected an item for the <key> to already exist
+    }

-            // Use a CAS and not a SWAP. If the node is in the process of being removed and we used a SWAP, we could
-            // replace DOES_NOT_EXIST with our value. Then another thread that is updating the value could think it
-            // succeeded and return our value even though we indicated that the node has been removed. If the CAS
-            // fails it means another thread either removed the node or updated its value.
-            map_val_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val);
-            if (ret_val == old_item_val) {
-                TRACE("s1", "sl_cas: the CAS succeeded. updated the value of the item", 0, 0);
-                return ret_val; // success
-            }
-            TRACE("s2", "sl_cas: lost a race. the CAS failed. another thread changed the item's value", 0, 0);
+    // Create a new node and insert it into the skiplist.
+    TRACE("s3", "sl_cas: attempting to insert a new item between %p and %p", preds[0], nexts[0]);
+    map_key_t new_key = sl->key_type == NULL ? key : (map_key_t)sl->key_type->clone((void *)key);
+    if (n > sl->high_water) {
+        n = sl->high_water + 1;
+        int x = SYNC_ADD(&sl->high_water, 1);
+        x = x;
+        TRACE("s2", "sl_cas: incremented high water mark to %p", x, 0);
+    }
+    new_item = node_alloc(n, new_key, new_val);

-            old_item_val = ret_val;
-        } while (1);
-    } while (1);
+    // Set <new_item>'s next pointers to their proper values
+    markable_t next = new_item->next[0] = (markable_t)nexts[0];
+    for (int level = 1; level <= new_item->top_level; ++level) {
+        new_item->next[level] = (markable_t)nexts[level];
+    }
+
+    // Link <new_item> into <sl> from the bottom level up. After <new_item> is inserted into the bottom level
+    // it is officially part of the skiplist.
+    node_t *pred = preds[0];
+    markable_t other = SYNC_CAS(&pred->next[0], next, new_item);
+    if (other != next) {
+        TRACE("s3", "sl_cas: failed to change pred's link: expected %p found %p", next, other);
+
+        // Lost a race to another thread modifying the skiplist. Free the new item we allocated and retry.
+        if (sl->key_type != NULL) {
+            nbd_free((void *)new_key);
+        }
+        nbd_free(new_item);
+        return sl_cas(sl, key, expectation, new_val); // tail call
+    }
+
+    TRACE("s3", "sl_cas: successfully inserted a new item %p at the bottom level", new_item, 0);

-    // Link <new_item> into <sl> from the bottom up.
for (int level = 1; level <= new_item->top_level; ++level) {
-        node_t *pred = preds[level];
-        markable_t next = (markable_t)nexts[level];
+        TRACE("s3", "sl_cas: inserting the new item %p at level %p", new_item, level);
do {
-            TRACE("s3", "sl_cas: attempting to insert item between %p and %p", pred, next);
-            markable_t other = SYNC_CAS(&pred->next[level], next, (markable_t)new_item);
-            if (other == next) {
-                TRACE("s3", "sl_cas: successfully inserted item %p at level %llu", new_item, level);
-                break; // success
-            }
-            TRACE("s3", "sl_cas: failed to change pred's link: expected %p found %p", next, other);
+            node_t * pred = preds[level];
+            ASSERT(new_item->next[level]==(markable_t)nexts[level] || new_item->next[level]==MARK_NODE(nexts[level]));
+            TRACE("s3", "sl_cas: attempting to insert the new item between %p and %p", pred, nexts[level]);
+
+            markable_t other = SYNC_CAS(&pred->next[level], nexts[level], (markable_t)new_item);
+            if (other == (markable_t)nexts[level])
+                break; // successfully linked into the skiplist at the current <level>
+            TRACE("s3", "sl_cas: lost a race. failed to change pred's link. expected %p found %p", nexts[level], other);
+
+            // Find <new_item>'s new preds and nexts.
find_preds(preds, nexts, new_item->top_level, sl, key, TRUE);
-            pred = preds[level];
-            next = (markable_t)nexts[level];
-
-            // Update <new_item>'s next pointer
-            do {
-                // There in no need to continue linking in the item if another thread removed it.
-                markable_t old_next = ((volatile node_t *)new_item)->next[level];
-                if (HAS_MARK(old_next))
-                    return DOES_NOT_EXIST; // success
-
-                // Use a CAS so we do not inadvertantly stomp on a mark another thread placed on the item.
-                if (old_next == next || SYNC_CAS(&new_item->next[level], old_next, next) == old_next)
-                    break;
-            } while (1);
+
+            for (int i = level; i <= new_item->top_level; ++i) {
+                markable_t old_next = new_item->next[i];
+                if ((markable_t)nexts[i] == old_next)
+                    continue;
+
+                // Update <new_item>'s inconsistent next pointer before trying again. Use a CAS so if another thread
+                // is trying to remove the new item concurrently we do not stomp on the mark it places on the item.
+                TRACE("s3", "sl_cas: attempting to update the new item's link from %p to %p", old_next, nexts[i]);
+                other = SYNC_CAS(&new_item->next[i], old_next, nexts[i]);
+                ASSERT(other == old_next || other == MARK_NODE(old_next));
+
+                // If another thread is removing this item we can stop linking it into the skiplist
+                if (HAS_MARK(other)) {
+                    sl_unlink(sl, key); // see comment below
+                    return DOES_NOT_EXIST;
+                }
+            }
} while (1);
}
-    return DOES_NOT_EXIST; // success
+
+    // In case another thread was in the process of removing the <new_item> while we were adding it, we have to
+    // make sure it is completely unlinked before we return. We might have lost a race and inserted the new item
+    // at some level after the other thread thought it was fully removed. That is a problem because once a thread
+    // thinks it has completely unlinked a node, it queues the node to be freed.
+    if (HAS_MARK(new_item->next[new_item->top_level])) {
+        sl_unlink(sl, key);
+    }
+
+    return DOES_NOT_EXIST; // success, inserted a new item
}

map_val_t sl_remove (skiplist_t *sl, map_key_t key) {
@@ -374,79 +448,45 @@ map_val_t sl_remove (skiplist_t *sl, map_key_t key) {
return DOES_NOT_EXIST;
}

-    // Mark and unlink <item> at each level of <sl> from the top down. If multiple threads try to concurrently remove
+    // Mark <item> at each level of <sl> from the top down. If multiple threads try to concurrently remove
// the same item only one of them should succeed. Marking the bottom level establishes which of them succeeds.
-    for (int level = item->top_level; level > 0; --level) {
+    markable_t old_next = 0;
+    for (int level = item->top_level; level >= 0; --level) {
markable_t next;
-        markable_t old_next = item->next[level];
+        old_next = item->next[level];
do {
+            TRACE("s3", "sl_remove: marking item at level %p (next %p)", level, old_next);
next = old_next;
old_next = SYNC_CAS(&item->next[level], next, MARK_NODE((node_t *)next));
if (HAS_MARK(old_next)) {
-                TRACE("s2", "sl_remove: %p is already marked for removal by another thread at level %llu", item, level);
+                TRACE("s2", "sl_remove: %p is already marked for removal by another thread (next %p)", item, old_next);
+                if (level == 0)
+                    return DOES_NOT_EXIST;
break;
}
} while (next != old_next);
-
-        node_t *pred = preds[level];
-        TRACE("s2", "sl_remove: linking the item's pred %p to the item's successor %p", pred, STRIP_MARK(next));
-        markable_t other = SYNC_CAS(&pred->next[level], item, STRIP_MARK(next));
-        if (other != (markable_t)item) {
-            TRACE("s1", "sl_remove: unlink failed; pred's link changed from %p to %p", item, other);
-            // If our former predecessor now points past us we know another thread unlinked us. Otherwise, we need
-            // to search for a new set of preds.
-            if (other == DOES_NOT_EXIST)
-                continue; // points past to the end of the list; go on to the next level.
-
-            int d = -1;
-            if (!HAS_MARK(other)) {
-                map_key_t other_key = GET_NODE(other)->key;
-                if (EXPECT_TRUE(sl->key_type == NULL)) {
-                    d = item->key - other_key;
-                } else {
-                    d = sl->key_type->cmp((void *)item->key, (void *)other_key);
-                }
-            }
-            if (d > 0) {
-                node_t *temp = find_preds(preds, NULL, level, sl, key, TRUE);
-                if (temp != item)
-                    return DOES_NOT_EXIST; // Another thread removed the item we were targeting.
-                level++; // Redo this level.
-            }
-        }
}

-    markable_t next;
-    markable_t old_next = item->next[0];
-    do {
-        next = old_next;
-        old_next = SYNC_CAS(&item->next[0], next, MARK_NODE((node_t *)next));
-        if (HAS_MARK(old_next)) {
-            TRACE("s2", "sl_remove: %p is already marked for removal by another thread at level 0", item, 0);
-            return DOES_NOT_EXIST;
-        }
-    } while (next != old_next);
-    TRACE("s1", "sl_remove: marked item %p removed at level 0", item, 0);
-
// Atomically swap out the item's value in case another thread is updating the item while we are
// removing it. This establishes which operation occurs first logically, the update or the remove.
map_val_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST);
TRACE("s2", "sl_remove: replaced item %p's value with DOES_NOT_EXIST", item, 0);

-    node_t *pred = preds[0];
-    TRACE("s2", "sl_remove: linking the item's pred %p to the item's successor %p", pred, STRIP_MARK(next));
-    if (SYNC_CAS(&pred->next[0], item, STRIP_MARK(next))) {
-        TRACE("s2", "sl_remove: unlinked item %p from the skiplist at level 0", item, 0);
-        // The thread that completes the unlink should free the memory.
-        if (sl->key_type != NULL) {
-            rcu_defer_free((void *)item->key);
-        }
-        rcu_defer_free(item);
+    // unlink the item
+    sl_unlink(sl, key);
+
+    // free the node
+    if (sl->key_type != NULL) {
+        rcu_defer_free((void *)item->key);
}
+    rcu_defer_free(item);
+
return val;
}

void sl_print (skiplist_t *sl) {
+
+    printf("high water: %d levels\n", sl->high_water);
for (int level = MAX_LEVEL; level >= 0; --level) {
node_t *item = sl->head;
if (item->next[level] == DOES_NOT_EXIST)
diff --git a/runtime/mem.c b/runtime/mem.c
index f8cecec..281c719 100644
--- a/runtime/mem.c
+++ b/runtime/mem.c
@@ -17,6 +17,8 @@
#define REGION_SCALE 22 // 4MB regions
#define REGION_SIZE (1 << REGION_SCALE)
#define HEADER_REGION_SCALE 22 // 4MB is space enough for headers for over 2,000,000 regions
+#define HEADER_REGION_SIZE (1 << HEADER_REGION_SCALE)
+#define HEADER_COUNT (HEADER_REGION_SIZE / sizeof(header_t))

typedef struct block {
struct block *next;
@@ -34,52 +36,79 @@ typedef struct private_list {
uint32_t count;
} private_list_t;

-static header_t *region_header_ = NULL;
+static header_t *headers_ = NULL;
static block_t *pub_free_list_[MAX_NUM_THREADS][MAX_SCALE+1][MAX_NUM_THREADS] = {};
static private_list_t pri_free_list_[MAX_NUM_THREADS][MAX_SCALE+1] = {};

-static void *get_new_region (int scale) {
-    if (scale < REGION_SCALE) {
-        scale = REGION_SCALE;
+static inline header_t *get_header (void *r) {
+    return headers_ + (((size_t)r >> REGION_SCALE) & (HEADER_COUNT - 1));
+}
+
+static void *get_new_region (int block_scale) {
+    size_t sz = (1 << block_scale);
+    if (sz < REGION_SIZE) {
+        sz = REGION_SIZE;
}
-    TRACE("m0", "get_new_region(): mmap new region scale: %llu", scale, 0);
-    void *region = mmap(NULL, (1 << scale), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+    void *region = mmap(NULL, sz, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+    TRACE("m1", "get_new_region: mmap new region %p (size %p)", region, sz);
if (region == (void *)-1) {
perror("get_new_region: mmap");
exit(-1);
}
assert(region);
+    if (headers_ != NULL) {
+        LOCALIZE_THREAD_LOCAL(tid_, int);
+        header_t *h = get_header(region);
+        TRACE("m1", "get_new_region: header %p (%p)", h, h - headers_);
+
+        assert(h->scale == 0);
+        h->scale = block_scale;
+        h->owner = tid_;
+    }
+
return region;
}

void mem_init (void) {
-    assert(region_header_ == NULL);
-    region_header_ = (header_t *)get_new_region(HEADER_REGION_SCALE);
-    memset(region_header_, 0, REGION_SIZE);
+#ifdef USE_SYSTEM_MALLOC
+    return;
+#endif
+    assert(headers_ == NULL);
+    headers_ = (header_t *)get_new_region(HEADER_REGION_SCALE);
+    TRACE("m1", "mem_init: header region %p", headers_, 0);
+    memset(headers_, 0, HEADER_REGION_SIZE);
}

// Put <x> onto its owner's public free list (in the appropriate size bin).
//
// TODO: maybe we want to munmap() larger size blocks to reclaim virtual address space?
void nbd_free (void *x) {
+#ifdef USE_SYSTEM_MALLOC
+    TRACE("m1", "nbd_free: %p", x, 0);
+#ifndef NDEBUG
+    memset(x, 0xcd, sizeof(void *)); // bear trap
+#endif//NDEBUG
+    free(x);
+    return;
+#endif//USE_SYSTEM_MALLOC
+    TRACE("m1", "nbd_free: block %p region %p", x, (size_t)x & ~MASK(REGION_SCALE));
+
assert(x);
LOCALIZE_THREAD_LOCAL(tid_, int);
block_t *b = (block_t *)x;
-    assert(((size_t)b >> REGION_SCALE) < ((1 << HEADER_REGION_SCALE) / sizeof(header_t)));
-    header_t *h = region_header_ + ((size_t)b >> REGION_SCALE);
+    header_t *h = get_header(x);
+    TRACE("m1", "nbd_free: header %p scale %llu", h, h->scale);
+    assert(h->scale && h->scale <= MAX_SCALE);
#ifndef NDEBUG
-    memset(b, 0xcd, (1 << h->scale));
+    memset(b, 0xcd, (1 << h->scale)); // bear trap
#endif
-    TRACE("m0", "nbd_free(): block %p scale %llu", b, h->scale);
if (h->owner == tid_) {
-        TRACE("m0", "nbd_free(): private block, free list head %p",
-            h->owner, pri_free_list_[tid_][h->scale].head);
+        TRACE("m1", "nbd_free: private block, old free list head %p", pri_free_list_[tid_][h->scale].head, 0);
b->next = pri_free_list_[tid_][h->scale].head;
pri_free_list_[tid_][h->scale].head = b;
} else {
-        TRACE("m0", "nbd_free(): owner %llu free list head %p",
-            h->owner, pub_free_list_[h->owner][h->scale][tid_]);
+        TRACE("m1", "nbd_free: owner %llu free list head %p", h->owner, pub_free_list_[h->owner][h->scale][tid_]);
do {
b->next = pub_free_list_[h->owner][h->scale][tid_];
} while (SYNC_CAS(&pub_free_list_[h->owner][h->scale][tid_], b->next, b) != b->next);
@@ -94,16 +123,23 @@ void nbd_free (void *x) {
// on the private free list. If we didn't find any blocks on the public free lists, allocate a new
// region, break it up into blocks and put them on the private free list.
void *nbd_malloc (size_t n) {
-    assert(n);
-    LOCALIZE_THREAD_LOCAL(tid_, int);
+#ifdef USE_SYSTEM_MALLOC
+    TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, GET_SCALE(n));
+    void *x = malloc(n);
+    TRACE("m1", "nbd_malloc: returning %p", x, 0);
+    return x;
+#endif
+    if (EXPECT_FALSE(n == 0))
+        return NULL;
if (n < sizeof(block_t)) {
n = sizeof(block_t);
}
int b_scale = GET_SCALE(n);
+    assert(b_scale >= 2);
assert(b_scale <= MAX_SCALE);
-    TRACE("m0", "nbd_malloc(): size %llu scale %llu", n, b_scale);
+    TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, b_scale);
+    LOCALIZE_THREAD_LOCAL(tid_, int);
private_list_t *pri = &pri_free_list_[tid_][b_scale]; // our private free list
-    TRACE("m0", "nbd_malloc(): private free list first block %p", pri->head, 0);

// If our private free list is empty, try to find blocks on our public free list. If that fails,
// allocate a new region.
@@ -113,14 +149,13 @@ void *nbd_malloc (size_t n) {

// look for blocks on our public free lists round robin
pri->next_pub = (pri->next_pub+1) & (MAX_NUM_THREADS-1);
-            TRACE("m0", "nbd_malloc(): searching public free list %llu", pri->next_pub, 0);
+            TRACE("m1", "nbd_malloc: searching public free list %llu", pri->next_pub, 0);
if (pri->next_pub == tid_) {
uint32_t count = pri->count;
pri->count = 0;
-                // If our private list is empty and we haven't gotten at least half a region's worth
-                // of block's from our public lists, we allocate a new region. This guarentees that
-                // we amortize the cost of accessing our public lists accross enough nbd_malloc()
-                // calls.
+                // If we haven't gotten at least half a region's worth of blocks from our public lists
+                // we allocate a new region. This guarantees that we amortize the cost of accessing our
+                // public lists across enough nbd_malloc() calls.
uint32_t min_count = b_scale > REGION_SCALE ? 1 << (b_scale-REGION_SCALE-1) : 1;
if (count < min_count) {
char *region = get_new_region(b_scale);
@@ -131,14 +166,12 @@ void *nbd_malloc (size_t n) {
b->next = pri->head;
pri->head = b;
}
+                    pri->count = 0;
break;
}
-                continue;
-            }
-
-            if (pubs[pri->next_pub] != NULL) {
+            } else if (pubs[pri->next_pub] != NULL) {
block_t *stolen = SYNC_SWAP(&pubs[pri->next_pub], NULL);
-                TRACE("m0", "nbd_malloc(): stole list %p", stolen, 0);
+                TRACE("m1", "nbd_malloc: stole list %p", stolen, 0);
if (stolen == NULL)
continue;
pri->head = stolen;
@@ -150,8 +183,9 @@ void *nbd_malloc (size_t n) {

// Pull a block off of our private free list.
block_t *b = pri->head;
-    TRACE("m0", "nbd_malloc(): take block %p off of of private list (new head is %p)", b, b->next);
+    TRACE("m1", "nbd_malloc: returning block %p (region %p) from private list", b, (size_t)b & ~MASK(REGION_SCALE));
assert(b);
+    ASSERT(get_header(b)->scale == b_scale);
pri->head = b->next;
pri->count++;
return b;
diff --git a/runtime/rcu.c b/runtime/rcu.c
index 50205e8..3e74986 100644
--- a/runtime/rcu.c
+++ b/runtime/rcu.c
@@ -65,10 +65,12 @@ void rcu_update (void) {
}

// free
-    while (pending_[tid_]->tail != rcu_[tid_][tid_]) {
-        fifo_t *q = pending_[tid_];
-        uint32_t i = MOD_SCALE(q->tail++, q->scale);
+    fifo_t *q = pending_[tid_];
+    while (q->tail != rcu_[tid_][tid_]) {
+        uint32_t i = MOD_SCALE(q->tail, q->scale);
+        TRACE("r0", "rcu_update: freeing %p from queue at position %llu", q->x[i], q->tail);
nbd_free(q->x[i]);
+        q->tail++;
}
}

@@ -77,13 +79,14 @@ void rcu_defer_free (void *x) {
LOCALIZE_THREAD_LOCAL(tid_, int);
fifo_t *q = pending_[tid_];
assert(MOD_SCALE(q->head + 1, q->scale) != MOD_SCALE(q->tail, q->scale));
-    uint32_t i = MOD_SCALE(q->head++, q->scale);
+    uint32_t i = MOD_SCALE(q->head, q->scale);
q->x[i] = x;
-    TRACE("r0", "rcu_defer_free: put %p on queue at position %llu", x, pending_[tid_]->head);
+    TRACE("r0", "rcu_defer_free: put %p on queue at position %llu", x, q->head);
+    q->head++;

-    if (pending_[tid_]->head - rcu_last_posted_[tid_][tid_] < RCU_POST_THRESHOLD)
-        return;
-    TRACE("r0", "rcu_defer_free: posting %llu", pending_[tid_]->head, 0);
-    int next_thread_id = (tid_ + 1) % num_threads_;
-    rcu_[next_thread_id][tid_] = rcu_last_posted_[tid_][tid_] = pending_[tid_]->head;
+    if (pending_[tid_]->head - rcu_last_posted_[tid_][tid_] >= RCU_POST_THRESHOLD) {
+        TRACE("r0", "rcu_defer_free: posting %llu", pending_[tid_]->head, 0);
+        int next_thread_id = (tid_ + 1) % num_threads_;
+        rcu_[next_thread_id][tid_] = rcu_last_posted_[tid_][tid_] = pending_[tid_]->head;
+    }
}
diff --git a/test/haz_test.c b/test/haz_test.c
index 3b9092a..c55ad0e 100644
--- a/test/haz_test.c
+++ b/test/haz_test.c
@@ -32,7 +32,7 @@ static lifo_t *stk_;
void *worker (void *arg) {
int id = (int)(size_t)arg;
-    unsigned int r = (unsigned int)(id + 1) * 0x5bd1e995; // seed "random" number generator
+    unsigned int r = (unsigned int)(id + 1) * 0x5bd1e995; // seed pseudo-random number generator
haz_t *hp0 = haz_get_static(0);

// Wait for all the worker threads to be ready.
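For reference, here is a minimal standalone sketch of the per-thread generator the test workers use: the seed is the thread id scaled by a fixed odd multiplier (0x5bd1e995, the same constant the repo's Murmur hash uses for mixing), and each step is a 32-bit xorshift; bit 0x1000 of the result drives the push/pop choice. The thread id value, the loop bound, and the printing below are illustrative additions, not taken from the test.

#include <stdio.h>

int main (void) {
    int id = 0;                                           // a worker's thread id (illustrative)
    unsigned int r = (unsigned int)(id + 1) * 0x5bd1e995; // seed pseudo-random number generator
    for (int i = 0; i < 8; ++i) {
        r ^= r << 6; r ^= r >> 21; r ^= r << 7;           // generate next pseudo-random number
        printf("%08x -> %s\n", r, (r & 0x1000) ? "push" : "pop");
    }
    return 0;
}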
@@ -41,7 +41,7 @@ void *worker (void *arg) {

int i;
for (i = 0; i < NUM_ITERATIONS; ++ i) {
-        r ^= r << 6; r ^= r >> 21; r ^= r << 7; // generate next "random" number
+        r ^= r << 6; r ^= r >> 21; r ^= r << 7; // generate next pseudo-random number

if (r & 0x1000) {
// push
node_t *new_head = (node_t *)nbd_malloc(sizeof(node_t));
diff --git a/test/map_test1.c b/test/map_test1.c
index 2e9ef85..3acf4c2 100644
--- a/test/map_test1.c
+++ b/test/map_test1.c
@@ -56,7 +56,7 @@ void *worker (void *arg) {
}

int main (int argc, char **argv) {
-    lwt_set_trace_level("r0m0l3");
+    lwt_set_trace_level("r0m3s3");

char* program_name = argv[0];
pthread_t thread[MAX_NUM_THREADS];
@@ -66,7 +66,7 @@ int main (int argc, char **argv) {
return -1;
}

-    num_threads_ = MAX_NUM_THREADS;
+    num_threads_ = 2;
if (argc == 2) {
errno = 0;
diff --git a/test/map_test2.c b/test/map_test2.c
index 9fc6fe3..de7f388 100644
--- a/test/map_test2.c
+++ b/test/map_test2.c
@@ -316,8 +316,7 @@ void big_iteration_test (CuTest* tc) {
}

int main (void) {
-
-    lwt_set_trace_level("l3");
+    lwt_set_trace_level("r0m3s3");

static const map_impl_t *map_types[] = { &ll_map_impl, &sl_map_impl, &ht_map_impl };
for (int i = 0; i < sizeof(map_types)/sizeof(*map_types); ++i) {
diff --git a/test/rcu_test.c b/test/rcu_test.c
index d469648..3fd10d5 100644
--- a/test/rcu_test.c
+++ b/test/rcu_test.c
@@ -78,7 +78,7 @@ void *worker (void *arg) {
}

int main (int argc, char **argv) {
-    //lwt_set_trace_level("m0r0");
+    lwt_set_trace_level("m3r3");

int num_threads = 2;
if (argc == 2)
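A note on the hashtable change in this commit: because system malloc does not guarantee cache-line alignment, hti_alloc() over-allocates by CACHE_LINE_SIZE - 1 bytes and rounds the pointer up to the next line boundary, keeping the raw pointer in unaligned_table_ptr so the later nbd_free()/rcu_defer_free() calls get the pointer malloc actually returned. A minimal standalone sketch of that idiom follows; the 64-byte line size and the helper name are assumptions for illustration, not taken from the tree.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_LINE_SIZE 64 // assumed value; the real one comes from include/common.h

// Return a pointer to sz bytes aligned to a cache line. The caller must keep
// *unaligned around and pass it, not the aligned pointer, to free().
static void *cache_aligned_alloc (size_t sz, void **unaligned) {
    *unaligned = malloc(sz + CACHE_LINE_SIZE - 1);
    if (*unaligned == NULL)
        return NULL;
    return (void *)(((size_t)*unaligned + CACHE_LINE_SIZE - 1) & ~(size_t)(CACHE_LINE_SIZE - 1));
}

int main (void) {
    void *raw;
    void *table = cache_aligned_alloc(1024, &raw);
    memset(table, 0, 1024); // safe: the over-allocation leaves 1024 usable bytes past the aligned start
    printf("raw %p aligned %p offset-in-line %zu\n", raw, table, (size_t)table % CACHE_LINE_SIZE);
    free(raw); // free the original pointer, not the aligned one
    return 0;
}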