From: jdybnis
Date: Sat, 29 Nov 2008 04:21:55 +0000 (+0000)
Subject: generalize list into an updatable list-based map
X-Git-Url: https://pd.if.org/git/?p=nbds;a=commitdiff_plain;h=f0777b2151019e22458f6f166a8f3c569c32a505

generalize list into an updatable list-based map
---

diff --git a/include/lwt.h b/include/lwt.h
index 20c867e..dd98369 100644
--- a/include/lwt.h
+++ b/include/lwt.h
@@ -13,6 +13,12 @@
 #define TRACE(flag, format, v1, v2) lwt_trace(flag, format, (size_t)(v1), (size_t)(v2))
 #endif

+#ifdef NDEBUG
+#define ASSERT(x)
+#else
+#define ASSERT(x) if (!(x)) { lwt_halt(); assert(!#x); }
+#endif
+
 // Dump trace records to <file_name>. The file should be post-processed with "sort" before viewing.
 void lwt_dump (const char *file_name) __attribute__ ((externally_visible));

diff --git a/include/struct.h b/include/struct.h
index 3bb724b..359c4cf 100644
--- a/include/struct.h
+++ b/include/struct.h
@@ -9,16 +9,14 @@ typedef struct ht hashtable_t;

 hashtable_t *ht_alloc (void);
-void ht_free (hashtable_t *ht);
-
 uint64_t ht_compare_and_set (hashtable_t *ht, const char *key, uint32_t key_len, uint64_t expected_val, uint64_t val);
 uint64_t ht_get (hashtable_t *ht, const char *key, uint32_t len);
 uint64_t ht_remove (hashtable_t *ht, const char *key, uint32_t len);
 uint64_t ht_count (hashtable_t *ht);
+void ht_free (hashtable_t *ht);

 typedef struct ll list_t;
-list_t * ll_alloc (void);
-
+list_t * ll_alloc (void);
 uint64_t ll_lookup (list_t *ll, const void *key_data, uint32_t key_len);
 uint64_t ll_cas (list_t *ll, const void *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val);
 uint64_t ll_remove (list_t *ll, const void *key_data, uint32_t key_len);
@@ -26,7 +24,6 @@ void ll_print (list_t *ll);

 typedef struct sl skiplist_t;
 skiplist_t * sl_alloc (void);
-
 uint64_t sl_lookup (skiplist_t *sl, const void *key_data, uint32_t key_len);
 uint64_t sl_add (skiplist_t *sl, const void *key_data, uint32_t key_len, uint64_t value);
 uint64_t sl_remove (skiplist_t *sl, const void *key_data, uint32_t key_len);
diff --git a/makefile b/makefile
index 901a363..dfabc2d 100644
--- a/makefile
+++ b/makefile
@@ -1,11 +1,10 @@
 ###################################################################################################
 # Written by Josh Dybnis and released to the public domain, as explained at
 # http://creativecommons.org/licenses/publicdomain
-#
 ###################################################################################################
 # Makefile for building programs with whole-program interfile optimization
 ###################################################################################################
-OPT := -fwhole-program -combine -03# -DNDEBUG
+OPT := -fwhole-program -combine -03 #-DNDEBUG
 CFLAGS := -g -Wall -Werror -std=c99 -m64 $(OPT) #-DENABLE_TRACE
 INCS := $(addprefix -I, include)
 TESTS := output/ll_test output/sl_test output/ht_test output/rcu_test
@@ -22,7 +21,7 @@ ht_test_SRCS := $(TEST_SRCS) struct/hashtable.c test/ht_test.c test/CuTest.c st
 tests: $(TESTS)

 ###################################################################################################
-# Run the tests
+# build and run tests
 ###################################################################################################
 test: $(addsuffix .log, $(TESTS))
 	@echo > /dev/null
@@ -45,7 +44,7 @@ $(EXES): output/% : output/%.d makefile
 	gcc $(CFLAGS) $(INCS) -o $@ $($*_SRCS)

 ###################################################################################################
-# Build tags file for vi
+# tags file for vi
################################################################################################### tags: ctags -R . @@ -56,12 +55,11 @@ tags: clean: rm -rfv output/* -.PHONY: clean test tags - --include $(addsuffix .d, $(EXES)) - ################################################################################################### -# Dummy rule for boostrapping dependency files +# dummy rule for boostrapping dependency files ################################################################################################### $(addsuffix .d, $(EXES)) : output/%.d : - touch $@ + +-include $(addsuffix .d, $(EXES)) + +.PHONY: clean test tags diff --git a/runtime/lwt.c b/runtime/lwt.c index ec84f90..f6bd346 100644 --- a/runtime/lwt.c +++ b/runtime/lwt.c @@ -119,8 +119,7 @@ void lwt_dump (const char *file_name) } void lwt_trace_i (const char *format, size_t value1, size_t value2) { - if (halt_) - return; + while (halt_) {} LOCALIZE_THREAD_LOCAL(tid_, int); lwt_buffer_t *tb = lwt_buf_[tid_]; if (tb != NULL) { diff --git a/runtime/rcu.c b/runtime/rcu.c index 897bdff..5ae851b 100644 --- a/runtime/rcu.c +++ b/runtime/rcu.c @@ -23,6 +23,7 @@ typedef struct fifo { void *x[0]; } fifo_t; +#define MOD_SCALE(x, b) ((x) & MASK(b)) static uint64_t rcu_[MAX_NUM_THREADS][MAX_NUM_THREADS] = {}; static uint64_t rcu_last_posted_[MAX_NUM_THREADS][MAX_NUM_THREADS] = {}; static fifo_t *pending_[MAX_NUM_THREADS] = {}; @@ -37,21 +38,6 @@ static fifo_t *fifo_alloc(int scale) { return q; } -static uint32_t fifo_index (fifo_t *q, uint32_t i) { - return i & MASK(q->scale); -} - -static void fifo_enqueue (fifo_t *q, void *x) { - assert(fifo_index(q, q->head + 1) != fifo_index(q, q->tail)); - uint32_t i = fifo_index(q, q->head++); - q->x[i] = x; -} - -static void *fifo_dequeue (fifo_t *q) { - uint32_t i = fifo_index(q, q->tail++); - return q->x[i]; -} - void rcu_thread_init (int id) { assert(id < MAX_NUM_THREADS); if (pending_[id] == NULL) { @@ -60,17 +46,6 @@ void rcu_thread_init (int id) { } } -static void rcu_post (uint64_t x) { - LOCALIZE_THREAD_LOCAL(tid_, int); - if (x - rcu_last_posted_[tid_][tid_] < RCU_POST_THRESHOLD) - return; - - int next_thread_id = (tid_ + 1) % num_threads_; - - TRACE("r0", "rcu_post: %llu", x, 0); - rcu_[next_thread_id][tid_] = rcu_last_posted_[tid_][tid_] = x; -} - void rcu_update (void) { LOCALIZE_THREAD_LOCAL(tid_, int); assert(tid_ < num_threads_); @@ -90,13 +65,23 @@ void rcu_update (void) { // free while (pending_[tid_]->tail != rcu_[tid_][tid_]) { - nbd_free(fifo_dequeue(pending_[tid_])); + fifo_t *q = pending_[tid_]; + uint32_t i = MOD_SCALE(q->tail++, q->scale); + nbd_free(q->x[i]); } } void nbd_defer_free (void *x) { LOCALIZE_THREAD_LOCAL(tid_, int); - fifo_enqueue(pending_[tid_], x); + fifo_t *q = pending_[tid_]; + assert(MOD_SCALE(q->head + 1, q->scale) != MOD_SCALE(q->tail, q->scale)); + uint32_t i = MOD_SCALE(q->head++, q->scale); + q->x[i] = x; TRACE("r0", "nbd_defer_free: put %p on queue at position %llu", x, pending_[tid_]->head); - rcu_post(pending_[tid_]->head); + + if (pending_[tid_]->head - rcu_last_posted_[tid_][tid_] < RCU_POST_THRESHOLD) + return; + TRACE("r0", "nbd_defer_free: posting %llu", pending_[tid_]->head, 0); + int next_thread_id = (tid_ + 1) % num_threads_; + rcu_[next_thread_id][tid_] = rcu_last_posted_[tid_][tid_] = pending_[tid_]->head; } diff --git a/struct/hashtable.c b/struct/hashtable.c index a332819..5406d42 100644 --- a/struct/hashtable.c +++ b/struct/hashtable.c @@ -94,14 +94,14 @@ static volatile entry_t *hti_lookup (hashtable_i_t *hti, uint32_t 
key_hash, cons uint64_t e_key = e->key; if (e_key == DOES_NOT_EXIST) { - TRACE("h1", "hti_lookup: entry %p for key \"%s\" is empty", e, ns_data(GET_PTR(e_key))); + TRACE("h1", "hti_lookup: entry %p for key \"%s\" is empty", e, GET_PTR(e_key)->data); *is_empty = 1; // indicate an empty so the caller avoids an expensive ht_key_equals return e; } if (ht_key_equals(e_key, key_hash, key_data, key_len)) { - TRACE("h1", "hti_lookup: entry %p key \"%s\"", e, ns_data(GET_PTR(e_key))); - TRACE("h2", "hti_lookup: entry key len %llu, value %p", ns_len(GET_PTR(e_key)), e->value); + TRACE("h1", "hti_lookup: entry %p key \"%s\"", e, GET_PTR(e_key)->data); + TRACE("h2", "hti_lookup: entry key len %llu, value %p", GET_PTR(e_key)->len, e->value); return e; } } @@ -226,11 +226,11 @@ static int hti_copy_entry (hashtable_i_t *ht1, volatile entry_t *ht1_e, uint32_t // We use 0 to indicate that isn't initiallized. Occasionally the will // really be 0 and we will waste time recomputing it. That is rare enough that it is OK. if (key_hash == 0) { - key_hash = murmur32(ns_data(key_string), ns_len(key_string)); + key_hash = murmur32(key_string->data, key_string->len); } int is_empty; - volatile entry_t *ht2_e = hti_lookup(ht2, key_hash, ns_data(key_string), ns_len(key_string), &is_empty); + volatile entry_t *ht2_e = hti_lookup(ht2, key_hash, key_string->data, key_string->len, &is_empty); TRACE("h0", "hti_copy_entry: copy entry %p to entry %p", ht1_e, ht2_e); // it is possible that there is not any room in the new table either @@ -266,7 +266,7 @@ static int hti_copy_entry (hashtable_i_t *ht1, volatile entry_t *ht1_e, uint32_t // Update the count if we were the one that completed the copy. if (old_ht2_e_value == DOES_NOT_EXIST) { - TRACE("h0", "hti_copy_entry: key \"%s\" value %p copied to new entry", ns_data(key_string), value); + TRACE("h0", "hti_copy_entry: key \"%s\" value %p copied to new entry", key_string->data, value); SYNC_ADD(&ht1->count, -1); SYNC_ADD(&ht2->count, 1); return TRUE; @@ -337,7 +337,7 @@ static uint64_t hti_compare_and_set (hashtable_i_t *hti, uint32_t key_hash, cons TRACE("h2", "hti_compare_and_set: installed key %p in entry %p", key, e); } - TRACE("h0", "hti_compare_and_set: entry for key \"%s\" is %p", ns_data(GET_PTR(e->key)), e); + TRACE("h0", "hti_compare_and_set: entry for key \"%s\" is %p", GET_PTR(e->key)->data, e); // If the entry is in the middle of a copy, the copy must be completed first. uint64_t e_value = e->value; diff --git a/struct/list.c b/struct/list.c index 1920e60..b9ec91f 100644 --- a/struct/list.c +++ b/struct/list.c @@ -55,15 +55,13 @@ list_t *ll_alloc (void) { return ll; } -static node_t *find_pred (node_t **pred_ptr, list_t *ll, const void *key_data, uint32_t key_len, int help_remove) { +static int find_pred (node_t **pred_ptr, node_t **item_ptr, list_t *ll, const void *key_data, uint32_t key_len, int help_remove) { node_t *pred = ll->head; node_t *item = pred->next; - TRACE("l3", "find_pred: searching for key %p in ll (head is %p)", key_data, pred); + TRACE("l2", "find_pred: searching for key %p in ll (head is %p)", key_data, pred); while (item != NULL) { node_t *next = item->next; - TRACE("l3", "find_pred: visiting item %p (next %p)", item, next); - TRACE("l3", "find_pred: key %p", STRIP_TAG(item->key), item->val); // A tag means an item is logically removed but not physically unlinked yet. 
while (EXPECT_FALSE(IS_TAGGED(next))) { @@ -73,26 +71,29 @@ static node_t *find_pred (node_t **pred_ptr, list_t *ll, const void *key_data, u item = (node_t *)STRIP_TAG(item->next); if (EXPECT_FALSE(item == NULL)) break; + TRACE("l3", "find_pred: skipping marked item %p (next is %p)", item, next); next = item->next; continue; } // Unlink logically removed items. node_t *other; + TRACE("l3", "find_pred: unlinking marked item %p next is %p", item, next); if ((other = SYNC_CAS(&pred->next, item, STRIP_TAG(next))) == item) { + TRACE("l2", "find_pred: unlinked item %p from pred %p", item, pred); item = (node_t *)STRIP_TAG(next); if (EXPECT_FALSE(item == NULL)) break; next = item->next; - TRACE("l3", "find_pred: unlinked item %p from pred %p", item, pred); - TRACE("l3", "find_pred: now item is %p next is %p", item, next); + TRACE("l3", "find_pred: now current item is %p next is %p", item, next); // The thread that completes the unlink should free the memory. node_defer_free(other); } else { - TRACE("l3", "find_pred: lost race to unlink from pred %p; its link changed to %p", pred, other); + TRACE("l2", "find_pred: lost a race to unlink item %p from pred %p", item, pred); + TRACE("l2", "find_pred: pred's link changed to %p", other, 0); if (IS_TAGGED(other)) - return find_pred(pred_ptr, ll, key_data, key_len, help_remove); // retry + return find_pred(pred_ptr, item_ptr, ll, key_data, key_len, help_remove); // retry item = other; if (EXPECT_FALSE(item == NULL)) break; @@ -103,102 +104,173 @@ static node_t *find_pred (node_t **pred_ptr, list_t *ll, const void *key_data, u if (EXPECT_FALSE(item == NULL)) break; + TRACE("l3", "find_pred: visiting item %p (next is %p)", item, next); + TRACE("l4", "find_pred: key %p val %p", STRIP_TAG(item->key), item->val); + + // A tagged key is an integer, otherwise it is a pointer to a string + int d; + if (IS_TAGGED(item->key)) { + d = (STRIP_TAG(item->key) - (uint64_t)key_data); + } else { + int item_key_len = item->key->len; + int len = (key_len < item_key_len) ? key_len : item_key_len; + d = memcmp(item->key->data, key_data, len); + if (d == 0) { d = item_key_len - key_len; } + } + // If we reached the key (or passed where it should be), we found the right predesssor - int x = (IS_TAGGED(item->key)) - ? (STRIP_TAG(item->key) - (uint64_t)key_data) - : ns_cmp_raw(item->key, key_data, key_len); - if (x >= 0) { - TRACE("l3", "find_pred: found pred %p item %p", pred, item); + if (d >= 0) { if (pred_ptr != NULL) { *pred_ptr = pred; } - return x == 0 ? item : NULL; + *item_ptr = item; + if (d == 0) { + TRACE("l2", "find_pred: found matching item %p in list, pred is %p", item, pred); + return TRUE; + } + TRACE("l2", "find_pred: found proper place for key %p in list, pred is %p. returning null", key_data, pred); + return FALSE; } pred = item; item = next; - } // is not in . if (pred_ptr != NULL) { *pred_ptr = pred; } - return NULL; + *item_ptr = NULL; + TRACE("l2", "find_pred: reached end of list. last item is %p", pred, 0); + return FALSE; } // Fast find. Do not help unlink partially removed nodes and do not return the found item's predecessor. uint64_t ll_lookup (list_t *ll, const void *key_data, uint32_t key_len) { - TRACE("l3", "ll_lookup: searching for key %p in list %p", key_data, ll); - node_t *item = find_pred(NULL, ll, key_data, key_len, FALSE); + TRACE("l1", "ll_lookup: searching for key %p in list %p", key_data, ll); + node_t *item; + int found = find_pred(NULL, &item, ll, key_data, key_len, FALSE); // If we found an matching the key return its value. 
- return item != NULL ? item->val : DOES_NOT_EXIST; + if (found) { + uint64_t val = item->val; + if (val != DOES_NOT_EXIST) { + TRACE("l1", "ll_lookup: found item %p. val %p. returning item", item, item->val); + return val; + } + } + + TRACE("l1", "ll_lookup: no item in the list matched the key", 0, 0); + return DOES_NOT_EXIST; } -// Insert a new item if a matching key doesn't already exist in -uint64_t ll_cas (list_t *ll, const void *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val) { - assert(new_val != DOES_NOT_EXIST); - TRACE("l3", "ll_cas: inserting key %p val %p", key_data, new_val); +uint64_t ll_cas (list_t *ll, const void *key_data, uint32_t key_len, uint64_t expectation, uint64_t new_val) { + TRACE("l1", "ll_cas: key %p list %p", key_data, ll); + TRACE("l1", "ll_cas: expectation %p new value %p", expectation, new_val); + ASSERT((int64_t)new_val > 0); + + node_t *pred, *old_item; do { - node_t *pred; - node_t *old_item = find_pred(&pred, ll, key_data, key_len, TRUE); + if (!find_pred(&pred, &old_item, ll, key_data, key_len, TRUE)) { - // If a node matching the key already exists in return its value. - if (old_item != NULL) { - TRACE("l3", "ll_cas: there is already an item %p (value %p) with the same key", old_item, old_item->val); - return old_item->val; - } + // There is no existing item in the list that matches the key. + if (EXPECT_FALSE((int64_t)expectation > 0 || expectation == EXPECT_EXISTS)) { + TRACE("l1", "ll_cas: the expectation was not met, the list was not changed", 0, 0); + return DOES_NOT_EXIST; // failure + } + + ASSERT(expectation == EXPECT_DOES_NOT_EXIST || expectation == EXPECT_WHATEVER); + + // Create a new item and insert it into the list. + TRACE("l2", "ll_cas: attempting to insert item between %p and %p", pred, pred->next); + node_t *new_item = node_alloc(key_data, key_len, new_val); + node_t *next = new_item->next = old_item; + node_t *other = SYNC_CAS(&pred->next, next, new_item); + if (other == next) { + TRACE("l1", "ll_cas: successfully inserted new item %p", new_item, 0); + return DOES_NOT_EXIST; // success + } - TRACE("l3", "ll_cas: attempting to insert item between %p and %p", pred, pred->next); - node_t *new_item = node_alloc(key_data, key_len, new_val); - node_t *next = new_item->next = pred->next; - node_t *other = SYNC_CAS(&pred->next, next, new_item); - if (other == next) { - TRACE("l3", "ll_cas: successfully inserted item %p", new_item, 0); - return DOES_NOT_EXIST; // success + // Lost a race. Failed to insert the new item into the list. + TRACE("l1", "ll_cas: lost a race. CAS failed. expected pred's link to be %p but found %p", next, other); + node_free(new_item); + continue; // retry } - TRACE("l3", "ll_cas: failed to change pred's link: expected %p found %p", next, other); - node_free(new_item); + // Found an item in the list that matches the key. + uint64_t old_item_val = old_item->val; + do { + // If the item's value is DOES_NOT_EXIST it means another thread removed the node out from under us. + if (EXPECT_FALSE(old_item_val == DOES_NOT_EXIST)) { + TRACE("l2", "ll_cas: lost a race, found an item but another thread removed it. retry", 0, 0); + break; // retry + } + + if (EXPECT_FALSE(expectation == EXPECT_DOES_NOT_EXIST)) { + TRACE("l1", "ll_cas: found an item %p in the list that matched the key. the expectation was " + "not met, the list was not changed", old_item, old_item_val); + return old_item_val; // failure + } + + // Use a CAS and not a SWAP. 
If the node is in the process of being removed and we used a SWAP, we could + // replace DOES_NOT_EXIST with our value. Then another thread that is updating the value could think it + // succeeded and return our value even though we indicated that the node has been removed. If the CAS + // fails it means another thread either removed the node or updated its value. + uint64_t ret_val = SYNC_CAS(&old_item->val, old_item_val, new_val); + if (ret_val == old_item_val) { + TRACE("l1", "ll_cas: the CAS succeeded. updated the value of the item", 0, 0); + return ret_val; // success + } + TRACE("l2", "ll_cas: lost a race. the CAS failed. another thread changed the item's value", 0, 0); + + old_item_val = ret_val; + } while (1); } while (1); } uint64_t ll_remove (list_t *ll, const void *key_data, uint32_t key_len) { - TRACE("l3", "ll_remove: removing item with key %p from list %p", key_data, ll); + TRACE("l1", "ll_remove: removing item with key %p from list %p", key_data, ll); node_t *pred; - node_t *item = find_pred(&pred, ll, key_data, key_len, TRUE); - if (item == NULL) { - TRACE("l3", "ll_remove: remove failed, an item with a matching key does not exist in the list", 0, 0); + node_t *item; + int found = find_pred(&pred, &item, ll, key_data, key_len, TRUE); + if (!found) { + TRACE("l1", "ll_remove: remove failed, an item with a matching key does not exist in the list", 0, 0); return DOES_NOT_EXIST; } // Mark removed. This must be atomic. If multiple threads try to remove the same item // only one of them should succeed. - if (EXPECT_FALSE(IS_TAGGED(item->next))) { - TRACE("l3", "ll_remove: %p is already marked for removal by another thread", item, 0); - return DOES_NOT_EXIST; - } - node_t *next = SYNC_FETCH_AND_OR(&item->next, TAG); - if (EXPECT_FALSE(IS_TAGGED(next))) { - TRACE("l3", "ll_remove: lost race -- %p is already marked for removal by another thread", item, 0); - return DOES_NOT_EXIST; - } + node_t *next; + node_t *old_next = item->next; + do { + next = old_next; + old_next = SYNC_CAS(&item->next, next, TAG_VALUE(next)); + if (IS_TAGGED(old_next)) { + TRACE("l1", "ll_remove: lost a race -- %p is already marked for removal by another thread", item, 0); + return DOES_NOT_EXIST; + } + } while (next != old_next); + TRACE("l2", "ll_remove: logically removed item %p", item, 0); + ASSERT(!IS_TAGGED(item->next)); + + // This has to be an atomic swap in case another thread is updating the item while we are removing it. + uint64_t val = SYNC_SWAP(&item->val, DOES_NOT_EXIST); - uint64_t val = item->val; + TRACE("l2", "ll_remove: replaced item's val %p with DOES_NOT_EXIT", val, 0); // Unlink from . If we lose a race to another thread just back off. It is safe to leave the // item logically removed for a later call (or some other thread) to physically unlink. By marking the // item earlier, we logically removed it. - TRACE("l3", "ll_remove: link item's pred %p to it's successor %p", pred, next); + TRACE("l2", "ll_remove: unlink the item by linking its pred %p to its successor %p", pred, next); node_t *other; if ((other = SYNC_CAS(&pred->next, item, next)) != item) { - TRACE("l3", "ll_remove: unlink failed; pred's link changed from %p to %p", item, other); + TRACE("l1", "ll_remove: unlink failed; pred's link changed from %p to %p", item, other); return val; } // The thread that completes the unlink should free the memory. 
- node_defer_free(item); + node_defer_free((node_t *)item); + TRACE("l1", "ll_remove: successfully unlinked item %p from the list", item, 0); return val; } @@ -206,13 +278,18 @@ void ll_print (list_t *ll) { node_t *item; item = ll->head->next; while (item) { + node_t *next = item->next; + if (IS_TAGGED(item)) { + printf("*"); + } + printf("%p:", item); if (IS_TAGGED(item->key)) { printf("0x%llx ", STRIP_TAG(item->key)); } else { - printf("%s ", (char *)ns_data(item->key)); + printf("%s ", (char *)item->key->data); } fflush(stdout); - item = item->next; + item = (node_t *)STRIP_TAG(next); } printf("\n"); } diff --git a/struct/nstring.c b/struct/nstring.c index 840c6df..db35ee1 100644 --- a/struct/nstring.c +++ b/struct/nstring.c @@ -2,11 +2,6 @@ #include "nstring.h" #include "mem.h" -struct nstring { - uint32_t len; - char data[]; -}; - nstring_t *ns_alloc (const void *data, uint32_t len) { nstring_t *ns = nbd_malloc(sizeof(nstring_t) + len); ns->len = len; @@ -15,10 +10,6 @@ nstring_t *ns_alloc (const void *data, uint32_t len) { } int ns_cmp_raw (nstring_t *ns, const void *data, uint32_t len) { - int rc = memcmp(ns->data, data, (len < ns->len) ? len : ns->len); - return (rc == 0) ? ns->len - len : rc; + int d = memcmp(ns->data, data, (len < ns->len) ? len : ns->len); + return (d == 0) ? ns->len - len : d; } - -const void *ns_data (nstring_t *ns) { return ns->data; } - -uint64_t ns_len (nstring_t *ns) { return ns->len; } diff --git a/struct/nstring.h b/struct/nstring.h index ba0d6ec..9aa4ce4 100644 --- a/struct/nstring.h +++ b/struct/nstring.h @@ -1,11 +1,12 @@ #ifndef NSTRING_H #define NSTRING_H -typedef struct nstring nstring_t; +typedef struct nstring { + uint32_t len; + char data[]; +} nstring_t; -nstring_t *ns_alloc (const void *data, uint32_t len); +nstring_t * ns_alloc (const void *data, uint32_t len); +int ns_cmp_raw (nstring_t *ns, const void *data, uint32_t len); -int ns_cmp_raw (nstring_t *ns, const void *data, uint32_t len); -const void *ns_data (nstring_t *ns); -uint64_t ns_len (nstring_t *ns); #endif//NSTRING_H diff --git a/struct/skiplist.c b/struct/skiplist.c index a3fab39..826751c 100644 --- a/struct/skiplist.c +++ b/struct/skiplist.c @@ -190,7 +190,7 @@ static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl // Fast find that does not help unlink partially removed nodes and does not return the node's predecessors. uint64_t sl_lookup (skiplist_t *sl, const void *key_data, uint32_t key_len) { - TRACE("s3", "sl_lookup: searching for key %p in skiplist %p", key, sl); + TRACE("s3", "sl_lookup: searching for key %p in skiplist %p", key_data, sl); node_t *item = find_preds(NULL, NULL, 0, sl, key_data, key_len, FALSE); // If we found an matching the return its value. @@ -337,7 +337,7 @@ void sl_print (skiplist_t *sl) { if (IS_TAGGED(item->key)) { printf("%s%p:%llx ", is_marked ? "*" : "", item, STRIP_TAG(item->key)); } else { - printf("%s%p:%s ", is_marked ? "*" : "", item, (char *)ns_data(item->key)); + printf("%s%p:%s ", is_marked ? "*" : "", item, (char *)item->key->data); } if (item != sl->head) { printf("[%d]", item->top_level); diff --git a/test/ll_test.c b/test/ll_test.c index 4f2e236..03bc81f 100644 --- a/test/ll_test.c +++ b/test/ll_test.c @@ -46,7 +46,7 @@ void *worker (void *arg) { int main (int argc, char **argv) { nbd_init(); - lwt_set_trace_level("l3"); + //lwt_set_trace_level("l3"); char* program_name = argv[0]; pthread_t thread[MAX_NUM_THREADS];
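
The functional change in this commit is the ll_cas() interface: instead of insert-only, it now takes an expectation (EXPECT_DOES_NOT_EXIST, EXPECT_EXISTS, EXPECT_WHATEVER, or a prior value) and returns the key's previous value, which is what makes the list usable as an updatable map. The following usage sketch is not part of the patch; it assumes runtime.h declares nbd_init() (as test/ll_test.c uses it) and that the EXPECT_* and DOES_NOT_EXIST constants are provided by the headers, since their definitions are not shown in this diff.

/* Hypothetical usage sketch -- not part of the commit.
 * Assumes "runtime.h" declares nbd_init() and "struct.h" provides the
 * list_t API plus the EXPECT_* / DOES_NOT_EXIST constants. */
#include <assert.h>
#include <stdio.h>
#include "runtime.h"
#include "struct.h"

int main (void) {
    nbd_init();
    list_t *ll = ll_alloc();

    // Insert: succeeds only if the key is not already in the list.
    uint64_t prev = ll_cas(ll, "apple", 5, EXPECT_DOES_NOT_EXIST, 10);
    assert(prev == DOES_NOT_EXIST);

    // Update: overwrite whatever value is currently stored under the key.
    prev = ll_cas(ll, "apple", 5, EXPECT_WHATEVER, 20);
    printf("previous value: %llu\n", (unsigned long long)prev); // 10

    assert(ll_lookup(ll, "apple", 5) == 20);
    assert(ll_remove(ll, "apple", 5) == 20);
    return 0;
}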
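ll_remove() now removes a node in two phases: it tags the node's next pointer with a CAS retry loop (TAG_VALUE), swaps the value to DOES_NOT_EXIST, and only then unlinks the node from its predecessor. The sketch below illustrates the tag arithmetic single-threaded; the TAG / IS_TAGGED / STRIP_TAG / TAG_VALUE definitions here are stand-ins assuming a low-order tag bit and may differ from the project's actual macros.

/* Illustration only -- a single-threaded view of the two-phase removal.
 * list.c performs phase 1 with a SYNC_CAS retry loop (so only one thread
 * wins the right to remove) and phase 2 with SYNC_CAS on pred->next. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define TAG           ((uintptr_t)1)
#define TAG_VALUE(p)  ((node_t *)((uintptr_t)(p) | TAG))
#define IS_TAGGED(p)  ((uintptr_t)(p) & TAG)
#define STRIP_TAG(p)  ((node_t *)((uintptr_t)(p) & ~TAG))

typedef struct node node_t;
struct node { uint64_t val; node_t *next; };

int main (void) {
    node_t *c = calloc(1, sizeof(node_t));
    node_t *b = calloc(1, sizeof(node_t));
    node_t *a = calloc(1, sizeof(node_t));      // list: a -> b -> c
    a->next = b; b->next = c;

    // Phase 1: logically remove <b> by tagging its next pointer.
    node_t *next = b->next;
    b->next = TAG_VALUE(next);
    assert(IS_TAGGED(b->next) && STRIP_TAG(b->next) == c);

    // Phase 2: physically unlink <b> by pointing its predecessor past it.
    a->next = next;
    assert(a->next == c);

    free(b); free(a); free(c);
    return 0;
}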
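runtime/rcu.c now indexes its per-thread deferred-free queue directly with MOD_SCALE(x, b) == (x & MASK(b)): head and tail are free-running counters and the mask wraps them into a power-of-two array. A minimal sketch of that indexing, assuming MASK(b) expands to ((1 << b) - 1); its real definition is not part of this diff.

/* Minimal sketch of the MOD_SCALE indexing used by the deferred-free queue.
 * MASK() is assumed to be ((1ULL << b) - 1); the real definition lives
 * elsewhere in the tree. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MASK(b)         (((uint64_t)1 << (b)) - 1)
#define MOD_SCALE(x, b) ((x) & MASK(b))

#define SCALE 4                      // queue holds 2^4 = 16 slots
static void *slot[1 << SCALE];

int main (void) {
    uint64_t head = 0, tail = 0;     // free-running counters, never reset

    // enqueue (as in nbd_defer_free): assert the queue is not full first
    int x = 42;
    assert(MOD_SCALE(head + 1, SCALE) != MOD_SCALE(tail, SCALE));
    slot[MOD_SCALE(head++, SCALE)] = &x;

    // dequeue (as in the free loop of rcu_update): drain toward the posted index
    void *y = slot[MOD_SCALE(tail++, SCALE)];
    assert(y == &x);

    // the counters wrap transparently because the index is masked
    printf("index of position 17 in a 16-slot queue: %llu\n",
           (unsigned long long)MOD_SCALE(17, SCALE)); // prints 1
    return 0;
}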