#define CACHE_LINE_SIZE 64
-#define CAT(x,y) x##y
-#define ON_EXIT_SCOPE_I(x,i) \
- inline void CAT(scope_cleanup_function_, i) (int *CAT(scope_cleanup_dummy_argument_, i)) { x; }; \
- int CAT(scope_cleanup_dummy_variable_, i) __attribute__((cleanup(CAT(scope_cleanup_function_, i))));
-#define ON_EXIT_SCOPE(x) ON_EXIT_SCOPE_I(x,__LINE__)
-
#define EXPECT_TRUE(x) __builtin_expect(x, 1)
#define EXPECT_FALSE(x) __builtin_expect(x, 0)
#define EXPECT_EXISTS (-1)
#define EXPECT_WHATEVER (-2)
-typedef struct hti *hashtable_t;
+typedef struct ht hashtable_t;
hashtable_t *ht_alloc (void);
void ht_free (hashtable_t *ht);
list_t * ll_alloc (void);
uint64_t ll_lookup (list_t *ll, const void *key_data, uint32_t key_len);
-uint64_t ll_add (list_t *ll, const void *key_data, uint32_t key_len, uint64_t value);
+uint64_t ll_cas (list_t *ll, const void *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val);
uint64_t ll_remove (list_t *ll, const void *key_data, uint32_t key_len);
void ll_print (list_t *ll);
# Makefile for building programs with whole-program interfile optimization
###################################################################################################
OPT      := -fwhole-program -combine -O3# -DNDEBUG
-CFLAGS := -g -Wall -Werror -std=c99 -m64 -fnested-functions $(OPT)# -DENABLE_TRACE
+CFLAGS := -g -Wall -Werror -std=c99 -m64 $(OPT) #-DENABLE_TRACE
INCS := $(addprefix -I, include)
TESTS := output/ll_test output/sl_test output/ht_test output/rcu_test
EXES := $(TESTS)
###################################################################################################
# Rebuild an executable if any of its source files need to be recompiled
#
-# Note: Calculating dependancies as a side-effect of compilation is disabled. There is a bug in
+# Note: Calculating dependencies as a side-effect of compilation is disabled. There is a bug in
# gcc. Compilation fails when -MM -MF is used and there is more than one source file.
-# -MM -MT $@.d -MF $@.d
+# Otherwise "-MM -MT $@.d -MF $@.d" should be part of the command line for the compile.
+#
+# Also, when calculating dependencies -combine is removed from CFLAGS because of another bug
+# in gcc. It chokes when -MM is used with -combine.
###################################################################################################
$(EXES): output/% : output/%.d makefile
+	gcc $(CFLAGS:-combine=) $(INCS) -MM -MT $@ $($*_SRCS) > $@.d
gcc $(CFLAGS) $(INCS) -o $@ $($*_SRCS)
###################################################################################################
.PHONY: clean test tags
+-include $(addsuffix .d, $(EXES))
+
###################################################################################################
-# Generate the dependencies lists for each executable
-#
-# Note: -combine is removed from CFLAGS because of a bug in gcc. The compiler chokes when
-# -MM is used with -combine.
+# Dummy rule for bootstrapping dependency files
###################################################################################################
$(addsuffix .d, $(EXES)) : output/%.d :
- gcc $(CFLAGS:-combine:) $(INCS) -MM -MT $@ $($*_SRCS) > $@
-
--include $(addsuffix .d, $(EXES))
+ touch $@
int scan;
} hashtable_i_t;
+struct ht {
+ hashtable_i_t *hti;
+};
+
static const uint64_t COPIED_VALUE = -1;
static const uint64_t TOMBSTONE = STRIP_TAG(-1);
//
uint64_t ht_get (hashtable_t *ht, const char *key_data, uint32_t key_len) {
- return hti_get(*ht, murmur32(key_data, key_len), key_data, key_len);
+ return hti_get(ht->hti, murmur32(key_data, key_len), key_data, key_len);
}
//
assert(key_data);
assert(!IS_TAGGED(new_val) && new_val != DOES_NOT_EXIST);
- hashtable_i_t *hti = *ht;
+ hashtable_i_t *hti = ht->hti;
// Help with an ongoing copy.
if (EXPECT_FALSE(hti->next != NULL)) {
// Dispose of fully copied tables.
if (hti->num_entries_copied == (1 << hti->scale) || panic) {
assert(hti->next);
- if (SYNC_CAS(ht, hti, hti->next) == hti) {
+ if (SYNC_CAS(&ht->hti, hti, hti->next) == hti) {
nbd_defer_free(hti);
}
}
// Remove the value in <ht> associated with <key_data>. Returns the value removed, or
// DOES_NOT_EXIST if there was no value for that key.
uint64_t ht_remove (hashtable_t *ht, const char *key_data, uint32_t key_len) {
- hashtable_i_t *hti = *ht;
+ hashtable_i_t *hti = ht->hti;
uint64_t val;
uint32_t key_hash = murmur32(key_data, key_len);
do {
// Returns the number of key-values pairs in <ht>
uint64_t ht_count (hashtable_t *ht) {
- hashtable_i_t *hti = *ht;
+ hashtable_i_t *hti = ht->hti;
uint64_t count = 0;
while (hti) {
count += hti->count;
// Allocate and initialize a new hash table.
hashtable_t *ht_alloc (void) {
hashtable_t *ht = nbd_malloc(sizeof(hashtable_t));
- *ht = (hashtable_i_t *)hti_alloc(ht, MIN_SCALE);
+ ht->hti = (hashtable_i_t *)hti_alloc(ht, MIN_SCALE);
return ht;
}
// Free <ht> and its internal structures.
void ht_free (hashtable_t *ht) {
- hashtable_i_t *hti = *ht;
+ hashtable_i_t *hti = ht->hti;
do {
for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
assert(hti->table[i].value == COPIED_VALUE || !IS_TAGGED(hti->table[i].value));
typedef struct node {
nstring_t *key;
- uint64_t value;
+ uint64_t val;
struct node *next;
} node_t;
node_t *head;
};
-static node_t *node_alloc (const void *key_data, uint32_t key_len, uint64_t value) {
+static node_t *node_alloc (const void *key_data, uint32_t key_len, uint64_t val) {
node_t *item = (node_t *)nbd_malloc(sizeof(node_t));
memset(item, 0, sizeof(node_t));
// If <key_len> is -1 it indicates <key_data> is an integer and not a pointer
item->key = (key_len == (unsigned)-1)
? (void *)TAG_VALUE(key_data)
: ns_alloc(key_data, key_len);
- item->value = value;
+ item->val = val;
return item;
}
+static void node_free (node_t *item) {
+ if (!IS_TAGGED(item->key)) {
+ nbd_free(item->key);
+ }
+ nbd_free(item);
+}
+
+static void node_defer_free (node_t *item) {
+ if (!IS_TAGGED(item->key)) {
+ nbd_defer_free(item->key);
+ }
+ nbd_defer_free(item);
+}
+
list_t *ll_alloc (void) {
list_t *ll = (list_t *)nbd_malloc(sizeof(list_t));
ll->head = node_alloc(" ", 0, 0);
while (item != NULL) {
node_t *next = item->next;
TRACE("l3", "find_pred: visiting item %p (next %p)", item, next);
- TRACE("l3", "find_pred: key %p", STRIP_TAG(item->key), item->value);
+ TRACE("l3", "find_pred: key %p", STRIP_TAG(item->key), item->val);
// A tag means an item is logically removed but not physically unlinked yet.
while (EXPECT_FALSE(IS_TAGGED(next))) {
TRACE("l3", "find_pred: now item is %p next is %p", item, next);
// The thread that completes the unlink should free the memory.
- nbd_defer_free(other);
+ node_defer_free(other);
} else {
TRACE("l3", "find_pred: lost race to unlink from pred %p; its link changed to %p", pred, other);
if (IS_TAGGED(other))
// Fast find. Do not help unlink partially removed nodes and do not return the found item's predecessor.
uint64_t ll_lookup (list_t *ll, const void *key_data, uint32_t key_len) {
- TRACE("l3", "ll_lookup: searching for key %p in ll %p", key_data, ll);
+ TRACE("l3", "ll_lookup: searching for key %p in list %p", key_data, ll);
node_t *item = find_pred(NULL, ll, key_data, key_len, FALSE);
// If we found an <item> matching the key return its value.
- return item != NULL ? item->value : DOES_NOT_EXIST;
+ return item != NULL ? item->val : DOES_NOT_EXIST;
}
// Insert a new item if a matching key doesn't already exist in <ll>
-uint64_t ll_add (list_t *ll, const void *key_data, uint32_t key_len, uint64_t value) {
- TRACE("l3", "ll_add: inserting key %p value %p", key_data, value);
- node_t *pred;
- node_t *item = NULL;
+uint64_t ll_cas (list_t *ll, const void *key_data, uint32_t key_len, uint64_t expected_val, uint64_t new_val) {
+ assert(new_val != DOES_NOT_EXIST);
+ TRACE("l3", "ll_cas: inserting key %p val %p", key_data, new_val);
do {
- node_t *next = find_pred(&pred, ll, key_data, key_len, TRUE);
+ node_t *pred;
+ node_t *old_item = find_pred(&pred, ll, key_data, key_len, TRUE);
// If a node matching the key already exists in <ll> return its value.
- if (next != NULL) {
- TRACE("l3", "ll_add: there is already an item %p (value %p) with the same key", next, next->value);
- if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
- return next->value;
+ if (old_item != NULL) {
+ TRACE("l3", "ll_cas: there is already an item %p (value %p) with the same key", old_item, old_item->val);
+ return old_item->val;
}
- next = pred->next;
- TRACE("l3", "ll_add: attempting to insert item between %p and %p", pred, next);
- if (EXPECT_TRUE(item == NULL)) { item = node_alloc(key_data, key_len, value); }
- item->next = next;
- node_t *other = SYNC_CAS(&pred->next, next, item);
+ TRACE("l3", "ll_cas: attempting to insert item between %p and %p", pred, pred->next);
+ node_t *new_item = node_alloc(key_data, key_len, new_val);
+ node_t *next = new_item->next = pred->next;
+ node_t *other = SYNC_CAS(&pred->next, next, new_item);
if (other == next) {
- TRACE("l3", "ll_add: successfully inserted item %p", item, 0);
+ TRACE("l3", "ll_cas: successfully inserted item %p", new_item, 0);
return DOES_NOT_EXIST; // success
}
- TRACE("l3", "ll_add: failed to change pred's link: expected %p found %p", next, other);
+ TRACE("l3", "ll_cas: failed to change pred's link: expected %p found %p", next, other);
+ node_free(new_item);
} while (1);
}
uint64_t ll_remove (list_t *ll, const void *key_data, uint32_t key_len) {
- TRACE("l3", "ll_remove: removing item with key %p from ll %p", key_data, ll);
+ TRACE("l3", "ll_remove: removing item with key %p from list %p", key_data, ll);
node_t *pred;
node_t *item = find_pred(&pred, ll, key_data, key_len, TRUE);
if (item == NULL) {
- TRACE("l3", "ll_remove: remove failed, an item with a matching key does not exist in the ll", 0, 0);
+ TRACE("l3", "ll_remove: remove failed, an item with a matching key does not exist in the list", 0, 0);
return DOES_NOT_EXIST;
}
return DOES_NOT_EXIST;
}
- uint64_t value = item->value;
+ uint64_t val = item->val;
- // Unlink <item> from <ll>.
+ // Unlink <item> from <ll>. If we lose a race to another thread just back off. It is safe to leave the
+ // item logically removed for a later call (or some other thread) to physically unlink. By marking the
+ // item earlier, we logically removed it.
TRACE("l3", "ll_remove: link item's pred %p to it's successor %p", pred, next);
node_t *other;
if ((other = SYNC_CAS(&pred->next, item, next)) != item) {
TRACE("l3", "ll_remove: unlink failed; pred's link changed from %p to %p", item, other);
- // By marking the item earlier, we logically removed it. It is safe to leave the item.
- // Another thread will finish physically removing it from the ll.
- return value;
+ return val;
}
// The thread that completes the unlink should free the memory.
- nbd_defer_free(item);
- return value;
+ node_defer_free(item);
+ return val;
}
void ll_print (list_t *ll) {
typedef struct node {
nstring_t *key;
- uint64_t value;
+ uint64_t val;
int top_level;
struct node *next[];
} node_t;
return n;
}
-node_t *node_alloc (int level, const void *key_data, uint32_t key_len, uint64_t value) {
+node_t *node_alloc (int level, const void *key_data, uint32_t key_len, uint64_t val) {
assert(level >= 0 && level <= MAX_LEVEL);
size_t sz = sizeof(node_t) + (level + 1) * sizeof(node_t *);
node_t *item = (node_t *)nbd_malloc(sz);
item->key = (key_len == (unsigned)-1)
? (void *)TAG_VALUE(key_data)
: ns_alloc(key_data, key_len);
- item->value = value;
+ item->val = val;
item->top_level = level;
return item;
}
+static void node_free (node_t *item) {
+ if (!IS_TAGGED(item->key)) {
+ nbd_free(item->key);
+ }
+ nbd_free(item);
+}
+
+static void node_defer_free (node_t *item) {
+ if (!IS_TAGGED(item->key)) {
+ nbd_defer_free(item->key);
+ }
+ nbd_defer_free(item);
+}
+
skiplist_t *sl_alloc (void) {
skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
sl->head = node_alloc(MAX_LEVEL, " ", 0, 0);
while (item != NULL) {
node_t *next = item->next[level];
TRACE("s3", "find_preds: visiting item %p (next %p)", item, next);
- TRACE("s3", "find_preds: key %p", STRIP_TAG(item->key), item->value);
+ TRACE("s3", "find_preds: key %p", STRIP_TAG(item->key), item->val);
// A tag means an item is logically removed but not physically unlinked yet.
while (EXPECT_FALSE(IS_TAGGED(next))) {
TRACE("s3", "find_preds: now item is %p next is %p", item, next);
// The thread that completes the unlink should free the memory.
- if (level == 0) { nbd_defer_free(other); }
+ if (level == 0) { node_defer_free(other); }
} else {
TRACE("s3", "find_preds: lost race to unlink from pred %p; its link changed to %p", pred, other);
if (IS_TAGGED(other))
// Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
uint64_t sl_lookup (skiplist_t *sl, const void *key_data, uint32_t key_len) {
- TRACE("s3", "sl_lookup: searching for key %p in sl %p", key, sl);
+    TRACE("s3", "sl_lookup: searching for key %p in skiplist %p", key_data, sl);
node_t *item = find_preds(NULL, NULL, 0, sl, key_data, key_len, FALSE);
// If we found an <item> matching the <key> return its value.
- return item != NULL ? item->value : DOES_NOT_EXIST;
+ return item != NULL ? item->val : DOES_NOT_EXIST;
}
// Insert the <key> if it doesn't already exist in <sl>
-uint64_t sl_add (skiplist_t *sl, const void *key_data, uint32_t key_len, uint64_t value) {
- TRACE("s3", "sl_add: inserting key %p value %p", key_data, value);
+uint64_t sl_add (skiplist_t *sl, const void *key_data, uint32_t key_len, uint64_t val) {
+ TRACE("s3", "sl_add: inserting key %p val %p", key_data, val);
node_t *preds[MAX_LEVEL+1];
node_t *nexts[MAX_LEVEL+1];
- node_t *item = NULL;
+ node_t *new_item = NULL;
int n = random_level();
do {
- node_t *next = find_preds(preds, nexts, n, sl, key_data, key_len, TRUE);
+ node_t *old_item = find_preds(preds, nexts, n, sl, key_data, key_len, TRUE);
// If a node matching <key> already exists in <sl>, return its value.
- if (next != NULL) {
- TRACE("s3", "sl_add: there is already an item %p (value %p) with the same key", nexts[0], nexts[0]->value);
- if (EXPECT_FALSE(item != NULL)) { nbd_free(item); }
- return nexts[0]->value;
+ if (old_item != NULL) {
+ TRACE("s3", "sl_add: there is already an item %p (value %p) with the same key", nexts[0], nexts[0]->val);
+ return nexts[0]->val;
}
- // First insert <item> into the bottom level.
- if (EXPECT_TRUE(item == NULL)) { item = node_alloc(n, key_data, key_len, value); }
+ // First insert <new_item> into the bottom level.
+ TRACE("s3", "sl_add: attempting to insert item between %p and %p", preds[0], nexts[0]);
+ new_item = node_alloc(n, key_data, key_len, val);
node_t *pred = preds[0];
- item->next[0] = next = nexts[0];
- TRACE("s3", "sl_add: attempting to insert item between %p and %p", pred, next);
- for (int level = 1; level <= item->top_level; ++level) {
- item->next[level] = nexts[level];
+ node_t *next = new_item->next[0] = nexts[0];
+ for (int level = 1; level <= new_item->top_level; ++level) {
+ new_item->next[level] = nexts[level];
}
- node_t *other = SYNC_CAS(&pred->next[0], next, item);
+ node_t *other = SYNC_CAS(&pred->next[0], next, new_item);
if (other == next) {
- TRACE("s3", "sl_add: successfully inserted item %p at level 0", item, 0);
+ TRACE("s3", "sl_add: successfully inserted item %p at level 0", new_item, 0);
break; // success
}
TRACE("s3", "sl_add: failed to change pred's link: expected %p found %p", next, other);
+ node_free(new_item);
} while (1);
- // Insert <item> into <sl> from the bottom level up.
- for (int level = 1; level <= item->top_level; ++level) {
+ // Insert <new_item> into <sl> from the bottom level up.
+ for (int level = 1; level <= new_item->top_level; ++level) {
node_t *pred = preds[level];
node_t *next = nexts[level];
do {
TRACE("s3", "sl_add: attempting to insert item between %p and %p", pred, next);
- node_t *other = SYNC_CAS(&pred->next[level], next, item);
+ node_t *other = SYNC_CAS(&pred->next[level], next, new_item);
if (other == next) {
- TRACE("s3", "sl_add: successfully inserted item %p at level %llu", item, level);
+ TRACE("s3", "sl_add: successfully inserted item %p at level %llu", new_item, level);
break; // success
}
TRACE("s3", "sl_add: failed to change pred's link: expected %p found %p", next, other);
- find_preds(preds, nexts, item->top_level, sl, key_data, key_len, TRUE);
+ find_preds(preds, nexts, new_item->top_level, sl, key_data, key_len, TRUE);
pred = preds[level];
next = nexts[level];
- // Update <item>'s next pointer
+ // Update <new_item>'s next pointer
do {
// There in no need to continue linking in the item if another thread removed it.
- node_t *old_next = ((volatile node_t *)item)->next[level];
+ node_t *old_next = ((volatile node_t *)new_item)->next[level];
if (IS_TAGGED(old_next))
- return value;
+ return val;
- // Use a CAS so we to not inadvertantly stomp on a mark another thread placed on the item.
- if (old_next == next || SYNC_CAS(&item->next[level], old_next, next) == old_next)
+    // Use a CAS so we do not inadvertently stomp on a mark another thread placed on the item.
+ if (old_next == next || SYNC_CAS(&new_item->next[level], old_next, next) == old_next)
break;
} while (1);
} while (1);
}
- return value;
+ return val;
}
uint64_t sl_remove (skiplist_t *sl, const void *key_data, uint32_t key_len) {
- TRACE("s3", "sl_remove: removing item with key %p from sl %p", key_data, sl);
+ TRACE("s3", "sl_remove: removing item with key %p from skiplist %p", key_data, sl);
node_t *preds[MAX_LEVEL+1];
node_t *item = find_preds(preds, NULL, -1, sl, key_data, key_len, TRUE);
if (item == NULL) {
- TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the sl", 0, 0);
+ TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the skiplist", 0, 0);
return DOES_NOT_EXIST;
}
}
}
- uint64_t value = item->value;
+ uint64_t val = item->val;
- // Unlink <item> from the top down.
+    // Unlink <item> from <sl>. If we lose a race to another thread just back off. It is safe to leave the
+ // item partially unlinked for a later call (or some other thread) to physically unlink. By marking the
+ // item earlier, we logically removed it.
int level = item->top_level;
while (level >= 0) {
node_t *pred = preds[level];
node_t *other = NULL;
if ((other = SYNC_CAS(&pred->next[level], item, STRIP_TAG(next))) != item) {
TRACE("s3", "sl_remove: unlink failed; pred's link changed from %p to %p", item, other);
- // By marking the item earlier, we logically removed it. It is safe to leave the item partially
- // unlinked. Another thread will finish physically removing it from <sl>.
- return value;
+ return val;
}
--level;
}
// The thread that completes the unlink should free the memory.
- nbd_defer_free(item);
- return value;
+ node_defer_free(item);
+ return val;
}
void sl_print (skiplist_t *sl) {
char key_str[10];
sprintf(key_str, "%llX", key);
if (r & (1 << 8)) {
- ll_add(ll_, key_str, strlen(key_str) + 1, 1);
+ ll_cas(ll_, key_str, strlen(key_str) + 1, EXPECT_WHATEVER, 1);
} else {
ll_remove(ll_, key_str, strlen(key_str) + 1);
}
#else
if (r & (1 << 8)) {
- ll_add(ll_, (void *)key, -1, 1);
+ ll_cas(ll_, (void *)key, -1, EXPECT_WHATEVER, 1);
} else {
ll_remove(ll_, (void *)key, -1);
}
int main (int argc, char **argv) {
nbd_init();
- //lwt_set_trace_level("m0l0");
+ lwt_set_trace_level("l3");
char* program_name = argv[0];
pthread_t thread[MAX_NUM_THREADS];