#include <string.h>
#include <sys/types.h>
-#define MAX_NUM_THREADS 4 // make this whatever you want, but make it a power of 2
-
#define CACHE_LINE_SIZE 64 // 64 byte cache line on x86 and x86-64
#define CACHE_LINE_SCALE 6 // log base 2 of the cache line size
#define EXPECT_TRUE(x) __builtin_expect(!!(x), 1)
#define EXPECT_FALSE(x) __builtin_expect(!!(x), 0)
-#define SYNC_SWAP __sync_lock_test_and_set
-#define SYNC_CAS __sync_val_compare_and_swap
-#define SYNC_ADD __sync_add_and_fetch
-#define SYNC_FETCH_AND_OR __sync_fetch_and_or
+#ifndef NBD_SINGLE_THREADED
+
+#define MAX_NUM_THREADS 16 // make this whatever you want, but make it a power of 2
+
+// Multi-threaded build: map the SYNC_* primitives onto the GCC __sync builtins.
+#define SYNC_SWAP(addr,x) __sync_lock_test_and_set(addr,x)
+#define SYNC_CAS(addr,old,x) __sync_val_compare_and_swap(addr,old,x)
+#define SYNC_ADD(addr,n) __sync_add_and_fetch(addr,n)
+#define SYNC_FETCH_AND_OR(addr,x) __sync_fetch_and_or(addr,x)
+#else// NBD_SINGLE_THREADED
+
+#define MAX_NUM_THREADS 1
+
+// Single-threaded build: plain statement-expression stand-ins (no atomicity needed).
+#define SYNC_SWAP(addr,x) ({ typeof(*(addr)) _old = *(addr); *(addr) = (x); _old; })
+// NOTE(review): this SYNC_CAS ignores (old) and always stores. In a single-threaded
+// build the compare can presumably never fail, but confirm no caller relies on a
+// failing compare; the conditional variant is kept commented out below.
+#define SYNC_CAS(addr,old,x) ({ typeof(*(addr)) _old = *(addr); *(addr) = (x); _old; })
+//#define SYNC_CAS(addr,old,x) ({ typeof(*(addr)) _old = *(addr); if ((old) == _old) { *(addr) = (x); } _old; })
+#define SYNC_ADD(addr,n) ({ typeof(*(addr)) _old = *(addr); *(addr) += (n); _old; })
+#define SYNC_FETCH_AND_OR(addr,x) ({ typeof(*(addr)) _old = *(addr); *(addr) |= (x); _old; })
+
+#endif//NBD_SINGLE_THREADED
#define COUNT_TRAILING_ZEROS __builtin_ctz
# Makefile for building programs with whole-program interfile optimization
###################################################################################################
CFLAGS0 := -Wall -Werror -std=gnu99 -lpthread #-m32 -DNBD32
-CFLAGS1 := $(CFLAGS0) -g #-O3 #-DNDEBUG #-fwhole-program -combine
+CFLAGS1 := $(CFLAGS0) -g -O3 -DNDEBUG #-fwhole-program -combine
CFLAGS2 := $(CFLAGS1) #-DENABLE_TRACE
CFLAGS3 := $(CFLAGS2) #-DLIST_USE_HAZARD_POINTER
-CFLAGS := $(CFLAGS3) #-DUSE_SYSTEM_MALLOC #-DTEST_STRING_KEYS
+CFLAGS := $(CFLAGS3) #-DNBD_SINGLE_THREADED #-DUSE_SYSTEM_MALLOC #-DTEST_STRING_KEYS
INCS := $(addprefix -I, include)
-TESTS := output/perf_test output/map_test2 output/map_test1 output/txn_test \
- output/rcu_test output/haz_test
+TESTS := output/perf_test output/map_test1 output/map_test2 output/rcu_test output/txn_test #output/haz_test
OBJS := $(TESTS)
-RUNTIME_SRCS := runtime/runtime.c runtime/rcu.c runtime/lwt.c runtime/mem.c datatype/nstring.c \
- runtime/hazard.c
+RUNTIME_SRCS := runtime/runtime.c runtime/rcu.c runtime/lwt.c runtime/mem.c datatype/nstring.c #runtime/hazard.c
MAP_SRCS := map/map.c map/list.c map/skiplist.c map/hashtable.c
haz_test_SRCS := $(RUNTIME_SRCS) test/haz_test.c
# in gcc. It chokes when -MM is used with -combine.
###################################################################################################
$(OBJS): output/% : output/%.d makefile
- gcc $(CFLAGS:-combine:) $(INCS) -MM -MT $@ $($*_SRCS) > $@.d
+ gcc $(CFLAGS) $(INCS) -MM -MT $@ $($*_SRCS) > $@.d
gcc $(CFLAGS) $(INCS) -o $@ $($*_SRCS)
asm: $(addsuffix .s, $(OBJS))
* Note: This is code uses synchronous atomic operations because that is all that x86 provides.
* Every atomic operation is also an implicit full memory barrier. The upshot is that it simplifies
* the code a bit, but it won't be as fast as it could be on platforms that provide weaker
- * operations like and unfenced CAS which would still do the job.
+ * operations like unfenced CAS which would still do the job.
*
* 11FebO9 - Bug fix in ht_iter_next() from Rui Ueyama
*/
const datatype_t *key_type;
};
-static const map_val_t COPIED_VALUE = TAG_VALUE(DOES_NOT_EXIST, TAG1);
-static const map_val_t TOMBSTONE = STRIP_TAG(-1, TAG1);
+static const map_val_t COPIED_VALUE = TAG_VALUE(DOES_NOT_EXIST, TAG1);
+static const map_val_t TOMBSTONE = STRIP_TAG(-1, TAG1);
static const unsigned ENTRIES_PER_BUCKET = CACHE_LINE_SIZE/sizeof(entry_t);
static const unsigned ENTRIES_PER_COPY_CHUNK = CACHE_LINE_SIZE/sizeof(entry_t)*2;
memset(hti, 0, sizeof(hti_t));
hti->scale = scale;
- size_t sz = sizeof(entry_t) * (1 << scale);
+ size_t sz = sizeof(entry_t) * (1ULL << scale);
#ifdef USE_SYSTEM_MALLOC
hti->unaligned_table_ptr = nbd_malloc(sz + CACHE_LINE_SIZE - 1);
hti->table = (void *)(((size_t)hti->unaligned_table_ptr + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1));
memset((void *)hti->table, 0, sz);
// When searching for a key probe a maximum of 1/4 of the buckets up to 1000 buckets.
- hti->max_probe = ((1 << (hti->scale - 2)) / ENTRIES_PER_BUCKET) + 4;
+ hti->max_probe = ((1ULL << (hti->scale - 2)) / ENTRIES_PER_BUCKET) + 4;
if (hti->max_probe > MAX_BUCKETS_TO_PROBE) {
hti->max_probe = MAX_BUCKETS_TO_PROBE;
}
// heuristics to determine the size of the new table
size_t count = ht_count(hti->ht);
unsigned int new_scale = hti->scale;
- new_scale += (count > (1 << (new_scale - 2))); // double size if more than 1/4 full
- new_scale += (count > (1 << (new_scale - 2))); // double size again if more than 1/2 full
+ new_scale += (count > (1ULL << (new_scale - 2))); // double size if more than 1/4 full
+ new_scale += (count > (1ULL << (new_scale - 2))); // double size again if more than 1/2 full
// Allocate the new table and attempt to install it.
hti_t *next = hti_alloc(hti->ht, new_scale);
assert(ht1);
assert(ht1->next);
assert(ht2);
- assert(ht1_ent >= ht1->table && ht1_ent < ht1->table + (1 << ht1->scale));
+ assert(ht1_ent >= ht1->table && ht1_ent < ht1->table + (1ULL << ht1->scale));
#ifndef NBD32
assert(key_hash == 0 || ht1->ht->key_type == NULL || (key_hash >> 16) == (ht1_ent->key >> 48));
#endif
size_t x = hti->copy_scan;
TRACE("h1", "ht_cas: help copy. scan is %llu, size is %llu", x, 1<<hti->scale);
- if (total_copied != (1 << hti->scale)) {
+ if (total_copied != (1ULL << hti->scale)) {
// Panic if we've been around the array twice and still haven't finished the copy.
- int panic = (x >= (1 << (hti->scale + 1)));
+ int panic = (x >= (1ULL << (hti->scale + 1)));
if (!panic) {
limit = ENTRIES_PER_COPY_CHUNK;
TRACE("h1", "ht_cas: help copy panic", 0, 0);
// scan the whole table
ent = hti->table;
- limit = (1 << hti->scale);
+ limit = (1ULL << hti->scale);
}
// Copy the entries
for (int i = 0; i < limit; ++i) {
num_copied += hti_copy_entry(hti, ent++, 0, hti->next);
- assert(ent <= hti->table + (1 << hti->scale));
+ assert(ent <= hti->table + (1ULL << hti->scale));
}
if (num_copied != 0) {
total_copied = SYNC_ADD(&hti->num_entries_copied, num_copied);
}
}
- return (total_copied == (1 << hti->scale));
+ return (total_copied == (1ULL << hti->scale));
}
static void hti_defer_free (hti_t *hti) {
assert(hti->ref_count == 0);
- for (uint32_t i = 0; i < (1 << hti->scale); ++i) {
+ for (uint32_t i = 0; i < (1ULL << hti->scale); ++i) {
map_key_t key = hti->table[i].key;
map_val_t val = hti->table[i].val;
if (val == COPIED_VALUE)
hti_t *hti = ht->hti;
while (hti) {
printf("hti:%p scale:%u count:%d copied:%d\n", hti, hti->scale, hti->count, hti->num_entries_copied);
- for (int i = 0; i < (1 << hti->scale); ++i) {
+ for (int i = 0; i < (1ULL << hti->scale); ++i) {
volatile entry_t *ent = hti->table + i;
printf("[0x%x] 0x%llx:0x%llx\n", i, (uint64_t)ent->key, (uint64_t)ent->val);
if (i > 30) {
volatile entry_t *ent;
map_key_t key;
map_val_t val;
- size_t table_size = (1 << iter->hti->scale);
+ size_t table_size = (1ULL << iter->hti->scale);
do {
iter->idx++;
if (iter->idx == table_size) {
// Unlink logically removed items.
TRACE("l3", "find_pred: unlinking marked item %p next is %p", item, next);
- markable_t other = SYNC_CAS(&pred->next, item, STRIP_MARK(next));
+ markable_t other = SYNC_CAS(&pred->next, (markable_t)item, (markable_t)STRIP_MARK(next));
if (other == (markable_t)item) {
TRACE("l2", "find_pred: unlinked item %p from pred %p", item, pred);
item = STRIP_MARK(next);
map_key_t new_key = ll->key_type == NULL ? key : (map_key_t)ll->key_type->clone((void *)key);
node_t *new_item = node_alloc(new_key, new_val);
markable_t next = new_item->next = (markable_t)old_item;
- markable_t other = SYNC_CAS(&pred->next, next, new_item);
+ markable_t other = SYNC_CAS(&pred->next, (markable_t)next, (markable_t)new_item);
if (other == next) {
TRACE("l1", "ll_cas: successfully inserted new item %p", new_item, 0);
return DOES_NOT_EXIST; // success
// item earlier, we logically removed it.
TRACE("l2", "ll_remove: unlink the item by linking its pred %p to its successor %p", pred, next);
markable_t other;
- if ((other = SYNC_CAS(&pred->next, item, next)) != (markable_t)item) {
+ if ((other = SYNC_CAS(&pred->next, (markable_t)item, next)) != (markable_t)item) {
TRACE("l1", "ll_remove: unlink failed; pred's link changed from %p to %p", item, other);
return val;
}
#include "mem.h"
#include "rcu.h"
-// Setting MAX_LEVEL to 0 essentially makes this data structure the Harris-Michael lock-free list (in list.c).
-#define MAX_LEVEL 31
+// Setting MAX_LEVELS to 1 essentially makes this data structure the Harris-Michael lock-free list (see list.c).
+#define MAX_LEVELS 32
+
+enum unlink {
+ FORCE_UNLINK,
+ ASSIST_UNLINK,
+ DONT_UNLINK
+};
typedef struct node {
map_key_t key;
map_val_t val;
- int top_level;
+ unsigned num_levels;
markable_t next[1];
} node_t;
#define STRIP_MARK(x) ((node_t *)STRIP_TAG((x), 0x1))
#endif
-static int random_level (void) {
+static int random_levels (void) {
unsigned r = nbd_rand();
- int n = __builtin_ctz(r) / 2;
- if (n > MAX_LEVEL) { n = MAX_LEVEL; }
+ int n = __builtin_ctz(r) / 2 + 1;
+ if (n > MAX_LEVELS) { n = MAX_LEVELS; }
return n;
}
-static node_t *node_alloc (int level, map_key_t key, map_val_t val) {
- assert(level >= 0 && level <= MAX_LEVEL);
- size_t sz = sizeof(node_t) + level * sizeof(node_t *);
+static node_t *node_alloc (int num_levels, map_key_t key, map_val_t val) {
+ assert(num_levels >= 0 && num_levels <= MAX_LEVELS);
+ size_t sz = sizeof(node_t) + (num_levels - 1) * sizeof(node_t *);
node_t *item = (node_t *)nbd_malloc(sz);
memset(item, 0, sz);
item->key = key;
item->val = val;
- item->top_level = level;
- TRACE("s2", "node_alloc: new node %p (%llu levels)", item, level);
+ item->num_levels = num_levels;
+ TRACE("s2", "node_alloc: new node %p (%llu levels)", item, num_levels);
return item;
}
skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
sl->key_type = key_type;
sl->high_water = 0;
- sl->head = node_alloc(MAX_LEVEL, 0, 0);
- memset(sl->head->next, 0, (MAX_LEVEL+1) * sizeof(skiplist_t *));
+ sl->head = node_alloc(MAX_LEVELS, 0, 0);
+ memset(sl->head->next, 0, MAX_LEVELS * sizeof(skiplist_t *));
return sl;
}
return count;
}
-static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, map_key_t key, int help_remove) {
+static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, map_key_t key, enum unlink unlink) {
node_t *pred = sl->head;
node_t *item = NULL;
TRACE("s2", "find_preds: searching for key %p in skiplist (head is %p)", key, pred);
int d = 0;
- int start_level = sl->high_water;
- if (EXPECT_FALSE(start_level < n)) {
- start_level = n;
- }
// Traverse the levels of <sl> from the top level to the bottom
- for (int level = start_level; level >= 0; --level) {
+ for (int level = sl->high_water - 1; level >= 0; --level) {
markable_t next = pred->next[level];
- if (next == DOES_NOT_EXIST && level > n)
+ if (next == DOES_NOT_EXIST && level >= n)
continue;
TRACE("s3", "find_preds: traversing level %p starting at %p", level, pred);
if (EXPECT_FALSE(HAS_MARK(next))) {
TRACE("s2", "find_preds: pred %p is marked for removal (next %p); retry", pred, next);
- ASSERT(level == pred->top_level || HAS_MARK(pred->next[level+1]));
- return find_preds(preds, succs, n, sl, key, help_remove); // retry
+ ASSERT(level == pred->num_levels - 1 || HAS_MARK(pred->next[level+1]));
+ return find_preds(preds, succs, n, sl, key, unlink); // retry
}
item = GET_NODE(next);
while (item != NULL) {
// A tag means an item is logically removed but not physically unlinked yet.
while (EXPECT_FALSE(HAS_MARK(next))) {
TRACE("s3", "find_preds: found marked item %p (next is %p)", item, next);
- if (!help_remove) {
+ if (unlink == DONT_UNLINK) {
// Skip over logically removed items.
item = STRIP_MARK(next);
} else {
// Unlink logically removed items.
- markable_t other = SYNC_CAS(&pred->next[level], item, STRIP_MARK(next));
+ markable_t other = SYNC_CAS(&pred->next[level], (markable_t)item, (markable_t)STRIP_MARK(next));
if (other == (markable_t)item) {
TRACE("s3", "find_preds: unlinked item from pred %p", pred, 0);
item = STRIP_MARK(next);
} else {
TRACE("s3", "find_preds: lost race to unlink item pred %p's link changed to %p", pred, other);
if (HAS_MARK(other))
- return find_preds(preds, succs, n, sl, key, help_remove); // retry
+ return find_preds(preds, succs, n, sl, key, unlink); // retry
item = GET_NODE(other);
}
next = (item != NULL) ? item->next[level] : DOES_NOT_EXIST;
d = sl->key_type->cmp((void *)item->key, (void *)key);
}
- if (d >= 0)
+ if (d > 0)
+ break;
+ if (d == 0 && unlink != FORCE_UNLINK)
break;
pred = item;
TRACE("s3", "find_preds: found pred %p next %p", pred, item);
- // The cast to unsigned is for the case when n is -1.
- if ((unsigned)level <= (unsigned)n) {
+ if (level < n) {
if (preds != NULL) {
preds[level] = pred;
}
}
}
- // fill in empty levels
- if (n == -1 && item != NULL && preds != NULL) {
- assert(item->top_level <= MAX_LEVEL);
- for (int level = start_level + 1; level <= item->top_level; ++level) {
- preds[level] = sl->head;
- }
- }
-
if (d == 0) {
TRACE("s2", "find_preds: found matching item %p in skiplist, pred is %p", item, pred);
return item;
return NULL;
}
-static void sl_unlink (skiplist_t *sl, map_key_t key) {
- node_t *pred = sl->head;
- node_t *item = NULL;
- TRACE("s2", "sl_unlink: unlinking marked item with key %p", key, 0);
- int d = 0;
-
- // Traverse the levels of <sl> from the top level to the bottom
- for (int level = sl->high_water; level >= 0; --level) {
- markable_t next = pred->next[level];
- if (next == DOES_NOT_EXIST)
- continue;
- TRACE("s3", "sl_unlink: traversing level %p starting at %p", level, pred);
- if (EXPECT_FALSE(HAS_MARK(next))) {
- TRACE("s2", "sl_unlink: lost a race; pred %p is marked for removal (next %p); retry", pred, next);
- ASSERT(level == pred->top_level || HAS_MARK(pred->next[level+1]));
- return sl_unlink(sl, key); // retry
- }
- item = GET_NODE(next);
- while (item != NULL) {
- next = item->next[level];
-
- while (HAS_MARK(next)) {
- TRACE("s3", "sl_unlink: found marked item %p (next is %p)", item, next);
-
- markable_t other = SYNC_CAS(&pred->next[level], item, STRIP_MARK(next));
- if (other == (markable_t)item) {
- TRACE("s3", "sl_unlink: unlinked item from pred %p", pred, 0);
- item = STRIP_MARK(next);
- } else {
- TRACE("s3", "sl_unlink: lost race to unlink item, pred %p's link changed to %p", pred, other);
- if (HAS_MARK(other))
- return sl_unlink(sl, key); // retry
- item = GET_NODE(other);
- }
- next = (item != NULL) ? item->next[level] : DOES_NOT_EXIST;
- }
-
- if (EXPECT_FALSE(item == NULL)) {
- TRACE("s3", "sl_unlink: past the last item in the skiplist", 0, 0);
- break;
- }
-
- TRACE("s4", "sl_unlink: visiting item %p (next is %p)", item, next);
- TRACE("s4", "sl_unlink: key %p val %p", STRIP_MARK(item->key), item->val);
-
- if (EXPECT_TRUE(sl->key_type == NULL)) {
- d = item->key - key;
- } else {
- d = sl->key_type->cmp((void *)item->key, (void *)key);
- }
-
- if (d > 0)
- break;
-
- pred = item;
- item = GET_NODE(next);
- }
-
- TRACE("s3", "sl_unlink: at pred %p next %p", pred, item);
- }
-}
-
// Fast find that does not help unlink partially removed nodes and does not return the node's predecessors.
map_val_t sl_lookup (skiplist_t *sl, map_key_t key) {
TRACE("s1", "sl_lookup: searching for key %p in skiplist %p", key, sl);
- node_t *item = find_preds(NULL, NULL, 0, sl, key, FALSE);
+ node_t *item = find_preds(NULL, NULL, 0, sl, key, DONT_UNLINK);
// If we found an <item> matching the <key> return its value.
if (item != NULL) {
TRACE("s1", "sl_cas: expectation %p new value %p", expectation, new_val);
ASSERT((int64_t)new_val > 0);
- node_t *preds[MAX_LEVEL+1];
- node_t *nexts[MAX_LEVEL+1];
+ node_t *preds[MAX_LEVELS];
+ node_t *nexts[MAX_LEVELS];
node_t *new_item = NULL;
- int n = random_level();
- node_t *old_item = find_preds(preds, nexts, n, sl, key, TRUE);
+ int n = random_levels();
+ if (n > sl->high_water) {
+ n = SYNC_ADD(&sl->high_water, 1);
+ TRACE("s2", "sl_cas: incremented high water mark to %p", n, 0);
+ }
+ node_t *old_item = find_preds(preds, nexts, n, sl, key, ASSIST_UNLINK);
// If there is already an item in the skiplist that matches the key just update its value.
if (old_item != NULL) {
// Create a new node and insert it into the skiplist.
TRACE("s3", "sl_cas: attempting to insert a new item between %p and %p", preds[0], nexts[0]);
map_key_t new_key = sl->key_type == NULL ? key : (map_key_t)sl->key_type->clone((void *)key);
- if (n > sl->high_water) {
- n = sl->high_water + 1;
- int x = SYNC_ADD(&sl->high_water, 1);
- x = x;
- TRACE("s2", "sl_cas: incremented high water mark to %p", x, 0);
- }
new_item = node_alloc(n, new_key, new_val);
// Set <new_item>'s next pointers to their proper values
markable_t next = new_item->next[0] = (markable_t)nexts[0];
- for (int level = 1; level <= new_item->top_level; ++level) {
+ for (int level = 1; level < new_item->num_levels; ++level) {
new_item->next[level] = (markable_t)nexts[level];
}
// Link <new_item> into <sl> from the bottom level up. After <new_item> is inserted into the bottom level
// it is officially part of the skiplist.
node_t *pred = preds[0];
- markable_t other = SYNC_CAS(&pred->next[0], next, new_item);
+ markable_t other = SYNC_CAS(&pred->next[0], next, (markable_t)new_item);
if (other != next) {
TRACE("s3", "sl_cas: failed to change pred's link: expected %p found %p", next, other);
TRACE("s3", "sl_cas: successfully inserted a new item %p at the bottom level", new_item, 0);
- for (int level = 1; level <= new_item->top_level; ++level) {
+ ASSERT(new_item->num_levels <= MAX_LEVELS);
+ for (int level = 1; level < new_item->num_levels; ++level) {
TRACE("s3", "sl_cas: inserting the new item %p at level %p", new_item, level);
do {
node_t * pred = preds[level];
ASSERT(new_item->next[level]==(markable_t)nexts[level] || new_item->next[level]==MARK_NODE(nexts[level]));
TRACE("s3", "sl_cas: attempting to to insert the new item between %p and %p", pred, nexts[level]);
- markable_t other = SYNC_CAS(&pred->next[level], nexts[level], (markable_t)new_item);
+ markable_t other = SYNC_CAS(&pred->next[level], (markable_t)nexts[level], (markable_t)new_item);
if (other == (markable_t)nexts[level])
break; // successfully linked <new_item> into the skiplist at the current <level>
TRACE("s3", "sl_cas: lost a race. failed to change pred's link. expected %p found %p", nexts[level], other);
// Find <new_item>'s new preds and nexts.
- find_preds(preds, nexts, new_item->top_level, sl, key, TRUE);
+ find_preds(preds, nexts, new_item->num_levels, sl, key, ASSIST_UNLINK);
- for (int i = level; i <= new_item->top_level; ++i) {
+ for (int i = level; i < new_item->num_levels; ++i) {
markable_t old_next = new_item->next[i];
if ((markable_t)nexts[i] == old_next)
continue;
// Update <new_item>'s inconsistent next pointer before trying again. Use a CAS so if another thread
// is trying to remove the new item concurrently we do not stomp on the mark it places on the item.
TRACE("s3", "sl_cas: attempting to update the new item's link from %p to %p", old_next, nexts[i]);
- other = SYNC_CAS(&new_item->next[i], old_next, nexts[i]);
+ other = SYNC_CAS(&new_item->next[i], old_next, (markable_t)nexts[i]);
ASSERT(other == old_next || other == MARK_NODE(old_next));
// If another thread is removing this item we can stop linking it into to skiplist
if (HAS_MARK(other)) {
- sl_unlink(sl, key); // see comment below
+ find_preds(NULL, NULL, 0, sl, key, FORCE_UNLINK); // see comment below
return DOES_NOT_EXIST;
}
}
// make sure it is completely unlinked before we return. We might have lost a race and inserted the new item
// at some level after the other thread thought it was fully removed. That is a problem because once a thread
// thinks it completely unlinks a node it queues it to be freed
- if (HAS_MARK(new_item->next[new_item->top_level])) {
- sl_unlink(sl, key);
+ if (HAS_MARK(new_item->next[new_item->num_levels - 1])) {
+ find_preds(NULL, NULL, 0, sl, key, FORCE_UNLINK);
}
return DOES_NOT_EXIST; // success, inserted a new item
map_val_t sl_remove (skiplist_t *sl, map_key_t key) {
TRACE("s1", "sl_remove: removing item with key %p from skiplist %p", key, sl);
- node_t *preds[MAX_LEVEL+1];
- node_t *item = find_preds(preds, NULL, -1, sl, key, TRUE);
+ node_t *preds[MAX_LEVELS];
+ node_t *item = find_preds(preds, NULL, sl->high_water, sl, key, ASSIST_UNLINK);
if (item == NULL) {
TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the skiplist", 0, 0);
return DOES_NOT_EXIST;
// Mark <item> at each level of <sl> from the top down. If multiple threads try to concurrently remove
// the same item only one of them should succeed. Marking the bottom level establishes which of them succeeds.
markable_t old_next = 0;
- for (int level = item->top_level; level >= 0; --level) {
+ for (int level = item->num_levels - 1; level >= 0; --level) {
markable_t next;
old_next = item->next[level];
do {
TRACE("s2", "sl_remove: replaced item %p's value with DOES_NOT_EXIT", item, 0);
// unlink the item
- sl_unlink(sl, key);
+ find_preds(NULL, NULL, 0, sl, key, FORCE_UNLINK);
// free the node
if (sl->key_type != NULL) {
void sl_print (skiplist_t *sl) {
printf("high water: %d levels\n", sl->high_water);
- for (int level = MAX_LEVEL; level >= 0; --level) {
+ for (int level = MAX_LEVELS - 1; level >= 0; --level) {
node_t *item = sl->head;
if (item->next[level] == DOES_NOT_EXIST)
continue;
int is_marked = HAS_MARK(item->next[0]);
printf("%s%p:0x%llx ", is_marked ? "*" : "", item, (uint64_t)item->key);
if (item != sl->head) {
- printf("[%d]", item->top_level);
+ printf("[%d]", item->num_levels);
} else {
printf("[HEAD]");
}
- for (int level = 1; level <= item->top_level; ++level) {
+ for (int level = 1; level < item->num_levels; ++level) {
node_t *next = STRIP_MARK(item->next[level]);
is_marked = HAS_MARK(item->next[0]);
printf(" %p%s", next, is_marked ? "*" : "");
sl_iter_t *sl_iter_begin (skiplist_t *sl, map_key_t key) {
sl_iter_t *iter = (sl_iter_t *)nbd_malloc(sizeof(sl_iter_t));
if (key != DOES_NOT_EXIST) {
- find_preds(NULL, &iter->next, 0, sl, key, FALSE);
+ find_preds(NULL, &iter->next, 1, sl, key, DONT_UNLINK);
} else {
iter->next = GET_NODE(sl->head->next[0]);
}
--- /dev/null
+/*
+ * Written by Josh Dybnis and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ *
+ * non thread safe skiplist
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "common.h"
+#include "skiplist.h"
+#include "runtime.h"
+#include "mem.h"
+
+#define MAX_LEVEL 31
+
+typedef struct node {
+ map_key_t key;
+ map_val_t val;
+ int top_level;
+ struct node *next[1];
+} node_t;
+
+struct sl_iter {
+ node_t *next;
+};
+
+struct sl {
+ node_t *head;
+ const datatype_t *key_type;
+ int high_water; // max level of any item in the list
+};
+
+// Pick a random level in [0, MAX_LEVEL] with a geometric distribution:
+// each pair of trailing zero bits in the random draw adds one level.
+static int random_level (void) {
+    unsigned r = nbd_rand();
+    // Fix: __builtin_ctz(0) is undefined behavior. A zero draw is "all
+    // tails", so treat it as the maximum level instead.
+    int n = (r == 0) ? MAX_LEVEL : __builtin_ctz(r) / 2;
+    if (n > MAX_LEVEL) { n = MAX_LEVEL; }
+    return n;
+}
+
+// Allocate a zeroed node with <level>+1 next pointers (levels 0..level) and
+// initialize its key/value. The caller owns the returned memory.
+static node_t *node_alloc (int level, map_key_t key, map_val_t val) {
+    assert(level >= 0 && level <= MAX_LEVEL);
+    // node_t already declares next[1], so only <level> extra pointers are added.
+    size_t sz = sizeof(node_t) + level * sizeof(node_t *);
+    node_t *item = (node_t *)nbd_malloc(sz);
+    memset(item, 0, sz);
+    item->key = key;
+    item->val = val;
+    item->top_level = level;
+    TRACE("s2", "node_alloc: new node %p (%llu levels)", item, level);
+    return item;
+}
+
+// Allocate an empty skiplist. <key_type> may be NULL, in which case keys are
+// compared as raw integers; otherwise keys are cloned/compared via key_type.
+skiplist_t *sl_alloc (const datatype_t *key_type) {
+    skiplist_t *sl = (skiplist_t *)nbd_malloc(sizeof(skiplist_t));
+    sl->key_type = key_type;
+    sl->high_water = 0;
+    sl->head = node_alloc(MAX_LEVEL, 0, 0);
+    // Fix: the element type of head->next is node_t *, not skiplist_t *.
+    // The old sizeof(skiplist_t *) only worked because all object pointers
+    // happen to be the same size. (node_alloc already zeroes the node, so
+    // this memset is belt-and-braces.)
+    memset(sl->head->next, 0, (MAX_LEVEL+1) * sizeof(node_t *));
+    return sl;
+}
+
+// Free every item in <sl>, then the head node and the skiplist itself.
+// When a key_type is set the skiplist owns cloned keys, so free those too.
+void sl_free (skiplist_t *sl) {
+    node_t *item = sl->head->next[0];
+    while (item) {
+        node_t *next = item->next[0];
+        if (sl->key_type != NULL) {
+            nbd_free((void *)item->key);
+        }
+        nbd_free(item);
+        item = next;
+    }
+    // Fix: the head node and the skiplist_t struct were previously leaked.
+    nbd_free(sl->head);
+    nbd_free(sl);
+}
+
+// Count the items in <sl> by walking the bottom level; O(n).
+size_t sl_count (skiplist_t *sl) {
+    size_t total = 0;
+    for (node_t *cursor = sl->head->next[0]; cursor != NULL; cursor = cursor->next[0]) {
+        ++total;
+    }
+    return total;
+}
+
+// Search <sl> for <key>. Returns the matching node, or NULL if no key matches.
+// On the way down it records, for each level <= <n>, the last node whose key is
+// less than <key> in preds[] and its successor in nexts[] (either may be NULL
+// if the caller does not need them). Passing n == -1 fills preds[] for every
+// level up to the matching item's top_level (used by remove).
+// <help_remove> is accepted for interface parity with the lock-free version
+// but is unused here — this file is the single-threaded implementation.
+static node_t *find_preds (node_t **preds, node_t **succs, int n, skiplist_t *sl, map_key_t key, int help_remove) {
+    node_t *pred = sl->head;
+    node_t *item = NULL;
+    TRACE("s2", "find_preds: searching for key %p in skiplist (head is %p)", key, pred);
+    int d = 0;
+    // Start at the highest level in use, but at least <n> so preds/nexts get
+    // filled for every level the caller asked for.
+    int start_level = sl->high_water;
+    if (EXPECT_FALSE(start_level < n)) {
+        start_level = n;
+    }
+
+    // Traverse the levels of <sl> from the top level to the bottom
+    for (int level = start_level; level >= 0; --level) {
+        node_t *next = pred->next[level];
+        if (next == DOES_NOT_EXIST && level > n)
+            continue;
+        TRACE("s3", "find_preds: traversing level %p starting at %p", level, pred);
+        item = next;
+        while (item != NULL) {
+            next = item->next[level];
+
+            // NULL key_type means keys are compared as raw integers.
+            if (EXPECT_TRUE(sl->key_type == NULL)) {
+                d = item->key - key;
+            } else {
+                d = sl->key_type->cmp((void *)item->key, (void *)key);
+            }
+
+            // Stop at the first node >= <key>; <pred> is its predecessor.
+            if (d >= 0)
+                break;
+
+            pred = item;
+            item = next;
+        }
+
+        TRACE("s3", "find_preds: found pred %p next %p", pred, item);
+
+        // The cast to unsigned is for the case when n is -1.
+        if ((unsigned)level <= (unsigned)n) {
+            if (preds != NULL) {
+                preds[level] = pred;
+            }
+            if (succs != NULL) {
+                succs[level] = item;
+            }
+        }
+    }
+
+    // fill in empty levels
+    if (n == -1 && item != NULL && preds != NULL) {
+        assert(item->top_level <= MAX_LEVEL);
+        for (int level = start_level + 1; level <= item->top_level; ++level) {
+            preds[level] = sl->head;
+        }
+    }
+
+    // <d> holds the last comparison made at the bottom level; 0 means a match.
+    if (d == 0) {
+        TRACE("s2", "find_preds: found matching item %p in skiplist, pred is %p", item, pred);
+        return item;
+    }
+    TRACE("s2", "find_preds: found proper place for key %p in skiplist, pred is %p. returning null", key, pred);
+    return NULL;
+}
+
+// Unlink the item matching <key> from every level of <sl> it appears on.
+// The node itself is NOT freed here; the caller (sl_remove) owns that.
+static void sl_unlink (skiplist_t *sl, map_key_t key) {
+    node_t *pred = sl->head;
+    node_t *item = NULL;
+    TRACE("s2", "sl_unlink: unlinking marked item with key %p", key, 0);
+    int d = 0;
+
+    // Traverse the levels of <sl>
+    for (int level = sl->high_water; level >= 0; --level) {
+        node_t *next = pred->next[level];
+        if (next == DOES_NOT_EXIST)
+            continue;
+        TRACE("s3", "sl_unlink: traversing level %p starting at %p", level, pred);
+        item = next;
+        while (item != NULL) {
+            next = item->next[level];
+
+            // NULL key_type means keys are compared as raw integers.
+            if (EXPECT_TRUE(sl->key_type == NULL)) {
+                d = item->key - key;
+            } else {
+                d = sl->key_type->cmp((void *)item->key, (void *)key);
+            }
+
+            if (d == 0) {
+                // Splice the matching item out at this level, then move on to
+                // the next level down.
+                pred->next[level] = next;
+                TRACE("s3", "sl_unlink: unlinked item from pred %p", pred, 0);
+                // NOTE(review): these two assignments are dead — the break
+                // below exits the loop before either value is read.
+                item = next;
+                next = (item != NULL) ? item->next[level] : DOES_NOT_EXIST;
+                break;
+            }
+            // Passed where the key would be: it is not linked at this level.
+            if (d > 0)
+                break;
+
+            pred = item;
+            item = next;
+        }
+
+        TRACE("s3", "sl_unlink: at pred %p next %p", pred, item);
+    }
+}
+
+// Fast find that does not return the node's predecessors.
+map_val_t sl_lookup (skiplist_t *sl, map_key_t key) {
+    TRACE("s1", "sl_lookup: searching for key %p in skiplist %p", key, sl);
+    node_t *item = find_preds(NULL, NULL, 0, sl, key, FALSE);
+
+    // Guard clause: nothing matched the key.
+    if (item == NULL) {
+        TRACE("l1", "sl_lookup: no item in the skiplist matched the key", 0, 0);
+        return DOES_NOT_EXIST;
+    }
+
+    // Found a node whose key matches; hand back its value.
+    return item->val;
+}
+
+// Return the smallest key in <sl>, or DOES_NOT_EXIST if the list is empty.
+// The bottom level is sorted, so the head's first successor holds the minimum.
+map_key_t sl_min_key (skiplist_t *sl) {
+    node_t *item = sl->head->next[0];
+    // Fix: the original used `while` here, but the body returned on its first
+    // iteration, so it was really a conditional — write it as one.
+    if (item != NULL)
+        return item->key;
+    return DOES_NOT_EXIST;
+}
+
+// Insert or update <key> in <sl> if <expectation> is met.  <expectation> is a
+// concrete old value or one of the CAS_EXPECT_* sentinels.  Returns the old
+// value on a successful update, DOES_NOT_EXIST on a successful insert, and the
+// current value (or DOES_NOT_EXIST) when the expectation is not met.
+map_val_t sl_cas (skiplist_t *sl, map_key_t key, map_val_t expectation, map_val_t new_val) {
+    TRACE("s1", "sl_cas: key %p skiplist %p", key, sl);
+    TRACE("s1", "sl_cas: expectation %p new value %p", expectation, new_val);
+    ASSERT((int64_t)new_val > 0);
+
+    node_t *preds[MAX_LEVEL+1];
+    node_t *nexts[MAX_LEVEL+1];
+    node_t *new_item = NULL;
+    int n = random_level();
+    node_t *old_item = find_preds(preds, nexts, n, sl, key, TRUE);
+
+    // If there is already an item in the skiplist that matches the key just update its value.
+    if (old_item != NULL) {
+        map_val_t old_val = old_item->val;
+        if (expectation == CAS_EXPECT_DOES_NOT_EXIST ||
+            (expectation != CAS_EXPECT_WHATEVER && expectation != CAS_EXPECT_EXISTS && expectation != old_val)) {
+            // Fix: this TRACE referenced a nonexistent variable <item>; it only
+            // compiled because TRACE is a no-op unless ENABLE_TRACE is defined.
+            TRACE("s1", "update_item: found an item %p in the skiplist that matched the key. the expectation was "
+                    "not met, the skiplist was not changed", old_item, old_val);
+            return old_val;
+        }
+        old_item->val = new_val;
+        return old_val;
+    }
+
+    if (EXPECT_FALSE(expectation != CAS_EXPECT_DOES_NOT_EXIST && expectation != CAS_EXPECT_WHATEVER)) {
+        TRACE("l1", "sl_cas: the expectation was not met, the skiplist was not changed", 0, 0);
+        return DOES_NOT_EXIST; // failure, the caller expected an item for the <key> to already exist
+    }
+
+    TRACE("s3", "sl_cas: inserting a new item between %p and %p", preds[0], nexts[0]);
+
+    // Create a new node and insert it into the skiplist.
+    map_key_t new_key = sl->key_type == NULL ? key : (map_key_t)sl->key_type->clone((void *)key);
+    if (n > sl->high_water) {
+        // Grow the list by at most one level per insert. find_preds already
+        // filled preds/nexts up to the original <n>, so clamping is safe.
+        n = ++sl->high_water;
+        TRACE("s2", "sl_cas: incremented high water mark to %p", sl->high_water, 0);
+    }
+    new_item = node_alloc(n, new_key, new_val);
+
+    // Set <new_item>'s next pointers to their proper values
+    for (int level = 0; level <= new_item->top_level; ++level) {
+        new_item->next[level] = nexts[level];
+    }
+
+    // Link <new_item> into <sl>
+    for (int level = 0; level <= new_item->top_level; ++level) {
+        preds[level]->next[level] = new_item;
+    }
+
+    return DOES_NOT_EXIST; // success, inserted a new item
+}
+
+// Remove the item matching <key> from <sl>. Returns the removed item's value,
+// or DOES_NOT_EXIST if no item matched. Frees the node (and its cloned key,
+// when a key_type is set).
+map_val_t sl_remove (skiplist_t *sl, map_key_t key) {
+    TRACE("s1", "sl_remove: removing item with key %p from skiplist %p", key, sl);
+    node_t *preds[MAX_LEVEL+1];
+    // n == -1 asks find_preds to fill preds[] for every level of the match.
+    node_t *item = find_preds(preds, NULL, -1, sl, key, TRUE);
+    if (item == NULL) {
+        TRACE("s3", "sl_remove: remove failed, an item with a matching key does not exist in the skiplist", 0, 0);
+        return DOES_NOT_EXIST;
+    }
+    map_val_t val = item->val;
+
+    // unlink the item
+    sl_unlink(sl, key);
+
+    // free the node
+    if (sl->key_type != NULL) {
+        nbd_free((void *)item->key);
+    }
+    nbd_free(item);
+
+    return val;
+}
+
+// Debug helper: dump the structure of <sl> to stdout. First prints each level's
+// chain of node addresses, then walks the bottom level printing each node's
+// key and per-level successors. Output is truncated after ~30 entries.
+void sl_print (skiplist_t *sl) {
+
+    printf("high water: %d levels\n", sl->high_water);
+    // Pass 1: one line per non-empty level, top down.
+    for (int level = MAX_LEVEL; level >= 0; --level) {
+        node_t *item = sl->head;
+        if (item->next[level] == DOES_NOT_EXIST)
+            continue;
+        printf("(%d) ", level);
+        int i = 0;
+        while (item) {
+            node_t *next = item->next[level];
+            printf("%p ", item);
+            item = next;
+            if (i++ > 30) {
+                printf("...");
+                break;
+            }
+        }
+        printf("\n");
+        fflush(stdout);
+    }
+    // Pass 2: every item in key order with its level count and successors.
+    node_t *item = sl->head;
+    int i = 0;
+    while (item) {
+        printf("%p:0x%llx ", item, (uint64_t)item->key);
+        if (item != sl->head) {
+            printf("[%d]", item->top_level);
+        } else {
+            printf("[HEAD]");
+        }
+        for (int level = 1; level <= item->top_level; ++level) {
+            node_t *next = item->next[level];
+            printf(" %p", next);
+            if (item == sl->head && item->next[level] == DOES_NOT_EXIST)
+                break;
+        }
+        printf("\n");
+        fflush(stdout);
+        item = item->next[0];
+        if (i++ > 30) {
+            printf("...\n");
+            break;
+        }
+    }
+}
+
+// Create an iterator over <sl>. When <key> is supplied the iterator starts at
+// the first item with key >= <key> (find_preds writes that successor into
+// iter->next); otherwise it starts at the smallest item. Free with sl_iter_free().
+sl_iter_t *sl_iter_begin (skiplist_t *sl, map_key_t key) {
+    sl_iter_t *iter = (sl_iter_t *)nbd_malloc(sizeof(sl_iter_t));
+    if (key != DOES_NOT_EXIST) {
+        find_preds(NULL, &iter->next, 0, sl, key, FALSE);
+    } else {
+        iter->next = sl->head->next[0];
+    }
+    return iter;
+}
+
+// Return the current item's value (and key via <key_ptr>, if non-NULL) and
+// advance the iterator. Returns DOES_NOT_EXIST once the list is exhausted.
+map_val_t sl_iter_next (sl_iter_t *iter, map_key_t *key_ptr) {
+    assert(iter);
+    node_t *curr = iter->next;
+    if (curr == NULL) {
+        iter->next = NULL;
+        return DOES_NOT_EXIST;
+    }
+    if (key_ptr != NULL) {
+        *key_ptr = curr->key;
+    }
+    iter->next = curr->next[0];
+    return curr->val;
+}
+
+// Release an iterator allocated by sl_iter_begin().
+void sl_iter_free (sl_iter_t *iter) {
+    nbd_free(iter);
+}
--- /dev/null
+#!/bin/sh
+for ks in 28 24 20 16 12 8 4 0
+do
+ for th in 1 2 4 8 16
+ do
+ output/perf_test $th $ks
+ done
+done
+
+
#include "mem.h"
#define LWT_BUFFER_SCALE 20
-#define LWT_BUFFER_SIZE (1 << LWT_BUFFER_SCALE)
+#define LWT_BUFFER_SIZE (1ULL << LWT_BUFFER_SCALE)
#define LWT_BUFFER_MASK (LWT_BUFFER_SIZE - 1)
volatile int halt_ = 0;
#include "rlocal.h"
#include "lwt.h"
-#define MAX_SCALE 31 // allocate blocks up to 4GB (arbitrary, could be bigger)
#ifndef NBD32
+#define MAX_SCALE 36 // allocate blocks up to 64GB (arbitrary, could be bigger)
#define MIN_SCALE 3 // smallest allocated block is 8 bytes
#define MAX_POINTER_BITS 48
#define PAGE_SCALE 21 // 2MB pages
#else
+#define MAX_SCALE 31
#define MIN_SCALE 2 // smallest allocated block is 4 bytes
#define MAX_POINTER_BITS 32
#define PAGE_SCALE 12 // 4KB pages
#endif
-#define PAGE_SIZE (1 << PAGE_SCALE)
-#define HEADERS_SIZE (((size_t)1 << (MAX_POINTER_BITS - PAGE_SCALE)) * sizeof(header_t))
+#define PAGE_SIZE (1ULL << PAGE_SCALE)
+#define HEADERS_SIZE (((size_t)1ULL << (MAX_POINTER_BITS - PAGE_SCALE)) * sizeof(header_t))
typedef struct block {
struct block *next;
return region;
}
#endif//RECYCLE_PAGES
- size_t region_size = (1 << block_scale);
+ size_t region_size = (1ULL << block_scale);
if (region_size < PAGE_SIZE) {
region_size = PAGE_SIZE;
}
ASSERT(b_scale && b_scale <= MAX_SCALE);
#ifdef RECYCLE_PAGES
if (b_scale > PAGE_SCALE) {
- int rc = munmap(x, 1 << b_scale);
+ int rc = munmap(x, 1ULL << b_scale);
ASSERT(rc == 0);
rc = rc;
}
#endif
#ifndef NDEBUG
- memset(b, 0xcd, (1 << b_scale)); // bear trap
+ memset(b, 0xcd, (1ULL << b_scale)); // bear trap
#endif
tl_t *tl = &tl_[tid_]; // thread-local data
if (h->owner == tid_) {
if (b != NULL) {
TRACE("m1", "nbd_malloc: returning block %p", b, 0);
return b;
+ assert(b);
}
// The free list is empty so process blocks freed from other threads and then check again.
if (b != NULL) {
TRACE("m1", "nbd_malloc: returning block %p", b, 0);
return b;
+ assert(b);
}
#ifdef RECYCLE_PAGES
ASSERT(b != NULL);
TRACE("m1", "nbd_malloc: returning block %p", b, 0);
return b;
+ assert(b);
}
// There are no partially allocated pages so get a new page.
// Break up the remainder of the page into blocks and put them on the free list. Start at the
// end of the page so that the free list ends up in increasing order, for ease of debugging.
if (b_scale < PAGE_SCALE) {
- size_t block_size = (1 << b_scale);
+ size_t block_size = (1ULL << b_scale);
block_t *head = NULL;
for (int offset = PAGE_SIZE - block_size; offset > 0; offset -= block_size) {
block_t *x = (block_t *)(page + offset);
}
TRACE("m1", "nbd_malloc: returning block %p from new region %p", b, (size_t)b & ~MASK(PAGE_SCALE));
+ assert(b);
return b;
}
#else//USE_SYSTEM_MALLOC
#include <stdlib.h>
+#include "common.h"
+#include "rlocal.h"
+#include "lwt.h"
void mem_init (void) {
return;
}
-void ndb_free (void *x) {
+void nbd_free (void *x) {
TRACE("m1", "nbd_free: %p", x, 0);
#ifndef NDEBUG
memset(x, 0xcd, sizeof(void *)); // bear trap
--- /dev/null
+/*
+ * Written by Josh Dybnis and released to the public domain, as explained at
+ * http://creativecommons.org/licenses/publicdomain
+ *
+ * fast multi-threaded malloc.
+ */
+#ifndef USE_SYSTEM_MALLOC
+#define _BSD_SOURCE // so we get MAP_ANON on linux
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include "common.h"
+#include "rlocal.h"
+#include "lwt.h"
+
+#define CHUNK_SCALE 12 // 4k chunks
+#define PAGE_SCALE 21 // 2MB pages
+#define PAGE_SIZE (1ULL << PAGE_SCALE)
+
+// On both linux and Mac OS X the size of the mmap-able virtual address space is between 2^46 and 2^47. Linux has
+// no problem when you grab the whole thing. Mac OS X apparently does some O(n) thing on the first page fault
+// that takes over 2 seconds if you mmap 2^46 bytes. So on Mac OS X we only take 2^38 bytes of virtual space. Which
+// is OK though, since you can only buy a Mac with up to 32GB of RAM (as of 2/09).
+#ifndef NBD32
+#ifdef __MACOSX__
+#define TOTAL_SCALE 38
+#else //__MACOSX__
+#define TOTAL_SCALE 46
+#endif//__MACOSX__
+#else// NBD32
+#define TOTAL_SCALE 32
+#endif//NBD32
+#define TOTAL_SIZE (1ULL << TOTAL_SCALE)
+
+#define INVALID_SLAB_CLASS 255
+#define METASLAB_CLASS_MAX 2
+#define NESTED_4K_SLAB_CLASS_MAX 16
+#define NESTED_32K_SLAB_CLASS_MAX 39
+#define NESTED_256K_SLAB_CLASS_MAX 63
+#define NESTED_SLAB_CLASS_MAX NESTED_256K_SLAB_CLASS_MAX
+#define LARGE_SLAB_CLASS_MAX 93
+#define HUGE_SLAB_CLASS_MAX (sizeof(BlockSize) / sizeof(*BlockSize))
+#define SLAB_CLASS_MAX HUGE_SLAB_CLASS_MAX
+
+#define NESTED_SLAB_CASES NESTED_4K_SLAB_CASES: case NESTED_32K_SLAB_CASES: case NESTED_256K_SLAB_CASES
+#define NESTED_4K_SLAB_CASES METASLAB_CLASS_MAX+1 ... NESTED_4K_SLAB_CLASS_MAX
+#define NESTED_32K_SLAB_CASES NESTED_4K_SLAB_CLASS_MAX+1 ... NESTED_32K_SLAB_CLASS_MAX: case 0
+#define NESTED_256K_SLAB_CASES NESTED_32K_SLAB_CLASS_MAX+1 ... NESTED_SLAB_CLASS_MAX: case 1
+#define LARGE_SLAB_CASES NESTED_SLAB_CLASS_MAX+1 ... LARGE_SLAB_CLASS_MAX: case 2
+#define HUGE_SLAB_CASES LARGE_SLAB_CLASS_MAX+1 ... HUGE_SLAB_CLASS_MAX
+
+#define SLAB_CLASS_SCALE(class) ({ \
+ int _scale = 0; \
+ switch (class) { \
+ case NESTED_4K_SLAB_CASES: _scale = 12; break; \
+ case NESTED_32K_SLAB_CASES: _scale = 15; break; \
+ case NESTED_256K_SLAB_CASES: _scale = 18; break; \
+ case LARGE_SLAB_CASES: _scale = 21; break; \
+ } \
+ _scale; \
+})
+
+// Block size (in bytes) for each slab class, indexed by class.
+static const uint32_t BlockSize[] = {
+    // meta slab classes (for the nested slabs)
+    1 << 12, 1 << 15, 1 << 18, // BUGFIX: trailing comma was missing, fusing "1 << 18" with the next row
+
+    // nested slab classes (4kB, 32kB, and 256kB)
+       8,   16,   24,   32,   40,   48,   56,   64,   72,   80,
+      88,   96,  112,  120,  128,  144,  160,  176,  192,  224,
+     256,  288,  320,  352,  384,  416,  448,  480,  512,  576,
+     640,  704,  768,  832,  896,  960, 1024, 1152, 1280, 1408,
+    1536, 1664, 1856, 2048, 2240, 2432, 2688, 2944, 3200, 3520,
+    3840, 4160, 4544, 4928, 5312, 5696, 6144, 6592, 7040, 7488,
+    7936,
+
+    // large slab classes (full page, 2MB)
+     8896,  9984, 11200, 12544, 14016, 15616, 17408, 19328, 21440, 23744,
+    26176, 28800, 31616, 34624, 37760, 41024, 44416, 47936, 51584, 55296,
+    59008, 62784, 66496, 70208, 73856, 77376, 80832, 84160, 87360, 90368,
+    93248, 95936, 98496, 100864,
+
+    // huge slabs (slabs on huge blocks, 2MB-4MB)
+     110912,  121984,  134144,  147520,  162240,  178432,  196224,  215808,  237376,  261056,
+     287104,  315776,  347328,  382016,  420160,  462144,  508352,  559168,  615040,  676544,
+     744192,  818560,  900416,  990400, 1089408, 1198336, 1318144, 1449920, 1594880, 1754368,
+    1929792
+};
+
+// Slab class index (0..SLAB_CLASS_MAX, or INVALID_SLAB_CLASS).
+typedef uint8_t class_t;
+
+// Free blocks are chained through their first word.
+typedef struct block {
+    struct block *next;
+} block_t;
+
+// Per-slab header, packed into 31 bits.
+typedef struct slab {
+    unsigned valid:1;       // set when this header describes a live slab (checked by nbd_free)
+    unsigned free_list:15;  // 1-based index (8-byte units) of first free block; 0 == empty
+    unsigned num_in_use:9;  // count of allocated blocks on this slab
+    unsigned class:6;       // slab class of the blocks on this slab
+} __attribute__((packed)) slab_t;
+
+// A metaslab manages one 2MB page carved into nested slabs (4kB/32kB/256kB).
+// BUGFIX: the struct declared two members named <slab> (a scalar slab_t and
+// the slab_t array), which does not compile; every user of metaslab_t
+// (get_block, nbd_free, find_partial_slab) indexes the array, so the stray
+// scalar is removed.
+typedef struct metaslab {
+    char *data;                                   // base of the page the nested slabs live on
+    slab_t slab[1 << (PAGE_SCALE - CHUNK_SCALE)]; // one header per 4kB chunk of the page
+    struct {
+        struct metaslab *older;
+        struct metaslab *newer;
+    } q[NESTED_SLAB_CLASS_MAX+1];                 // per-class partial-slab queue links
+    uint64_t partial_slab_bitmap2[NESTED_32K_SLAB_CLASS_MAX+1];
+    uint8_t partial_slab_bitmap1[NESTED_SLAB_CLASS_MAX+1];
+} metaslab_t;
+
+// Bounds of the single large region reserved in mem_init().
+char *MemBase = NULL;
+char *MemEnd = NULL;
+char *PageBreak = NULL;    // high-water mark: next never-used 2MB page
+size_t *PageMap = NULL;    // one entry per page; nbd_free() reads metaslab addresses from it
+block_t *FreePages = NULL; // LIFO of recycled pages (see get_page/free_page)
+// Currently-active slab per class; allocations are served from here first.
+struct { slab_t *slab; char *slab_base; } ActiveSlab[SLAB_CLASS_MAX + 1] = {};
+
+// NOTE(review): sized by METASLAB_CLASS_MAX (3 entries) though the fields
+// read like per-slab-class statistics -- confirm the intended index space.
+struct {
+    size_t slabs_in_use;
+    size_t bytes_requested;
+    size_t bytes_allocated;
+    size_t total_bytes_allocated;
+} ClassStats[METASLAB_CLASS_MAX+1];
+
+// Per-class queue of partially-full slabs, oldest first.
+struct {
+    slab_t *oldest;
+    slab_t *newest;
+} PartialSlabQueue[SLAB_CLASS_MAX+1];
+
+struct {
+    slab_t *oldest;
+} FreshPartialSlabQueue[SLAB_CLASS_MAX+1];
+
+static block_t *get_block (class_t slab_class);
+
+// Reserve the whole virtual range up front (PROT_NONE, MAP_NORESERVE), then
+// make just the page map at the base of the region accessible. Pages become
+// accessible on demand as the allocator hands them out.
+void mem_init (void) {
+    ASSERT(INVALID_SLAB_CLASS > SLAB_CLASS_MAX);
+
+    void *buf = mmap(NULL, TOTAL_SIZE, PROT_NONE, MAP_NORESERVE|MAP_ANON|MAP_PRIVATE, -1, 0);
+    if (buf == (void *)-1) {
+        perror("mmap");
+        exit(-1);
+    }
+    MemEnd = buf + TOTAL_SIZE;
+    MemBase = (char *)( ((size_t)buf + PAGE_SIZE-1) & ~(PAGE_SIZE-1) ); // align to a page boundary
+
+    // One size_t of page map per 2MB page in the region.
+    // BUGFIX: was "sizeof(void *) >> (TOTAL_SCALE - PAGE_SCALE)" (always 0)
+    // and the following lines referenced an undeclared <chunk_map_size>.
+    size_t page_map_size = sizeof(size_t) << (TOTAL_SCALE - PAGE_SCALE);
+    page_map_size = (page_map_size + PAGE_SIZE-1) & ~(PAGE_SIZE-1); // keep PageBreak page aligned
+    mprotect(MemBase, page_map_size, PROT_READ|PROT_WRITE);
+    PageBreak = MemBase + page_map_size;
+    PageMap = (size_t *)MemBase;
+}
+
+// Map a request size to the smallest slab class whose block size fits it.
+// Meta classes (0..METASLAB_CLASS_MAX) are skipped; they are internal.
+// Returns INVALID_SLAB_CLASS when <size> exceeds the largest class.
+static class_t get_slab_class (size_t size) {
+    class_t class = METASLAB_CLASS_MAX + 1;
+    while (class <= SLAB_CLASS_MAX) {
+        if (size <= BlockSize[class])
+            return class;
+        ++class;
+    }
+    return INVALID_SLAB_CLASS;
+}
+
+// Return the meta slab class whose blocks hold nested slabs of <class>:
+// nested slab scales 12/15/18 map to meta classes 0/1/2. Full-page (2MB)
+// slabs and huge slabs have no meta class.
+static class_t get_meta_class (class_t class) {
+    int scale = SLAB_CLASS_SCALE(class);
+    if (scale == PAGE_SCALE || scale == 0)
+        return INVALID_SLAB_CLASS;
+    return (scale - 12) / 3; // 12 -> 0, 15 -> 1, 18 -> 2
+}
+
+// Grab a 2MB page: reuse one from the free list when available, otherwise
+// extend the high-water mark further into the reserved region.
+static void *get_page (void) {
+    block_t *page = FreePages;
+    if (page != NULL) {
+        FreePages = page->next; // pop the recycled page
+        return page;
+    }
+    page = (block_t *)PageBreak;
+    PageBreak += PAGE_SIZE;
+    return page;
+}
+
+// Push a page back on the free list for reuse by get_page().
+static void free_page (void *p) {
+    ASSERT(p < (void *)PageBreak);
+    block_t *page = (block_t *)p;
+    page->next = FreePages;
+    FreePages = page;
+}
+
+// TODO(review): unimplemented stub -- a newly activated slab's header and
+// free list are never initialized before use.
+static void init_slab (void *b, class_t slab_class) {
+}
+
+// TODO(review): unimplemented stub -- huge-slab allocation always fails.
+static slab_t *new_large_slab (class_t slab_class) {
+    return NULL;
+}
+
+// Find a partially-full nested slab of <target_class> on <metaslab>,
+// preferring slabs near <target_index> for cache locality. Returns the slab
+// index, or -1 if the metaslab has no partial slab of that class.
+// BUGFIX: the case labels were misspelled (..._CASSES; the macros defined
+// above are ..._CASES), "return NULL" was used from an int-returning
+// function, and the 0xFF masks were shifted as 32-bit ints by up to 56 bits
+// (undefined behavior) -- widened to 0xFFULL.
+// NOTE(review): COUNT_TRAILING_ZEROS is __builtin_ctz (32-bit); a 64-bit
+// variant is needed for bits above 31 of the uint64_t bitmaps -- confirm.
+static int find_partial_slab(metaslab_t *metaslab, class_t target_class, int target_index) {
+    switch (target_class) {
+        case NESTED_4K_SLAB_CASES:
+        {
+            // search nearby the target first
+            int base_index = (target_index & ~0x7);
+            for (int i = 0; i < 8; ++i) {
+                if (base_index + i == target_index)
+                    continue;
+                if (metaslab->slab[base_index + i].class == target_class)
+                    return base_index + i;
+            }
+            do {
+                metaslab->partial_slab_bitmap2[target_class] &= ~(1ULL << (base_index >> 3));
+                uint64_t bitmap = metaslab->partial_slab_bitmap2[target_class];
+                if (bitmap == 0)
+                    return -1;
+                int n = base_index >> 3;
+                if (bitmap & (0xFFULL << (n & ~0x7))) {
+                    bitmap &= 0xFFULL << (n & ~0x7); // search nearby the target first
+                }
+                base_index = COUNT_TRAILING_ZEROS(bitmap) << 3;
+                for (int i = 0; i < 8; ++i) {
+                    if (metaslab->slab[base_index + i].class == target_class)
+                        return base_index + i;
+                }
+            } while (1);
+        }
+        case NESTED_32K_SLAB_CASES:
+        {
+            uint64_t bitmap = metaslab->partial_slab_bitmap2[target_class];
+            if (bitmap == 0)
+                return -1;
+            int n = target_index >> 3;
+            if (bitmap & (0xFFULL << (n & ~0x7))) {
+                bitmap &= 0xFFULL << (n & ~0x7); // search nearby the target first
+            }
+            return COUNT_TRAILING_ZEROS(bitmap) << 3;
+        }
+        case NESTED_256K_SLAB_CASES:
+        {
+            uint8_t bitmap = metaslab->partial_slab_bitmap1[target_class];
+            if (bitmap == 0)
+                return -1;
+            return COUNT_TRAILING_ZEROS(bitmap) << 6;
+        }
+        default:
+            ASSERT(FALSE);
+            return -1;
+    }
+}
+
+// Install a new active slab for <slab_class>: first a partial slab on the
+// active metaslab, then one from the partial-slab queue, finally a freshly
+// allocated slab.
+// NOTE(review): work in progress -- this function does not compile as
+// written; individual problems are flagged inline below.
+static void activate_new_slab (class_t slab_class) {
+    slab_t *new_slab;
+    switch (slab_class) {
+        case NESTED_SLAB_CASES:
+            // NOTE(review): a declaration directly after a case label is
+            // invalid before C23; this case body needs braces.
+            // NOTE(review): ActiveSlab[] is declared with members <slab> and
+            // <slab_base>, not <slab_index>/<metaslab>.
+            int slab_index = ActiveSlab[slab_class].slab_index;
+            metaslab_t *metaslab = ActiveSlab[slab_class].metaslab;
+
+            // First look for a partial slab on the same metaslab as the old active slab.
+            // NOTE(review): find_partial_slab() takes three arguments and
+            // returns an int index, not a slab_t pointer.
+            new_slab = find_partial_slab(metaslab, slab_class);
+            if (new_slab == NULL) {
+                // No partial slab on the same metaslab. Remove a metaslab from the front of the queue.
+                metaslab_t *metaslab = (metaslab_t *)PartialSlabQueue[slab_class].oldest;
+                if (metaslab != NULL) {
+                    ASSERT(metaslab->q[slab_class].older == NULL);
+                    PartialSlabQueue[slab_class].newest = (slab_t *)metaslab->q[slab_class].newer;
+                    // NOTE(review): dereferences <newer> without a NULL check.
+                    metaslab->q[slab_class].newer->q[slab_class].older = NULL;
+                    new_slab = find_partial_slab(metaslab, slab_class);
+                } else {
+                    // Can't find a partial slab; create a new slab.
+                    new_slab = (slab_t *)get_block(get_meta_class(slab_class));
+                    init_slab(new_slab, slab_class);
+                }
+            }
+            break;
+
+        case LARGE_SLAB_CASES:
+        case HUGE_SLAB_CASES:
+            // large or huge slab class
+            new_slab = PartialSlabQueue[slab_class].oldest;
+            // NOTE(review): this condition looks inverted -- the dequeue
+            // logic dereferences new_slab inside an == NULL branch, and
+            // slab_t has no <older>/<newer> members.
+            if (new_slab == NULL) {
+                ASSERT(new_slab->older == NULL);
+                PartialSlabQueue[slab_class].newest = new_slab->newer;
+                new_slab->newer->older = NULL;
+            }
+            if (new_slab == NULL) {
+                // NOTE(review): IS_HUGE_SLAB_CLASS / IS_LARGE_SLAB_CLASS are
+                // not defined in this file.
+                if (IS_HUGE_SLAB_CLASS(slab_class)) {
+                    new_slab = new_large_slab(slab_class);
+                } else {
+                    ASSERT(IS_LARGE_SLAB_CLASS(slab_class));
+                    new_slab = (slab_t *)get_page();
+                }
+                init_slab(new_slab, slab_class);
+            }
+            break;
+
+        default:
+            ASSERT(FALSE);
+    }
+
+    // NOTE(review): ActiveSlab[] elements are structs, not pointers.
+    ActiveSlab[slab_class] = new_slab;
+}
+
+// Pop a free block of <slab_class> off the class's active slab, activating a
+// new slab and retrying when the active one is exhausted.
+// NOTE(review): work in progress -- the forward declaration above says this
+// returns block_t *, ActiveSlab[] has members <slab>/<slab_base> rather than
+// <slab_index>/<metaslab>, and the declarations after the case label need
+// enclosing braces to be valid C.
+static void *get_block(class_t slab_class) {
+
+    // Look for a free block on the active slab.
+    switch (slab_class) {
+        case NESTED_SLAB_CASES:
+            int slab_index = ActiveSlab[slab_class].slab_index;
+            metaslab_t *metaslab = ActiveSlab[slab_class].metaslab;
+            if (metaslab != NULL) {
+                slab_t slab = metaslab->slab[slab_index];
+                if (slab.free_list) {
+                    // free_list is a 1-based index in 8-byte units from the
+                    // slab base; the next free index is stored in the first
+                    // two bytes of the block itself.
+                    char *slab_base = metaslab->data + ( ( slab_index - 1 ) << SLAB_CLASS_SCALE(slab_class) );
+                    void *b = (void *)( slab_base + ( ( slab.free_list - 1 ) << 3 ) );
+                    metaslab->slab[slab_index].free_list = *(uint16_t *)b;
+                    return b;
+                }
+            }
+            break;
+
+        case LARGE_SLAB_CASES:
+            //TODO
+            break;
+
+        case HUGE_SLAB_CASES:
+            //TODO
+            break;
+
+        default:
+            ASSERT(FALSE);
+    }
+
+    // Find another slab, activate it, and try again.
+    activate_new_slab(slab_class);
+    return get_block(slab_class); // recursive tail-call
+}
+
+// Allocate a block of at least <n> bytes. Returns NULL for zero-sized
+// requests; the block comes from the slab whose class best fits <n>.
+void *nbd_malloc (size_t n) {
+    TRACE("m1", "nbd_malloc: size %llu", n, 0);
+    if (n == 0)
+        return NULL;
+
+    class_t class = get_slab_class(n);
+    void *block = get_block(class);
+
+    TRACE("m1", "nbd_malloc: returning block %p", block, 0);
+    return block;
+}
+
+// Free a block allocated by nbd_malloc(). The owning metaslab is found via
+// the page map; the block's slab is found by probing the 4kB, then 32kB,
+// then 256kB headers covering the block's address.
+// BUGFIXES: (1) the free-list push wrote the bitfield into a pointer field
+// ("b->next = slab.free_list") instead of the uint16_t next-index encoding
+// that get_block() reads back with *(uint16_t *)b; (2) "slab_index &= 0x7"
+// and "&= 0x3F" kept the LOW bits -- the intent is to round DOWN to the
+// 32kB/256kB slab's first chunk, i.e. mask them OFF; (3) PageMap entries
+// are size_t and need a cast to metaslab_t *.
+void nbd_free (void *x) {
+    TRACE("m1", "nbd_free: block %p", x, 0);
+    ASSERT(x);
+    ASSERT(x >= (void *)MemBase && x < (void *)MemEnd);
+
+    block_t *b = (block_t *)x;
+    size_t page_index = (size_t)b >> PAGE_SCALE;
+    metaslab_t *metaslab = (metaslab_t *)PageMap[page_index];
+    ASSERT(metaslab);
+    size_t slab_index = ((size_t)b & MASK(PAGE_SCALE)) >> 12;
+    slab_t slab = metaslab->slab[slab_index];
+
+    // if <slab> is not valid <b> is on a larger slab.
+    if (slab.valid) {
+        *(uint16_t *)b = slab.free_list; // chain old head through the block itself
+        // the <offset> of the block is offset by 1 so 0 can represent NULL.
+        slab.free_list = ( ((size_t)b & MASK(12)) >> 3 ) + 1;
+    } else {
+        // <b> is not on a 4kB slab.
+        slab_index &= ~(size_t)0x7; // Try the 32kB slab.
+        slab = metaslab->slab[slab_index];
+        if (slab.valid) {
+            *(uint16_t *)b = slab.free_list;
+            slab.free_list = ( ((size_t)b & MASK(15)) >> 3 ) + 1;
+        } else {
+            // <b> is not on a 32kB slab.
+            slab_index &= ~(size_t)0x3F; // <b> must be on the 256kB slab.
+            slab = metaslab->slab[slab_index];
+            ASSERT(slab.valid);
+            *(uint16_t *)b = slab.free_list;
+            slab.free_list = ( ((size_t)b & MASK(18)) >> 3 ) + 1;
+        }
+    }
+    --slab.num_in_use;
+    metaslab->slab[slab_index] = slab;
+    if (slab.num_in_use == 0) {
+        // NOTE(review): free_slab() is not defined in this file -- confirm
+        // it exists elsewhere in the project.
+        free_slab(metaslab, slab_index);
+    }
+}
+
+#else//USE_SYSTEM_MALLOC
+#include <stdlib.h>
+
+// No setup is needed when delegating to the system malloc.
+void mem_init (void) {
+    return;
+}
+
+// Free a block allocated by nbd_malloc().
+// BUGFIX: the function name was misspelled "ndb_free", leaving nbd_free()
+// undefined in the USE_SYSTEM_MALLOC configuration (the same typo is fixed
+// in the sibling allocator elsewhere in this change).
+void nbd_free (void *x) {
+    TRACE("m1", "nbd_free: %p", x, 0);
+#ifndef NDEBUG
+    memset(x, 0xcd, sizeof(void *)); // bear trap
+#endif//NDEBUG
+    free(x);
+    return;
+}
+
+// Allocate <n> bytes via the system malloc.
+void *nbd_malloc (size_t n) {
+    TRACE("m1", "nbd_malloc: request size %llu", n, 0);
+    void *block = malloc(n);
+    TRACE("m1", "nbd_malloc: returning %p", block, 0);
+    return block;
+}
+#endif//USE_SYSTEM_MALLOC
--- /dev/null
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+
+#define CACHE_LINE_SCALE 6
+
+// Return the expected fraction of bytes wasted per slab.
+//
+// The internal fragmentation due to using size classes is biased by
+// including the space required for a pointer to each block. <delta> is the
+// gap to the previous size class; on average half of it is quantization
+// waste per block.
+double calc_frag(int slab_size, int block_size, int delta)
+{
+    double quant = (double)delta / 2 / block_size;
+    assert(quant >= 0.0);
+
+    // internal fragmentation from tiling non-power-of-2 sized blocks in slabs
+    int whole_blocks = slab_size / block_size;
+    int leftover = slab_size - whole_blocks * block_size;
+    assert(leftover < block_size);
+
+    // number of cache-line colors needed to spread cache line accesses evenly
+    int colors = block_size >> CACHE_LINE_SCALE;
+    if (colors > 1) {
+        int overflow = colors - 1 - (leftover >> CACHE_LINE_SCALE);
+        if (overflow > 0) {
+            // extra space consumed by color offsets that don't fit in <leftover>
+            double coloring = (double)overflow * block_size / colors;
+            return ((double)leftover + coloring)/slab_size + quant;
+        }
+    }
+    return (double)leftover/slab_size + quant;
+}
+
+// size classes for various alignments, max 6% expected internal fragmentation
+// (naming: A<alignment>_<slab size>; values are block sizes in bytes)
+
+// 2B-128B blocks, 4k slab
+static uint8_t A1_4kB[] = { 2, 3, 5, 7, 9, 11, 14, 17, 20, 24, 28, 33, 39, 46, 53, 62, 70, 80, 91, 105, 120, 128 };
+static uint8_t A2_4kB[] = { 2, 4, 6, 8, 10, 14, 18, 22, 28, 34, 40, 48, 56, 66, 74, 84, 94, 104, 120, 128 };
+static uint8_t A4_4kB[] = { 4, 8, 12, 16, 20, 24, 32, 40, 48, 56, 68, 80, 92, 104, 120, 128 };
+static uint8_t A8_4kB[] = { 8, 16, 24, 32, 40, 48, 64, 80, 96, 112, 120, 128 };
+static uint8_t A16_4kB[] = { 16, 32, 48, 64, 80, 96, 112, 128 };
+
+// 128B-1kB blocks, 32k slab
+static uint16_t A1_32kB[] = { 137, 156, 178, 201, 227, 256, 288, 323, 361, 402, 447, 494, 545, 598, 654, 712, 771, 832, 895, 958, 1022 };
+static uint16_t A8_32kB[] = { 144, 168, 192, 224, 256, 296, 336, 376, 424, 472, 528, 584, 640, 704, 768, 832, 896, 960, 1024 };
+static uint16_t A16_32kB[] = { 144, 176, 208, 240, 272, 320, 368, 416, 464, 512, 576, 640, 704, 768, 832, 896, 960, 1024 };
+
+// 1kB-8kB blocks, 256k slab
+static uint16_t A1_256kB[] = { 1152, 1297, 1458, 1636, 1832, 2048, 2284, 2541, 2820, 3124, 3550, 3904, 4280, 4676, 5092, 5525, 5974, 6435, 6906, 7380, 7856 };
+static uint16_t A8_256kB[] = { 1152, 1288, 1440, 1608, 1792, 2000, 2224, 2472, 2744, 3032, 3344, 3680, 4040, 4416, 4816, 5232, 5664, 6112, 6568, 7032, 7504, 7976 };
+static uint16_t A64_256kB[] = { 1152, 1280, 1408, 1536, 1664, 1856, 2048, 2240, 2432, 2688, 2944, 3200, 3520, 3840, 4160, 4544, 4928, 5312, 5696, 6144, 6592, 7040, 7488, 7936 };
+
+// 8kB-100kB blocks, 2MB slab
+static uint32_t A64_2MB[] = {
+    8896, 9984, 11200, 12544, 14016, 15616, 17408, 19328, 21440, 23744, 26176, 28800, 31616, 34624, 37760, 41024,
+    44416, 47936, 51584, 55296, 59008, 62784, 66496, 70208, 73856, 77376, 80832, 84160, 87360, 90368, 93248, 95936,
+    98496, 100864
+};
+
+// Scratch driver used to generate candidate size-class tables.
+// Currently it prints a geometric (x *= 1.1, rounded down to 64B) size
+// progression starting at 100864 and exits.
+// NOTE(review): everything after the first "return 0;" below is unreachable
+// dead code kept from a previous experiment, and the ischosen() macro is
+// degenerate (every comparison is against 0).
+int main (void) {
+
+    double x = 100864;
+    int n;
+    for (n = 0; n < 40 && x < (1 << 21); ++n) {
+        x *= 1.1;
+        x = (uint32_t)x & ~63; // round down to a 64-byte multiple
+        printf("%u, ", (uint32_t)x);
+    }
+    printf("\n%d\n", n);
+    return 0; // <-- everything below here is dead code
+    const int start1 = 120832;
+    const int start2 = 1408;
+    const int alignment = 64;
+#define ischosen(x) \
+    (x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || \
+     x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || \
+     x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || \
+     x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || \
+     x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || \
+     x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || \
+     x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || \
+     x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || \
+     x == 0 || x == 0 || x == 0 || x == 0 || x == 0 || x == 0)
+
+    const int slab_size = 1 << 21;
+    const double thresh = .06;
+    int block_size;
+    int i = 0;
+    // For each candidate block size, tabulate fragmentation over candidate deltas.
+    for (block_size = start1; i < 87 && block_size < (slab_size >> 3); ++i, block_size += alignment) {
+        printf("%5d ", block_size);
+
+        int d;
+        double min = 1;
+        int ch = block_size + alignment;
+        for (d = block_size; d >= alignment; d-=alignment) {
+            int x = block_size - d;
+            if (ischosen(x)) {
+                double f = calc_frag(slab_size, block_size, d);
+                if (f < thresh && f < min) { min = f; ch = d; }
+            }
+        }
+
+        for (d = start2; d > start2 - 1024; d-=alignment) {
+            if (d <= block_size && d <= ch) {
+                double f = calc_frag(slab_size, block_size, d);
+                if (f < thresh) {
+                    if (d == ch) {
+                        printf(" *%3.1f%% ", f*100);
+                    } else {
+                        printf(" %4.1f%% ", f*100);
+                    }
+                    continue;
+                }
+            }
+            if (d-1 <= block_size && d-alignment <= ch && calc_frag(slab_size, block_size, d - alignment) < thresh) {
+                printf("%6d ", block_size);
+                continue;
+            }
+            printf("       ");
+        }
+
+        if (ischosen(block_size)) {
+            printf("%5d*", block_size);
+        } else {
+            printf("%5d", block_size);
+        }
+        printf("\n");
+    }
+    return 0;
+}
static int num_threads_ = 0;
static fifo_t *fifo_alloc(int scale) {
- fifo_t *q = (fifo_t *)nbd_malloc(sizeof(fifo_t) + (1 << scale) * sizeof(void *));
+ fifo_t *q = (fifo_t *)nbd_malloc(sizeof(fifo_t) + (1ULL << scale) * sizeof(void *));
memset(q, 0, sizeof(fifo_t));
q->scale = scale;
q->head = 0;
LOCALIZE_THREAD_LOCAL(tid_, int);
assert(tid_ < num_threads_);
int next_thread_id = (tid_ + 1) % num_threads_;
+ TRACE("r1", "rcu_update: updating thread %llu", next_thread_id, 0);
int i;
for (i = 0; i < num_threads_; ++i) {
if (i == tid_)
uint64_t x = rcu_[tid_][i];
rcu_[next_thread_id][i] = rcu_last_posted_[tid_][i] = x;
+ TRACE("r2", "rcu_update: posted updated value (%llu) for thread %llu", x, i);
}
// free
#include "hazard.h"
#define NUM_ITERATIONS 10000000
-#define MAX_NUM_THREADS 4
typedef struct node {
struct node *next;
int main (int argc, char **argv) {
//lwt_set_trace_level("m0r0");
- int num_threads = 2;
+ int num_threads = MAX_NUM_THREADS;
if (argc == 2)
{
errno = 0;
return -1;
}
- num_threads_ = 2;
+ num_threads_ = MAX_NUM_THREADS;
if (argc == 2)
{
errno = 0;
map_t *map = wd->map;
CuTest* tc = wd->tc;
int d = wd->id;
- int iters = 10000;
+ int iters = (map_type_ == &MAP_IMPL_LL ? 10000 : 100000);
(void)SYNC_ADD(wd->wait, -1);
do { } while (*wd->wait); // wait for all workers to be ready
pthread_t thread[2];
worker_data_t wd[2];
- static const int num_threads = 2;
- volatile int wait = num_threads;
+ volatile int wait = 2;
#ifdef TEST_STRING_KEYS
map_t *map = map_alloc(map_type_, &DATATYPE_NSTRING);
#else
// In 2 threads, add & remove even & odd elements concurrently
int i;
- for (i = 0; i < num_threads; ++i) {
+ for (i = 0; i < 2; ++i) {
wd[i].id = i;
wd[i].tc = tc;
wd[i].map = map;
if (rc != 0) { perror("nbd_thread_create"); return; }
}
- for (i = 0; i < num_threads; ++i) {
+ for (i = 0; i < 2; ++i) {
pthread_join(thread[i], NULL);
}
gettimeofday(&tv2, NULL);
int ms = (int)(1000000*(tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec - tv1.tv_usec) / 1000;
map_print(map);
- printf("Th:%d Time:%dms\n", num_threads, ms);
+ printf("Time:%dms\n", ms);
fflush(stdout);
// In the end, all members should be removed
}
int main (void) {
- lwt_set_trace_level("H3m3l2t0");
+ lwt_set_trace_level("r0m3l2t0");
static const map_impl_t *map_types[] = { &MAP_IMPL_LL, &MAP_IMPL_SL, &MAP_IMPL_HT };
for (int i = 0; i < sizeof(map_types)/sizeof(*map_types); ++i) {
CuSuite* suite = CuSuiteNew();
SUITE_ADD_TEST(suite, concurrent_add_remove_test);
- SUITE_ADD_TEST(suite, basic_test);
- SUITE_ADD_TEST(suite, basic_iteration_test);
- SUITE_ADD_TEST(suite, big_iteration_test);
+// SUITE_ADD_TEST(suite, basic_test);
+// SUITE_ADD_TEST(suite, basic_iteration_test);
+// SUITE_ADD_TEST(suite, big_iteration_test);
CuSuiteRun(suite);
CuSuiteDetails(suite, output);
static volatile int wait_;
static volatile int stop_;
-static long num_threads_;
+static int num_threads_;
+static int duration_;
static map_t *map_;
static int get_range_;
static int put_range_;
-static int num_keys_;
+static size_t num_keys_;
static map_key_t *keys_ = NULL;
-static uint64_t times_[MAX_NUM_THREADS] = {};
static int ops_[MAX_NUM_THREADS] = {};
+#define FOO (1ULL << 20)
+
void *worker (void *arg) {
int tid = (int)(size_t)arg;
uint64_t s = nbd_rand_seed(tid);
(void)SYNC_ADD(&wait_, -1);
do {} while (wait_);
- uint64_t t1 = rdtsc();
-
while (!stop_) {
- int r = nbd_next_rand(&s);
- int x = r & ( (1 << 20) - 1 );
- int i = nbd_next_rand(&s) & (num_keys_ - 1);
- map_key_t key = keys_[i];
+ map_key_t key = keys_[ nbd_next_rand(&s) & (num_keys_ - 1) ];
+ uint32_t x = nbd_next_rand(&s) & (FOO - 1);
if (x < get_range_) {
- map_val_t val = map_get(map_, key);
+#ifndef NDEBUG
+ map_val_t val =
+#endif
+ map_get(map_, key);
#ifdef TEST_STRING_KEYS
ASSERT(val == DOES_NOT_EXIST || ns_cmp((nstring_t *)key, (nstring_t *)val) == 0);
#else
rcu_update();
}
- times_[tid] = rdtsc() - t1;
ops_[tid] = get_ops + put_ops + del_ops;
return NULL;
}
-void run_test (void) {
+int run_test (void) {
+ int ops;
wait_ = num_threads_ + 1;
// Quicky sanity check
do { /* nothing */ } while (wait_ != 1);
wait_ = 0;
- sleep(2);
+ sleep(duration_);
stop_ = 1;
for (int i = 0; i < num_threads_; ++i) {
pthread_join(thread[i], NULL);
}
+ ops = 0;
+ for (int i = 0; i < num_threads_; ++i) {
+ ops += ops_[i];
+ }
+ return ops;
}
int main (int argc, char **argv) {
char* program_name = argv[0];
- if (argc > 2) {
+ if (argc > 3) {
fprintf(stderr, "Usage: %s num_threads\n", program_name);
return -1;
}
num_threads_ = 2;
- if (argc == 2)
+ if (argc > 1)
{
errno = 0;
num_threads_ = strtol(argv[1], NULL, 10);
}
}
+ int table_scale = 12;
+ if (argc > 2) {
+ table_scale = strtol(argv[2], NULL, 10);
+ if (errno) {
+ fprintf(stderr, "%s: Invalid argument for the scale of the collection\n", program_name);
+ return -1;
+ }
+ table_scale = strtol(argv[2], NULL, 10);
+ if (table_scale < 0 || table_scale > 31) {
+ fprintf(stderr, "%s: The scale of the collection must be between 0 and 31\n", program_name);
+ return -1;
+ }
+ }
+
- int table_scale = 10;
- int read_ratio = 95;
- get_range_ = (read_ratio << 20) / 100;
- put_range_ = (((1 << 20) - get_range_) >> 1) + get_range_;
+ int read_ratio = 90;
+ int put_ratio = 50;
+ get_range_ = (int)((double)FOO / 100 * read_ratio);
+ put_range_ = get_range_ + (int)(((double)FOO - get_range_) / 100 * put_ratio);
- static const map_impl_t *map_types[] = { &MAP_IMPL_HT };
+ static const map_impl_t *map_types[] = { &MAP_IMPL_SL };
for (int i = 0; i < sizeof(map_types)/sizeof(*map_types); ++i) {
#ifdef TEST_STRING_KEYS
map_ = map_alloc(map_types[i], &DATATYPE_NSTRING);
#endif
// Do some warmup
- num_keys_ = 1 << table_scale;
+ num_keys_ = 1ULL << table_scale;
keys_ = nbd_malloc(sizeof(map_key_t) * num_keys_);
- for (int j = 0; j < num_keys_; ++j) {
+ ASSERT(keys_ != NULL);
+ for (uint64_t j = 0; j < num_keys_; ++j) {
#ifdef TEST_STRING_KEYS
char tmp[64];
snprintf(tmp, sizeof(tmp), "%dabc%d", j, j*17+123);
#endif
}
- struct timeval tv1, tv2;
- gettimeofday(&tv1, NULL);
-
+ duration_ = 10;
int num_trials = 1;
+ int ops = 0;
for (int i = 0; i < num_trials; ++i) {
- run_test();
+ ops += run_test();
}
+ double ops_per_sec = ops / num_trials / duration_;
- gettimeofday(&tv2, NULL);
- int ms = (int)(1000000*(tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec - tv1.tv_usec) / 1000;
- map_print(map_);
- printf("Th:%ld Time:%dms\n\n", num_threads_, ms);
+ //map_print(map_);
+ printf("Threads:%-2d Size:2^%-2d Mops/Sec:%-4.3g per-thread:%-4.3g\n\n",
+ num_threads_, table_scale, ops_per_sec/1000000, ops_per_sec/num_threads_/1000000);
fflush(stdout);
map_free(map_);
int main (int argc, char **argv) {
lwt_set_trace_level("m3r3");
- int num_threads = 2;
+ int num_threads = MAX_NUM_THREADS;
if (argc == 2)
{
errno = 0;