From 7d658a03f83e64690d0c71b4733dd3f9a2c60208 Mon Sep 17 00:00:00 2001
From: jdybnis
Date: Mon, 9 Feb 2009 04:46:50 +0000
Subject: [PATCH] add port of perf test from high-scale-lib

some code cleanup
---
 include/common.h    |   17 +-
 include/hashtable.h |    2 +-
 include/list.h      |    2 +-
 include/runtime.h   |    2 +
 include/skiplist.h  |    2 +-
 makefile            |   18 ++-
 map/hashtable.c     |    8 +-
 map/list.c          |    4 +-
 runtime/mem.c       |  383 +++++++++++++++++++++++++++++---------------
 runtime/runtime.c   |   14 ++
 test/haz_test.c     |    8 +-
 test/map_test1.c    |    2 +-
 test/map_test2.c    |    2 +-
 test/perf_test.c    |  180 +++++++++++++++++++++
 test/rcu_test.c     |   10 +-
 test/txn_test.c     |    2 +-
 todo                |   15 +-
 txn/txn.c           |   11 +-
 18 files changed, 506 insertions(+), 176 deletions(-)
 create mode 100644 test/perf_test.c

diff --git a/include/common.h b/include/common.h
index 8988ed0..2931c4d 100644
--- a/include/common.h
+++ b/include/common.h
@@ -11,12 +11,13 @@
 #include 
 #include 
 
-#define MAX_NUM_THREADS 4 // make this whatever you want, but make it a power of 2
+#define MAX_NUM_THREADS 4 // make this whatever you want, but make it a power of 2
 
-#define CACHE_LINE_SIZE 64
+#define CACHE_LINE_SIZE 64 // 64 byte cache line on x86 and x86-64
+#define CACHE_LINE_SCALE 6 // log base 2 of the cache line size
 
-#define EXPECT_TRUE(x) __builtin_expect(x, 1)
-#define EXPECT_FALSE(x) __builtin_expect(x, 0)
+#define EXPECT_TRUE(x) __builtin_expect(!!(x), 1)
+#define EXPECT_FALSE(x) __builtin_expect(!!(x), 0)
 
 #define SYNC_SWAP __sync_lock_test_and_set
 #define SYNC_CAS __sync_val_compare_and_swap
@@ -45,7 +46,7 @@
 #define ERROR_UNSUPPORTED_FEATURE (-3)
 #define ERROR_TXN_NOT_RUNNING (-4)
 
-#define VOLATILE(x) *((volatile typeof(x) *)&x)
+#define VOLATILE_DEREF(x) (*((volatile typeof(x))(x)))
 
 typedef unsigned long long uint64_t;
 typedef unsigned int uint32_t;
@@ -53,5 +54,11 @@ typedef unsigned char uint8_t;
 
 typedef size_t markable_t;
 
+static inline uint64_t rdtsc (void) {
+    unsigned l, u;
+    __asm__ __volatile__("rdtsc" : "=a" (l), "=d" (u));
+    return ((uint64_t)u << 32) | l;
+}
+
 #include "lwt.h"
 #endif //COMMON_H
diff --git a/include/hashtable.h b/include/hashtable.h
index 3fb5d6c..d0d12ae 100644
--- a/include/hashtable.h
+++ b/include/hashtable.h
@@ -18,7 +18,7 @@ ht_iter_t * ht_iter_begin (hashtable_t *ht, map_key_t key);
 map_val_t   ht_iter_next  (ht_iter_t *iter, map_key_t *key_ptr);
 void        ht_iter_free  (ht_iter_t *iter);
 
-static const map_impl_t ht_map_impl = {
+static const map_impl_t MAP_IMPL_HT = {
     (map_alloc_t)ht_alloc, (map_cas_t)ht_cas, (map_get_t)ht_get, (map_remove_t)ht_remove,
     (map_count_t)ht_count, (map_print_t)ht_print, (map_free_t)ht_free, (map_iter_begin_t)ht_iter_begin,
     (map_iter_next_t)ht_iter_next, (map_iter_free_t)ht_iter_free
diff --git a/include/list.h b/include/list.h
index 0274066..a246bc2 100644
--- a/include/list.h
+++ b/include/list.h
@@ -19,7 +19,7 @@ ll_iter_t * ll_iter_begin (list_t *ll, map_key_t key);
 map_val_t   ll_iter_next  (ll_iter_t *iter, map_key_t *key_ptr);
 void        ll_iter_free  (ll_iter_t *iter);
 
-static const map_impl_t ll_map_impl = {
+static const map_impl_t MAP_IMPL_LL = {
     (map_alloc_t)ll_alloc, (map_cas_t)ll_cas, (map_get_t)ll_lookup, (map_remove_t)ll_remove,
     (map_count_t)ll_count, (map_print_t)ll_print, (map_free_t)ll_free, (map_iter_begin_t)ll_iter_begin,
     (map_iter_next_t)ll_iter_next, (map_iter_free_t)ll_iter_free
diff --git a/include/runtime.h b/include/runtime.h
index c9db783..ff1d245 100644
--- a/include/runtime.h
+++ b/include/runtime.h
@@ -12,5 +12,7 @@ extern DECLARE_THREAD_LOCAL(tid_, int);
 
 int nbd_thread_create (pthread_t *restrict thread, int thread_id, void *(*start_routine)(void *), void *restrict arg);
 int nbd_rand (void);
+uint64_t nbd_rand_seed (int i);
+int nbd_next_rand (uint64_t *r);
 
 #endif//RUNTIME_H
diff --git a/include/skiplist.h b/include/skiplist.h
index 484c64c..4377a94 100644
--- a/include/skiplist.h
+++ b/include/skiplist.h
@@ -19,7 +19,7 @@ sl_iter_t * sl_iter_begin (skiplist_t *sl, map_key_t key);
 map_val_t   sl_iter_next  (sl_iter_t *iter, map_key_t *key_ptr);
 void        sl_iter_free  (sl_iter_t *iter);
 
-static const map_impl_t sl_map_impl = {
+static const map_impl_t MAP_IMPL_SL = {
     (map_alloc_t)sl_alloc, (map_cas_t)sl_cas, (map_get_t)sl_lookup, (map_remove_t)sl_remove,
     (map_count_t)sl_count, (map_print_t)sl_print, (map_free_t)sl_free, (map_iter_begin_t)sl_iter_begin,
     (map_iter_next_t)sl_iter_next, (map_iter_free_t)sl_iter_free
diff --git a/makefile b/makefile
index 8d51bb3..dbb0b8d 100644
--- a/makefile
+++ b/makefile
@@ -5,13 +5,14 @@
 # Makefile for building programs with whole-program interfile optimization
 ###################################################################################################
 CFLAGS0 := -Wall -Werror -std=gnu99 -lpthread #-m32 -DNBD32
-CFLAGS1 := $(CFLAGS0) -g -O3 #-DNDEBUG #-fwhole-program -combine
+CFLAGS1 := $(CFLAGS0) -g #-O3 #-DNDEBUG #-fwhole-program -combine
 CFLAGS2 := $(CFLAGS1) #-DENABLE_TRACE
 CFLAGS3 := $(CFLAGS2) #-DLIST_USE_HAZARD_POINTER
 CFLAGS  := $(CFLAGS3) #-DUSE_SYSTEM_MALLOC #-DTEST_STRING_KEYS
 INCS    := $(addprefix -I, include)
-TESTS   := output/map_test2 output/map_test1 output/txn_test output/rcu_test output/haz_test
-EXES    := $(TESTS)
+TESTS   := output/perf_test output/map_test2 output/map_test1 output/txn_test \
+           output/rcu_test output/haz_test
+OBJS    := $(TESTS)
 
 RUNTIME_SRCS := runtime/runtime.c runtime/rcu.c runtime/lwt.c runtime/mem.c datatype/nstring.c \
                 runtime/hazard.c
@@ -22,6 +23,7 @@ rcu_test_SRCS  := $(RUNTIME_SRCS) test/rcu_test.c
 txn_test_SRCS  := $(RUNTIME_SRCS) $(MAP_SRCS) test/txn_test.c test/CuTest.c txn/txn.c
 map_test1_SRCS := $(RUNTIME_SRCS) $(MAP_SRCS) test/map_test1.c
 map_test2_SRCS := $(RUNTIME_SRCS) $(MAP_SRCS) test/map_test2.c test/CuTest.c
+perf_test_SRCS := $(RUNTIME_SRCS) $(MAP_SRCS) test/perf_test.c
 
 tests: $(TESTS)
 
@@ -44,13 +46,13 @@ $(addsuffix .log, $(TESTS)) : %.log : %
 # Also, when calculating dependencies -combine is removed from CFLAGS because of another bug
 # in gcc. It chokes when -MM is used with -combine.
 ###################################################################################################
-$(EXES): output/% : output/%.d makefile
+$(OBJS): output/% : output/%.d makefile
 	gcc $(CFLAGS:-combine:) $(INCS) -MM -MT $@ $($*_SRCS) > $@.d
 	gcc $(CFLAGS) $(INCS) -o $@ $($*_SRCS)
 
-asm: $(addsuffix .s, $(EXES))
+asm: $(addsuffix .s, $(OBJS))
 
-$(addsuffix .s, $(EXES)): output/%.s : output/%.d makefile
+$(addsuffix .s, $(OBJS)): output/%.s : output/%.d makefile
 	gcc $(CFLAGS:-combine:) $(INCS) -MM -MT $@ $($*_SRCS) > output/$*.d
 	gcc $(CFLAGS) $(INCS) -combine -S -o $@.temp $($*_SRCS)
 	grep -v "^L[BFM]\|^LCF" $@.temp > $@
@@ -71,8 +73,8 @@ clean:
 ###################################################################################################
 # dummy rule for boostrapping dependency files
 ###################################################################################################
-$(addsuffix .d, $(EXES)) : output/%.d :
+$(addsuffix .d, $(OBJS)) : output/%.d :
 
--include $(addsuffix .d, $(EXES))
+-include $(addsuffix .d, $(OBJS))
 
 .PHONY: clean test tags asm
diff --git a/map/hashtable.c b/map/hashtable.c
index b8d8f6b..34d04d5 100644
--- a/map/hashtable.c
+++ b/map/hashtable.c
@@ -374,7 +374,7 @@ static map_val_t hti_cas (hti_t *hti, map_key_t key, uint32_t key_hash, map_val_
     map_val_t ent_val = ent->val;
     if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
         if (ent_val != COPIED_VALUE && ent_val != TAG_VALUE(TOMBSTONE, TAG1)) {
-            int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
+            int did_copy = hti_copy_entry(hti, ent, key_hash, VOLATILE_DEREF(hti).next);
             if (did_copy) {
                 (void)SYNC_ADD(&hti->num_entries_copied, 1);
             }
@@ -429,7 +429,7 @@ static map_val_t hti_get (hti_t *hti, map_key_t key, uint32_t key_hash) {
     // searching the table. In that case, if a copy is in progress the key
     // might exist in the copy.
     if (EXPECT_FALSE(ent == NULL)) {
-        if (((volatile hti_t *)hti)->next != NULL)
+        if (VOLATILE_DEREF(hti).next != NULL)
             return hti_get(hti->next, key, key_hash); // recursive tail-call
         return DOES_NOT_EXIST;
     }
@@ -441,12 +441,12 @@ static map_val_t hti_get (hti_t *hti, map_key_t key, uint32_t key_hash) {
     map_val_t ent_val = ent->val;
     if (EXPECT_FALSE(IS_TAGGED(ent_val, TAG1))) {
         if (EXPECT_FALSE(ent_val != COPIED_VALUE && ent_val != TAG_VALUE(TOMBSTONE, TAG1))) {
-            int did_copy = hti_copy_entry(hti, ent, key_hash, ((volatile hti_t *)hti)->next);
+            int did_copy = hti_copy_entry(hti, ent, key_hash, VOLATILE_DEREF(hti).next);
             if (did_copy) {
                 (void)SYNC_ADD(&hti->num_entries_copied, 1);
             }
         }
-        return hti_get(((volatile hti_t *)hti)->next, key, key_hash); // tail-call
+        return hti_get(VOLATILE_DEREF(hti).next, key, key_hash); // tail-call
     }
 
     return (ent_val == TOMBSTONE) ? DOES_NOT_EXIST : ent_val;
diff --git a/map/list.c b/map/list.c
index bc191cd..3b17873 100644
--- a/map/list.c
+++ b/map/list.c
@@ -298,7 +298,7 @@ map_val_t ll_remove (list_t *ll, map_key_t key) {
         }
     } while (next != old_next);
     TRACE("l2", "ll_remove: logically removed item %p", item, 0);
-    ASSERT(HAS_MARK(((volatile node_t *)item)->next));
+    ASSERT(HAS_MARK(VOLATILE_DEREF(item).next));
 
     // Atomically swap out the item's value in case another thread is updating the item while we are
    // removing it. This establishes which operation occurs first logically, the update or the remove.
@@ -377,7 +377,7 @@ map_val_t ll_iter_next (ll_iter_t *iter, map_key_t *key_ptr) {
     do {
         item = iter->pred->next;
         haz_set(hp0, STRIP_MARK(item));
-    } while (item != ((volatile node_t *)iter->pred)->next);
+    } while (item != VOLATILE_DEREF(iter->pred).next);
 #endif//LIST_USE_HAZARD_POINTER
     iter->pred = STRIP_MARK(item);
     if (iter->pred == NULL)
diff --git a/runtime/mem.c b/runtime/mem.c
index 57d94aa..7ffda43 100644
--- a/runtime/mem.c
+++ b/runtime/mem.c
@@ -4,215 +4,346 @@
  *
  * Extreamly fast multi-threaded malloc.
  */
+#ifndef USE_SYSTEM_MALLOC
 #define _BSD_SOURCE // so we get MAP_ANON on linux
-#include 
+#include 
 #include 
 #include 
+#include 
 #include "common.h"
 #include "rlocal.h"
 #include "lwt.h"
 
-#define GET_SCALE(n) (sizeof(void *)*__CHAR_BIT__ - __builtin_clzl((n) - 1)) // log2 of , rounded up
+#define RECYCLE_PAGES
+
+#define MAX_SCALE 31 // allocate blocks up to 4GB (arbitrary, could be bigger)
 
 #ifndef NBD32
+#define MIN_SCALE 3 // smallest allocated block is 8 bytes
 #define MAX_POINTER_BITS 48
-#define REGION_SCALE 21 // 2mb regions
+#define PAGE_SCALE 21 // 2MB pages
 #else
+#define MIN_SCALE 2 // smallest allocated block is 4 bytes
 #define MAX_POINTER_BITS 32
-#define REGION_SCALE 12 // 4kb regions
+#define PAGE_SCALE 12 // 4KB pages
 #endif
-#define REGION_SIZE (1 << REGION_SCALE)
-#define HEADER_REGION_SCALE ((MAX_POINTER_BITS - REGION_SCALE) + GET_SCALE(sizeof(header_t)))
-#define MAX_SCALE 31 // allocate blocks up to 4GB in size (arbitrary, could be bigger)
+#define PAGE_SIZE (1 << PAGE_SCALE)
+#define HEADERS_SIZE (((size_t)1 << (MAX_POINTER_BITS - PAGE_SCALE)) * sizeof(header_t))
 
 typedef struct block {
     struct block *next;
 } block_t;
 
-// region header
+// TODO: Break the page header into two parts. The first part is located in the header region. The
+// second part is located on the page and is only used when there are free items.
 typedef struct header {
+#ifdef RECYCLE_PAGES
+    struct header *next;
+    struct header *prev;
+    block_t *free_list; // list of free blocks
+    int num_in_use;
+#endif//RECYCLE_PAGES
     uint8_t owner; // thread id of owner
     uint8_t scale; // log2 of the block size
 } header_t;
 
+#ifdef RECYCLE_PAGES
+typedef struct size_class {
+    header_t *active_page;
+    header_t *oldest_partial;
+    header_t *newest_partial;
+} size_class_t;
+#endif//RECYCLE_PAGES
+
 typedef struct tl {
-    block_t *free_blocks[MAX_SCALE+1];
+#ifndef RECYCLE_PAGES
+    block_t *free_list[MAX_SCALE+1];
+#else
+    header_t *free_pages;
+    size_class_t size_class[MAX_SCALE+1];
+#endif//RECYCLE_PAGES
     block_t *blocks_from[MAX_NUM_THREADS];
     block_t *blocks_to[MAX_NUM_THREADS];
-} __attribute__((aligned(CACHE_LINE_SIZE))) tl_t ;
+} __attribute__((aligned(CACHE_LINE_SIZE))) tl_t;
 
 static header_t *headers_ = NULL;
 
 static tl_t tl_[MAX_NUM_THREADS] = {};
 
 static inline header_t *get_header (void *r) {
-    return headers_ + ((size_t)r >> REGION_SCALE);
+    ASSERT(((size_t)r >> PAGE_SCALE) < HEADERS_SIZE);
+    return headers_ + ((size_t)r >> PAGE_SCALE);
 }
 
 static void *get_new_region (int block_scale) {
-    size_t sz = (1 << block_scale);
-    if (sz < REGION_SIZE) {
-        sz = REGION_SIZE;
+    LOCALIZE_THREAD_LOCAL(tid_, int);
+#ifdef RECYCLE_PAGES
+    tl_t *tl = &tl_[tid_]; // thread-local data
+    if (block_scale <= PAGE_SCALE && tl->free_pages != NULL) {
+        void *region = tl->free_pages;
+        tl->free_pages = tl->free_pages->next;
+        get_header(region)->scale = block_scale;
+        return region;
+    }
+#endif//RECYCLE_PAGES
+    size_t region_size = (1 << block_scale);
+    if (region_size < PAGE_SIZE) {
+        region_size = PAGE_SIZE;
     }
-    void *region = mmap(NULL, sz, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
-    TRACE("m1", "get_new_region: mmapped new region %p (size %p)", region, sz);
+    void *region = mmap(NULL, region_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+    TRACE("m1", "get_new_region: mmapped new region %p (size %p)", region, region_size);
     if (region == (void *)-1) {
         perror("get_new_region: mmap");
         exit(-1);
     }
-    if ((size_t)region & (sz - 1)) {
+    if ((size_t)region & (region_size - 1)) {
         TRACE("m0", "get_new_region: region not aligned", 0, 0);
-        munmap(region, sz);
-        region = mmap(NULL, sz * 2, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+        munmap(region, region_size);
+        region = mmap(NULL, region_size * 2, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
         if (region == (void *)-1) {
             perror("get_new_region: mmap");
             exit(-1);
         }
-        TRACE("m0", "get_new_region: mmapped new region %p (size %p)", region, sz * 2);
-        void *aligned = (void *)(((size_t)region + sz) & ~(sz - 1));
+        TRACE("m0", "get_new_region: mmapped new region %p (size %p)", region, region_size * 2);
+        void *aligned = (void *)(((size_t)region + region_size) & ~(region_size - 1));
         size_t extra = (char *)aligned - (char *)region;
         if (extra) {
             munmap(region, extra);
             TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", region, extra);
         }
-        extra = ((char *)region + sz) - (char *)aligned;
+        extra = ((char *)region + region_size) - (char *)aligned;
         if (extra) {
-            munmap((char *)aligned + sz, extra);
-            TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", (char *)aligned + sz, extra);
+            munmap((char *)aligned + region_size, extra);
+            TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", (char *)aligned + region_size, extra);
         }
         region = aligned;
     }
     assert(region);
-    if (headers_ != NULL) {
-        LOCALIZE_THREAD_LOCAL(tid_, int);
-        header_t *h = get_header(region);
-
-        TRACE("m1", "get_new_region: header %p (%p)", h, h - headers_);
-
-        assert(h->scale == 0);
-        h->scale = block_scale;
-        h->owner = tid_;
-    }
+
+    header_t *h = get_header(region);
+    TRACE("m1", "get_new_region: header %p (%p)", h, h - headers_);
+    assert(h->scale == 0);
+    h->scale = block_scale;
+    h->owner = tid_;
 
     return region;
 }
 
 void mem_init (void) {
-#ifdef USE_SYSTEM_MALLOC
-    return;
-#endif
     assert(headers_ == NULL);
-    // Allocate a region for the region headers. This could be a big chunk of memory (256MB) on 64 bit systems,
-    // but it just takes up virtual address space. Physical address space used by the headers is still proportional
-    // to the amount of memory we alloc.
-    headers_ = (header_t *)get_new_region(HEADER_REGION_SCALE);
-    TRACE("m1", "mem_init: header region %p", headers_, 0);
-    memset(headers_, 0, (1 << HEADER_REGION_SCALE));
+    // Allocate space for the page headers. This could be a big chunk of memory on 64 bit systems,
+    // but it just takes up virtual address space. Physical space used by the headers is still
+    // proportional to the amount of memory the user mallocs.
+    headers_ = (header_t *)malloc(HEADERS_SIZE);
+    TRACE("m1", "mem_init: header page %p", headers_, 0);
+    memset(headers_, 0, HEADERS_SIZE);
+
+    // initialize spsc queues
+    for (int i = 0; i < MAX_NUM_THREADS; ++i) {
+        for (int j = 0; j < MAX_NUM_THREADS; ++j) {
+            if (i != j) {
+                tl_[i].blocks_to[j] = (block_t *)&(tl_[j].blocks_from[i]);
+            }
+        }
+    }
 }
 
-// Put onto its owner's public free list (in the appropriate size bin).
-//
-// TODO: maybe we want to munmap() larger size blocks?
 void nbd_free (void *x) {
-#ifdef USE_SYSTEM_MALLOC
-    TRACE("m1", "nbd_free: %p", x, 0);
-#ifndef NDEBUG
-    //memset(x, 0xcd, sizeof(void *)); // bear trap
-#endif//NDEBUG
-    free(x);
-    return;
-#endif//USE_SYSTEM_MALLOC
-    TRACE("m1", "nbd_free: block %p region %p", x, (size_t)x & ~MASK(REGION_SCALE));
-
-    assert(x);
+    TRACE("m1", "nbd_free: block %p page %p", x, (size_t)x & ~MASK(PAGE_SCALE));
+    ASSERT(x);
     LOCALIZE_THREAD_LOCAL(tid_, int);
     block_t *b = (block_t *)x;
     header_t *h = get_header(x);
-    TRACE("m1", "nbd_free: header %p scale %llu", h, h->scale);
-    assert(h->scale && h->scale <= MAX_SCALE);
+    int b_scale = h->scale;
+    TRACE("m1", "nbd_free: header %p scale %llu", h, b_scale);
+    ASSERT(b_scale && b_scale <= MAX_SCALE);
+#ifdef RECYCLE_PAGES
+    if (b_scale > PAGE_SCALE) {
+        int rc = munmap(x, 1 << b_scale);
+        ASSERT(rc == 0);
+        rc = rc;
+    }
+#endif
 #ifndef NDEBUG
-    memset(b, 0xcd, (1 << h->scale)); // bear trap
+    memset(b, 0xcd, (1 << b_scale)); // bear trap
 #endif
     tl_t *tl = &tl_[tid_]; // thread-local data
     if (h->owner == tid_) {
-        TRACE("m1", "nbd_free: private block, old free list head %p", tl->free_blocks[h->scale], 0);
-        b->next = tl->free_blocks[h->scale];
-        tl->free_blocks[h->scale] = b;
+        TRACE("m1", "nbd_free: private block, old free list head %p", tl->free_list[b_scale], 0);
+
+#ifndef RECYCLE_PAGES
+        b->next = tl->free_list[b_scale];
+        tl->free_list[b_scale] = b;
+#else //RECYCLE_PAGES
+        b->next = h->free_list;
+        h->free_list = b;
+        h->num_in_use--;
+        size_class_t *sc = &tl->size_class[b_scale];
+        if (sc->active_page != h) {
+            if (h->num_in_use == 0) {
+                // remove from the partial-page list
+                if (h->next != NULL) { h->next->prev = h->prev; }
+                if (h->prev != NULL) { h->prev->next = h->next; }
+                // put on the free-page list
+                h->next = tl->free_pages;
+                tl->free_pages = h;
+            } else {
+                // move to the top of the partial-page list
+                if (h->next != NULL) {
+                    h->next->prev = h->prev;
+                    if (h->prev != NULL) { h->prev->next = h->next; }
+                    h->prev = sc->newest_partial;
+                    h->next = NULL;
+                    sc->newest_partial = h;
+                }
+            }
+        }
+#endif//RECYCLE_PAGES
     } else {
-        TRACE("m1", "nbd_free: owner %llu", h->owner, 0); // push onto it's owner's queue
-        VOLATILE(b->next) = NULL;
-        if (EXPECT_FALSE(tl->blocks_to[h->owner] == NULL)) {
-            VOLATILE(tl_[h->owner].blocks_from[tid_]) = b;
-        } else {
-            VOLATILE(tl->blocks_to[h->owner]->next) = b;
+        int b_owner = h->owner;
+        TRACE("m1", "nbd_free: owner %llu", b_owner, 0);
+
+        // The assignment statements are volatile to prevent the compiler from reordering them.
+        VOLATILE_DEREF(b).next = NULL;
+        VOLATILE_DEREF(tl->blocks_to[b_owner]).next = b;
+
+        tl->blocks_to[b_owner] = b;
+    }
+}
+
+static inline void process_incoming_blocks (tl_t *tl) {
+    for (int p = 0; p < MAX_NUM_THREADS; ++p) {
+        block_t *b = tl->blocks_from[p];
+        if (EXPECT_FALSE(b == NULL)) continue; // the queue is completely empty
+
+        // Leave the last block on the queue. Removing the last block on the queue would create a
+        // race with the producer thread putting a new block on the queue.
+        for (block_t *next = b->next; next != NULL; b = next, next = b->next) {
+            // push onto the appropriate free list
+#ifndef RECYCLE_PAGES
+            int b_scale = get_header(b)->scale;
+            b->next = tl->free_list[b_scale];
+            tl->free_list[b_scale] = b;
+#else //RECYCLE_PAGES
+            header_t *h = get_header(b);
+            b->next = h->free_list;
+            h->free_list = b;
+#endif//RECYCLE_PAGES
         }
-        tl->blocks_to[h->owner] = b;
+        tl->blocks_from[p] = b;
     }
 }
 
-// Allocate a block of memory at least size . Blocks are binned in powers-of-two. Round up
-// to the nearest power-of-two.
+static inline block_t *pop_free_list (tl_t *tl, int scale) {
+#ifndef RECYCLE_PAGES
+    block_t **free_list = &tl->free_list[scale];
+#else //RECYCLE_PAGES
+    size_class_t *sc = &tl->size_class[scale];
+    if (EXPECT_FALSE(sc->active_page == NULL))
+        return NULL;
+    block_t **free_list = &sc->active_page->free_list;
+#endif//RECYCLE_PAGES
+    block_t *b = *free_list;
+    if (EXPECT_FALSE(b == NULL))
+        return NULL;
+    ASSERT(get_header(b)->scale == scale);
+    *free_list = b->next;
+    return b;
+}
+
+// Allocate a block of memory at least size . Blocks are binned in powers-of-two. Round up to
+// the nearest power of two.
 //
-// First check the current thread's private free list for an available block. If no blocks are on
-// the private free list, pull blocks off of the current thread's public free lists and put them
-// on the private free list. If we didn't find any blocks on the public free lists, allocate a new
-// region, break it up into blocks and put them on the private free list.
+// First check the current thread's free list for an available block. If there are no blocks on the
+// free list, pull items off of the current thread's incoming block queues and push them onto the
+// free list. If we didn't get an appropriate size block off of the block queues then allocate a new
+// page, break it up into blocks and push them onto the free list.
 void *nbd_malloc (size_t n) {
-#ifdef USE_SYSTEM_MALLOC
-    TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, GET_SCALE(n));
-    void *x = malloc(n);
-    TRACE("m1", "nbd_malloc: returning %p", x, 0);
-    return x;
-#endif
-    if (EXPECT_FALSE(n == 0))
-        return NULL;
-    if (n < sizeof(block_t)) {
-        n = sizeof(block_t);
-    }
-    int b_scale = GET_SCALE(n);
-    assert(b_scale >= 2);
-    assert(b_scale <= MAX_SCALE);
-    TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, b_scale);
+    // the scale is the log base 2 of , rounded up
+    int b_scale = (sizeof(void *) * __CHAR_BIT__) - __builtin_clzl((n) - 1);
+    TRACE("m1", "nbd_malloc: size %llu (scale %llu)", n, b_scale);
+
+    if (EXPECT_FALSE(b_scale < MIN_SCALE)) { b_scale = MIN_SCALE; }
+    if (EXPECT_FALSE(b_scale > MAX_SCALE)) { return NULL; }
+
     LOCALIZE_THREAD_LOCAL(tid_, int);
     tl_t *tl = &tl_[tid_]; // thread-local data
 
-    // If our private free list is empty, try to find blocks on our public free list. If that fails,
-    // allocate a new region.
-    if (EXPECT_FALSE(tl->free_blocks[b_scale] == NULL)) {
-        for (int i = 0; i < MAX_NUM_THREADS; ++ i) {
-            block_t *x = tl->blocks_from[i];
-            if (x != NULL) {
-                block_t *next = x->next;
-                if (next != NULL) {
-                    do {
-                        header_t *h = get_header(x);
-                        x->next = tl->free_blocks[h->scale];
-                        tl->free_blocks[h->scale] = x;
                        x = next;
-                        next = x->next;
-                    } while (next != NULL);
-                    tl->blocks_from[i] = x;
-                }
-            }
-        }
-        // allocate a new region
-        if (tl->free_blocks[b_scale] == NULL) {
-            char *region = get_new_region(b_scale);
-            size_t b_size = 1 << b_scale;
-            size_t region_size = (b_size < REGION_SIZE) ? REGION_SIZE : b_size;
-            for (int i = region_size; i != 0; i -= b_size) {
-                block_t *b = (block_t *)(region + i - b_size);
-                b->next = tl->free_blocks[b_scale];
-                tl->free_blocks[b_scale] = b;
-            }
+    block_t *b = pop_free_list(tl, b_scale);
+    if (b != NULL) {
+        TRACE("m1", "nbd_malloc: returning block %p", b, 0);
+        return b;
+    }
+
+    // The free list is empty so process blocks freed from other threads and then check again.
+    process_incoming_blocks(tl);
+    b = pop_free_list(tl, b_scale);
+    if (b != NULL) {
+        TRACE("m1", "nbd_malloc: returning block %p", b, 0);
+        return b;
+    }
+
+#ifdef RECYCLE_PAGES
+    // The current active page is completely allocated. Make the oldest partially allocated page
+    // the new active page.
+    size_class_t *sc = &tl->size_class[b_scale];
+    if (sc->oldest_partial != NULL) {
+        sc->active_page = sc->oldest_partial;
+        sc->oldest_partial = sc->oldest_partial->next;
+        sc->oldest_partial->prev = NULL;
+        b = pop_free_list(tl, b_scale);
+        ASSERT(b != NULL);
+        TRACE("m1", "nbd_malloc: returning block %p", b, 0);
+        return b;
+    }
+    // There are no partially allocated pages so get a new page.
+
+#endif//RECYCLE_PAGES
+
+    // Get a new page.
+    char *page = get_new_region(b_scale);
+    b = (block_t *)page; // grab the first block on the page
+
+    // Break up the remainder of the page into blocks and put them on the free list. Start at the
+    // end of the page so that the free list ends up in increasing order, for ease of debugging.
+    if (b_scale < PAGE_SCALE) {
+        size_t block_size = (1 << b_scale);
+        block_t *head = NULL;
+        for (int offset = PAGE_SIZE - block_size; offset > 0; offset -= block_size) {
+            block_t *x = (block_t *)(page + offset);
+            x->next = head; head = x;
         }
-        assert(tl->free_blocks[b_scale] != NULL);
+#ifndef RECYCLE_PAGES
+        tl->free_list[b_scale] = head;
+#else //RECYCLE_PAGES
+        sc->active_page = get_header(page);
+        sc->active_page->free_list = head;
+#endif//RECYCLE_PAGES
     }
 
-    // Pull a block off of our private free list.
-    block_t *b = tl->free_blocks[b_scale];
-    TRACE("m1", "nbd_malloc: returning block %p (region %p) from private list", b, (size_t)b & ~MASK(REGION_SCALE));
-    ASSERT(b);
-    ASSERT(get_header(b)->scale == b_scale);
-    tl->free_blocks[b_scale] = b->next;
+    TRACE("m1", "nbd_malloc: returning block %p from new region %p", b, (size_t)b & ~MASK(PAGE_SCALE));
     return b;
 }
+#else//USE_SYSTEM_MALLOC
+#include 
+
+void mem_init (void) {
+    return;
+}
+
+void nbd_free (void *x) {
+    TRACE("m1", "nbd_free: %p", x, 0);
+#ifndef NDEBUG
+    memset(x, 0xcd, sizeof(void *)); // bear trap
+#endif//NDEBUG
+    free(x);
+    return;
+}
+
+void *nbd_malloc (size_t n) {
+    TRACE("m1", "nbd_malloc: request size %llu", n, 0);
+    void *x = malloc(n);
+    TRACE("m1", "nbd_malloc: returning %p", x, 0);
+    return x;
+}
+#endif//USE_SYSTEM_MALLOC
diff --git a/runtime/runtime.c b/runtime/runtime.c
index ceb6772..415a161 100644
--- a/runtime/runtime.c
+++ b/runtime/runtime.c
@@ -34,7 +34,11 @@ static void *worker (void *arg) {
     thread_info_t *ti = (thread_info_t *)arg;
     SET_THREAD_LOCAL(tid_, ti->thread_id);
     LOCALIZE_THREAD_LOCAL(tid_, int);
+#ifndef NDEBUG
     SET_THREAD_LOCAL(rand_seed_, tid_+1);
+#else
+    SET_THREAD_LOCAL(rand_seed_, nbd_rand_seed(tid_+1));
+#endif
     lwt_thread_init(ti->thread_id);
     rcu_thread_init(ti->thread_id);
     void *ret = ti->start_routine(ti->arg);
@@ -56,3 +60,13 @@ int nbd_rand (void) {
     SET_THREAD_LOCAL(rand_seed_, r);
     return r;
 }
+
+uint64_t nbd_rand_seed (int i) {
+    return rdtsc() + -715159705 + i * 129;
+}
+
+// Fairly fast random numbers
+int nbd_next_rand (uint64_t *r) {
+    *r = (*r * 0x5DEECE66DLL + 0xBLL) & MASK(48);
+    return (*r >> 17) & 0x7FFFFFFF;
+}
diff --git a/test/haz_test.c b/test/haz_test.c
index a84c7be..b427fda 100644
--- a/test/haz_test.c
+++ b/test/haz_test.c
@@ -36,7 +36,7 @@ void *worker (void *arg) {
     haz_t *hp0 = haz_get_static(0);
 
    // Wait for all the worker threads to be ready.
-    (void)__sync_fetch_and_add(&wait_, -1);
+    (void)SYNC_ADD(&wait_, -1);
     do {} while (wait_);
 
     int i;
@@ -50,7 +50,7 @@ void *worker (void *arg) {
             do {
                 temp = old_head;
                 new_head->next = temp;
-            } while ((old_head = __sync_val_compare_and_swap(&stk_->head, temp, new_head)) != temp);
+            } while ((old_head = SYNC_CAS(&stk_->head, temp, new_head)) != temp);
         } else {
             // pop
             node_t *temp;
@@ -60,10 +60,10 @@ void *worker (void *arg) {
                 if (temp == NULL)
                     break;
                 haz_set(hp0, temp);
-                head = ((volatile lifo_t *)stk_)->head;
+                head = VOLATILE_DEREF(stk_).head;
                 if (temp != head)
                    continue;
-            } while ((head = __sync_val_compare_and_swap(&stk_->head, temp, temp->next)) != temp);
+            } while ((head = SYNC_CAS(&stk_->head, temp, temp->next)) != temp);
 
             if (temp != NULL) {
                 haz_defer_free(temp, nbd_free);
diff --git a/test/map_test1.c b/test/map_test1.c
index ce6102c..9d45812 100644
--- a/test/map_test1.c
+++ b/test/map_test1.c
@@ -85,7 +85,7 @@ int main (int argc, char **argv) {
         }
     }
 
-    static const map_impl_t *map_types[] = { &ll_map_impl, &sl_map_impl, &ht_map_impl };
+    static const map_impl_t *map_types[] = { &MAP_IMPL_LL, &MAP_IMPL_SL, &MAP_IMPL_HT };
     for (int i = 0; i < sizeof(map_types)/sizeof(*map_types); ++i) {
 #ifdef TEST_STRING_KEYS
         map_ = map_alloc(map_types[i], &DATATYPE_NSTRING);
diff --git a/test/map_test2.c b/test/map_test2.c
index b239cfc..8ab2b6f 100644
--- a/test/map_test2.c
+++ b/test/map_test2.c
@@ -318,7 +318,7 @@ void big_iteration_test (CuTest* tc) {
 
 int main (void) {
     lwt_set_trace_level("H3m3l2t0");
 
-    static const map_impl_t *map_types[] = { &ll_map_impl, &sl_map_impl, &ht_map_impl };
+    static const map_impl_t *map_types[] = { &MAP_IMPL_LL, &MAP_IMPL_SL, &MAP_IMPL_HT };
     for (int i = 0; i < sizeof(map_types)/sizeof(*map_types); ++i) {
         map_type_ = map_types[i];
diff --git a/test/perf_test.c b/test/perf_test.c
new file mode 100644
index 0000000..ace96d0
--- /dev/null
+++ b/test/perf_test.c
@@ -0,0 +1,180 @@
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "common.h"
+#include "nstring.h"
+#include "runtime.h"
+#include "map.h"
+#include "rcu.h"
+#include "mem.h"
+#include "list.h"
+#include "skiplist.h"
+#include "hashtable.h"
+
+//#define TEST_STRING_KEYS
+
+static volatile int wait_;
+static volatile int stop_;
+static long num_threads_;
+static map_t *map_;
+static int get_range_;
+static int put_range_;
+static int num_keys_;
+static map_key_t *keys_ = NULL;
+static uint64_t times_[MAX_NUM_THREADS] = {};
+static int ops_[MAX_NUM_THREADS] = {};
+
+void *worker (void *arg) {
+    int tid = (int)(size_t)arg;
+    uint64_t s = nbd_rand_seed(tid);
+    int get_ops = 0, put_ops = 0, del_ops = 0;
+
+    // Wait for all the worker threads to be ready.
+    (void)SYNC_ADD(&wait_, -1);
+    do {} while (wait_);
+
+    uint64_t t1 = rdtsc();
+
+    while (!stop_) {
+        int r = nbd_next_rand(&s);
+        int x = r & ( (1 << 20) - 1 );
+        int i = nbd_next_rand(&s) & (num_keys_ - 1);
+        map_key_t key = keys_[i];
+        if (x < get_range_) {
+            map_val_t val = map_get(map_, key);
+#ifdef TEST_STRING_KEYS
+            ASSERT(val == DOES_NOT_EXIST || ns_cmp((nstring_t *)key, (nstring_t *)val) == 0);
+#else
+            ASSERT(val == DOES_NOT_EXIST || key == val);
+#endif
+            get_ops++;
+        } else if (x < put_range_) {
+            map_add(map_, key, key);
+            put_ops++;
+        } else {
+            map_remove(map_, key);
+            del_ops++;
+        }
+        rcu_update();
+    }
+
+    times_[tid] = rdtsc() - t1;
+    ops_[tid] = get_ops + put_ops + del_ops;
+
+    return NULL;
+}
+
+void run_test (void) {
+    wait_ = num_threads_ + 1;
+
+    // Quicky sanity check
+    int n = 100;
+    if (num_keys_ < n) { n = num_keys_; }
+    for (int i = 0; i < n; ++i) {
+        map_set(map_, keys_[i], keys_[i]);
+        for(int j = 0; j < i; ++j) {
+#ifdef TEST_STRING_KEYS
+            ASSERT(ns_cmp((nstring_t *)map_get(map_, keys_[i]), (nstring_t *)keys_[i]) == 0);
+#else
+            ASSERT(map_get(map_, keys_[i]) == keys_[i]);
+#endif
+        }
+    }
+
+    stop_ = 0;
+
+    pthread_t thread[MAX_NUM_THREADS];
+    for (int i = 0; i < num_threads_; ++i) {
+        int rc = nbd_thread_create(thread + i, i, worker, (void*)(size_t)i);
+        if (rc != 0) { perror("pthread_create"); exit(rc); }
+    }
+
+    do { /* nothing */ } while (wait_ != 1);
+
+    wait_ = 0;
+    sleep(2);
+    stop_ = 1;
+
+    for (int i = 0; i < num_threads_; ++i) {
+        pthread_join(thread[i], NULL);
+    }
+}
+
+int main (int argc, char **argv) {
+    char* program_name = argv[0];
+
+    if (argc > 2) {
+        fprintf(stderr, "Usage: %s num_threads\n", program_name);
+        return -1;
+    }
+
+    num_threads_ = 2;
+    if (argc == 2)
+    {
+        errno = 0;
+        num_threads_ = strtol(argv[1], NULL, 10);
+        if (errno) {
+            fprintf(stderr, "%s: Invalid argument for number of threads\n", program_name);
+            return -1;
+        }
+        if (num_threads_ <= 0) {
+            fprintf(stderr, "%s: Number of threads must be at least 1\n", program_name);
+            return -1;
+        }
+        if (num_threads_ > MAX_NUM_THREADS) {
+            fprintf(stderr, "%s: Number of threads cannot be more than %d\n", program_name, MAX_NUM_THREADS);
+            return -1;
+        }
+    }
+
+    int table_scale = 10;
+    int read_ratio = 95;
+    get_range_ = (read_ratio << 20) / 100;
+    put_range_ = (((1 << 20) - get_range_) >> 1) + get_range_;
+
+    static const map_impl_t *map_types[] = { &MAP_IMPL_HT };
+    for (int i = 0; i < sizeof(map_types)/sizeof(*map_types); ++i) {
+#ifdef TEST_STRING_KEYS
+        map_ = map_alloc(map_types[i], &DATATYPE_NSTRING);
+#else
+        map_ = map_alloc(map_types[i], NULL);
+#endif
+
+        // Do some warmup
+        num_keys_ = 1 << table_scale;
+        keys_ = nbd_malloc(sizeof(map_key_t) * num_keys_);
+        for (int j = 0; j < num_keys_; ++j) {
+#ifdef TEST_STRING_KEYS
+            char tmp[64];
+            snprintf(tmp, sizeof(tmp), "%dabc%d", j, j*17+123);
+            int n = strlen(tmp);
+            keys_[j] = ns_alloc(n);
+            memcpy(keys_[j], tmp, n);
+#else
+            keys_[j] = j*17+123;
+#endif
+        }
+
+        struct timeval tv1, tv2;
+        gettimeofday(&tv1, NULL);
+
+        int num_trials = 1;
+        for (int i = 0; i < num_trials; ++i) {
+            run_test();
+        }
+
+        gettimeofday(&tv2, NULL);
+        int ms = (int)(1000000*(tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec - tv1.tv_usec) / 1000;
+        map_print(map_);
+        printf("Th:%ld Time:%dms\n\n", num_threads_, ms);
+        fflush(stdout);
+
+        map_free(map_);
+    }
+
+    return 0;
+}
diff --git a/test/rcu_test.c b/test/rcu_test.c
index 5181253..14134b9 100644
--- a/test/rcu_test.c
+++ b/test/rcu_test.c
@@ -30,18 +30,18 @@ static lifo_t *lifo_alloc (void) {
 static void
 lifo_aba_push (lifo_t *stk, node_t *x) {
     node_t *head;
     do {
-        head = ((volatile lifo_t *)stk)->head;
-        ((volatile node_t *)x)->next = head;
-    } while (__sync_val_compare_and_swap(&stk->head, head, x) != head);
+        head = VOLATILE_DEREF(stk).head;
+        VOLATILE_DEREF(x).next = head;
+    } while (SYNC_CAS(&stk->head, head, x) != head);
 }
 
 node_t *lifo_aba_pop (lifo_t *stk) {
     node_t *head;
     do {
-        head = ((volatile lifo_t *)stk)->head;
+        head = VOLATILE_DEREF(stk).head;
         if (head == NULL)
             return NULL;
-    } while (__sync_val_compare_and_swap(&stk->head, head, head->next) != head);
+    } while (SYNC_CAS(&stk->head, head, head->next) != head);
     head->next = NULL;
     return head;
 }
diff --git a/test/txn_test.c b/test/txn_test.c
index 651e1c0..0a5045d 100644
--- a/test/txn_test.c
+++ b/test/txn_test.c
@@ -10,7 +10,7 @@
 #define ASSERT_EQUAL(x, y) CuAssertIntEquals(tc, x, y)
 
 void test1 (CuTest* tc) {
-    map_t *map = map_alloc(&ht_map_impl, NULL);
+    map_t *map = map_alloc(&MAP_IMPL_HT, NULL);
     txn_t *t1 = txn_begin(map);
     txn_t *t2 = txn_begin(map);
     map_key_t k1 = (map_key_t)1;
diff --git a/todo b/todo
index 34efcf4..eba0d33 100644
--- a/todo
+++ b/todo
@@ -1,29 +1,30 @@
-memory manangement
+memory reclamation
 ------------------
 - allow threads to dynamically enter and exit rcu's token passing ring
 - augment rcu with heartbeat manager to kill and recover from stalled threads
 - make rcu try yielding when its buffer gets full
-- alternate memory reclamation schemes: hazard pointers and/or reference counting
-- seperate nbd_malloc/nbd_free into general purpose malloc/free replacement
+- use alternate memory reclamation schemes: hazard pointers and/or reference counting
 
 quality
 -------
-- verify the key memory management in list, skiplist, and hashtable
+- verify the memory management of keys in list, skiplist, and hashtable
 - transaction tests
 - port perf tests from lib-high-scale
 - characterize the performance of hashtable vs. skiplist vs. list
 - validate function arguments in interface functions
-- document usage of the library
+- document usage
 - document algorithms
 
 optimization
 ------------
 - investigate 16 byte CAS; ht can store GUIDs inline instead of pointers to actual keys
-- shortcut from write-set to entries/nodes
-- use a shared scan for write-set validation, similar to ht copy logic
+- write after write can just update the old update record instead of pushing a new one
+- use a shared scan for write-set validation in txn, similar to ht copy logic
 - experiment with the performance impact of not passing the hash between functions in ht
 - experiment with embedding the nstring keys in the list/skiplist nodes
 
 features
 --------
 - allow values of 0 to be inserted into maps (change DOES_NOT_EXIST to something other than 0)
+- read-committed type transactions
+- recycle free regions across size-classes and between threads
diff --git a/txn/txn.c b/txn/txn.c
index 69f3b55..b3aadfd 100644
--- a/txn/txn.c
+++ b/txn/txn.c
@@ -36,7 +36,7 @@ struct txn {
     write_rec_t *writes;
     size_t writes_size;
     size_t writes_count;
-    size_t writes_scan;
+    size_t validate_scan;
     txn_state_e state;
 };
 
@@ -46,11 +46,6 @@
 static skiplist_t *active_ = NULL;
 
 static version_t version_ = 1;
 
-static inline skiplist_t *get_active (void) {
-
-    return active_;
-}
-
 // Validate the updates for . Validation fails if there is a write-write conflict. That is if after our
 // read version another transaction committed a change to an entry we are also trying to change.
 //
@@ -119,7 +114,6 @@ static txn_state_e validate_key (txn_t *txn, map_key_t key) {
 
 static txn_state_e txn_validate (txn_t *txn) {
     assert(txn->state != TXN_RUNNING);
-    int i;
     switch (txn->state) {
 
         case TXN_VALIDATING:
@@ -128,7 +122,7 @@
             (void)SYNC_CAS(&txn->wv, UNDETERMINED_VERSION, wv);
         }
 
-        for (i = 0; i < txn->writes_count; ++i) {
+        for (int i = 0; i < txn->writes_count; ++i) {
            txn_state_e s = validate_key(txn, txn->writes[i].key);
            if (s == TXN_ABORTED) {
                txn->state = TXN_ABORTED;
@@ -334,7 +328,6 @@ map_val_t txn_map_get (txn_t *txn, map_key_t key) {
     map_val_t value = update->value;
 
     TRACE("x1", "txn_map_get: key found returning value %p", value, 0);
-    return value;
 
     // collect some garbage
     version_t min_active_version = UNDETERMINED_VERSION;
     update_t *next_update = NULL;
-- 
2.40.0
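
The power-of-two size binning that nbd_malloc() relies on in the mem.c changes above (log base 2 of the request rounded up, then clamped to MIN_SCALE) can be reproduced in isolation. The following is a minimal standalone sketch, assuming the 64-bit values of MIN_SCALE and MAX_SCALE from the patch; block_scale() is a hypothetical helper name, and the n <= 1 guard exists only to keep this example well-defined for every input (the allocator computes the scale inline on the sizes it is actually given).

#include <assert.h>
#include <limits.h>
#include <stdio.h>
#include <stddef.h>

#define MIN_SCALE 3   /* smallest block is 8 bytes (64-bit values from the patch) */
#define MAX_SCALE 31  /* largest block the allocator will hand out */

/* log2 of n rounded up, clamped to the allocator's scale range */
static int block_scale (size_t n) {
    if (n <= 1)
        return MIN_SCALE;                   /* guard only for this standalone example */
    int scale = (int)(sizeof(void *) * CHAR_BIT) - __builtin_clzl(n - 1);
    if (scale < MIN_SCALE) scale = MIN_SCALE;
    if (scale > MAX_SCALE) return -1;       /* the allocator returns NULL in this case */
    return scale;
}

int main (void) {
    assert(block_scale(8)    == 3);         /* exact power of two maps to its own bin    */
    assert(block_scale(9)    == 4);         /* anything larger rounds up to the next bin */
    assert(block_scale(4)    == MIN_SCALE); /* small requests are clamped up             */
    assert(block_scale(4096) == 12);
    for (size_t n = 1; n <= 17; ++n)
        printf("n=%zu -> scale %d (block %d bytes)\n", n, block_scale(n), 1 << block_scale(n));
    return 0;
}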
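nbd_next_rand() added in runtime/runtime.c above is a 48-bit linear congruential generator (the same multiplier and increment as java.util.Random), and perf_test.c slices its output into get/put/remove ranges out of a 2^20 window, with a 95% read ratio by default. Below is a minimal standalone sketch of that operation mix; the only values taken from the patch are the LCG constants, the 2^20 window, and the read ratio, while next_rand() and the seed are illustrative stand-ins rather than library code (the test itself seeds from rdtsc() via nbd_rand_seed()).

#include <stdint.h>
#include <stdio.h>

/* 48-bit LCG; same recurrence as nbd_next_rand() in the patch */
static int next_rand (uint64_t *r) {
    *r = (*r * 0x5DEECE66DULL + 0xBULL) & ((1ULL << 48) - 1);
    return (int)((*r >> 17) & 0x7FFFFFFF);
}

int main (void) {
    int read_ratio = 95;                                          /* default in perf_test.c      */
    int get_range  = (read_ratio << 20) / 100;                    /* ~95% of the 2^20 window     */
    int put_range  = (((1 << 20) - get_range) >> 1) + get_range;  /* half of the remaining 5%    */

    uint64_t seed = 1;                       /* any nonzero value; illustrative only */
    int gets = 0, puts = 0, removes = 0;
    for (int i = 0; i < 1000000; ++i) {
        int x = next_rand(&seed) & ((1 << 20) - 1);
        if (x < get_range)      { ++gets;    }   /* map_get() in the real test    */
        else if (x < put_range) { ++puts;    }   /* map_add()                     */
        else                    { ++removes; }   /* map_remove()                  */
    }
    printf("gets %d  puts %d  removes %d\n", gets, puts, removes);
    return 0;
}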