From 0f6e9afb06b03647c4c5f2499ddab12f42b4340e Mon Sep 17 00:00:00 2001
From: jdybnis
Date: Sun, 18 Jan 2009 21:47:47 +0000
Subject: [PATCH] improve memory allocator

---
 include/hazard.h  |   4 +-
 makefile          |  10 +--
 runtime/hazard.c  |  16 ++++-
 runtime/lwt.c     |   2 +-
 runtime/mem.c     | 156 +++++++++++++++++++++++++++-------------------
 runtime/rlocal.h  |   3 +-
 runtime/runtime.c |   3 +-
 test/map_test2.c  |   2 +-
 todo              |  17 +----
 txn/txn.c         |   2 +-
 10 files changed, 122 insertions(+), 93 deletions(-)

diff --git a/include/hazard.h b/include/hazard.h
index 938a3bd..6cec847 100644
--- a/include/hazard.h
+++ b/include/hazard.h
@@ -15,7 +15,9 @@
 typedef void (*free_t) (void *);
 typedef void *haz_t;
 
-static inline void haz_set (haz_t *haz, void *x) { *haz = x; __asm__ __volatile__("mfence"); }
+//static inline void haz_set (volatile haz_t *haz, void *x) { *haz = x; haz_t y = *haz; y = y; }
+
+static inline void haz_set (volatile haz_t *haz, void *x) { *haz = x; __asm__ __volatile__("mfence"); }
 
 haz_t *haz_get_static (int n);
 void haz_register_dynamic (haz_t *haz);

diff --git a/makefile b/makefile
index b7649ed..8d51bb3 100644
--- a/makefile
+++ b/makefile
@@ -4,11 +4,13 @@
 ###################################################################################################
 # Makefile for building programs with whole-program interfile optimization
 ###################################################################################################
-CFLAGS0 := -g -Wall -Werror -std=c99 -lpthread
-CFLAGS1 := $(CFLAGS0) -O3 #-DNDEBUG #-DENABLE_TRACE #-fwhole-program -combine
-CFLAGS := $(CFLAGS1) -DUSE_SYSTEM_MALLOC #-DLIST_USE_HAZARD_POINTER #-DTEST_STRING_KEYS #-DNBD32
+CFLAGS0 := -Wall -Werror -std=gnu99 -lpthread #-m32 -DNBD32
+CFLAGS1 := $(CFLAGS0) -g -O3 #-DNDEBUG #-fwhole-program -combine
+CFLAGS2 := $(CFLAGS1) #-DENABLE_TRACE
+CFLAGS3 := $(CFLAGS2) #-DLIST_USE_HAZARD_POINTER
+CFLAGS := $(CFLAGS3) #-DUSE_SYSTEM_MALLOC #-DTEST_STRING_KEYS
 INCS := $(addprefix -I, include)
-TESTS := output/rcu_test output/haz_test output/map_test2 output/map_test1 output/txn_test
+TESTS := output/map_test2 output/map_test1 output/txn_test output/rcu_test output/haz_test
 EXES := $(TESTS)
 
 RUNTIME_SRCS := runtime/runtime.c runtime/rcu.c runtime/lwt.c runtime/mem.c datatype/nstring.c \

diff --git a/runtime/hazard.c b/runtime/hazard.c
index 20d326a..3ebed2e 100644
--- a/runtime/hazard.c
+++ b/runtime/hazard.c
@@ -13,6 +13,7 @@
 #include "tls.h"
 #include "runtime.h"
 #include "hazard.h"
+#include "lwt.h"
 
 typedef struct pending {
     void *  ptr;
@@ -35,18 +36,23 @@ typedef struct haz_local {
 static haz_local_t haz_local_[MAX_NUM_THREADS] = {};
 
 static void sort_hazards (haz_t *hazards, int n) {
+    TRACE("H3", "sort_hazards: sorting hazard list %p of %p elements", hazards, n);
     return;
 }
 
 static int search_hazards (void *p, haz_t *hazards, int n) {
+    TRACE("H4", "search_hazards: searching list %p for hazard %p", hazards, p);
     for (int i = 0; i < n; ++i) {
-        if (hazards[i] == p)
+        if (hazards[i] == p) {
+            TRACE("H2", "search_hazards: found hazard %p", p, 0);
             return TRUE;
+        }
     }
     return FALSE;
 }
 
 static void resize_pending (void) {
+    TRACE("H2", "resize_pending", 0, 0);
     LOCALIZE_THREAD_LOCAL(tid_, int);
     haz_local_t *l = haz_local_ + tid_;
     pending_t *p = nbd_malloc(sizeof(pending_t) * l->pending_size * 2);
@@ -57,6 +63,7 @@ static void resize_pending (void) {
 }
 
 void haz_defer_free (void *d, free_t f) {
+    TRACE("H1", "haz_defer_free: %p (%p)", d, f);
     assert(d);
     assert(f);
     LOCALIZE_THREAD_LOCAL(tid_, int);
@@ -121,14 +128,18 @@ void haz_defer_free (void *d, free_t f) {
 }
 
 haz_t *haz_get_static (int i) {
+    TRACE("H1", "haz_get_static: %p", i, 0);
     if (i >= STATIC_HAZ_PER_THREAD)
         return NULL;
     LOCALIZE_THREAD_LOCAL(tid_, int);
     assert(i < STATIC_HAZ_PER_THREAD);
-    return &haz_local_[tid_].static_haz[i];
+    haz_t *ret = &haz_local_[tid_].static_haz[i];
+    TRACE("H1", "haz_get_static: returning %p", ret, 0);
+    return ret;
 }
 
 void haz_register_dynamic (haz_t *haz) {
+    TRACE("H1", "haz_register_dynamic: %p", haz, 0);
     LOCALIZE_THREAD_LOCAL(tid_, int);
     haz_local_t *l = haz_local_ + tid_;
 
@@ -151,6 +162,7 @@ void haz_register_dynamic (haz_t *haz) {
 
 // assumes was registered in the same thread
 void haz_unregister_dynamic (void **haz) {
+    TRACE("H1", "haz_unregister_dynamic: %p", haz, 0);
     LOCALIZE_THREAD_LOCAL(tid_, int);
     haz_local_t *l = haz_local_ + tid_;

diff --git a/runtime/lwt.c b/runtime/lwt.c
index c25fd95..3904175 100644
--- a/runtime/lwt.c
+++ b/runtime/lwt.c
@@ -10,7 +10,7 @@
 #include "lwt.h"
 #include "mem.h"
 
-#define LWT_BUFFER_SCALE 16
+#define LWT_BUFFER_SCALE 20
 #define LWT_BUFFER_SIZE (1 << LWT_BUFFER_SCALE)
 #define LWT_BUFFER_MASK (LWT_BUFFER_SIZE - 1)

diff --git a/runtime/mem.c b/runtime/mem.c
index 281c719..1f7a5a6 100644
--- a/runtime/mem.c
+++ b/runtime/mem.c
@@ -2,7 +2,7 @@
  * Written by Josh Dybnis and released to the public domain, as explained at
  * http://creativecommons.org/licenses/publicdomain
  *
- * Extreamly fast multi-threaded malloc. 64 bit platforms only!
+ * Extremely fast multi-threaded malloc.
  */
 #define _BSD_SOURCE // so we get MAP_ANON on linux
 #include 
@@ -13,12 +13,16 @@
 #include "lwt.h"
 
 #define GET_SCALE(n) (sizeof(void *)*__CHAR_BIT__ - __builtin_clzl((n) - 1)) // log2 of <n>, rounded up
-#define MAX_SCALE 31 // allocate blocks up to 4GB in size (arbitrary, could be bigger)
-#define REGION_SCALE 22 // 4MB regions
+#ifndef NBD32
+#define MAX_POINTER_BITS 48
+#define REGION_SCALE 21 // 2MB regions
+#else
+#define MAX_POINTER_BITS 32
+#define REGION_SCALE 12 // 4KB regions
+#endif
 #define REGION_SIZE (1 << REGION_SCALE)
-#define HEADER_REGION_SCALE 22 // 4MB is space enough for headers for over 2,000,000 regions
-#define HEADER_REGION_SIZE (1 << HEADER_REGION_SCALE)
-#define HEADER_COUNT (HEADER_REGION_SIZE / sizeof(header_t))
+#define HEADER_REGION_SCALE ((MAX_POINTER_BITS - REGION_SCALE) + GET_SCALE(sizeof(header_t)))
+#define MAX_SCALE 31 // allocate blocks up to 4GB in size (arbitrary, could be bigger)
 
 typedef struct block {
     struct block *next;
@@ -30,19 +34,18 @@ typedef struct header {
     uint8_t scale; // log2 of the block size
 } header_t;
 
-typedef struct private_list {
-    block_t *head;
-    uint32_t next_pub;
-    uint32_t count;
-} private_list_t;
+typedef struct tl {
+    block_t *free_blocks[MAX_SCALE+1];
+    block_t *blocks_from[MAX_NUM_THREADS];
+    block_t *blocks_to[MAX_NUM_THREADS];
+} __attribute__((aligned(CACHE_LINE_SIZE))) tl_t;
 
 static header_t *headers_ = NULL;
-static block_t *pub_free_list_[MAX_NUM_THREADS][MAX_SCALE+1][MAX_NUM_THREADS] = {};
-static private_list_t pri_free_list_[MAX_NUM_THREADS][MAX_SCALE+1] = {};
+static tl_t tl_[MAX_NUM_THREADS] = {};
 
 static inline header_t *get_header (void *r) {
-    return headers_ + (((size_t)r >> REGION_SCALE) & (HEADER_COUNT - 1));
+    return headers_ + ((size_t)r >> REGION_SCALE);
 }
 
 static void *get_new_region (int block_scale) {
@@ -51,11 +54,33 @@ static void *get_new_region (int block_scale) {
         sz = REGION_SIZE;
     }
     void *region = mmap(NULL, sz, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
-    TRACE("m1", "get_new_region: mmap new region %p (size %p)", region, sz);
+    TRACE("m1", "get_new_region: mmapped new region %p (size %p)", region, sz);
     if (region == (void *)-1) {
         perror("get_new_region: mmap");
         exit(-1);
     }
+    if ((size_t)region & (sz - 1)) {
+        TRACE("m0", "get_new_region: region not aligned", 0, 0);
+        munmap(region, sz);
+        region = mmap(NULL, sz * 2, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+        if (region == (void *)-1) {
+            perror("get_new_region: mmap");
+            exit(-1);
+        }
+        TRACE("m0", "get_new_region: mmapped new region %p (size %p)", region, sz * 2);
+        void *aligned = (void *)(((size_t)region + sz) & ~(sz - 1));
+        size_t extra = (char *)aligned - (char *)region;
+        if (extra) {
+            munmap(region, extra);
+            TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", region, extra);
+        }
+        extra = ((char *)region + sz) - (char *)aligned;
+        if (extra) {
+            munmap((char *)aligned + sz, extra);
+            TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", (char *)aligned + sz, extra);
+        }
+        region = aligned;
+    }
     assert(region);
     if (headers_ != NULL) {
         LOCALIZE_THREAD_LOCAL(tid_, int);
@@ -70,24 +95,27 @@ static void *get_new_region (int block_scale) {
     return region;
 }
 
-void mem_init (void) {
+__attribute__ ((constructor(101))) void mem_init (void) {
 #ifdef USE_SYSTEM_MALLOC
     return;
 #endif
     assert(headers_ == NULL);
-    headers_ = (header_t *)get_new_region(HEADER_REGION_SCALE);
+    // Allocate a region for the region headers. This could be a big chunk of memory (256MB) on 64-bit
+    // systems, but it only takes up virtual address space. The physical memory used by the headers
+    // stays proportional to the amount of memory we actually allocate.
+    headers_ = (header_t *)get_new_region(HEADER_REGION_SCALE);
     TRACE("m1", "mem_init: header region %p", headers_, 0);
-    memset(headers_, 0, HEADER_REGION_SIZE);
+    memset(headers_, 0, (1 << HEADER_REGION_SCALE));
 }
 
 // Put <x> onto its owner's public free list (in the appropriate size bin).
 //
-// TODO: maybe we want to munmap() larger size blocks to reclaim virtual address space?
+// TODO: maybe we want to munmap() larger size blocks?
 void nbd_free (void *x) {
 #ifdef USE_SYSTEM_MALLOC
     TRACE("m1", "nbd_free: %p", x, 0);
 #ifndef NDEBUG
-    memset(x, 0xcd, sizeof(void *)); // bear trap
+    //memset(x, 0xcd, sizeof(void *)); // bear trap
 #endif//NDEBUG
     free(x);
     return;
 #endif
@@ -103,15 +131,21 @@ void nbd_free (void *x) {
 #ifndef NDEBUG
     memset(b, 0xcd, (1 << h->scale)); // bear trap
 #endif
+    tl_t *tl = &tl_[tid_]; // thread-local data
     if (h->owner == tid_) {
-        TRACE("m1", "nbd_free: private block, old free list head %p", pri_free_list_[tid_][h->scale].head, 0);
-        b->next = pri_free_list_[tid_][h->scale].head;
-        pri_free_list_[tid_][h->scale].head = b;
+        TRACE("m1", "nbd_free: private block, old free list head %p", tl->free_blocks[h->scale], 0);
+        b->next = tl->free_blocks[h->scale];
+        tl->free_blocks[h->scale] = b;
     } else {
-        TRACE("m1", "nbd_free: owner %llu free list head %p", h->owner, pub_free_list_[h->owner][h->scale][tid_]);
-        do {
-            b->next = pub_free_list_[h->owner][h->scale][tid_];
-        } while (SYNC_CAS(&pub_free_list_[h->owner][h->scale][tid_], b->next, b) != b->next);
+        TRACE("m1", "nbd_free: owner %llu", h->owner, 0);
+        // push onto its owner's queue
+        VOLATILE(b->next) = NULL;
+        if (EXPECT_FALSE(tl->blocks_to[h->owner] == NULL)) {
+            VOLATILE(tl_[h->owner].blocks_from[tid_]) = b;
+        } else {
+            VOLATILE(tl->blocks_to[h->owner]->next) = b;
+        }
+        tl->blocks_to[h->owner] = b;
     }
 }
 
@@ -139,54 +173,46 @@ void *nbd_malloc (size_t n) {
     assert(b_scale <= MAX_SCALE);
     TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, b_scale);
     LOCALIZE_THREAD_LOCAL(tid_, int);
-    private_list_t *pri = &pri_free_list_[tid_][b_scale]; // our private free list
+    tl_t *tl = &tl_[tid_]; // thread-local data
 
     // If our private free list is empty, try to find blocks on our public free list. If that fails,
     // allocate a new region.
-    if (EXPECT_FALSE(pri->head == NULL)) {
-        block_t **pubs = pub_free_list_[tid_][b_scale]; // our public free lists
-        while (1) {
-            // look for blocks on our public free lists round robin
-            pri->next_pub = (pri->next_pub+1) & (MAX_NUM_THREADS-1);
-
-            TRACE("m1", "nbd_malloc: searching public free list %llu", pri->next_pub, 0);
-            if (pri->next_pub == tid_) {
-                uint32_t count = pri->count;
-                pri->count = 0;
-                // If we haven't gotten at least half a region's worth of block's from our public lists
-                // we allocate a new region. This guarentees that we amortize the cost of accessing our
-                // public lists accross enough nbd_malloc() calls.
-                uint32_t min_count = b_scale > REGION_SCALE ? 1 << (b_scale-REGION_SCALE-1) : 1;
-                if (count < min_count) {
-                    char *region = get_new_region(b_scale);
-                    size_t b_size = 1 << b_scale;
-                    size_t region_size = (b_size < REGION_SIZE) ? REGION_SIZE : b_size;
-                    for (int i = region_size; i != 0; i -= b_size) {
-                        block_t *b = (block_t *)(region + i - b_size);
-                        b->next = pri->head;
-                        pri->head = b;
-                    }
-                    pri->count = 0;
-                    break;
+    if (EXPECT_FALSE(tl->free_blocks[b_scale] == NULL)) {
+        for (int i = 0; i < MAX_NUM_THREADS; ++ i) {
+            block_t *x = tl->blocks_from[i];
+            if (x != NULL) {
+                block_t *next = x->next;
+                if (next != NULL) {
+                    do {
+                        header_t *h = get_header(x);
+                        x->next = tl->free_blocks[h->scale];
+                        tl->free_blocks[h->scale] = x;
+                        x = next;
+                        next = x->next;
+                    } while (next != NULL);
+                    tl->blocks_from[i] = x;
                 }
-            } else if (pubs[pri->next_pub] != NULL) {
-                block_t *stolen = SYNC_SWAP(&pubs[pri->next_pub], NULL);
-                TRACE("m1", "nbd_malloc: stole list %p", stolen, 0);
-                if (stolen == NULL)
-                    continue;
-                pri->head = stolen;
-                break;
             }
         }
-        assert(pri->head);
+        // allocate a new region
+        if (tl->free_blocks[b_scale] == NULL) {
+            char *region = get_new_region(b_scale);
+            size_t b_size = 1 << b_scale;
+            size_t region_size = (b_size < REGION_SIZE) ? REGION_SIZE : b_size;
+            for (int i = region_size; i != 0; i -= b_size) {
+                block_t *b = (block_t *)(region + i - b_size);
+                b->next = tl->free_blocks[b_scale];
+                tl->free_blocks[b_scale] = b;
+            }
+        }
+        assert(tl->free_blocks[b_scale] != NULL);
     }
 
     // Pull a block off of our private free list.
-    block_t *b = pri->head;
+    block_t *b = tl->free_blocks[b_scale];
     TRACE("m1", "nbd_malloc: returning block %p (region %p) from private list", b, (size_t)b & ~MASK(REGION_SCALE));
-    assert(b);
+    ASSERT(b);
+    ASSERT(get_header(b)->scale == b_scale);
-    pri->head = b->next;
-    pri->count++;
+    tl->free_blocks[b_scale] = b->next;
     return b;
 }

diff --git a/runtime/rlocal.h b/runtime/rlocal.h
index 1c727bb..d6e90fa 100644
--- a/runtime/rlocal.h
+++ b/runtime/rlocal.h
@@ -4,8 +4,7 @@
 #include "runtime.h"
 #include "tls.h"
 
-void mem_init (void);
-
 void rcu_thread_init (int thread_id);
 void lwt_thread_init (int thread_id);
+
 #endif//RLOCAL_H

diff --git a/runtime/runtime.c b/runtime/runtime.c
index ceb6772..b145faa 100644
--- a/runtime/runtime.c
+++ b/runtime/runtime.c
@@ -20,12 +20,11 @@ typedef struct thread_info {
     void *restrict arg;
 } thread_info_t;
 
-__attribute__ ((constructor)) void nbd_init (void) {
+__attribute__ ((constructor(102))) void nbd_init (void) {
     //sranddev();
     INIT_THREAD_LOCAL(rand_seed_);
     INIT_THREAD_LOCAL(tid_);
     SET_THREAD_LOCAL(tid_, 0);
-    mem_init();
     lwt_thread_init(0);
     rcu_thread_init(0);
 }

diff --git a/test/map_test2.c b/test/map_test2.c
index de7f388..b660c1a 100644
--- a/test/map_test2.c
+++ b/test/map_test2.c
@@ -316,7 +316,7 @@ void big_iteration_test (CuTest* tc) {
 }
 
 int main (void) {
-    lwt_set_trace_level("r0m3s3");
+    lwt_set_trace_level("H3m3l2t0");
 
     static const map_impl_t *map_types[] = { &ll_map_impl, &sl_map_impl, &ht_map_impl };
     for (int i = 0; i < sizeof(map_types)/sizeof(*map_types); ++i) {

diff --git a/todo b/todo
index dbdab12..34efcf4 100644
--- a/todo
+++ b/todo
@@ -1,18 +1,7 @@
-+ fix makefile to compute dependency lists as a side-effect of compilation (-MF)
-+ support integer keys for ht
-+ optimize tracing code, still too much overhead
-+ use NULL instead of a sentinal node in skiplist and list
-+ make the interfaces for all data structures consistent
-+ make list and skiplist use string keys
-+ optimize integer keys
-+ ht_print()
-+ iterators
-+ 32 bit x86 support
-
 memory manangement
 ------------------
-- allow threads to dynamically enter and exit rcu's token ring
-- augment rcu with heartbeat manager to kill stalled threads
+- allow threads to dynamically enter and exit rcu's token-passing ring
+- augment rcu with heartbeat manager to kill and recover from stalled threads
 - make rcu try yielding when its buffer gets full
 - alternate memory reclamation schemes: hazard pointers and/or reference counting
 - seperate nbd_malloc/nbd_free into general purpose malloc/free replacement
@@ -33,7 +22,7 @@ optimization
 - shortcut from write-set to entries/nodes
 - use a shared scan for write-set validation, similar to ht copy logic
 - experiment with the performance impact of not passing the hash between functions in ht
-- experiment with embedding the keys in the list/skiplist nodes
+- experiment with embedding the nstring keys in the list/skiplist nodes
 
 features
 --------

diff --git a/txn/txn.c b/txn/txn.c
index a932f66..20d0583 100644
--- a/txn/txn.c
+++ b/txn/txn.c
@@ -46,7 +46,7 @@ static version_t version_ = 1;
 
 static skiplist_t *active_ = NULL;
 
-__attribute__ ((constructor)) void txn_init (void) {
+__attribute__ ((constructor(103))) void txn_init (void) {
     active_ = sl_alloc(NULL);
 }

-- 
2.40.0
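
Notes on the two main techniques in this patch, for readers skimming the diff.

1. Cross-thread frees. runtime/mem.c drops the CAS-contended pub_free_list_
in favor of plain singly-linked queues between thread pairs: when a thread
frees a block it does not own, it appends the block to an outbound queue keyed
by the owner (blocks_to holds the tail) and publishes the first block of the
queue into the owner's inbound slot (blocks_from). The owner drains its
inbound queues into its private free_blocks bins the next time nbd_malloc()
runs dry. Below is a minimal single-threaded model of that hand-off. The
names demo_free and demo_drain are hypothetical, one size bin stands in for
the per-scale bins, and the VOLATILE stores and memory-ordering concerns of
the real code are elided -- this is a sketch of the queue discipline, not of
its concurrency guarantees.

/* free_queue_sketch.c -- hypothetical standalone model of the blocks_to /
 * blocks_from hand-off added in runtime/mem.c.  Single-threaded on purpose;
 * thread ids are passed in explicitly so the queue logic can be traced. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_THREADS 4

typedef struct block { struct block *next; } block_t;

typedef struct tl {
    block_t *free_blocks;              // private free list (one bin for brevity)
    block_t *blocks_from[MAX_THREADS]; // inbound queue heads, one slot per sender
    block_t *blocks_to[MAX_THREADS];   // outbound queue tails, one slot per owner
} tl_t;

static tl_t tl[MAX_THREADS];

// Thread <tid> frees a block owned by <owner>: append it to the tail of the
// queue headed for that owner.  Only the first block of a queue is written
// into the owner's blocks_from slot; later blocks just extend the tail.
static void demo_free (int tid, int owner, block_t *b) {
    b->next = NULL;
    if (tl[tid].blocks_to[owner] == NULL) {
        tl[owner].blocks_from[tid] = b;      // publish the queue head
    } else {
        tl[tid].blocks_to[owner]->next = b;  // link onto the previous tail
    }
    tl[tid].blocks_to[owner] = b;
}

// Thread <tid> reclaims inbound blocks, pushing them onto its private free
// list.  The last block of each queue stays behind: its sender may still
// write that block's next field, so only predecessors are safe to take.
static void demo_drain (int tid) {
    for (int i = 0; i < MAX_THREADS; ++i) {
        block_t *x = tl[tid].blocks_from[i];
        while (x != NULL && x->next != NULL) {
            block_t *next = x->next;
            x->next = tl[tid].free_blocks;
            tl[tid].free_blocks = x;
            x = next;
        }
        tl[tid].blocks_from[i] = x;
    }
}

int main (void) {
    static block_t blocks[3];
    demo_free(1, 0, &blocks[0]); // thread 1 frees three blocks owned by thread 0
    demo_free(1, 0, &blocks[1]);
    demo_free(1, 0, &blocks[2]);
    demo_drain(0);               // thread 0 takes all but the queue's tail
    assert(tl[0].free_blocks == &blocks[1]);      // LIFO: block 1 on top of block 0
    assert(tl[0].free_blocks->next == &blocks[0]);
    assert(tl[0].blocks_from[1] == &blocks[2]);   // tail left pending
    printf("drained 2 of 3 freed blocks; tail stays until more arrive\n");
    return 0;
}

The reason demo_drain() leaves the last block of each queue in place: the
sender will write that block's next field if it appends again, so only blocks
that already have a successor are safe to take. This is the same reason the
patched nbd_malloc() stops its drain loop when next == NULL.

2. Aligned regions. get_header() now indexes the headers_ array directly by
the pointer's high bits, which requires every region to be aligned to its own
size. When mmap() returns a misaligned address, get_new_region() over-maps
twice the region size and trims both ends. Below is a standalone sketch of
just that fallback path; map_aligned_region is a hypothetical name, and the
real code first tries a plain mmap of sz bytes, falling back only when the
result is misaligned.

/* aligned_region_sketch.c -- hypothetical model of the over-map-and-trim
 * fallback added to get_new_region(). */
#define _DEFAULT_SOURCE // for MAP_ANON (the patch itself uses _BSD_SOURCE)
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

// Map 2^scale bytes aligned to their own size: map twice the size, then
// munmap() everything before the aligned start and everything past
// aligned + sz.
static void *map_aligned_region (int scale) {
    size_t sz = (size_t)1 << scale;
    char *raw = mmap(NULL, sz * 2, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    if (raw == MAP_FAILED)
        return NULL;
    char *aligned = (char *)(((uintptr_t)raw + sz) & ~(uintptr_t)(sz - 1));
    if (aligned != raw)
        munmap(raw, aligned - raw);                 // trim everything before the aligned start
    if (aligned + sz != raw + sz * 2)
        munmap(aligned + sz, (raw + sz) - aligned); // trim the remaining tail
    return aligned;
}

int main (void) {
    void *r = map_aligned_region(21); // one 2MB region, as on 64-bit builds
    assert(r != NULL && ((uintptr_t)r & ((1 << 21) - 1)) == 0);
    printf("region %p is 2MB-aligned\n", r);
    return 0;
}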