X-Git-Url: https://pd.if.org/git/?p=nbds;a=blobdiff_plain;f=runtime%2Fmem.c;h=1787a6253f299e67c01c063758aa26399d64bc87;hp=e46eb7efbd8bf8278ba786666012e753bf6e9650;hb=f3eb4799a11ceaeb47ab02034595b5d641c2f1c9;hpb=053576b00e7d37f19ce99f033e9981761d647c1f

diff --git a/runtime/mem.c b/runtime/mem.c
index e46eb7e..1787a62 100644
--- a/runtime/mem.c
+++ b/runtime/mem.c
@@ -8,10 +8,10 @@
 #include
 #include
 #include "common.h"
-#include "runtime_local.h"
+#include "rlocal.h"
 #include "lwt.h"
 
-#define GET_SCALE(n) (sizeof(n)*8-__builtin_clzl((n)-1)) // log2 of <n>, rounded up
+#define GET_SCALE(n) (sizeof(void *)*__CHAR_BIT__ - __builtin_clzl((n) - 1)) // log2 of <n>, rounded up
 #define MAX_SCALE 31 // allocate blocks up to 4GB in size (arbitrary, could be bigger)
 #define REGION_SCALE 22 // 4MB regions
 #define REGION_SIZE (1 << REGION_SCALE)
@@ -62,10 +62,14 @@ void mem_init (void) {
 //
 // TODO: maybe we want to munmap() larger size blocks to reclaim virtual address space?
 void nbd_free (void *x) {
+    assert(x);
     LOCALIZE_THREAD_LOCAL(tid_, int);
     block_t *b = (block_t *)x;
     assert(((size_t)b >> REGION_SCALE) < ((1 << HEADER_REGION_SCALE) / sizeof(header_t)));
     header_t *h = region_header_ + ((size_t)b >> REGION_SCALE);
+#ifndef NDEBUG
+    memset(b, 0xcd, (1 << h->scale));
+#endif
     TRACE("m0", "nbd_free(): block %p scale %llu", b, h->scale);
     if (h->owner == tid_) {
         TRACE("m0", "nbd_free(): private block, free list head %p",
@@ -75,8 +79,9 @@ void nbd_free (void *x) {
     } else {
         TRACE("m0", "nbd_free(): owner %llu free list head %p", h->owner, pub_free_list_[h->owner][h->scale][tid_]);
-        b->next = pub_free_list_[h->owner][h->scale][tid_];
-        pub_free_list_[h->owner][h->scale][tid_] = b;
+        do {
+            b->next = pub_free_list_[h->owner][h->scale][tid_];
+        } while (SYNC_CAS(&pub_free_list_[h->owner][h->scale][tid_], b->next, b) != b->next);
     }
 }
 
@@ -88,6 +93,7 @@ void nbd_free (void *x) {
 // on the private free list. If we didn't find any blocks on the public free lists, allocate a new
 // region, break it up into blocks and put them on the private free list.
 void *nbd_malloc (size_t n) {
+    assert(n);
     LOCALIZE_THREAD_LOCAL(tid_, int);
     if (n < sizeof(block_t)) {
         n = sizeof(block_t);
@@ -96,7 +102,7 @@ void *nbd_malloc (size_t n) {
     assert(b_scale <= MAX_SCALE);
     TRACE("m0", "nbd_malloc(): size %llu scale %llu", n, b_scale);
     private_list_t *pri = &pri_free_list_[tid_][b_scale]; // our private free list
-    TRACE("m0", "nbd_malloc(): private free list %p first block %p", pri->list, pri->head);
+    TRACE("m0", "nbd_malloc(): private free list first block %p", pri->head, 0);
 
     // If our private free list is empty, try to find blocks on our public free list. If that fails,
     // allocate a new region.
@@ -111,9 +117,9 @@ void *nbd_malloc (size_t n) {
             uint32_t count = pri->count;
             pri->count = 0;
             // If our private list is empty and we haven't gotten at least half a region's worth
-            // of block's from our public lists, we break open a new region. This guarentees
-            // that we are amortizing the cost of accessing our public lists accross enough
-            // nbd_malloc() calls.
+            // of block's from our public lists, we allocate a new region. This guarentees that
+            // we amortize the cost of accessing our public lists accross enough nbd_malloc()
+            // calls.
             uint32_t min_count = b_scale > REGION_SCALE ? 1 << (b_scale-REGION_SCALE-1) : 1;
             if (count < min_count) {
                 char *region = get_new_region(b_scale);
@@ -131,7 +137,7 @@ void *nbd_malloc (size_t n) {
         if (pubs[pri->next_pub] != NULL) {
             block_t *stolen = SYNC_SWAP(&pubs[pri->next_pub], NULL);
-            TRACE("m0", "nbd_malloc(): stole list %p first block %p", stolen);
+            TRACE("m0", "nbd_malloc(): stole list %p", stolen, 0);
             if (stolen == NULL)
                 continue;
             pri->head = stolen;
@@ -143,7 +149,7 @@ void *nbd_malloc (size_t n) {
 
     // Pull a block off of our private free list.
     block_t *b = pri->head;
-    TRACE("m0", "nbd_malloc(): take block %p off of of private list (new head is %p)", b, pri->next);
+    TRACE("m0", "nbd_malloc(): take block %p off of of private list (new head is %p)", b, b->next);
     assert(b);
     pri->head = b->next;
     pri->count++;
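
Note (not part of the patch): the new do/while in nbd_free() turns the publish of a freed
block into a lock-free stack push, so the push can no longer race with the owner thread
stealing the whole list via SYNC_SWAP() in nbd_malloc() and link a block onto a head that
was just taken. The sketch below is a minimal, self-contained illustration of that retry
pattern. It assumes SYNC_CAS behaves like GCC's __sync_val_compare_and_swap (a value-returning
compare-and-swap, which the "!= b->next" check in the patch implies); the node_t, free_list,
and push() names are made up for the example rather than taken from nbds.

    #include <stddef.h>
    #include <stdio.h>

    typedef struct node {
        struct node *next;
    } node_t;

    /* stands in for one slot of pub_free_list_[owner][scale][tid] */
    static node_t *free_list = NULL;

    static void push (node_t *b) {
        node_t *old;
        do {
            old = free_list;   // read the current head
            b->next = old;     // link the new block in front of it
            // retry if another thread changed the head between the read and the CAS
        } while (__sync_val_compare_and_swap(&free_list, old, b) != old);
    }

    int main (void) {
        node_t n1, n2;
        push(&n1);
        push(&n2);
        printf("head %p -> %p\n", (void *)free_list, (void *)free_list->next);
        return 0;
    }

The patch's version re-reads b->next in the while condition instead of keeping a local copy;
both forms keep retrying until the compare-and-swap installs the block as the new list head.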