X-Git-Url: https://pd.if.org/git/?p=nbds;a=blobdiff_plain;f=runtime%2Fmem.c;h=0b7cd5b7be8a0c23662a74cb3ea53eeb1a3e343a;hp=e46eb7efbd8bf8278ba786666012e753bf6e9650;hb=8143ca0acc36e19d004431952e3b6f9b3d337f49;hpb=053576b00e7d37f19ce99f033e9981761d647c1f

diff --git a/runtime/mem.c b/runtime/mem.c
index e46eb7e..0b7cd5b 100644
--- a/runtime/mem.c
+++ b/runtime/mem.c
@@ -8,7 +8,7 @@
 #include 
 #include 
 #include "common.h"
-#include "runtime_local.h"
+#include "rlocal.h"
 #include "lwt.h"
 
 #define GET_SCALE(n) (sizeof(n)*8-__builtin_clzl((n)-1)) // log2 of <n>, rounded up
@@ -62,6 +62,7 @@ void mem_init (void) {
 //
 // TODO: maybe we want to munmap() larger size blocks to reclaim virtual address space?
 void nbd_free (void *x) {
+    assert(x);
     LOCALIZE_THREAD_LOCAL(tid_, int);
     block_t *b = (block_t *)x;
     assert(((size_t)b >> REGION_SCALE) < ((1 << HEADER_REGION_SCALE) / sizeof(header_t)));
@@ -88,6 +89,7 @@ void nbd_free (void *x) {
 // on the private free list. If we didn't find any blocks on the public free lists, allocate a new
 // region, break it up into blocks and put them on the private free list.
 void *nbd_malloc (size_t n) {
+    assert(n);
     LOCALIZE_THREAD_LOCAL(tid_, int);
     if (n < sizeof(block_t)) {
         n = sizeof(block_t);
@@ -96,7 +98,7 @@ void *nbd_malloc (size_t n) {
     assert(b_scale <= MAX_SCALE);
     TRACE("m0", "nbd_malloc(): size %llu scale %llu", n, b_scale);
     private_list_t *pri = &pri_free_list_[tid_][b_scale]; // our private free list
-    TRACE("m0", "nbd_malloc(): private free list %p first block %p", pri->list, pri->head);
+    TRACE("m0", "nbd_malloc(): private free list first block %p", pri->head, 0);
 
     // If our private free list is empty, try to find blocks on our public free list. If that fails,
     // allocate a new region.
@@ -111,9 +113,9 @@ void *nbd_malloc (size_t n) {
            uint32_t count = pri->count;
            pri->count = 0;
            // If our private list is empty and we haven't gotten at least half a region's worth
-           // of block's from our public lists, we break open a new region. This guarentees
-           // that we are amortizing the cost of accessing our public lists accross enough
-           // nbd_malloc() calls.
+           // of blocks from our public lists, we allocate a new region. This guarantees that
+           // we amortize the cost of accessing our public lists across enough nbd_malloc()
+           // calls.
            uint32_t min_count = b_scale > REGION_SCALE ? 1 << (b_scale-REGION_SCALE-1) : 1;
            if (count < min_count) {
                char *region = get_new_region(b_scale);
@@ -131,7 +133,7 @@ void *nbd_malloc (size_t n) {
 
        if (pubs[pri->next_pub] != NULL) {
            block_t *stolen = SYNC_SWAP(&pubs[pri->next_pub], NULL);
-           TRACE("m0", "nbd_malloc(): stole list %p first block %p", stolen);
+           TRACE("m0", "nbd_malloc(): stole list %p", stolen, 0);
            if (stolen == NULL)
                continue;
            pri->head = stolen;
@@ -143,7 +145,7 @@ void *nbd_malloc (size_t n) {
 
    // Pull a block off of our private free list.
    block_t *b = pri->head;
-   TRACE("m0", "nbd_malloc(): take block %p off of of private list (new head is %p)", b, pri->next);
+   TRACE("m0", "nbd_malloc(): take block %p off of private list (new head is %p)", b, b->next);
    assert(b);
    pri->head = b->next;
    pri->count++;
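
Context for the allocation path this commit touches: nbd_malloc() first serves blocks from the
calling thread's private free list; when that runs dry it grabs an entire per-thread public free
list in one shot with SYNC_SWAP(), and only if that yields too few blocks does it carve a fresh
region into blocks, so the cost of touching the public lists is amortized over many calls. Below
is a minimal, standalone sketch of the "steal the whole list with one atomic swap" idea. It is
not the nbds implementation: it assumes SYNC_SWAP() wraps an atomic exchange along the lines of
GCC's __sync_lock_test_and_set(), and the helper names public_push() and steal_public_list() are
invented for illustration.

/*
 * Sketch (NOT the nbds code): a lock-free "public" free list that the owning
 * thread empties with a single atomic exchange, the same trick nbd_malloc()
 * uses via SYNC_SWAP(). Atomic primitives here are GCC __sync builtins.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct block {
    struct block *next;
} block_t;

static block_t *public_free_list = NULL;   // pushed to by freeing threads

// Push one freed block onto the public list (lock-free Treiber-style push).
static void public_push (block_t *b) {
    block_t *old;
    do {
        old = public_free_list;
        b->next = old;
    } while (!__sync_bool_compare_and_swap(&public_free_list, old, b));
}

// Steal the entire public list at once; after the exchange the stolen chain
// is private to the caller and can be walked without further synchronization.
static block_t *steal_public_list (void) {
    return __sync_lock_test_and_set(&public_free_list, NULL);
}

int main (void) {
    // Simulate another thread freeing a few blocks onto the public list.
    for (int i = 0; i < 3; ++i) {
        public_push((block_t *)malloc(sizeof(block_t)));
    }

    // The allocating thread takes the whole list with one atomic swap.
    int count = 0;
    for (block_t *b = steal_public_list(); b != NULL; count++) {
        block_t *next = b->next;
        free(b);
        b = next;
    }
    printf("stole %d blocks\n", count);
    return 0;
}

The point of the swap is that the consumer never traverses a list that other threads can still be
pushing onto: once the exchange returns, the stolen chain belongs to the caller alone.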