X-Git-Url: https://pd.if.org/git/?p=nbds;a=blobdiff_plain;f=runtime%2Fmem.c;h=1787a6253f299e67c01c063758aa26399d64bc87;hp=7e56909fbd994cd7f77f0355b2898ef61d7d6067;hb=f3eb4799a11ceaeb47ab02034595b5d641c2f1c9;hpb=1da02238e784eaba7bc8193f62a738e9d3f3ee1a

diff --git a/runtime/mem.c b/runtime/mem.c
index 7e56909..1787a62 100644
--- a/runtime/mem.c
+++ b/runtime/mem.c
@@ -8,10 +8,10 @@
 #include
 #include
 #include "common.h"
-#include "runtime_local.h"
+#include "rlocal.h"
 #include "lwt.h"
 
-#define GET_SCALE(n) (sizeof(n)*8-__builtin_clzl((n)-1)) // log2 of <n>, rounded up
+#define GET_SCALE(n) (sizeof(void *)*__CHAR_BIT__ - __builtin_clzl((n) - 1)) // log2 of <n>, rounded up
 #define MAX_SCALE 31 // allocate blocks up to 4GB in size (arbitrary, could be bigger)
 #define REGION_SCALE 22 // 4MB regions
 #define REGION_SIZE (1 << REGION_SCALE)
@@ -67,6 +67,9 @@ void nbd_free (void *x) {
     block_t  *b = (block_t *)x;
     assert(((size_t)b >> REGION_SCALE) < ((1 << HEADER_REGION_SCALE) / sizeof(header_t)));
     header_t *h = region_header_ + ((size_t)b >> REGION_SCALE);
+#ifndef NDEBUG
+    memset(b, 0xcd, (1 << h->scale));
+#endif
     TRACE("m0", "nbd_free(): block %p scale %llu", b, h->scale);
     if (h->owner == tid_) {
         TRACE("m0", "nbd_free(): private block, free list head %p",
@@ -76,8 +79,9 @@ void nbd_free (void *x) {
     } else {
         TRACE("m0", "nbd_free(): owner %llu free list head %p",
               h->owner, pub_free_list_[h->owner][h->scale][tid_]);
-        b->next = pub_free_list_[h->owner][h->scale][tid_];
-        pub_free_list_[h->owner][h->scale][tid_] = b;
+        do {
+            b->next = pub_free_list_[h->owner][h->scale][tid_];
+        } while (SYNC_CAS(&pub_free_list_[h->owner][h->scale][tid_], b->next, b) != b->next);
     }
 }
 
@@ -98,7 +102,7 @@ void *nbd_malloc (size_t n) {
     assert(b_scale <= MAX_SCALE);
     TRACE("m0", "nbd_malloc(): size %llu scale %llu", n, b_scale);
     private_list_t *pri = &pri_free_list_[tid_][b_scale]; // our private free list
-    TRACE("m0", "nbd_malloc(): private free list %p first block %p", pri->list, pri->head);
+    TRACE("m0", "nbd_malloc(): private free list first block %p", pri->head, 0);
 
     // If our private free list is empty, try to find blocks on our public free list. If that fails,
     // allocate a new region.
@@ -113,9 +117,9 @@
             uint32_t count = pri->count;
             pri->count = 0;
             // If our private list is empty and we haven't gotten at least half a region's worth
-            // of block's from our public lists, we break open a new region. This guarentees
-            // that we are amortizing the cost of accessing our public lists accross enough
-            // nbd_malloc() calls.
+            // of blocks from our public lists, we allocate a new region. This guarantees that
+            // we amortize the cost of accessing our public lists across enough nbd_malloc()
+            // calls.
             uint32_t min_count = b_scale > REGION_SCALE ? 1 << (b_scale-REGION_SCALE-1) : 1;
             if (count < min_count) {
                 char  *region = get_new_region(b_scale);
@@ -133,7 +137,7 @@
 
             if (pubs[pri->next_pub] != NULL) {
                 block_t *stolen = SYNC_SWAP(&pubs[pri->next_pub], NULL);
-                TRACE("m0", "nbd_malloc(): stole list %p first block %p", stolen);
+                TRACE("m0", "nbd_malloc(): stole list %p", stolen, 0);
                 if (stolen == NULL)
                     continue;
                 pri->head = stolen;
@@ -145,7 +149,7 @@
 
     // Pull a block off of our private free list.
     block_t *b = pri->head;
-    TRACE("m0", "nbd_malloc(): take block %p off of of private list (new head is %p)", b, pri->next);
+    TRACE("m0", "nbd_malloc(): take block %p off of private list (new head is %p)", b, b->next);
     assert(b);
     pri->head = b->next;
     pri->count++;
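
A note on the GET_SCALE() change in the first hunk: __builtin_clzl() counts leading zeros in an unsigned long, so the width it is subtracted from must be the width of that type. The old macro used sizeof(n)*8, the width of the *argument's* type; that happens to agree for a size_t argument on a 64-bit platform, but is wrong for a 32-bit argument (32 minus a leading-zero count taken over 64 bits). The new macro uses the pointer width via __CHAR_BIT__ (GCC's predefined CHAR_BIT), which matches unsigned long on the 64-bit platforms nbds targets. A minimal standalone sketch of the same rounded-up log2, not from the nbds tree, assuming n >= 2 (for n == 1, n - 1 == 0 and __builtin_clzl(0) is undefined):

    #include <assert.h>
    #include <limits.h>

    // log2 of n, rounded up: the bit width of unsigned long minus the number
    // of leading zeros in (n - 1). Mirrors the corrected GET_SCALE() above.
    static inline int get_scale (unsigned long n) {
        return (int)(sizeof(unsigned long) * CHAR_BIT) - __builtin_clzl(n - 1);
    }

    int main (void) {
        assert(get_scale(2)    == 1);
        assert(get_scale(4096) == 12); // 4KB is exactly 2^12
        assert(get_scale(4097) == 13); // anything bigger rounds up to 2^13
        return 0;
    }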
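
The do/while loop added to nbd_free() is the standard lock-free push onto a singly linked list (a Treiber-stack push): link the freed block in front of the current head, then publish it with a compare-and-swap, retrying if another thread moved the head in the meantime. A minimal sketch of the pattern, assuming SYNC_CAS is a value-returning CAS along the lines of GCC's __sync_val_compare_and_swap():

    typedef struct node { struct node *next; } node_t;

    // Push n onto the lock-free list rooted at *head. The CAS returns the
    // value it found at *head; n was installed only if that value is still
    // the snapshot we linked against, otherwise retry with a fresh snapshot.
    static void list_push (node_t * volatile *head, node_t *n) {
        node_t *old;
        do {
            old = *head;
            n->next = old;
        } while (__sync_val_compare_and_swap(head, old, n) != old);
    }

The consumer side dodges the ABA problem outright: rather than popping blocks one at a time, nbd_malloc() uses SYNC_SWAP() to atomically exchange the whole public list head with NULL, after which the stolen chain is private to the caller and can be walked without synchronization.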
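
The memset() to 0xcd under #ifndef NDEBUG poisons freed blocks, so a use-after-free reads a conspicuous 0xcdcdcd... pattern instead of stale-but-plausible data, and a "pointer" fetched from a poisoned block will usually fault outright. A trivial illustration of the idea, independent of nbds:

    #include <stdio.h>
    #include <string.h>

    int main (void) {
        unsigned long x = 42;
        memset(&x, 0xcd, sizeof(x)); // what nbd_free() does to the whole block in debug builds
        printf("0x%lx\n", x);        // prints 0xcdcdcdcdcdcdcdcd on a 64-bit platform
        return 0;
    }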