//
// TODO: maybe we want to munmap() larger size blocks to reclaim virtual address space?
void nbd_free (void *x) {
+ assert(x);
LOCALIZE_THREAD_LOCAL(tid_, int);
block_t *b = (block_t *)x;
assert(((size_t)b >> REGION_SCALE) < ((1 << HEADER_REGION_SCALE) / sizeof(header_t)));
// on the private free list. If we didn't find any blocks on the public free lists, allocate a new
// region, break it up into blocks and put them on the private free list.
void *nbd_malloc (size_t n) {
+ assert(n);
LOCALIZE_THREAD_LOCAL(tid_, int);
if (n < sizeof(block_t)) {
n = sizeof(block_t);
uint32_t count = pri->count;
pri->count = 0;
// If our private list is empty and we haven't gotten at least half a region's worth
- // of block's from our public lists, we break open a new region. This guarentees
- // that we are amortizing the cost of accessing our public lists accross enough
- // nbd_malloc() calls.
+ // of blocks from our public lists, we allocate a new region. This guarantees that
+ // we amortize the cost of accessing our public lists across enough nbd_malloc()
+ // calls.
uint32_t min_count = b_scale > REGION_SCALE ? 1 << (b_scale-REGION_SCALE-1) : 1;
if (count < min_count) {
char *region = get_new_region(b_scale);