#include <stdio.h>
#include <errno.h>
#include "common.h"
-#include "runtime_local.h"
+#include "rlocal.h"
#include "lwt.h"
-#define GET_SCALE(n) (sizeof(n)*8-__builtin_clzl((n)-1)) // log2 of <n>, rounded up
+#define GET_SCALE(n) (sizeof(void *)*__CHAR_BIT__ - __builtin_clzl((n) - 1)) // log2 of <n>, rounded up
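// For example, on an LP64 target GET_SCALE(48) == 6 (a 64 byte block) and GET_SCALE(4096) == 12.
// Note the macro assumes n > 1; __builtin_clzl(0) is undefined, so GET_SCALE(1) must not be used.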
#define MAX_SCALE 31 // allocate blocks up to 4GB in size (arbitrary, could be bigger)
#define REGION_SCALE 22 // 4MB regions
#define REGION_SIZE (1 << REGION_SCALE)
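// Minimal sketch of the data structures this file relies on (illustrative only; the authoritative
// definitions live in the headers included above):
//
//     typedef struct block { struct block *next; } block_t;      // free block, linked through its first word
//     typedef struct header { ... scale; ... owner; } header_t;  // one per 4MB region: block size class
//                                                                // and the thread that carved the region
//     typedef struct private_list {
//         block_t *head;     // this thread's private free list for one size class
//         uint32_t next_pub; // next public free list to scan, round robin
//         uint32_t count;    // used to decide when a fresh region is needed (see nbd_malloc())
//     } private_list_t;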
//
// TODO: maybe we want to munmap() larger size blocks to reclaim virtual address space?
void nbd_free (void *x) {
+ assert(x);
LOCALIZE_THREAD_LOCAL(tid_, int);
block_t *b = (block_t *)x;
assert(((size_t)b >> REGION_SCALE) < ((1 << HEADER_REGION_SCALE) / sizeof(header_t)));
header_t *h = region_header_ + ((size_t)b >> REGION_SCALE);
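    // The region header lives in a flat array indexed by the block's region number (its address
    // divided by REGION_SIZE), so every block in the same 4MB region shares one header.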
+#ifndef NDEBUG
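+ // Debug builds poison freed memory with 0xcd so use-after-free bugs surface quickly.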
+ memset(b, 0xcd, ((size_t)1 << h->scale));
+#endif
TRACE("m0", "nbd_free(): block %p scale %llu", b, h->scale);
if (h->owner == tid_) {
TRACE("m0", "nbd_free(): private block, free list head %p",
} else {
TRACE("m0", "nbd_free(): owner %llu free list head %p",
h->owner, pub_free_list_[h->owner][h->scale][tid_]);
- b->next = pub_free_list_[h->owner][h->scale][tid_];
- pub_free_list_[h->owner][h->scale][tid_] = b;
+ do {
+ b->next = pub_free_list_[h->owner][h->scale][tid_];
+ } while (SYNC_CAS(&pub_free_list_[h->owner][h->scale][tid_], b->next, b) != b->next);
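+ // The CAS loop retries if the owner concurrently steals this list with SYNC_SWAP (see
+ // nbd_malloc() below), so a block freed here can never be lost.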
}
}
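// Overall protocol (descriptive sketch, assuming this thread carved the region it allocates from):
//
//     void *p = nbd_malloc(100); // rounded up to a 128 byte block (scale 7)
//     nbd_free(p);               // owner == tid_, so this is a plain push onto the private list
//
// Freeing a block owned by another thread instead parks it on a per-(owner, scale, freer) public
// list, which the owner drains in nbd_malloc() with a single atomic swap.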
// Allocate a block of memory at least size <n>. Look first on the calling thread's private free
// list, then pull blocks off of its public free lists and put them on the private free list. If we
// didn't find any blocks on the public free lists, allocate a new region, break it up into blocks
// and put them on the private free list.
void *nbd_malloc (size_t n) {
+ assert(n);
LOCALIZE_THREAD_LOCAL(tid_, int);
    if (n < sizeof(block_t)) {
        n = sizeof(block_t);
    }
    int b_scale = GET_SCALE(n);
assert(b_scale <= MAX_SCALE);
TRACE("m0", "nbd_malloc(): size %llu scale %llu", n, b_scale);
private_list_t *pri = &pri_free_list_[tid_][b_scale]; // our private free list
- TRACE("m0", "nbd_malloc(): private free list %p first block %p", pri->list, pri->head);
+ TRACE("m0", "nbd_malloc(): private free list first block %p", pri->head, 0);
// If our private free list is empty, try to find blocks on our public free list. If that fails,
// allocate a new region.
uint32_t count = pri->count;
pri->count = 0;
// If our private list is empty and we haven't gotten at least half a region's worth
- // of block's from our public lists, we break open a new region. This guarentees
- // that we are amortizing the cost of accessing our public lists accross enough
- // nbd_malloc() calls.
+ // of blocks from our public lists, we allocate a new region. This guarantees that
+ // we amortize the cost of accessing our public lists across enough nbd_malloc()
+ // calls.
uint32_t min_count = b_scale > REGION_SCALE ? 1 << (b_scale-REGION_SCALE-1) : 1;
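    // For example, with REGION_SCALE 22, scale 24 blocks (16MB) need count >= 1 << (24-22-1) == 2
    // before we skip allocating a fresh region; for any scale <= 22 a single block is enough.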
if (count < min_count) {
char *region = get_new_region(b_scale);
if (pubs[pri->next_pub] != NULL) {
block_t *stolen = SYNC_SWAP(&pubs[pri->next_pub], NULL);
- TRACE("m0", "nbd_malloc(): stole list %p first block %p", stolen);
+ TRACE("m0", "nbd_malloc(): stole list %p", stolen, 0);
if (stolen == NULL)
continue;
pri->head = stolen;
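            // SYNC_SWAP atomically detaches the entire public list, so a remote nbd_free() racing
            // with the steal simply fails its CAS and retries against the now-empty list.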
// Pull a block off of our private free list.
block_t *b = pri->head;
- TRACE("m0", "nbd_malloc(): take block %p off of of private list (new head is %p)", b, pri->next);
+ TRACE("m0", "nbd_malloc(): take block %p off of of private list (new head is %p)", b, b->next);
assert(b);
pri->head = b->next;
pri->count++;