- int b_scale = GET_SCALE(n);
- assert(b_scale >= 2);
- assert(b_scale <= MAX_SCALE);
- TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, b_scale);
- LOCALIZE_THREAD_LOCAL(tid_, int);
- private_list_t *pri = &pri_free_list_[tid_][b_scale]; // our private free list
-
- // If our private free list is empty, try to find blocks on our public free list. If that fails,
- // allocate a new region.
- if (EXPECT_FALSE(pri->head == NULL)) {
- block_t **pubs = pub_free_list_[tid_][b_scale]; // our public free lists
- while (1) {
- // look for blocks on our public free lists round robin
- pri->next_pub = (pri->next_pub+1) & (MAX_NUM_THREADS-1);
-
- TRACE("m1", "nbd_malloc: searching public free list %llu", pri->next_pub, 0);
- if (pri->next_pub == tid_) {
- uint32_t count = pri->count;
- pri->count = 0;
-            // If we haven't gotten at least half a region's worth of blocks from our public lists
-            // we allocate a new region. This guarantees that we amortize the cost of accessing our
-            // public lists across enough nbd_malloc() calls.
- uint32_t min_count = b_scale > REGION_SCALE ? 1 << (b_scale-REGION_SCALE-1) : 1;
- if (count < min_count) {
- char *region = get_new_region(b_scale);
- size_t b_size = 1 << b_scale;
- size_t region_size = (b_size < REGION_SIZE) ? REGION_SIZE : b_size;
- for (int i = region_size; i != 0; i -= b_size) {
- block_t *b = (block_t *)(region + i - b_size);
- b->next = pri->head;
- pri->head = b;
- }
- pri->count = 0;
- break;
- }
- } else if (pubs[pri->next_pub] != NULL) {
- block_t *stolen = SYNC_SWAP(&pubs[pri->next_pub], NULL);
- TRACE("m1", "nbd_malloc: stole list %p", stolen, 0);
- if (stolen == NULL)
- continue;
- pri->head = stolen;
- break;
- }
+
+ // The free list is empty so process blocks freed from other threads and then check again.
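+    // (Assumption: process_incoming_blocks() moves blocks that other threads freed back to this
+    // thread onto its free lists, so the pop below can succeed without allocating a new page.)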
+ process_incoming_blocks(tl);
+ b = pop_free_list(tl, b_scale);
+ if (b != NULL) {
+ TRACE("m1", "nbd_malloc: returning block %p", b, 0);
+ return b;
+ }
+
+#ifdef RECYCLE_PAGES
+ // The current active page is completely allocated. Make the oldest partially allocated page
+ // the new active page.
+ size_class_t *sc = &tl->size_class[b_scale];
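+    // oldest_partial heads a doubly linked list (next/prev) of partially allocated pages, oldest first.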
+ if (sc->oldest_partial != NULL) {
+ sc->active_page = sc->oldest_partial;
+ sc->oldest_partial = sc->oldest_partial->next;
+        if (sc->oldest_partial != NULL) { // the unlinked page may have been the only partial page
+            sc->oldest_partial->prev = NULL;
+        }
+ b = pop_free_list(tl, b_scale);
+ ASSERT(b != NULL);
+ TRACE("m1", "nbd_malloc: returning block %p", b, 0);
+ return b;
+ }
+    // There are no partially allocated pages, so fall through and get a new page.
+#endif//RECYCLE_PAGES
+
+ // Get a new page.
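+    // (Assumption: get_new_region(b_scale) returns a region of at least max(PAGE_SIZE, 1 << b_scale)
+    // bytes, so the first block always fits and, for small scales, a full page can be carved up.)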
+ char *page = get_new_region(b_scale);
+ b = (block_t *)page; // grab the first block on the page
+
+ // Break up the remainder of the page into blocks and put them on the free list. Start at the
+ // end of the page so that the free list ends up in increasing order, for ease of debugging.
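+    // The block at offset 0 is 'b' itself and is returned to the caller, so the loop stops before it.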
+ if (b_scale < PAGE_SCALE) {
+ size_t block_size = (1ULL << b_scale);
+ block_t *head = NULL;
+        for (size_t offset = PAGE_SIZE - block_size; offset > 0; offset -= block_size) {
+ block_t *x = (block_t *)(page + offset);
+ x->next = head; head = x;