- assert(((size_t)b >> REGION_SCALE) < ((1 << HEADER_REGION_SCALE) / sizeof(header_t)));
- header_t *h = region_header_ + ((size_t)b >> REGION_SCALE);
- TRACE("m0", "nbd_free(): block %p scale %llu", x, h->scale);
- block_t *l = &free_list_[(int)h->owner][(int)h->scale][tid_];
- TRACE("m0", "nbd_free(): free list %p first block %p", l, l->next);
- b->next = l->next;
- l->next = b;
+    header_t *h = get_header(x);
+    int b_scale = h->scale;
+    TRACE("m1", "nbd_free: header %p scale %llu", h, b_scale);
+    ASSERT(b_scale && b_scale <= MAX_SCALE);
+#ifdef RECYCLE_PAGES
+    if (b_scale > PAGE_SCALE) {
+        // blocks bigger than a page bypass the free lists and go straight back to the OS
+        int rc = munmap(x, 1ULL << b_scale);
+        ASSERT(rc == 0);
+        rc = rc; // suppress the unused-variable warning when NDEBUG disables ASSERT()
+        return;  // <x> is unmapped; it must not be poisoned below or pushed onto a free list
+    }
+#endif
+#ifndef NDEBUG
+    memset(b, 0xcd, (1ULL << b_scale)); // bear trap: poison freed memory to catch use-after-free
+#endif
+    tl_t *tl = &tl_[tid_]; // thread-local data
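+    // Fast path: a block freed by its owning thread goes on a private free list
+    // with no synchronization; otherwise it is handed back to its owner through
+    // a per-thread queue (the <else> branch below).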
+    if (h->owner == tid_) {
+        TRACE("m1", "nbd_free: private block, old free list head %p", tl->free_list[b_scale], 0);
+
+#ifndef RECYCLE_PAGES
+        b->next = tl->free_list[b_scale];
+        tl->free_list[b_scale] = b;
+#else //RECYCLE_PAGES
+        b->next = h->free_list;
+        h->free_list = b;
+        h->num_in_use--;
+        size_class_t *sc = &tl->size_class[b_scale];
+        if (sc->active_page != h) {
+            if (h->num_in_use == 0) {
+                // remove <h> from the partial-page list
+                if (h->next != NULL) { h->next->prev = h->prev; }
+                if (h->prev != NULL) { h->prev->next = h->next; }
+                if (sc->newest_partial == h) { sc->newest_partial = h->prev; }
+                // put <h> on the free-page list
+                h->next = tl->free_pages;
+                tl->free_pages = h;
+            } else {
+                // move <h> to the top of the partial-page list
+                if (h->next != NULL) { // if <h> has no successor it is already on top
+                    h->next->prev = h->prev;
+                    if (h->prev != NULL) { h->prev->next = h->next; }
+                    h->prev = sc->newest_partial;
+                    if (h->prev != NULL) { h->prev->next = h; } // link the old top to <h>
+                    h->next = NULL;
+                    sc->newest_partial = h;
+                }
+            }
+        }
+#endif//RECYCLE_PAGES
+    } else {
+        // push <b> onto its owner's queue
+        int b_owner = h->owner;
+        TRACE("m1", "nbd_free: owner %llu", b_owner, 0);
+
+        // The assignments go through volatile references to keep the compiler from
+        // reordering them (volatile constrains the compiler only, not the CPU).
+        VOLATILE_DEREF(b).next = NULL;
+        VOLATILE_DEREF(tl->blocks_to[b_owner]).next = b;
+
+        tl->blocks_to[b_owner] = b;
+    }
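
For context, the new code replaces the open-coded header lookup in the removed
lines with a get_header() call, whose definition lives elsewhere in the file.
A minimal sketch consistent with the removed arithmetic (region_header_,
REGION_SCALE, and HEADER_REGION_SCALE are the existing globals and constants
it relies on) would be:

    // Map a block address to its header: each region of size 1<<REGION_SCALE
    // owns one header_t slot in the dedicated header region.
    static inline header_t *get_header (void *x) {
        ASSERT(((size_t)x >> REGION_SCALE) < ((1ULL << HEADER_REGION_SCALE) / sizeof(header_t)));
        return region_header_ + ((size_t)x >> REGION_SCALE);
    }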