#include "rlocal.h"
#include "lwt.h"
// Allocator sizing configuration. The 64-bit build (default) uses 2MB pages
// and allows blocks up to 2^36 bytes; the 32-bit build (NBD32) uses 4KB pages
// and blocks up to 2^31 bytes.
// NOTE(review): leftover unified-diff markers ('-'/'+') were resolved here by
// applying the '+' lines and dropping the '-' lines; without this the file
// does not compile.
#ifndef NBD32
#define MAX_SCALE 36 // allocate blocks up to 64GB (arbitrary, could be bigger)
#define MIN_SCALE 3 // smallest allocated block is 8 bytes
#define MAX_POINTER_BITS 48
#define PAGE_SCALE 21 // 2MB pages
#else
#define MAX_SCALE 31
#define MIN_SCALE 2 // smallest allocated block is 4 bytes
#define MAX_POINTER_BITS 32
#define PAGE_SCALE 12 // 4KB pages
#endif
// 1ULL keeps the shifts well-defined when PAGE_SCALE/MAX_SCALE exceed the
// width of int (shifting a plain 1 by >= 31 would be undefined behavior).
#define PAGE_SIZE (1ULL << PAGE_SCALE)
#define HEADERS_SIZE (((size_t)1ULL << (MAX_POINTER_BITS - PAGE_SCALE)) * sizeof(header_t))
// Intrusive free-list node: a free block's own first word links it to the
// next free block of the same size class.
// NOTE(review): the typedef was truncated at a diff hunk boundary (it ended
// at the bare '}'); the tag name 'block_t' is reconstructed from its use in
// nbd_free ("block_t *b = (block_t *)x;") — confirm against the full file.
typedef struct block {
    struct block *next;
} block_t;
static void *get_new_region (int block_scale) {
- LOCALIZE_THREAD_LOCAL(tid_, int);
+ int thread_index = GET_THREAD_INDEX();
#ifdef RECYCLE_PAGES
- tl_t *tl = &tl_[tid_]; // thread-local data
+ tl_t *tl = &tl_[thread_index]; // thread-local data
if (block_scale <= PAGE_SCALE && tl->free_pages != NULL) {
void *region = tl->free_pages;
tl->free_pages = tl->free_pages->next;
return region;
}
#endif//RECYCLE_PAGES
- size_t region_size = (1 << block_scale);
+ size_t region_size = (1ULL << block_scale);
if (region_size < PAGE_SIZE) {
region_size = PAGE_SIZE;
}
if ((size_t)region & (region_size - 1)) {
TRACE("m0", "get_new_region: region not aligned", 0, 0);
munmap(region, region_size);
- region = mmap(NULL, region_size * 2, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ region = mmap(NULL, region_size * 2, PROT_READ|PROT_WRITE, MAP_NORESERVE|MAP_ANON|MAP_PRIVATE, -1, 0);
if (region == (void *)-1) {
perror("get_new_region: mmap");
exit(-1);
TRACE("m1", "get_new_region: header %p (%p)", h, h - headers_);
assert(h->scale == 0);
h->scale = block_scale;
- h->owner = tid_;
+ h->owner = thread_index;
return region;
}
void nbd_free (void *x) {
TRACE("m1", "nbd_free: block %p page %p", x, (size_t)x & ~MASK(PAGE_SCALE));
ASSERT(x);
- LOCALIZE_THREAD_LOCAL(tid_, int);
block_t *b = (block_t *)x;
header_t *h = get_header(x);
int b_scale = h->scale;
ASSERT(b_scale && b_scale <= MAX_SCALE);
#ifdef RECYCLE_PAGES
if (b_scale > PAGE_SCALE) {
- int rc = munmap(x, 1 << b_scale);
+ int rc = munmap(x, 1ULL << b_scale);
ASSERT(rc == 0);
rc = rc;
}
#endif
#ifndef NDEBUG
- memset(b, 0xcd, (1 << b_scale)); // bear trap
+ memset(b, 0xcd, (1ULL << b_scale)); // bear trap
#endif
- tl_t *tl = &tl_[tid_]; // thread-local data
- if (h->owner == tid_) {
+ int thread_index = GET_THREAD_INDEX();
+ tl_t *tl = &tl_[thread_index]; // thread-local data
+ if (h->owner == thread_index) {
TRACE("m1", "nbd_free: private block, old free list head %p", tl->free_list[b_scale], 0);
#ifndef RECYCLE_PAGES
if (EXPECT_FALSE(b_scale < MIN_SCALE)) { b_scale = MIN_SCALE; }
if (EXPECT_FALSE(b_scale > MAX_SCALE)) { return NULL; }
- LOCALIZE_THREAD_LOCAL(tid_, int);
- tl_t *tl = &tl_[tid_]; // thread-local data
+ tl_t *tl = &tl_[GET_THREAD_INDEX()]; // thread-local data
block_t *b = pop_free_list(tl, b_scale);
if (b != NULL) {
TRACE("m1", "nbd_malloc: returning block %p", b, 0);
return b;
+ assert(b);
}
// The free list is empty so process blocks freed from other threads and then check again.
if (b != NULL) {
TRACE("m1", "nbd_malloc: returning block %p", b, 0);
return b;
+ assert(b);
}
#ifdef RECYCLE_PAGES
ASSERT(b != NULL);
TRACE("m1", "nbd_malloc: returning block %p", b, 0);
return b;
+ assert(b);
}
// There are no partially allocated pages so get a new page.
// Break up the remainder of the page into blocks and put them on the free list. Start at the
// end of the page so that the free list ends up in increasing order, for ease of debugging.
if (b_scale < PAGE_SCALE) {
- size_t block_size = (1 << b_scale);
+ size_t block_size = (1ULL << b_scale);
block_t *head = NULL;
for (int offset = PAGE_SIZE - block_size; offset > 0; offset -= block_size) {
block_t *x = (block_t *)(page + offset);
}
TRACE("m1", "nbd_malloc: returning block %p from new region %p", b, (size_t)b & ~MASK(PAGE_SCALE));
+ assert(b);
return b;
}
#else//USE_SYSTEM_MALLOC
#include <stdlib.h>
+#include "common.h"
+#include "rlocal.h"
+#include "lwt.h"
// No-op initializer: when USE_SYSTEM_MALLOC is defined the system allocator
// needs no setup, but callers still expect mem_init() to exist.
void mem_init (void) {
    // nothing to initialize
}
-void ndb_free (void *x) {
+void nbd_free (void *x) {
TRACE("m1", "nbd_free: %p", x, 0);
#ifndef NDEBUG
memset(x, 0xcd, sizeof(void *)); // bear trap