#include "rlocal.h"
#include "lwt.h"
-#define RECYCLE_PAGES
-
-#define MAX_SCALE 31 // allocate blocks up to 4GB (arbitrary, could be bigger)
#ifndef NBD32
+#define MAX_SCALE 36 // allocate blocks up to 64GB (arbitrary, could be bigger)
#define MIN_SCALE 3 // smallest allocated block is 8 bytes
#define MAX_POINTER_BITS 48
#define PAGE_SCALE 21 // 2MB pages
#else
+#define MAX_SCALE 31
#define MIN_SCALE 2 // smallest allocated block is 4 bytes
#define MAX_POINTER_BITS 32
#define PAGE_SCALE 12 // 4KB pages
#endif
-#define PAGE_SIZE (1 << PAGE_SCALE)
-#define HEADERS_SIZE (((size_t)1 << (MAX_POINTER_BITS - PAGE_SCALE)) * sizeof(header_t))
+#define PAGE_SIZE (1ULL << PAGE_SCALE)
+#define HEADERS_SIZE (((size_t)1ULL << (MAX_POINTER_BITS - PAGE_SCALE)) * sizeof(header_t))
typedef struct block {
struct block *next;
return region;
}
#endif//RECYCLE_PAGES
- size_t region_size = (1 << block_scale);
+ size_t region_size = (1ULL << block_scale);
if (region_size < PAGE_SIZE) {
region_size = PAGE_SIZE;
}
- void *region = mmap(NULL, region_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ void *region = mmap(NULL, region_size, PROT_READ|PROT_WRITE, MAP_NORESERVE|MAP_ANON|MAP_PRIVATE, -1, 0);
TRACE("m1", "get_new_region: mmapped new region %p (size %p)", region, region_size);
if (region == (void *)-1) {
perror("get_new_region: mmap");
// Allocate space for the page headers. This could be a big chunk of memory on 64 bit systems,
// but it just takes up virtual address space. Physical space used by the headers is still
// proportional to the amount of memory the user mallocs.
- headers_ = (header_t *)malloc(HEADERS_SIZE);
+ headers_ = mmap(NULL, HEADERS_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
TRACE("m1", "mem_init: header page %p", headers_, 0);
- memset(headers_, 0, HEADERS_SIZE);
// initialize spsc queues
for (int i = 0; i < MAX_NUM_THREADS; ++i) {
ASSERT(b_scale && b_scale <= MAX_SCALE);
#ifdef RECYCLE_PAGES
if (b_scale > PAGE_SCALE) {
- int rc = munmap(x, 1 << b_scale);
+ int rc = munmap(x, 1ULL << b_scale);
ASSERT(rc == 0);
rc = rc;
}
#endif
#ifndef NDEBUG
- memset(b, 0xcd, (1 << b_scale)); // bear trap
+ memset(b, 0xcd, (1ULL << b_scale)); // bear trap
#endif
tl_t *tl = &tl_[tid_]; // thread-local data
if (h->owner == tid_) {
if (b != NULL) {
TRACE("m1", "nbd_malloc: returning block %p", b, 0);
return b;
+ assert(b);
}
// The free list is empty so process blocks freed from other threads and then check again.
if (b != NULL) {
TRACE("m1", "nbd_malloc: returning block %p", b, 0);
return b;
+ assert(b);
}
#ifdef RECYCLE_PAGES
ASSERT(b != NULL);
TRACE("m1", "nbd_malloc: returning block %p", b, 0);
return b;
+ assert(b);
}
// There are no partially allocated pages so get a new page.
// Break up the remainder of the page into blocks and put them on the free list. Start at the
// end of the page so that the free list ends up in increasing order, for ease of debugging.
if (b_scale < PAGE_SCALE) {
- size_t block_size = (1 << b_scale);
+ size_t block_size = (1ULL << b_scale);
block_t *head = NULL;
for (int offset = PAGE_SIZE - block_size; offset > 0; offset -= block_size) {
block_t *x = (block_t *)(page + offset);
}
TRACE("m1", "nbd_malloc: returning block %p from new region %p", b, (size_t)b & ~MASK(PAGE_SCALE));
+ assert(b);
return b;
}
#else//USE_SYSTEM_MALLOC
#include <stdlib.h>
+#include "common.h"
+#include "rlocal.h"
+#include "lwt.h"
// No-op initializer for the USE_SYSTEM_MALLOC build: the standard C
// allocator requires no setup, but callers unconditionally invoke
// mem_init() at startup, so an empty definition must still exist.
void mem_init (void) {
}
-void ndb_free (void *x) {
+void nbd_free (void *x) {
TRACE("m1", "nbd_free: %p", x, 0);
#ifndef NDEBUG
memset(x, 0xcd, sizeof(void *)); // bear trap