* Written by Josh Dybnis and released to the public domain, as explained at
* http://creativecommons.org/licenses/publicdomain
*
- * Extreamly fast multi-threaded malloc. 64 bit platforms only!
+ * Extremely fast multi-threaded malloc.
*/
+#define _BSD_SOURCE // so we get MAP_ANON on Linux
#include <sys/mman.h>
#include <stdio.h>
#include <errno.h>
#include "common.h"
-#include "runtime_local.h"
+#include "rlocal.h"
#include "lwt.h"
-#define GET_SCALE(n) (sizeof(n)*8-__builtin_clzl((n)-1)) // log2 of <n>, rounded up
-#define MAX_SCALE 31 // allocate blocks up to 4GB in size (arbitrary, could be bigger)
-#define REGION_SCALE 22 // 4MB regions
+#define GET_SCALE(n) (sizeof(void *)*__CHAR_BIT__ - __builtin_clzl((n) - 1)) // log2 of <n>, rounded up
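+// illustrative values (64 bit): GET_SCALE(4096) == 12, GET_SCALE(4097) == 13; undefined
+// for n <= 1 because __builtin_clzl(0) is undefined, so callers round tiny sizes up first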
+#ifndef NBD32
+#define MAX_POINTER_BITS 48
+#define REGION_SCALE 21 // 2MB regions
+#else
+#define MAX_POINTER_BITS 32
+#define REGION_SCALE 12 // 4KB regions
+#endif
#define REGION_SIZE (1 << REGION_SCALE)
-#define HEADER_REGION_SCALE 22 // 4MB is space enough for headers for over 2,000,000 regions
+#define HEADER_REGION_SCALE ((MAX_POINTER_BITS - REGION_SCALE) + GET_SCALE(sizeof(header_t)))
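+// e.g. 64 bit: (48 - 21) + GET_SCALE(sizeof(header_t)) = 27 + 1 = 28, a 256MB header
+// region: one 2-byte header_t for every possible 2MB region in a 48 bit address space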
+#define MAX_SCALE 31 // allocate blocks up to 2GB in size (arbitrary, could be bigger)
typedef struct block {
    struct block *next;
} block_t;

// one header per region; tracks the region's block size and owning thread
typedef struct header {
    uint8_t owner; // thread id of owner
    uint8_t scale; // log2 of the block size
} header_t;
-static header_t *region_header_ = NULL;
+typedef struct tl {
+ block_t *free_blocks[MAX_SCALE+1];
+ block_t *blocks_from[MAX_NUM_THREADS];
+ block_t *blocks_to[MAX_NUM_THREADS];
+} __attribute__((aligned(CACHE_LINE_SIZE))) tl_t;
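+// Per-thread allocator state. free_blocks[s] is the private free list of scale-<s>
+// blocks. blocks_from[i] heads a queue of blocks that thread <i> freed back to us;
+// blocks_to[i] caches the tail of the queue we append onto in tl_[i].blocks_from,
+// so each (producer, consumer) pair shares a single-writer queue and needs no locks.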
+
+static header_t *headers_ = NULL;
-// TODO: experiment with different memory layouts (i.e. separate private and public lists)
-static block_t free_list_[MAX_NUM_THREADS][MAX_SCALE+1][MAX_NUM_THREADS];
+static tl_t tl_[MAX_NUM_THREADS] = {};
-static void *get_new_region (int scale) {
- if (scale < REGION_SCALE) {
- scale = REGION_SCALE;
+static inline header_t *get_header (void *r) {
+ return headers_ + ((size_t)r >> REGION_SCALE);
+}
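+// every pointer into a region maps to that region's header, which is how nbd_free()
+// recovers a block's scale and owning thread from a bare pointer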
+
+static void *get_new_region (int block_scale) {
+    size_t sz = (1ULL << block_scale); // 64 bit shift: block_scale can reach MAX_SCALE (31)
+ if (sz < REGION_SIZE) {
+ sz = REGION_SIZE;
}
- TRACE("m0", "get_new_region(): mmap new region scale: %llu", scale, 0);
- void *region = mmap(NULL, (1 << scale), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ void *region = mmap(NULL, sz, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ TRACE("m1", "get_new_region: mmapped new region %p (size %p)", region, sz);
if (region == (void *)-1) {
perror("get_new_region: mmap");
exit(-1);
}
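+    // mmap() only promises page alignment. If the region is not aligned to its own
+    // size, map twice as much and trim: a 2*<sz> mapping must contain an <sz>-aligned
+    // span of <sz> bytes, so we keep that span and munmap() the slack on either side.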
+ if ((size_t)region & (sz - 1)) {
+ TRACE("m0", "get_new_region: region not aligned", 0, 0);
+ munmap(region, sz);
+ region = mmap(NULL, sz * 2, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if (region == (void *)-1) {
+ perror("get_new_region: mmap");
+ exit(-1);
+ }
+ TRACE("m0", "get_new_region: mmapped new region %p (size %p)", region, sz * 2);
+ void *aligned = (void *)(((size_t)region + sz) & ~(sz - 1));
+ size_t extra = (char *)aligned - (char *)region;
+ if (extra) {
+ munmap(region, extra);
+ TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", region, extra);
+ }
+ extra = ((char *)region + sz) - (char *)aligned;
+ if (extra) {
+ munmap((char *)aligned + sz, extra);
+ TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", (char *)aligned + sz, extra);
+ }
+ region = aligned;
+ }
assert(region);
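+    // headers_ is still NULL when mem_init() calls us to create the header region
+    // itself; in that bootstrap case there is no header table to record it in yet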
+ if (headers_ != NULL) {
+ LOCALIZE_THREAD_LOCAL(tid_, int);
+ header_t *h = get_header(region);
+ TRACE("m1", "get_new_region: header %p (%p)", h, h - headers_);
+
+ assert(h->scale == 0);
+ h->scale = block_scale;
+ h->owner = tid_;
+ }
+
return region;
}
-void mem_init (void) {
- assert(region_header_ == NULL);
- region_header_ = (header_t *)get_new_region(HEADER_REGION_SCALE);
- memset(region_header_, 0, REGION_SIZE);
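+// constructor priority 101 runs before default-priority constructors (0-100 are
+// reserved), so the allocator is ready before other modules' constructors run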
+__attribute__ ((constructor(101))) void mem_init (void) {
+#ifdef USE_SYSTEM_MALLOC
+ return;
+#endif
+ assert(headers_ == NULL);
+    // Allocate a region for the region headers. This could be a big chunk of memory (256MB) on
+    // 64 bit systems, but it only takes up virtual address space. The physical memory backing
+    // the headers remains proportional to the amount of memory we actually allocate.
+ headers_ = (header_t *)get_new_region(HEADER_REGION_SCALE);
+ TRACE("m1", "mem_init: header region %p", headers_, 0);
+ memset(headers_, 0, (1 << HEADER_REGION_SCALE));
}
// Return <x> to its owner: put it on our private free list if we are the owner, otherwise
// push it onto the owner's public queue (in the appropriate size bin).
//
-// TODO: maybe we want to munmap() larger size blocks to reclaim virtual address space?
+// TODO: maybe we want to munmap() larger size blocks?
void nbd_free (void *x) {
+#ifdef USE_SYSTEM_MALLOC
+ TRACE("m1", "nbd_free: %p", x, 0);
+#ifndef NDEBUG
+ //memset(x, 0xcd, sizeof(void *)); // bear trap
+#endif//NDEBUG
+ free(x);
+ return;
+#endif//USE_SYSTEM_MALLOC
+ TRACE("m1", "nbd_free: block %p region %p", x, (size_t)x & ~MASK(REGION_SCALE));
+
+ assert(x);
LOCALIZE_THREAD_LOCAL(tid_, int);
block_t *b = (block_t *)x;
- assert(((size_t)b >> REGION_SCALE) < ((1 << HEADER_REGION_SCALE) / sizeof(header_t)));
- header_t *h = region_header_ + ((size_t)b >> REGION_SCALE);
- TRACE("m0", "nbd_free(): block %p scale %llu", x, h->scale);
- block_t *l = &free_list_[h->owner][h->scale][tid_];
- TRACE("m0", "nbd_free(): free list %p first block %p", l, l->next);
- b->next = l->next;
- l->next = b;
+ header_t *h = get_header(x);
+ TRACE("m1", "nbd_free: header %p scale %llu", h, h->scale);
+ assert(h->scale && h->scale <= MAX_SCALE);
+#ifndef NDEBUG
+    memset(b, 0xcd, ((size_t)1 << h->scale)); // bear trap
+#endif
+ tl_t *tl = &tl_[tid_]; // thread-local data
+ if (h->owner == tid_) {
+ TRACE("m1", "nbd_free: private block, old free list head %p", tl->free_blocks[h->scale], 0);
+ b->next = tl->free_blocks[h->scale];
+ tl->free_blocks[h->scale] = b;
+ } else {
+ TRACE("m1", "nbd_free: owner %llu", h->owner, 0);
+        // push <b> onto its owner's queue
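+        // single producer (us), single consumer (the owner), so no CAS is needed:
+        // the VOLATILE() stores (assumed to force ordered writes) publish either a
+        // new queue head, if the owner's queue was empty, or the old tail's next
+        // pointer; blocks_to[] remembers the tail for our next push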
+ VOLATILE(b->next) = NULL;
+ if (EXPECT_FALSE(tl->blocks_to[h->owner] == NULL)) {
+ VOLATILE(tl_[h->owner].blocks_from[tid_]) = b;
+ } else {
+ VOLATILE(tl->blocks_to[h->owner]->next) = b;
+ }
+ tl->blocks_to[h->owner] = b;
+ }
}
// Allocate a block of memory of at least size <n>. Blocks are binned in powers of two. Round up
// <n> to the nearest power of two.
//
// First check the current thread's private free list for an available block. If no blocks are on
-// the private free list, pull all the available blocks off of the current thread's public free
-// lists and put them on the private free list. If we didn't find any blocks on the public free
-// lists, open a new region, break it up into blocks and put them on the private free list.
+// the private free list, pull blocks off of the current thread's public free lists and put them
+// on the private free list. If we didn't find any blocks on the public free lists, allocate a new
+// region, break it up into blocks and put them on the private free list.
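+//
+// A minimal usage sketch (illustrative; assumes the nbd runtime has initialized the
+// calling thread so tid_ is valid):
+//
+//     void *p = nbd_malloc(100); // rounded up to a 128 byte block
+//     nbd_free(p);               // back onto this thread's private free list
+//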
void *nbd_malloc (size_t n) {
- LOCALIZE_THREAD_LOCAL(tid_, int);
+#ifdef USE_SYSTEM_MALLOC
+ TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, GET_SCALE(n));
+ void *x = malloc(n);
+ TRACE("m1", "nbd_malloc: returning %p", x, 0);
+ return x;
+#endif
+ if (EXPECT_FALSE(n == 0))
+ return NULL;
if (n < sizeof(block_t)) {
n = sizeof(block_t);
}
int b_scale = GET_SCALE(n);
+ assert(b_scale >= 2);
assert(b_scale <= MAX_SCALE);
- TRACE("m0", "nbd_malloc(): size %llu scale %llu", n, b_scale);
- block_t *fls = free_list_[tid_][b_scale]; // our free lists
- block_t *pri = fls + tid_; // our private free list
- TRACE("m0", "nbd_malloc(): private free list %p first block %p", pri, pri->next);
-
- // If our private free list is empty, fill it up with blocks from our public free lists
- if (EXPECT_FALSE(pri->next == NULL)) {
- int cnt = 0;
- block_t *last = pri;
- for (int i = 0; i < MAX_NUM_THREADS; ++i) {
- TRACE("m0", "nbd_malloc(): searching public free lists (%llu)", i, 0);
- block_t *pub = fls + i; // one of our public free lists
- TRACE("m0", "nbd_malloc(): public free list %p first block %p", pub, pub->next);
- if (EXPECT_FALSE(pub == pri))
- continue;
-
- if (pub->next != NULL) {
- block_t *stolen = SYNC_SWAP(&pub->next, NULL);
- TRACE("m0", "nbd_malloc(): stole list %p first block %p", pub, stolen);
- if (stolen) {
- last->next = stolen;
- TRACE("m0", "nbd_malloc(): append to last block %p of private free list", last, 0);
- while (last->next) {
- ++cnt;
- TRACE("m0", "nbd_malloc(): find last block in list: last %p last->next %p",
- last, last->next);
- last = last->next;
- }
+ TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, b_scale);
+ LOCALIZE_THREAD_LOCAL(tid_, int);
+ tl_t *tl = &tl_[tid_]; // thread-local data
+
+ // If our private free list is empty, try to find blocks on our public free list. If that fails,
+ // allocate a new region.
+ if (EXPECT_FALSE(tl->free_blocks[b_scale] == NULL)) {
+        for (int i = 0; i < MAX_NUM_THREADS; ++i) {
+ block_t *x = tl->blocks_from[i];
+ if (x != NULL) {
+ block_t *next = x->next;
+ if (next != NULL) {
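+                    // drain all but the last block: the producer still holds the
+                    // tail in its blocks_to[] and may write through tail->next at
+                    // any time, so a block is only safe to take once it has a
+                    // non-NULL successor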
+ do {
+ header_t *h = get_header(x);
+ x->next = tl->free_blocks[h->scale];
+ tl->free_blocks[h->scale] = x;
+ x = next;
+ next = x->next;
+ } while (next != NULL);
+ tl->blocks_from[i] = x;
}
}
}
- TRACE("m0", "nbd_malloc(): moved %llu blocks from public to private free lists", cnt, 0);
-
- if (b_scale >= REGION_SCALE) {
- if (cnt == 0) {
- assert(pri->next == NULL);
- pri->next = (block_t *)get_new_region(b_scale);
- assert(pri->next->next == NULL);
- }
- assert(pri->next);
-
- } else if (cnt < (1 << (REGION_SCALE - b_scale - 1))) {
-
- // Even if we took a few blocks from our public lists we still break open a new region.
- // This guarentees that we are amortizing the cost of accessing our public lists accross
- // many nbd_malloc() calls.
- char *region = get_new_region(b_scale);
+ // allocate a new region
+ if (tl->free_blocks[b_scale] == NULL) {
+ char *region = get_new_region(b_scale);
-        size_t b_size = 1 << b_scale;
+        size_t b_size = (size_t)1 << b_scale; // 64 bit shift: b_scale can reach MAX_SCALE (31)
- for (int i = REGION_SIZE; i != 0; i -= b_size) {
+ size_t region_size = (b_size < REGION_SIZE) ? REGION_SIZE : b_size;
+        for (size_t i = region_size; i != 0; i -= b_size) { // size_t: region_size can exceed INT_MAX
block_t *b = (block_t *)(region + i - b_size);
- b->next = pri->next;
- //TRACE("m1", "nbd_malloc(): put new block %p ahead of %p on private list", b, b->next);
- pri->next = b;
- *b = *b;
+ b->next = tl->free_blocks[b_scale];
+ tl->free_blocks[b_scale] = b;
}
}
-
- assert(pri->next);
+ assert(tl->free_blocks[b_scale] != NULL);
}
// Pull a block off of our private free list.
- block_t *b = pri->next;
- TRACE("m0", "nbd_malloc(): take block %p off of of private list (new head is %p)", b, pri->next);
- pri->next = b->next;
-
- assert(b);
+ block_t *b = tl->free_blocks[b_scale];
+ TRACE("m1", "nbd_malloc: returning block %p (region %p) from private list", b, (size_t)b & ~MASK(REGION_SCALE));
+    assert(b);
+    assert(get_header(b)->scale == b_scale);
+ tl->free_blocks[b_scale] = b->next;
return b;
}