/*
 * Written by Josh Dybnis and released to the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 *
 * Extremely fast multi-threaded malloc. 64-bit platforms only!
 */
#define _BSD_SOURCE // so we get MAP_ANON on Linux
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include "common.h" // assumed to provide TRACE(), MASK(), SYNC_CAS(), SYNC_SWAP(),
                    // EXPECT_FALSE(), LOCALIZE_THREAD_LOCAL(), ASSERT(), MAX_NUM_THREADS, tid_
#define GET_SCALE(n) (sizeof(void *)*__CHAR_BIT__ - __builtin_clzl((n) - 1)) // log2 of <n>, rounded up
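// e.g. GET_SCALE(4096) == 12 and GET_SCALE(4097) == 13. Note that GET_SCALE(1) is
// undefined: __builtin_clzl(0) is undefined behavior, so callers must pass n >= 2.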
#define MAX_SCALE 31 // allocate blocks up to 2GB in size (arbitrary, could be bigger)
#define REGION_SCALE 22 // 4MB regions
#define REGION_SIZE (1 << REGION_SCALE)
#define HEADER_REGION_SCALE 22 // 4MB is enough space for headers for over 2,000,000 regions
#define HEADER_REGION_SIZE (1 << HEADER_REGION_SCALE)
#define HEADER_COUNT (HEADER_REGION_SIZE / sizeof(header_t))
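// Note: header_t is two uint8_t fields, so sizeof(header_t) == 2 and HEADER_COUNT is
// 4MB / 2 = 2,097,152 -- the "over 2,000,000 regions" mentioned above.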

typedef struct block {
    struct block *next;
} block_t;

typedef struct header {
    uint8_t owner; // thread id of owner
    uint8_t scale; // log2 of the block size
} header_t;

typedef struct private_list {
    block_t *head;
    uint32_t next_pub; // next public free list to steal from (round robin)
    uint32_t count;    // blocks pulled from our public free lists since the last new region
} private_list_t;

static header_t *headers_ = NULL;

static block_t *pub_free_list_[MAX_NUM_THREADS][MAX_SCALE+1][MAX_NUM_THREADS] = {};
static private_list_t pri_free_list_[MAX_NUM_THREADS][MAX_SCALE+1] = {};
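
// Map an address to the header of the region enclosing it: the region number
// ((size_t)r >> REGION_SCALE) indexes the header table, modulo HEADER_COUNT.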
static inline header_t *get_header (void *r) {
    return headers_ + (((size_t)r >> REGION_SCALE) & (HEADER_COUNT - 1));
}
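
// mmap a new region and record its block scale in the region's header. Regions are
// at least REGION_SIZE; blocks larger than a region get a region the size of the block.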
static void *get_new_region (int block_scale) {
    size_t sz = (size_t)1 << block_scale;
    if (sz < REGION_SIZE) {
        sz = REGION_SIZE;
    }
    void *region = mmap(NULL, sz, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    TRACE("m1", "get_new_region: mmap new region %p (size %p)", region, sz);
    if (region == (void *)-1) {
        perror("get_new_region: mmap");
        exit(-1);
    }

    if (headers_ != NULL) {
        LOCALIZE_THREAD_LOCAL(tid_, int);
        header_t *h = get_header(region);
        TRACE("m1", "get_new_region: header %p (%p)", h, h - headers_);
        assert(h->scale == 0);
        h->scale = block_scale;
        h->owner = tid_;
    }

    return region;
}

void mem_init (void) {
#ifdef USE_SYSTEM_MALLOC
    return;
#endif//USE_SYSTEM_MALLOC
    assert(headers_ == NULL);
    headers_ = (header_t *)get_new_region(HEADER_REGION_SCALE);
    TRACE("m1", "mem_init: header region %p", headers_, 0);
    memset(headers_, 0, HEADER_REGION_SIZE);
}

// Put <x> onto its owner's public free list (in the appropriate size bin).
//
// TODO: maybe we want to munmap() larger size blocks to reclaim virtual address space?
void nbd_free (void *x) {
#ifdef USE_SYSTEM_MALLOC
    TRACE("m1", "nbd_free: %p", x, 0);
#ifndef NDEBUG
    memset(x, 0xcd, sizeof(void *)); // bear trap
#endif//NDEBUG
    free(x);
    return;
#endif//USE_SYSTEM_MALLOC
95 TRACE("m1", "nbd_free: block %p region %p", x, (size_t)x & ~MASK(REGION_SCALE));
98 LOCALIZE_THREAD_LOCAL(tid_, int);
99 block_t *b = (block_t *)x;
100 header_t *h = get_header(x);
101 TRACE("m1", "nbd_free: header %p scale %llu", h, h->scale);
102 assert(h->scale && h->scale <= MAX_SCALE);
104 memset(b, 0xcd, (1 << h->scale)); // bear trap
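
    // A block must be returned to its owner's free lists. If we own it, push it onto
    // our private free list; otherwise CAS it onto our slot in the owner's public list.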
    if (h->owner == tid_) {
        TRACE("m1", "nbd_free: private block, old free list head %p", pri_free_list_[tid_][h->scale].head, 0);
        b->next = pri_free_list_[tid_][h->scale].head;
        pri_free_list_[tid_][h->scale].head = b;
111 TRACE("m1", "nbd_free: owner %llu free list head %p", h->owner, pub_free_list_[h->owner][h->scale][tid_]);
        do {
            b->next = pub_free_list_[h->owner][h->scale][tid_];
        } while (SYNC_CAS(&pub_free_list_[h->owner][h->scale][tid_], b->next, b) != b->next);
    }
}

// Allocate a block of memory at least size <n>. Blocks are binned in powers-of-two;
// <n> is rounded up to the next power of two.
//
// First check the current thread's private free list for an available block. If no blocks are on
// the private free list, pull blocks off of the current thread's public free lists and put them
// on the private free list. If that fails too, allocate a new region, break it up into blocks,
// and put them on the private free list.
void *nbd_malloc (size_t n) {
#ifdef USE_SYSTEM_MALLOC
    TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, GET_SCALE(n));
    void *x = malloc(n);
    TRACE("m1", "nbd_malloc: returning %p", x, 0);
    return x;
#endif//USE_SYSTEM_MALLOC
    if (EXPECT_FALSE(n == 0))
        return NULL;
    if (n < sizeof(block_t)) {
        n = sizeof(block_t); // the block must at least hold the free-list <next> pointer
    }
    int b_scale = GET_SCALE(n);
    assert(b_scale >= 2);
    assert(b_scale <= MAX_SCALE);
    TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, b_scale);
    LOCALIZE_THREAD_LOCAL(tid_, int);
    private_list_t *pri = &pri_free_list_[tid_][b_scale]; // our private free list

    // If our private free list is empty, try to find blocks on our public free lists. If
    // that fails, allocate a new region.
    if (EXPECT_FALSE(pri->head == NULL)) {
        block_t **pubs = pub_free_list_[tid_][b_scale]; // our public free lists
        while (1) {
            // look for blocks on our public free lists round robin
            pri->next_pub = (pri->next_pub+1) & (MAX_NUM_THREADS-1);
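            // (this assumes MAX_NUM_THREADS is a power of two, so the mask wraps correctly)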

            TRACE("m1", "nbd_malloc: searching public free list %llu", pri->next_pub, 0);
            if (pri->next_pub == tid_) {
                uint32_t count = pri->count;
                pri->count = 0;
                // If we haven't gotten at least half a region's worth of blocks from our public lists,
                // we allocate a new region. This guarantees that we amortize the cost of accessing our
                // public lists across enough nbd_malloc() calls.
                uint32_t min_count = b_scale < REGION_SCALE ? 1 << (REGION_SCALE-b_scale-1) : 1;
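                // e.g. with 4KB blocks (b_scale 12), a 4MB region holds 1024 blocks and
                // min_count is 1 << (22-12-1) = 512; for blocks of a region's size or
                // larger, any nonempty steal is enough.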
                if (count < min_count) {
                    char  *region = get_new_region(b_scale);
                    size_t b_size = (size_t)1 << b_scale;
                    size_t region_size = (b_size < REGION_SIZE) ? REGION_SIZE : b_size;
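                    // Carve the region into blocks back-to-front, pushing each onto the
                    // private free list so the list ends up in ascending address order.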
                    for (size_t i = region_size; i != 0; i -= b_size) {
                        block_t *b = (block_t *)(region + i - b_size);
                        b->next = pri->head;
                        pri->head = b;
                    }
                    break;
                }

            } else if (pubs[pri->next_pub] != NULL) {
                block_t *stolen = SYNC_SWAP(&pubs[pri->next_pub], NULL);
                TRACE("m1", "nbd_malloc: stole list %p", stolen, 0);
                if (stolen == NULL)
                    continue;

                // Splice the stolen blocks onto our private free list, counting them so
                // the amortization check above sees how much the public lists yielded.
                block_t *last = stolen;
                while (last->next != NULL) {
                    ++pri->count;
                    last = last->next;
                }
                ++pri->count;
                last->next = pri->head;
                pri->head = stolen;
                break;
            }
        }
    }

    // Pull a block off of our private free list.
    block_t *b = pri->head;
    TRACE("m1", "nbd_malloc: returning block %p (region %p) from private list", b, (size_t)b & ~MASK(REGION_SCALE));
    pri->head = b->next;
    ASSERT(get_header(b)->scale == b_scale);
    return b;
}
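
// Usage sketch (assumes the runtime has already called mem_init() and assigned tid_
// for the calling thread before any allocation):
//
//     void *x = nbd_malloc(n); // block of >= n bytes; n rounds up to a power of two
//     nbd_free(x);             // returns x to its bin, possibly via a public free list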