/*
 * Written by Josh Dybnis and released to the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 *
 * Extremely fast multi-threaded malloc.
 */
#define _BSD_SOURCE // so we get MAP_ANON on linux

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <sys/mman.h>
#include "common.h" // assumed project header providing TRACE, ASSERT, MASK, VOLATILE, EXPECT_FALSE,
                    // LOCALIZE_THREAD_LOCAL, tid_, MAX_NUM_THREADS, and CACHE_LINE_SIZE
#define GET_SCALE(n) (sizeof(void *)*__CHAR_BIT__ - __builtin_clzl((n) - 1)) // log2 of <n>, rounded up
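
// A worked example of GET_SCALE (my annotation, illustration only): on a 64-bit
// platform sizeof(void *)*__CHAR_BIT__ == 64 and __builtin_clzl counts leading
// zero bits, so GET_SCALE(4096) == 64 - clzl(4095) == 12 while
// GET_SCALE(4097) == 64 - clzl(4096) == 13: exact powers of two map to their
// exponent, everything else rounds up. Note that GET_SCALE(1) would evaluate
// __builtin_clzl(0), which is undefined; the callers below always pass
// n >= sizeof(block_t).
#if 0 // not part of the build
static void get_scale_example (void) {
    printf("%d %d\n", (int)GET_SCALE(4096), (int)GET_SCALE(4097)); // prints "12 13"
}
#endif
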
#ifndef NBD32
#define MAX_POINTER_BITS 48
#define REGION_SCALE 21 // 2MB regions
#else
#define MAX_POINTER_BITS 32
#define REGION_SCALE 12 // 4KB regions
#endif
#define REGION_SIZE (1 << REGION_SCALE)
#define HEADER_REGION_SCALE ((MAX_POINTER_BITS - REGION_SCALE) + GET_SCALE(sizeof(header_t)))
#define MAX_SCALE 31 // allocate blocks up to 4GB in size (arbitrary, could be bigger)
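
// Sizing arithmetic for the table of headers (my annotation): with 48 pointer
// bits and 2MB regions there are at most 2^(48-21) == 2^27 addressable regions,
// and sizeof(header_t) == 2, so GET_SCALE(sizeof(header_t)) == 1 and
// HEADER_REGION_SCALE == 27 + 1 == 28: the table needs 2^28 bytes == 256MB of
// virtual address space, matching the comment in mem_init below.
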
typedef struct block {
    struct block *next;
} block_t;

// One header per region, indexed by the region's address; see get_header().
typedef struct header {
    uint8_t owner; // thread id of owner
    uint8_t scale; // log2 of the block size
} header_t;

// Per-thread state, cache-line aligned to avoid false sharing between threads.
typedef struct tl {
    block_t *free_blocks[MAX_SCALE+1];     // private free lists, binned by block scale
    block_t *blocks_from[MAX_NUM_THREADS]; // public free lists: blocks other threads freed back to us
    block_t *blocks_to[MAX_NUM_THREADS];   // our cached tails of the public lists we append to for other threads
} __attribute__((aligned(CACHE_LINE_SIZE))) tl_t;

static header_t *headers_ = NULL;

static tl_t tl_[MAX_NUM_THREADS] = {};
static inline header_t *get_header (void *r) {
    return headers_ + ((size_t)r >> REGION_SCALE);
}
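
// Example of the address-to-header mapping (my annotation, not in the original
// source): with REGION_SCALE == 21, a block at 0x7f3a00412340 lies in the 2MB
// region starting at 0x7f3a00400000, and (0x7f3a00412340 >> 21) == 0x3f9d002,
// so its header is headers_[0x3f9d002]. All blocks in a region share one
// header, which is why a region only ever holds blocks of a single scale.
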
static void *get_new_region (int block_scale) {
    size_t sz = (size_t)1 << block_scale; // (size_t)1 so scales >= 31 don't overflow a 32-bit int
    if (sz < REGION_SIZE) {
        sz = REGION_SIZE;
    }
    void *region = mmap(NULL, sz, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    TRACE("m1", "get_new_region: mmapped new region %p (size %p)", region, sz);
    if (region == MAP_FAILED) {
        perror("get_new_region: mmap");
        exit(-1);
    }
    if ((size_t)region & (sz - 1)) {
        // mmap gave us a region that is not aligned to its own size. Unmap it, map twice the
        // size, and trim the excess off both ends of the new mapping.
        TRACE("m0", "get_new_region: region not aligned", 0, 0);
        munmap(region, sz);
        region = mmap(NULL, sz * 2, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
        if (region == MAP_FAILED) {
            perror("get_new_region: mmap");
            exit(-1);
        }
        TRACE("m0", "get_new_region: mmapped new region %p (size %p)", region, sz * 2);
        void *aligned = (void *)(((size_t)region + sz) & ~(sz - 1));
        size_t extra = (char *)aligned - (char *)region;
        if (extra) {
            munmap(region, extra);
            TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", region, extra);
        }
        extra = ((char *)region + sz) - (char *)aligned;
        if (extra) {
            munmap((char *)aligned + sz, extra);
            TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", (char *)aligned + sz, extra);
        }
        region = aligned;
    }
    if (headers_ != NULL) {
        // Record ownership and block size for the new region. Skipped for the very first
        // region, which holds the headers themselves (headers_ is still NULL in mem_init).
        LOCALIZE_THREAD_LOCAL(tid_, int);
        header_t *h = get_header(region);
        TRACE("m1", "get_new_region: header %p (%p)", h, h - headers_);
        assert(h->scale == 0);
        h->scale = block_scale;
        h->owner = tid_;
    }

    return region;
}
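
// The over-map-and-trim technique above in isolation (a minimal sketch; the
// function name align_alloc is my own, the original simply inlines the idea):
// mmap guarantees only page alignment, so to get a region aligned to its own
// size we map twice the size, round the start up to the next multiple of the
// size, and unmap the slack on both ends.
#if 0 // not part of the build
static void *align_alloc (size_t sz) { // sz must be a power of two >= the page size
    char *raw = mmap(NULL, sz * 2, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    if (raw == MAP_FAILED)
        return NULL;
    char *aligned = (char *)(((size_t)raw + sz) & ~(sz - 1));
    munmap(raw, aligned - raw);                     // slack before the aligned start
    if (raw + sz != aligned)
        munmap(aligned + sz, (raw + sz) - aligned); // slack after the aligned end
    return aligned;
}
#endif
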
__attribute__ ((constructor(101))) void mem_init (void) {
#ifdef USE_SYSTEM_MALLOC
    return;
#endif
    assert(headers_ == NULL);
    // Allocate a region for the region headers. This could be a big chunk of memory (256MB) on
    // 64 bit systems, but it only takes up virtual address space. The physical memory used by
    // the headers is still proportional to the amount of memory we allocate.
    headers_ = (header_t *)get_new_region(HEADER_REGION_SCALE);
    TRACE("m1", "mem_init: header region %p", headers_, 0);
    memset(headers_, 0, (size_t)1 << HEADER_REGION_SCALE);
}
// Put <x> onto its owner's public free list (in the appropriate size bin).
//
// TODO: maybe we want to munmap() larger size blocks?
void nbd_free (void *x) {
#ifdef USE_SYSTEM_MALLOC
    TRACE("m1", "nbd_free: %p", x, 0);
#ifndef NDEBUG
    //memset(x, 0xcd, sizeof(void *)); // bear trap
#endif//NDEBUG
    free(x);
    return;
#endif//USE_SYSTEM_MALLOC
123 TRACE("m1", "nbd_free: block %p region %p", x, (size_t)x & ~MASK(REGION_SCALE));
126 LOCALIZE_THREAD_LOCAL(tid_, int);
127 block_t *b = (block_t *)x;
128 header_t *h = get_header(x);
129 TRACE("m1", "nbd_free: header %p scale %llu", h, h->scale);
130 assert(h->scale && h->scale <= MAX_SCALE);
132 memset(b, 0xcd, (1 << h->scale)); // bear trap
134 tl_t *tl = &tl_[tid_]; // thread-local data
    if (h->owner == tid_) {
        // We own this block: push it onto our private free list. No synchronization is
        // needed because only we touch these lists.
        TRACE("m1", "nbd_free: private block, old free list head %p", tl->free_blocks[h->scale], 0);
        b->next = tl->free_blocks[h->scale];
        tl->free_blocks[h->scale] = b;
    } else {
        TRACE("m1", "nbd_free: owner %llu", h->owner, 0);
        // Push <b> onto the tail of its owner's public free list. We are the only thread
        // that appends to this particular (owner, tid_) list, so plain volatile stores
        // suffice; the owner only reads the <next> pointers.
        VOLATILE(b->next) = NULL;
        if (EXPECT_FALSE(tl->blocks_to[h->owner] == NULL)) {
            // First block we have ever freed back to this owner: publish it as the list head.
            VOLATILE(tl_[h->owner].blocks_from[tid_]) = b;
        } else {
            VOLATILE(tl->blocks_to[h->owner]->next) = b;
        }
        tl->blocks_to[h->owner] = b;
    }
}
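
// The cross-thread free path in miniature (my summary, not from the original
// source): each (freeing thread, owner) pair has its own singly-linked list,
// with exactly one writer appending at the tail (the freeing thread, through
// blocks_to) and one reader draining from the head (the owner, in nbd_malloc
// below). A single-producer/single-consumer list needs no atomic
// read-modify-write operations, only the VOLATILE() stores above plus the
// platform's store ordering, which is what keeps freeing a foreign block cheap.
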
// Allocate a block of memory at least size <n>. Blocks are binned in powers-of-two; <n> is
// rounded up to the next power of two.
//
// First check the current thread's private free list for an available block. If there are no
// blocks on the private free list, pull blocks off of the current thread's public free lists and
// put them on the private free list. If that also yields nothing, allocate a new region, break
// it up into blocks, and put them on the private free list.
void *nbd_malloc (size_t n) {
#ifdef USE_SYSTEM_MALLOC
    TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, GET_SCALE(n));
    void *x = malloc(n);
    TRACE("m1", "nbd_malloc: returning %p", x, 0);
    return x;
#endif
    if (EXPECT_FALSE(n == 0))
        return NULL;
    if (n < sizeof(block_t)) {
        n = sizeof(block_t); // every free block must be able to hold a <next> pointer
    }
    int b_scale = GET_SCALE(n);
    assert(b_scale >= 2);
    assert(b_scale <= MAX_SCALE);
    TRACE("m1", "nbd_malloc: request size %llu (scale %llu)", n, b_scale);
    LOCALIZE_THREAD_LOCAL(tid_, int);
    tl_t *tl = &tl_[tid_]; // thread-local data
    // If our private free list is empty, try to find blocks on our public free lists. If that
    // fails, allocate a new region.
    if (EXPECT_FALSE(tl->free_blocks[b_scale] == NULL)) {
        for (int i = 0; i < MAX_NUM_THREADS; ++i) {
            block_t *x = tl->blocks_from[i];
            if (x != NULL && x->next != NULL) {
                block_t *next = x->next;
                do {
                    // Move each block onto the private free list for its size class.
                    header_t *h = get_header(x);
                    x->next = tl->free_blocks[h->scale];
                    tl->free_blocks[h->scale] = x;
                    x = next;
                    next = x->next;
                } while (next != NULL);
                // Leave the final block on the public list. The thread that freed it still
                // holds a pointer to it in its blocks_to[] and may append through it at any
                // moment, so reclaiming it here would race with that append.
                tl->blocks_from[i] = x;
            }
        }
        // If the public lists didn't yield a block of the right size, allocate a new region
        // and break the whole thing up into blocks of this size class.
        if (tl->free_blocks[b_scale] == NULL) {
            char *region = get_new_region(b_scale);
            size_t b_size = (size_t)1 << b_scale;
            size_t region_size = (b_size < REGION_SIZE) ? REGION_SIZE : b_size;
            // Walk backward so the free list ends up in ascending address order.
            for (size_t i = region_size; i != 0; i -= b_size) {
                block_t *b = (block_t *)(region + i - b_size);
                b->next = tl->free_blocks[b_scale];
                tl->free_blocks[b_scale] = b;
            }
        }
        assert(tl->free_blocks[b_scale] != NULL);
    }
    // Pull a block off of our private free list.
    block_t *b = tl->free_blocks[b_scale];
    TRACE("m1", "nbd_malloc: returning block %p (region %p) from private list", b, (size_t)b & ~MASK(REGION_SCALE));
    ASSERT(get_header(b)->scale == b_scale);
    tl->free_blocks[b_scale] = b->next;
    return b;
}
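
// A minimal usage sketch (my illustration, not from the original source; it
// assumes the calling thread has been given a distinct tid_ by the rest of the
// nbd library before it touches the allocator):
#if 0 // not part of the build
static void example (void) {
    int *a = (int *)nbd_malloc(100 * sizeof(int)); // 400 bytes rounds up to a 512-byte block (scale 9)
    a[0] = 1;
    nbd_free(a); // freed by the owning thread: goes straight onto its private free list
}
#endif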