/*
 * Written by Josh Dybnis and released to the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 *
 * Extremely fast multi-threaded malloc.
 */
#ifndef USE_SYSTEM_MALLOC
#define _BSD_SOURCE // so we get MAP_ANON on linux
#include <stdio.h>    // perror
#include <string.h>   // memset
#include <sys/mman.h> // mmap, munmap
#define MAX_SCALE 31 // allocate blocks up to 2GB (arbitrary, could be bigger)
#if defined(__x86_64__) // assumed test for 64-bit targets
#define MIN_SCALE 3 // smallest allocated block is 8 bytes
#define MAX_POINTER_BITS 48
#define PAGE_SCALE 21 // 2MB pages
#else // 32-bit targets
#define MIN_SCALE 2 // smallest allocated block is 4 bytes
#define MAX_POINTER_BITS 32
#define PAGE_SCALE 12 // 4KB pages
#endif

#define PAGE_SIZE (1 << PAGE_SCALE)
#define HEADERS_SIZE (((size_t)1 << (MAX_POINTER_BITS - PAGE_SCALE)) * sizeof(header_t))
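// A quick sizing example (illustrative; the exact figure depends on sizeof(header_t)):
// with MAX_POINTER_BITS 48 and PAGE_SCALE 21 there are 2^(48-21) = 2^27 page headers,
// so HEADERS_SIZE is 2^27 * sizeof(header_t) bytes of *virtual* address space. Only the
// parts of the header table that are actually touched ever consume physical memory.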
typedef struct block {
    struct block *next; // a free block holds the link to the next free block
} block_t;
// TODO: Break the page header into two parts. The first part is located in the header region. The
// second part is located on the page and is only used when there are free items.
typedef struct header {
    block_t *free_list; // list of free blocks
    uint8_t owner;      // thread id of owner
    uint8_t scale;      // log2 of the block size

typedef struct size_class {
    header_t *active_page;    // page blocks are currently being allocated from
    header_t *oldest_partial; // doubly-linked list of partially allocated pages
    header_t *newest_partial;

    block_t *free_list[MAX_SCALE+1];       // per-scale private free lists
    size_class_t size_class[MAX_SCALE+1];  // per-scale page state (used with RECYCLE_PAGES)
    block_t *blocks_from[MAX_NUM_THREADS]; // heads of incoming queues of blocks freed by other threads
    block_t *blocks_to[MAX_NUM_THREADS];   // tails of outgoing queues, one per owner thread
} __attribute__((aligned(CACHE_LINE_SIZE))) tl_t;
static header_t *headers_ = NULL;      // one header per page, indexed by (page address >> PAGE_SCALE)

static tl_t tl_[MAX_NUM_THREADS] = {}; // per-thread allocator state
static inline header_t *get_header (void *r) {
    ASSERT(((size_t)r >> PAGE_SCALE) < (HEADERS_SIZE / sizeof(header_t)));
    return headers_ + ((size_t)r >> PAGE_SCALE);
}
static void *get_new_region (int block_scale) {
    LOCALIZE_THREAD_LOCAL(tid_, int);
    tl_t *tl = &tl_[tid_]; // thread-local data
    if (block_scale <= PAGE_SCALE && tl->free_pages != NULL) {
        // reuse a page this thread freed earlier
        void *region = tl->free_pages;
        tl->free_pages = tl->free_pages->next;
        get_header(region)->scale = block_scale;
        return region;
    }

    size_t region_size = ((size_t)1 << block_scale);
    if (region_size < PAGE_SIZE) {
        region_size = PAGE_SIZE;
    }

    void *region = mmap(NULL, region_size, PROT_READ|PROT_WRITE, MAP_NORESERVE|MAP_ANON|MAP_PRIVATE, -1, 0);
    TRACE("m1", "get_new_region: mmapped new region %p (size %p)", region, region_size);
    if (region == (void *)-1) {
        perror("get_new_region: mmap");
    if ((size_t)region & (region_size - 1)) {
        TRACE("m0", "get_new_region: region not aligned", 0, 0);
        munmap(region, region_size);
        region = mmap(NULL, region_size * 2, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
        if (region == (void *)-1) {
            perror("get_new_region: mmap");

        TRACE("m0", "get_new_region: mmapped new region %p (size %p)", region, region_size * 2);
        void *aligned = (void *)(((size_t)region + region_size) & ~(region_size - 1));
        size_t extra = (char *)aligned - (char *)region;
        munmap(region, extra);
        TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", region, extra);
        extra = ((char *)region + region_size) - (char *)aligned;
        munmap((char *)aligned + region_size, extra);
        TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", (char *)aligned + region_size, extra);
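        // Example of the alignment fix-up above (illustrative numbers): for a 4MB region the
        // retry maps 8MB, <aligned> is rounded up to the first 4MB boundary inside that mapping,
        // and the slack before <aligned> and after <aligned> + region_size is unmapped, leaving
        // exactly one naturally aligned region of the requested size.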
    header_t *h = get_header(region);
    TRACE("m1", "get_new_region: header %p (%p)", h, h - headers_);
    assert(h->scale == 0);
    h->scale = block_scale;
void mem_init (void) {
    assert(headers_ == NULL);
    // Allocate space for the page headers. This could be a big chunk of memory on 64 bit systems,
    // but it just takes up virtual address space. Physical space used by the headers is still
    // proportional to the amount of memory the user mallocs.
    headers_ = mmap(NULL, HEADERS_SIZE, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    TRACE("m1", "mem_init: header page %p", headers_, 0);
    // initialize spsc queues
    for (int i = 0; i < MAX_NUM_THREADS; ++i) {
        for (int j = 0; j < MAX_NUM_THREADS; ++j) {
            tl_[i].blocks_to[j] = (block_t *)&(tl_[j].blocks_from[i]);
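            // Note on the wiring above: thread <i>'s blocks_to[j] starts out pointing at the
            // slot &tl_[j].blocks_from[i], cast to a block_t *. Because the cast relies on <next>
            // sitting at offset zero in block_t, the producer's first store through
            // VOLATILE_DEREF(...).next in nbd_free() lands directly in the consumer's
            // blocks_from[] slot, publishing the head of that single-producer single-consumer
            // queue without any locking.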
void nbd_free (void *x) {
    TRACE("m1", "nbd_free: block %p page %p", x, (size_t)x & ~MASK(PAGE_SCALE));
    LOCALIZE_THREAD_LOCAL(tid_, int);
    block_t *b = (block_t *)x;
    header_t *h = get_header(x);
    int b_scale = h->scale;
    TRACE("m1", "nbd_free: header %p scale %llu", h, b_scale);
    ASSERT(b_scale && b_scale <= MAX_SCALE);

    if (b_scale > PAGE_SCALE) {
        // the block is a stand-alone mmapped region; return it to the OS
        int rc = munmap(x, (size_t)1 << b_scale);

    memset(b, 0xcd, (1 << b_scale)); // bear trap
    tl_t *tl = &tl_[tid_]; // thread-local data
    if (h->owner == tid_) {
        TRACE("m1", "nbd_free: private block, old free list head %p", tl->free_list[b_scale], 0);

#ifndef RECYCLE_PAGES
        b->next = tl->free_list[b_scale];
        tl->free_list[b_scale] = b;
#else //RECYCLE_PAGES
        b->next = h->free_list;
        size_class_t *sc = &tl->size_class[b_scale];
        if (sc->active_page != h) {
            if (h->num_in_use == 0) {
                // remove <h> from the partial-page list
                if (h->next != NULL) { h->next->prev = h->prev; }
                if (h->prev != NULL) { h->prev->next = h->next; }
                // put <h> on the free-page list
                h->next = tl->free_pages;

                // move <h> to the top of the partial-page list
                if (h->next != NULL) {
                    h->next->prev = h->prev;
                    if (h->prev != NULL) { h->prev->next = h->next; }
                    h->prev = sc->newest_partial;

                sc->newest_partial = h;

#endif//RECYCLE_PAGES
        // push <b> onto its owner's queue
        int b_owner = h->owner;
        TRACE("m1", "nbd_free: owner %llu", b_owner, 0);

        // The assignment statements are volatile to prevent the compiler from reordering them.
        VOLATILE_DEREF(b).next = NULL;
        VOLATILE_DEREF(tl->blocks_to[b_owner]).next = b;

        tl->blocks_to[b_owner] = b;
static inline void process_incoming_blocks (tl_t *tl) {
    for (int p = 0; p < MAX_NUM_THREADS; ++p) {
        block_t *b = tl->blocks_from[p];
        if (EXPECT_FALSE(b == NULL)) continue; // the queue is completely empty

        // Leave the last block on the queue. Removing the last block on the queue would create a
        // race with the producer thread putting a new block on the queue.
        for (block_t *next = b->next; next != NULL; b = next, next = b->next) {
            // push <b> onto the appropriate free list
#ifndef RECYCLE_PAGES
            int b_scale = get_header(b)->scale;
            b->next = tl->free_list[b_scale];
            tl->free_list[b_scale] = b;
#else //RECYCLE_PAGES
            header_t *h = get_header(b);
            b->next = h->free_list;
            h->free_list = b;
#endif//RECYCLE_PAGES
        }
        tl->blocks_from[p] = b;
static inline block_t *pop_free_list (tl_t *tl, int scale) {
#ifndef RECYCLE_PAGES
    block_t **free_list = &tl->free_list[scale];
#else //RECYCLE_PAGES
    size_class_t *sc = &tl->size_class[scale];
    if (EXPECT_FALSE(sc->active_page == NULL))
        return NULL;
    block_t **free_list = &sc->active_page->free_list;
#endif//RECYCLE_PAGES
    block_t *b = *free_list;
    if (EXPECT_FALSE(b == NULL))
        return NULL;
    ASSERT(get_header(b)->scale == scale);
    *free_list = b->next;
    return b;
}
// Allocate a block of memory of at least <n> bytes. Blocks are binned in powers of two; <n> is
// rounded up to the nearest power of two.
//
// First check the current thread's free list for an available block. If there are no blocks on the
// free list, pull items off of the current thread's incoming block queues and push them onto the
// free list. If we didn't get an appropriate size block off of the block queues then allocate a new
// page, break it up into blocks and push them onto the free list.
void *nbd_malloc (size_t n) {
    // the scale is the log base 2 of <n>, rounded up
    int b_scale = (sizeof(void *) * __CHAR_BIT__) - __builtin_clzl((n) - 1);
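    // Worked example: nbd_malloc(100) computes 100 - 1 = 99 (7 significant bits), so on a
    // 64-bit system __builtin_clzl returns 57 and b_scale = 64 - 57 = 7, i.e. a 128-byte
    // block. Note this expression assumes n >= 2: for n <= 1, (n) - 1 is 0 and
    // __builtin_clzl(0) is undefined.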
    TRACE("m1", "nbd_malloc: size %llu (scale %llu)", n, b_scale);

    if (EXPECT_FALSE(b_scale < MIN_SCALE)) { b_scale = MIN_SCALE; }
    if (EXPECT_FALSE(b_scale > MAX_SCALE)) { return NULL; }

    LOCALIZE_THREAD_LOCAL(tid_, int);
    tl_t *tl = &tl_[tid_]; // thread-local data

    block_t *b = pop_free_list(tl, b_scale);
        TRACE("m1", "nbd_malloc: returning block %p", b, 0);

    // The free list is empty so process blocks freed from other threads and then check again.
    process_incoming_blocks(tl);
    b = pop_free_list(tl, b_scale);
        TRACE("m1", "nbd_malloc: returning block %p", b, 0);
    // The current active page is completely allocated. Make the oldest partially allocated page
    // the new active page.
    size_class_t *sc = &tl->size_class[b_scale];
    if (sc->oldest_partial != NULL) {
        sc->active_page = sc->oldest_partial;
        sc->oldest_partial = sc->oldest_partial->next;
        if (sc->oldest_partial != NULL) { sc->oldest_partial->prev = NULL; } // list may now be empty
        b = pop_free_list(tl, b_scale);
            TRACE("m1", "nbd_malloc: returning block %p", b, 0);

    // There are no partially allocated pages so get a new page.

#endif//RECYCLE_PAGES
    char *page = get_new_region(b_scale);
    b = (block_t *)page; // grab the first block on the page

    // Break up the remainder of the page into blocks and put them on the free list. Start at the
    // end of the page so that the free list ends up in increasing order, for ease of debugging.
    if (b_scale < PAGE_SCALE) {
        size_t block_size = (1 << b_scale);
        block_t *head = NULL;
        for (int offset = PAGE_SIZE - block_size; offset > 0; offset -= block_size) {
            block_t *x = (block_t *)(page + offset);
            x->next = head; head = x;
        }
#ifndef RECYCLE_PAGES
        tl->free_list[b_scale] = head;
#else //RECYCLE_PAGES
        sc->active_page = get_header(page);
        sc->active_page->free_list = head;
#endif//RECYCLE_PAGES
    }
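    // For example, with b_scale 7 on a 2MB page: block_size is 128, so the loop above links the
    // 16383 blocks at offsets 128, 256, ..., PAGE_SIZE - 128 into a free list (built from the end
    // of the page down so the list is in increasing address order), and the block at offset 0 is
    // the one returned to the caller.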
    TRACE("m1", "nbd_malloc: returning block %p from new region %p", b, (size_t)b & ~MASK(PAGE_SCALE));
    return b;
}

#else//USE_SYSTEM_MALLOC
#include <stdlib.h> // malloc, free
#include <string.h> // memset
void mem_init (void) {
void nbd_free (void *x) {
    TRACE("m1", "nbd_free: %p", x, 0);
    memset(x, 0xcd, sizeof(void *)); // bear trap
void *nbd_malloc (size_t n) {
    TRACE("m1", "nbd_malloc: request size %llu", n, 0);
    void *x = malloc(n);
    TRACE("m1", "nbd_malloc: returning %p", x, 0);
    return x;
}

#endif//USE_SYSTEM_MALLOC
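
/* Usage sketch (illustrative): the allocator is initialized once with mem_init() and then used
 * through nbd_malloc()/nbd_free(). The calling thread's id (the tid_ value read through
 * LOCALIZE_THREAD_LOCAL) is assumed to be set up by the surrounding runtime before any
 * allocation or free is attempted.
 *
 *     mem_init();
 *     void *p = nbd_malloc(100); // rounded up to a 128-byte block
 *     nbd_free(p);
 */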