/*
 * Written by Josh Dybnis and released to the public domain, as explained at
 * http://creativecommons.org/licenses/publicdomain
 *
 * Extremely fast multi-threaded malloc.
 */
#ifndef USE_SYSTEM_MALLOC
#define _BSD_SOURCE // so we get MAP_ANON on linux
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>
// Project-local headers (not shown in this excerpt) are assumed to provide TRACE, ASSERT,
// EXPECT_FALSE, MASK, VOLATILE_DEREF, LOCALIZE_THREAD_LOCAL, MAX_NUM_THREADS, and CACHE_LINE_SIZE.

#define MAX_SCALE 31 // allocate blocks up to 2GB (arbitrary, could be bigger)

#ifndef NBD32
#define MIN_SCALE 3 // smallest allocated block is 8 bytes
#define MAX_POINTER_BITS 48
#define PAGE_SCALE 21 // 2MB pages
#else
#define MIN_SCALE 2 // smallest allocated block is 4 bytes
#define MAX_POINTER_BITS 32
#define PAGE_SCALE 12 // 4KB pages
#endif
#define PAGE_SIZE (1 << PAGE_SCALE)
#define HEADERS_SIZE (((size_t)1 << (MAX_POINTER_BITS - PAGE_SCALE)) * sizeof(header_t))
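// Rough sizing, for orientation only: with the 64-bit settings above (MAX_POINTER_BITS 48,
// PAGE_SCALE 21) there is one header for each of the 2^27 possible 2MB pages in the supported
// address range. Assuming a header_t of, say, 32 bytes in the RECYCLE_PAGES configuration:
//
//     HEADERS_SIZE = (1 << (48 - 21)) * 32 = 2^27 * 32 bytes = 4GB
//
// That is reserved virtual address space, not resident memory (see the comment in mem_init).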

typedef struct block {
    struct block *next;
} block_t;

// TODO: Break the page header into two parts. The first part is located in the header region. The
// second part is located on the page and is only used when there are free items.
typedef struct header {
#ifdef  RECYCLE_PAGES
    struct header *next;
    struct header *prev;
    block_t *free_list; // list of free blocks
    int num_in_use;
#endif//RECYCLE_PAGES
    uint8_t owner; // thread id of owner
    uint8_t scale; // log2 of the block size
} header_t;

#ifdef  RECYCLE_PAGES
typedef struct size_class {
    header_t *active_page;
    header_t *oldest_partial;
    header_t *newest_partial;
} size_class_t;
#endif//RECYCLE_PAGES

typedef struct tl {
#ifndef RECYCLE_PAGES
    block_t *free_list[MAX_SCALE+1];
#else //RECYCLE_PAGES
    header_t *free_pages;
    size_class_t size_class[MAX_SCALE+1];
#endif//RECYCLE_PAGES
    block_t *blocks_from[MAX_NUM_THREADS];
    block_t *blocks_to[MAX_NUM_THREADS];
} __attribute__((aligned(CACHE_LINE_SIZE))) tl_t;

static header_t *headers_ = NULL;

static tl_t tl_[MAX_NUM_THREADS] = {};

static inline header_t *get_header (void *r) {
    ASSERT(((size_t)r >> PAGE_SCALE) < (HEADERS_SIZE / sizeof(header_t))); // compare against the header count, not the byte size
    return headers_ + ((size_t)r >> PAGE_SCALE);
}
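
// Illustrative example (not part of the allocator): blocks smaller than a page are carved out of
// a single page-aligned region, so every address inside such a block maps to the same header:
//
//     void *p = nbd_malloc(64);                  // a 64-byte block on some 2MB page
//     header_t *h = get_header(p);               // headers_[(size_t)p >> PAGE_SCALE]
//     ASSERT(h == get_header((char *)p + 63));   // same page => same header entry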

static void *get_new_region (int block_scale) {
    LOCALIZE_THREAD_LOCAL(tid_, int);
#ifdef  RECYCLE_PAGES
    tl_t *tl = &tl_[tid_]; // thread-local data
    if (block_scale <= PAGE_SCALE && tl->free_pages != NULL) {
        // reuse a page this thread freed earlier
        void *region = tl->free_pages;
        tl->free_pages = tl->free_pages->next;
        get_header(region)->scale = block_scale;
        return region;
    }
#endif//RECYCLE_PAGES

    size_t region_size = ((size_t)1 << block_scale);
    if (region_size < PAGE_SIZE) {
        region_size = PAGE_SIZE;
    }
    void *region = mmap(NULL, region_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
    TRACE("m1", "get_new_region: mmapped new region %p (size %p)", region, region_size);
    if (region == (void *)-1) {
        perror("get_new_region: mmap");
        exit(-1);
    }
    if ((size_t)region & (region_size - 1)) {
        // the region isn't aligned to its own size; map twice the size and trim off the excess
        TRACE("m0", "get_new_region: region not aligned", 0, 0);
        munmap(region, region_size);
        region = mmap(NULL, region_size * 2, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
        if (region == (void *)-1) {
            perror("get_new_region: mmap");
            exit(-1);
        }
        TRACE("m0", "get_new_region: mmapped new region %p (size %p)", region, region_size * 2);
        void *aligned = (void *)(((size_t)region + region_size) & ~(region_size - 1));
        size_t extra = (char *)aligned - (char *)region;
        if (extra) {
            munmap(region, extra);
            TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", region, extra);
        }
        extra = ((char *)region + region_size) - (char *)aligned;
        if (extra) {
            munmap((char *)aligned + region_size, extra);
            TRACE("m0", "get_new_region: unmapped extra memory %p (size %p)", (char *)aligned + region_size, extra);
        }
        region = aligned;
    }

    header_t *h = get_header(region);
    TRACE("m1", "get_new_region: header %p (%p)", h, h - headers_);
    assert(h->scale == 0);
    h->scale = block_scale;
    h->owner = tid_;

    return region;
}

void mem_init (void) {
    assert(headers_ == NULL);
    // Allocate space for the page headers. This could be a big chunk of memory on 64 bit systems,
    // but it just takes up virtual address space. Physical space used by the headers is still
    // proportional to the amount of memory the user mallocs.
    headers_ = (header_t *)malloc(HEADERS_SIZE);
    TRACE("m1", "mem_init: header page %p", headers_, 0);
    memset(headers_, 0, HEADERS_SIZE);

    // initialize spsc queues
    for (int i = 0; i < MAX_NUM_THREADS; ++i) {
        for (int j = 0; j < MAX_NUM_THREADS; ++j) {
            if (i != j) {
                tl_[i].blocks_to[j] = (block_t *)&(tl_[j].blocks_from[i]);
            }
        }
    }
}
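
// A note on the queue setup above (explanatory, based on how nbd_free and process_incoming_blocks
// use these fields): tl_[i].blocks_to[j] starts out pointing at the slot tl_[j].blocks_from[i], so
// the first block thread i frees back to thread j is written straight into j's incoming-queue head.
// After that, blocks_to[j] always tracks the tail of the singly linked queue on the producer side,
// and blocks_from[i] tracks the head on the consumer side. The result is one single-producer
// single-consumer queue per ordered pair of threads, with no locks or atomic read-modify-write.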

void nbd_free (void *x) {
    TRACE("m1", "nbd_free: block %p page %p", x, (size_t)x & ~MASK(PAGE_SCALE));
    LOCALIZE_THREAD_LOCAL(tid_, int);
    block_t  *b = (block_t *)x;
    header_t *h = get_header(x);
    int b_scale = h->scale;
    TRACE("m1", "nbd_free: header %p scale %llu", h, b_scale);
    ASSERT(b_scale && b_scale <= MAX_SCALE);

    // blocks bigger than a page were mmapped directly; give them straight back to the OS
    if (b_scale > PAGE_SCALE) {
        int rc = munmap(x, (size_t)1 << b_scale);
        ASSERT(rc == 0);
        (void)rc; // silence unused-variable warnings when ASSERT compiles away
        return;
    }

#ifndef NDEBUG
    memset(b, 0xcd, (size_t)1 << b_scale); // bear trap
#endif//NDEBUG
    tl_t *tl = &tl_[tid_]; // thread-local data
    if (h->owner == tid_) {
        // the current thread owns this block's page; free it locally
#ifndef RECYCLE_PAGES
        TRACE("m1", "nbd_free: private block, old free list head %p", tl->free_list[b_scale], 0);
        b->next = tl->free_list[b_scale];
        tl->free_list[b_scale] = b;
#else //RECYCLE_PAGES
        TRACE("m1", "nbd_free: private block, old free list head %p", h->free_list, 0);
        b->next = h->free_list;
        h->free_list = b;
        h->num_in_use--;
        size_class_t *sc = &tl->size_class[b_scale];
        if (sc->active_page != h) {
            if (h->num_in_use == 0) {
                // remove <h> from the partial-page list
                if (h->next != NULL) { h->next->prev = h->prev; }
                if (h->prev != NULL) { h->prev->next = h->next; }
                // put <h> on the free-page list
                h->next = tl->free_pages;
                tl->free_pages = h;
            } else {
                // move <h> to the top of the partial-page list
                if (h->next != NULL) {
                    h->next->prev = h->prev;
                    if (h->prev != NULL) { h->prev->next = h->next; }
                    h->prev = sc->newest_partial;
                    h->next = NULL;
                    sc->newest_partial = h;
                }
            }
        }
#endif//RECYCLE_PAGES
    } else {
        // push <b> onto its owner's queue
        int b_owner = h->owner;
        TRACE("m1", "nbd_free: owner %llu", b_owner, 0);

        // The assignment statements are volatile to prevent the compiler from reordering them.
        VOLATILE_DEREF(b).next = NULL;
        VOLATILE_DEREF(tl->blocks_to[b_owner]).next = b;

        tl->blocks_to[b_owner] = b;
    }
}
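
// Illustrative sequence (not part of the allocator): if thread 2 frees a block owned by thread 5,
// the two volatile stores above append the block to the (2 -> 5) queue; "old_tail" here is just a
// name for the value of tl_[2].blocks_to[5] before the update:
//
//     b->next        = NULL;   // terminate the list at the new tail
//     old_tail->next = b;      // publish <b>; the consumer can now reach it
//     tl_[2].blocks_to[5] = b; // remember the new tail (producer-private state)
//
// Thread 5 later drains this queue in process_incoming_blocks() via tl_[5].blocks_from[2].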

static inline void process_incoming_blocks (tl_t *tl) {
    for (int p = 0; p < MAX_NUM_THREADS; ++p) {
        block_t *b = tl->blocks_from[p];
        if (EXPECT_FALSE(b == NULL)) continue; // the queue is completely empty

        // Leave the last block on the queue. Removing the last block from the queue would create a
        // race with the producer thread putting a new block on the queue.
        for (block_t *next = b->next; next != NULL; b = next, next = b->next) {
            // push <b> onto the appropriate free list
#ifndef RECYCLE_PAGES
            int b_scale = get_header(b)->scale;
            b->next = tl->free_list[b_scale];
            tl->free_list[b_scale] = b;
#else //RECYCLE_PAGES
            header_t *h = get_header(b);
            b->next = h->free_list;
            h->free_list = b;
#endif//RECYCLE_PAGES
        }
        tl->blocks_from[p] = b;
    }
}
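
// Note (explanatory): the loop above intentionally stops one block short, so each incoming queue
// always retains its current tail. That tail is not lost; once the producer appends another block
// behind it, it is no longer the tail and gets recycled on a later call to this function.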

static inline block_t *pop_free_list (tl_t *tl, int scale) {
#ifndef RECYCLE_PAGES
    block_t **free_list = &tl->free_list[scale];
#else //RECYCLE_PAGES
    size_class_t *sc = &tl->size_class[scale];
    if (EXPECT_FALSE(sc->active_page == NULL))
        return NULL;
    block_t **free_list = &sc->active_page->free_list;
#endif//RECYCLE_PAGES
    block_t *b = *free_list;
    if (EXPECT_FALSE(b == NULL))
        return NULL;
    ASSERT(get_header(b)->scale == scale);
    *free_list = b->next;
    return b;
}

// Allocate a block of memory at least <n> bytes long. Blocks are binned in powers of two. Round
// up <n> to the nearest power of two.
//
// First check the current thread's free list for an available block. If there are no blocks on the
// free list, pull items off of the current thread's incoming block queues and push them onto the
// free list. If we didn't get an appropriately sized block off of the block queues, then allocate
// a new page, break it up into blocks, and push them onto the free list.
void *nbd_malloc (size_t n) {
    if (EXPECT_FALSE(n < 2)) { n = 2; } // __builtin_clzl(0) is undefined, so clamp tiny requests
    // the scale is the log base 2 of <n>, rounded up
    int b_scale = (sizeof(void *) * __CHAR_BIT__) - __builtin_clzl((n) - 1);
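    // Worked example (assuming 64-bit pointers, so sizeof(void *) * __CHAR_BIT__ == 64):
    // n = 100  ->  __builtin_clzl(99) == 57  ->  b_scale = 64 - 57 = 7, i.e. a 128-byte block.
    // When <n> is already a power of two (e.g. 128), clzl(127) == 57 and b_scale is still 7,
    // so exact powers of two are not bumped into the next size class.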
    TRACE("m1", "nbd_malloc: size %llu (scale %llu)", n, b_scale);

    if (EXPECT_FALSE(b_scale < MIN_SCALE)) { b_scale = MIN_SCALE; }
    if (EXPECT_FALSE(b_scale > MAX_SCALE)) { return NULL; }

    LOCALIZE_THREAD_LOCAL(tid_, int);
    tl_t *tl = &tl_[tid_]; // thread-local data

    block_t *b = pop_free_list(tl, b_scale);
    if (b != NULL) {
        TRACE("m1", "nbd_malloc: returning block %p", b, 0);
        return b;
    }

    // The free list is empty so process blocks freed from other threads and then check again.
    process_incoming_blocks(tl);
    b = pop_free_list(tl, b_scale);
    if (b != NULL) {
        TRACE("m1", "nbd_malloc: returning block %p", b, 0);
        return b;
    }

#ifdef  RECYCLE_PAGES
    // The current active page is completely allocated. Make the oldest partially allocated page
    // the new active page.
    size_class_t *sc = &tl->size_class[b_scale];
    if (sc->oldest_partial != NULL) {
        sc->active_page = sc->oldest_partial;
        sc->oldest_partial = sc->oldest_partial->next;
        if (sc->oldest_partial != NULL) { sc->oldest_partial->prev = NULL; }
        b = pop_free_list(tl, b_scale);
        ASSERT(b != NULL);
        TRACE("m1", "nbd_malloc: returning block %p", b, 0);
        return b;
    }
    // There are no partially allocated pages so get a new page.
#endif//RECYCLE_PAGES

    // Get a new page.
    char *page = get_new_region(b_scale);
    b = (block_t *)page; // grab the first block on the page

    // Break up the remainder of the page into blocks and put them on the free list. Start at the
    // end of the page so that the free list ends up in increasing order, for ease of debugging.
    if (b_scale < PAGE_SCALE) {
        size_t block_size = (1 << b_scale);
        block_t *head = NULL;
        for (int offset = PAGE_SIZE - block_size; offset > 0; offset -= block_size) {
            block_t *x = (block_t *)(page + offset);
            x->next = head; head = x;
        }
#ifndef RECYCLE_PAGES
        tl->free_list[b_scale] = head;
#else //RECYCLE_PAGES
        sc->active_page = get_header(page);
        sc->active_page->free_list = head;
#endif//RECYCLE_PAGES
    }

    TRACE("m1", "nbd_malloc: returning block %p from new region %p", b, (size_t)b & ~MASK(PAGE_SCALE));
    return b;
}
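
// Example usage (illustrative only; assumes the runtime has already called mem_init() and
// assigned the calling thread the thread-local id this file relies on):
//
//     void *p = nbd_malloc(100);   // rounded up to a 128-byte block (scale 7)
//     /* ... use p ... */
//     nbd_free(p);                 // freed locally, or queued back to the owning thread
//
// Requests larger than 1 << MAX_SCALE bytes return NULL rather than falling back to the
// system allocator.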

#else //USE_SYSTEM_MALLOC

#include <stdlib.h>
#include <string.h>

void mem_init (void) {
    return;
}

void nbd_free (void *x) {
    TRACE("m1", "nbd_free: %p", x, 0);
#ifndef NDEBUG
    memset(x, 0xcd, sizeof(void *)); // bear trap
#endif//NDEBUG
    free(x);
    return;
}

void *nbd_malloc (size_t n) {
    TRACE("m1", "nbd_malloc: request size %llu", n, 0);
    void *x = malloc(n);
    TRACE("m1", "nbd_malloc: returning %p", x, 0);
    return x;
}

#endif//USE_SYSTEM_MALLOC