From 053576b00e7d37f19ce99f033e9981761d647c1f Mon Sep 17 00:00:00 2001
From: jdybnis
Date: Thu, 13 Nov 2008 06:10:29 +0000
Subject: [PATCH] fix potential blocking behavior in ht.c

streamline mem.c
add warning about robustness to comments in rcu.c
---
 runtime/mem.c | 128 +++++++++++++++++++++++++-------------------------
 runtime/rcu.c |   4 +-
 struct/ht.c   |  45 +++++++++++-------
 3 files changed, 95 insertions(+), 82 deletions(-)

diff --git a/runtime/mem.c b/runtime/mem.c
index f6fd318..e46eb7e 100644
--- a/runtime/mem.c
+++ b/runtime/mem.c
@@ -27,10 +27,16 @@ typedef struct header {
     uint8_t scale; // log2 of the block size
 } header_t;
 
+typedef struct private_list {
+    block_t *head;
+    uint32_t next_pub;
+    uint32_t count;
+} private_list_t;
+
 static header_t *region_header_ = NULL;
 
-// TODO: experiment with different memory layouts (i.e. separate private and public lists)
-static block_t free_list_[MAX_NUM_THREADS][MAX_SCALE+1][MAX_NUM_THREADS];
+static block_t *pub_free_list_[MAX_NUM_THREADS][MAX_SCALE+1][MAX_NUM_THREADS] = {};
+static private_list_t pri_free_list_[MAX_NUM_THREADS][MAX_SCALE+1] = {};
 
 static void *get_new_region (int scale) {
     if (scale < REGION_SCALE) {
@@ -60,20 +66,27 @@ void nbd_free (void *x) {
     block_t *b = (block_t *)x;
     assert(((size_t)b >> REGION_SCALE) < ((1 << HEADER_REGION_SCALE) / sizeof(header_t)));
     header_t *h = region_header_ + ((size_t)b >> REGION_SCALE);
-    TRACE("m0", "nbd_free(): block %p scale %llu", x, h->scale);
-    block_t *l = &free_list_[h->owner][h->scale][tid_];
-    TRACE("m0", "nbd_free(): free list %p first block %p", l, l->next);
-    b->next = l->next;
-    l->next = b;
+    TRACE("m0", "nbd_free(): block %p scale %llu", b, h->scale);
+    if (h->owner == tid_) {
+        TRACE("m0", "nbd_free(): private block, free list head %p",
+              pri_free_list_[tid_][h->scale].head, 0);
+        b->next = pri_free_list_[tid_][h->scale].head;
+        pri_free_list_[tid_][h->scale].head = b;
+    } else {
+        TRACE("m0", "nbd_free(): owner %llu free list head %p",
+              h->owner, pub_free_list_[h->owner][h->scale][tid_]);
+        b->next = pub_free_list_[h->owner][h->scale][tid_];
+        pub_free_list_[h->owner][h->scale][tid_] = b;
+    }
 }
 
 // Allocate a block of memory at least size <n>. Blocks are binned in powers-of-two. Round up <n>
 // to the nearest power-of-two.
 //
 // First check the current thread's private free list for an available block. If no blocks are on
-// the private free list, pull all the available blocks off of the current thread's public free
-// lists and put them on the private free list. If we didn't find any blocks on the public free
-// lists, open a new region, break it up into blocks and put them on the private free list.
+// the private free list, pull blocks off of the current thread's public free lists and put them
+// on the private free list. If we didn't find any blocks on the public free lists, allocate a new
+// region, break it up into blocks and put them on the private free list.
 void *nbd_malloc (size_t n) {
     LOCALIZE_THREAD_LOCAL(tid_, int);
     if (n < sizeof(block_t)) {
@@ -82,70 +95,57 @@ void *nbd_malloc (size_t n) {
     int b_scale = GET_SCALE(n);
     assert(b_scale <= MAX_SCALE);
     TRACE("m0", "nbd_malloc(): size %llu scale %llu", n, b_scale);
-    block_t *fls = free_list_[tid_][b_scale]; // our free lists
-    block_t *pri = fls + tid_; // our private free list
-    TRACE("m0", "nbd_malloc(): private free list %p first block %p", pri, pri->next);
-
-    // If our private free list is empty, fill it up with blocks from our public free lists
-    if (EXPECT_FALSE(pri->next == NULL)) {
-        int cnt = 0;
-        block_t *last = pri;
-        for (int i = 0; i < MAX_NUM_THREADS; ++i) {
-            TRACE("m0", "nbd_malloc(): searching public free lists (%llu)", i, 0);
-            block_t *pub = fls + i; // one of our public free lists
-            TRACE("m0", "nbd_malloc(): public free list %p first block %p", pub, pub->next);
-            if (EXPECT_FALSE(pub == pri))
-                continue;
-
-            if (pub->next != NULL) {
-                block_t *stolen = SYNC_SWAP(&pub->next, NULL);
-                TRACE("m0", "nbd_malloc(): stole list %p first block %p", pub, stolen);
-                if (stolen) {
-                    last->next = stolen;
-                    TRACE("m0", "nbd_malloc(): append to last block %p of private free list", last, 0);
-                    while (last->next) {
-                        ++cnt;
-                        TRACE("m0", "nbd_malloc(): find last block in list: last %p last->next %p",
-                              last, last->next);
-                        last = last->next;
+    private_list_t *pri = &pri_free_list_[tid_][b_scale]; // our private free list
+    TRACE("m0", "nbd_malloc(): private free list %p first block %p", pri, pri->head);
+
+    // If our private free list is empty, try to find blocks on our public free lists. If that fails,
+    // allocate a new region.
+    if (EXPECT_FALSE(pri->head == NULL)) {
+        block_t **pubs = pub_free_list_[tid_][b_scale]; // our public free lists
+        while (1) {
+            // look for blocks on our public free lists round robin
+            pri->next_pub = (pri->next_pub+1) & (MAX_NUM_THREADS-1);
+
+            TRACE("m0", "nbd_malloc(): searching public free list %llu", pri->next_pub, 0);
+            if (pri->next_pub == tid_) {
+                uint32_t count = pri->count;
+                pri->count = 0;
+                // If our private list is empty and we haven't gotten at least half a region's worth
+                // of blocks from our public lists, we break open a new region. This guarantees
+                // that we are amortizing the cost of accessing our public lists across enough
+                // nbd_malloc() calls.
+                uint32_t min_count = b_scale > REGION_SCALE ? 1 << (b_scale-REGION_SCALE-1) : 1;
+                if (count < min_count) {
+                    char *region = get_new_region(b_scale);
+                    size_t b_size = 1 << b_scale;
+                    size_t region_size = (b_size < REGION_SIZE) ? REGION_SIZE : b_size;
+                    for (int i = region_size; i != 0; i -= b_size) {
+                        block_t *b = (block_t *)(region + i - b_size);
+                        b->next = pri->head;
+                        pri->head = b;
                     }
+                    break;
                 }
+                continue;
             }
-        }
-        TRACE("m0", "nbd_malloc(): moved %llu blocks from public to private free lists", cnt, 0);
-
-        if (b_scale >= REGION_SCALE) {
-            if (cnt == 0) {
-                assert(pri->next == NULL);
-                pri->next = (block_t *)get_new_region(b_scale);
-                assert(pri->next->next == NULL);
-            }
-            assert(pri->next);
-        } else if (cnt < (1 << (REGION_SCALE - b_scale - 1))) {
-
-            // Even if we took a few blocks from our public lists we still break open a new region.
-            // This guarentees that we are amortizing the cost of accessing our public lists accross
-            // many nbd_malloc() calls.
-            char *region = get_new_region(b_scale);
-            size_t b_size = 1 << b_scale;
-            for (int i = REGION_SIZE; i != 0; i -= b_size) {
-                block_t *b = (block_t *)(region + i - b_size);
-                b->next = pri->next;
-                //TRACE("m1", "nbd_malloc(): put new block %p ahead of %p on private list", b, b->next);
-                pri->next = b;
-                *b = *b;
+            if (pubs[pri->next_pub] != NULL) {
+                block_t *stolen = SYNC_SWAP(&pubs[pri->next_pub], NULL);
+                TRACE("m0", "nbd_malloc(): stole list %p first block %p", &pubs[pri->next_pub], stolen);
+                if (stolen == NULL)
+                    continue;
+                pri->head = stolen;
+                break;
             }
         }
-
-        assert(pri->next);
+        assert(pri->head);
     }
 
     // Pull a block off of our private free list.
-    block_t *b = pri->next;
-    TRACE("m0", "nbd_malloc(): take block %p off of of private list (new head is %p)", b, pri->next);
-    pri->next = b->next;
-    assert(b);
+    block_t *b = pri->head;
+    TRACE("m0", "nbd_malloc(): take block %p off of private list (new head is %p)", b, b->next);
+    pri->head = b->next;
+    pri->count++;
    return b;
 }
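The mem.c change above splits the old free_list_ matrix into two structures: pri_free_list_[tid][scale] is touched only by its owning thread, while pub_free_list_[owner][scale][tid] gives every other thread its own slot to push remote frees into, which the owner later drains with a single SYNC_SWAP. What follows is a minimal sketch of that layout, not the patch's code: it drops the size-class dimension, uses C11 atomics in place of SYNC_SWAP/LOCALIZE_THREAD_LOCAL, pushes remote frees with a CAS loop where the patch relies on each public slot having a single writer, and all names (sketch_free, sketch_malloc, MAX_THREADS) are illustrative.

    /* Sketch: per-thread free lists with cheap cross-thread frees (illustrative only). */
    #include <stdatomic.h>
    #include <stdlib.h>

    #define MAX_THREADS 4

    typedef struct node { struct node *next; } node_t;

    /* pub[owner][thief]: blocks owned by <owner> that thread <thief> freed. */
    static _Atomic(node_t *) pub[MAX_THREADS][MAX_THREADS];
    static node_t *pri[MAX_THREADS];      /* private list, used only by its owner */

    static void sketch_free (int owner, int self, node_t *b) {
        if (owner == self) {              /* private free: no shared state touched */
            b->next = pri[self];
            pri[self] = b;
            return;
        }
        node_t *head;                     /* remote free: push onto our slot in the owner's lists */
        do {
            head = atomic_load(&pub[owner][self]);
            b->next = head;
        } while (!atomic_compare_exchange_weak(&pub[owner][self], &head, b));
    }

    static node_t *sketch_malloc (int self) {
        if (pri[self] == NULL) {
            for (int thief = 0; thief < MAX_THREADS && pri[self] == NULL; ++thief) {
                /* steal a whole public list with one atomic swap, like SYNC_SWAP(..., NULL) */
                pri[self] = atomic_exchange(&pub[self][thief], NULL);
            }
            if (pri[self] == NULL) {
                return malloc(sizeof(node_t));   /* stand-in for carving up a new region */
            }
        }
        node_t *b = pri[self];
        pri[self] = b->next;
        return b;
    }

Taking an entire public list in one swap keeps the owner's slow path cheap: cross-thread synchronization is paid once per drained list rather than once per block.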
diff --git a/runtime/rcu.c b/runtime/rcu.c
index c2e465e..f5e3c3c 100644
--- a/runtime/rcu.c
+++ b/runtime/rcu.c
@@ -2,7 +2,9 @@
  * Written by Josh Dybnis and released to the public domain, as explained at
  * http://creativecommons.org/licenses/publicdomain
  *
- * safe memory reclemation using a simple technique from rcu
+ * safe memory reclamation using a simple technique from rcu
+ *
+ * WARNING: not robust enough for real-world use
  */
 #include 
 #include "common.h"
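The rcu.c hunk only adds a warning, but for orientation: the header's "simple technique from rcu" is about deferring frees until other threads can no longer hold references. The sketch below shows one common minimal form of that idea (quiescent-state-based reclamation); it is not the code in runtime/rcu.c, and the thread count, epoch counters, and function names are all assumptions. It also shows one way such a scheme can fall short of real-world robustness: in this sketch, if any thread stops announcing quiescent states, nothing is ever reclaimed.

    /* Sketch: quiescent-state-based reclamation (illustrative, not runtime/rcu.c). */
    #include <stdatomic.h>
    #include <stdlib.h>

    #define N_THREADS 4

    static _Atomic unsigned long epoch = 1;        /* bumped every time something is retired */
    static _Atomic unsigned long seen[N_THREADS];  /* last epoch each thread observed while quiescent */

    typedef struct { void *ptr; unsigned long retired_at; } retired_t;

    /* Called by each thread when it holds no references (outside any operation). */
    static void quiescent (int self) {
        atomic_store(&seen[self], atomic_load(&epoch));
    }

    /* Instead of freeing immediately, record the object and the epoch it was retired in. */
    static void retire (retired_t *r, void *ptr) {
        r->ptr = ptr;
        r->retired_at = atomic_fetch_add(&epoch, 1);
    }

    /* Safe to free once every thread has been quiescent after the retirement. */
    static int try_reclaim (retired_t *r) {
        for (int i = 0; i < N_THREADS; ++i) {
            if (atomic_load(&seen[i]) <= r->retired_at) {
                return 0;                          /* thread i might still hold a reference */
            }
        }
        free(r->ptr);
        r->ptr = NULL;
        return 1;
    }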
diff --git a/struct/ht.c b/struct/ht.c
index e3debc4..8703faa 100644
--- a/struct/ht.c
+++ b/struct/ht.c
@@ -426,35 +426,46 @@ uint64_t ht_compare_and_set (hash_table_t *ht, const char *key_val, uint32_t key
 
     // Help with an ongoing copy.
     if (EXPECT_FALSE(hti->next != NULL)) {
-
-        // Reserve some entries for this thread to copy. There is a race condition here because the
-        // fetch and add isn't atomic, but that is ok.
+        volatile entry_t *e;
+        uint64_t limit;
+        int num_copied = 0;
         int x = hti->scan;
-        hti->scan = x + ENTRIES_PER_COPY_CHUNK;
 
-        // <scan> might be larger than the size of the table, if some thread stalls while
-        // copying. In that case we just wrap around to the begining and make another pass through
-        // the table.
-        volatile entry_t *e = hti->table + (x & MASK(hti->scale));
+        // Panic if we've been around the array twice and still haven't finished the copy.
+        int panic = (x >= (1 << (hti->scale + 1)));
+        if (!panic) {
+            limit = ENTRIES_PER_COPY_CHUNK;
+
+            // Reserve some entries for this thread to copy. There is a race condition here because the
+            // fetch and add isn't atomic, but that is ok.
+            hti->scan = x + ENTRIES_PER_COPY_CHUNK;
+
+            // <scan> might be larger than the size of the table, if some thread stalls while
+            // copying. In that case we just wrap around to the beginning and make another pass through
+            // the table.
+            e = hti->table + (x & MASK(hti->scale));
+        } else {
+            // scan the whole table
+            limit = (1 << hti->scale);
+            e = hti->table;
+        }
 
         // Copy the entries
-        int num_copied = 0, i;
-        for (i = 0; i < ENTRIES_PER_COPY_CHUNK; ++i) {
+        for (int i = 0; i < limit; ++i) {
             num_copied += hti_copy_entry(hti, e++, 0, hti->next);
             assert(e <= hti->table + (1 << hti->scale));
         }
         if (num_copied != 0) {
             SYNC_ADD(&hti->num_entries_copied, num_copied);
         }
-    }
 
-    // Dispose of fully copied tables.
-    while (hti->num_entries_copied == (1 << hti->scale)) {
-        assert(hti->next);
-        if (SYNC_CAS(ht, hti, hti->next) == hti) {
-            nbd_defer_free(hti);
+        // Dispose of fully copied tables.
+        if (hti->num_entries_copied == (1 << hti->scale) || panic) {
+            assert(hti->next);
+            if (SYNC_CAS(ht, hti, hti->next) == hti) {
+                nbd_defer_free(hti);
+            }
         }
-        hti = *ht;
     }
 
     uint64_t old_val;
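The ht.c hunk is the blocking fix named in the subject line. Before, finishing a table copy depended on every thread that reserved a chunk of entries through hti->scan actually copying it, so one stalled helper could leave hti->next set indefinitely; now a helper that sees the scan cursor has already swept the table twice goes into panic mode, copies the whole table itself, and the disposal check accepts either a full count or panic. The sketch below condenses that pattern; it is illustrative only, with a made-up table_t/copy_one() rather than the hti_* API, and it uses an atomic fetch-and-add where the patch deliberately tolerates a racy read-modify-write of hti->scan.

    /* Sketch: cooperative chunked copy with a panic fallback (illustrative only). */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define CHUNK 16

    typedef struct { _Atomic bool moved; } slot_t;

    typedef struct {
        slot_t *slots;
        uint32_t size;                 /* number of slots, a power of two */
        _Atomic uint32_t scan;         /* shared cursor of reserved slots */
        _Atomic uint32_t copied;       /* slots known to be migrated */
    } table_t;

    /* Stand-in for hti_copy_entry(): returns true for the thread that migrates the slot. */
    static bool copy_one (table_t *t, uint32_t i) {
        return !atomic_exchange(&t->slots[i].moved, true);
    }

    /* Returns true when the caller can treat the copy as finished. */
    static bool help_copy (table_t *t) {
        uint32_t x = atomic_fetch_add(&t->scan, CHUNK);
        bool panic = (x >= 2 * t->size);   /* been around twice: stop waiting on other helpers */
        uint32_t start = panic ? 0 : (x & (t->size - 1));
        uint32_t limit = panic ? t->size : CHUNK;

        uint32_t done = 0;
        for (uint32_t i = 0; i < limit; ++i) {
            done += copy_one(t, (start + i) & (t->size - 1));
        }
        if (done != 0) {
            atomic_fetch_add(&t->copied, done);
        }
        return panic || atomic_load(&t->copied) == t->size;
    }

Panic mode trades duplicated scanning for progress: the worst case is extra passes over already-moved slots, never waiting on another thread.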
-- 
2.40.0
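Once a helper decides the copy is finished (the "|| panic" condition in the ht.c hunk above), the old table is unpublished with one compare-and-swap on the shared table pointer and handed to nbd_defer_free() so late readers can drain before it is reclaimed. A small sketch of that retire step follows, again illustrative: the C11 atomic pointer and defer_free() stand in for the library's SYNC_CAS and nbd_defer_free().

    /* Sketch: retire the old table after its copy completes (illustrative only). */
    #include <stdatomic.h>

    typedef struct table table_t;
    struct table {
        table_t *next;                 /* successor table the entries were copied into */
        /* ... entries, counters ... */
    };

    /* Stand-in for nbd_defer_free(): a real version waits out a grace period before free(). */
    static void defer_free (table_t *old) {
        (void)old;
    }

    static void retire_old_table (_Atomic(table_t *) *ht, table_t *old) {
        table_t *expected = old;
        /* Swing the published pointer to the successor; only the CAS winner defers the free,
         * so the old table is reclaimed exactly once and only after it is unreachable via *ht. */
        if (atomic_compare_exchange_strong(ht, &expected, old->next)) {
            defer_free(old);
        }
        /* Losing the race is fine: another helper already retired this table. */
    }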