2 * Written by Josh Dybnis and released to the public domain, as explained at
3 * http://creativecommons.org/licenses/publicdomain
5 * Safe memory reclamation using a simple technique from RCU (read-copy-update)
7 * WARNING: not robust enough for real-world use
// Post an updated head index to the next thread only after this many new
// deferred frees have accumulated (see nbd_defer_free), to amortize the cost.
16 #define RCU_POST_THRESHOLD 10
// log2 of the per-thread pending queue capacity (2^20 slots).
17 #define RCU_QUEUE_SCALE 20
// Wrap an index into a power-of-two ring of size 2^b.
// NOTE(review): MASK() is defined elsewhere in the project — presumably
// ((1 << (b)) - 1); confirm against the header that declares it.
26 #define MOD_SCALE(x, b) ((x) & MASK(b))
// rcu_[i][j]: the head index for thread j's pending queue as last seen by
// thread i. Values circulate around the thread ring in rcu_update(); when a
// value makes it all the way back to its originator (rcu_[tid][tid]), every
// thread has passed the corresponding point and the entries are reclaimable.
27 static uint64_t rcu_[MAX_NUM_THREADS][MAX_NUM_THREADS] = {};
// rcu_last_posted_[i][j]: the last value of rcu_[i][j] that thread i forwarded
// to the next thread; used to skip redundant posts.
28 static uint64_t rcu_last_posted_[MAX_NUM_THREADS][MAX_NUM_THREADS] = {};
// Per-thread ring buffer of pointers waiting to be freed.
29 static fifo_t *pending_[MAX_NUM_THREADS] = {};
// Number of threads that have called rcu_thread_init(); bumped atomically.
30 static int num_threads_ = 0;
// Allocate a fifo with space for 2^scale pointer slots trailing the header.
// NOTE(review): only the header is zeroed — the slot array is deliberately
// left uninitialized. The remainder of this function (initializing q->scale
// and returning q, presumably) is elided from this excerpt.
32 static fifo_t *fifo_alloc(int scale) {
33 fifo_t *q = (fifo_t *)nbd_malloc(sizeof(fifo_t) + (1 << scale) * sizeof(void *));
34 memset(q, 0, sizeof(fifo_t));
// Register the calling thread with the RCU machinery. Idempotent per id:
// the pending queue is allocated (and the thread counted) only on the first
// call for a given id. The atomic increment guards num_threads_ against
// concurrent initializers; closing braces are elided from this excerpt.
41 void rcu_thread_init (int id) {
42 assert(id < MAX_NUM_THREADS);
43 if (pending_[id] == NULL) {
44 pending_[id] = fifo_alloc(RCU_QUEUE_SCALE);
45 SYNC_ADD(&num_threads_, 1);
// Advance this thread's view of the RCU ring: forward every peer counter that
// changed since our last post to the next thread in the ring, then reclaim
// our own pending entries whose indexes have completed a full circuit.
// NOTE(review): several lines are elided from this excerpt (the declaration
// of i, skipping i == tid_ in the loop, and the body that actually frees
// q->x[i] inside the while loop) — verify against the full source.
49 void rcu_update (void) {
50 LOCALIZE_THREAD_LOCAL(tid_, int);
51 assert(tid_ < num_threads_);
52 int next_thread_id = (tid_ + 1) % num_threads_;
54 for (i = 0; i < num_threads_; ++i) {
58 // No need to post an update if the value hasn't changed
59 if (rcu_[tid_][i] == rcu_last_posted_[tid_][i])
// Forward the counter and remember what we posted so the check above
// can suppress redundant posts next time around.
62 uint64_t x = rcu_[tid_][i];
63 rcu_[next_thread_id][i] = rcu_last_posted_[tid_][i] = x;
// rcu_[tid_][tid_] is our own head index after it has traveled the whole
// ring — everything up to it is safe to free.
67 while (pending_[tid_]->tail != rcu_[tid_][tid_]) {
68 fifo_t *q = pending_[tid_];
69 uint32_t i = MOD_SCALE(q->tail++, q->scale);
// Queue <x> on the calling thread's pending fifo for deferred reclamation.
// The pointer is freed later, by rcu_update(), once its index has circulated
// through every thread. Once enough entries accumulate past the last posted
// index, the new head is posted to the next thread to start that circuit.
// NOTE(review): elided lines in this excerpt include the store of x into the
// queue slot and (presumably) an early return under the threshold check at
// line 82 — confirm against the full source.
74 void nbd_defer_free (void *x) {
75 LOCALIZE_THREAD_LOCAL(tid_, int);
76 fifo_t *q = pending_[tid_];
// Assert the ring is not full: head may not catch up to tail.
77 assert(MOD_SCALE(q->head + 1, q->scale) != MOD_SCALE(q->tail, q->scale));
78 uint32_t i = MOD_SCALE(q->head++, q->scale);
80 TRACE("r0", "nbd_defer_free: put %p on queue at position %llu", x, pending_[tid_]->head);
// Amortize posting: only tell the next thread about our new head after
// RCU_POST_THRESHOLD entries have accumulated since the last post.
82 if (pending_[tid_]->head - rcu_last_posted_[tid_][tid_] < RCU_POST_THRESHOLD)
84 TRACE("r0", "nbd_defer_free: posting %llu", pending_[tid_]->head, 0);
85 int next_thread_id = (tid_ + 1) % num_threads_;
86 rcu_[next_thread_id][tid_] = rcu_last_posted_[tid_][tid_] = pending_[tid_]->head;