2 * Written by Josh Dybnis and released to the public domain, as explained at
3 * http://creativecommons.org/licenses/publicdomain
5 * safe memory reclamation using a simple technique from RCU
// Throttle: a thread only posts its clock value onward after it has advanced
// by at least this much since the last post (see rcu_post below).
15 #define RCU_POST_THRESHOLD 10
// log2 of each thread's pending-free queue capacity (1 << 20 slots).
16 #define RCU_QUEUE_SCALE 20
// rcu_[i][j]: most recent clock value of thread j as seen by thread i.
// Values are forwarded one hop at a time around a ring of threads.
25 static uint64_t rcu_[MAX_NUM_THREADS][MAX_NUM_THREADS] = {};
// rcu_last_posted_[i][j]: the last value thread i forwarded for thread j;
// used to suppress redundant posts when nothing has changed.
26 static uint64_t rcu_last_posted_[MAX_NUM_THREADS][MAX_NUM_THREADS] = {};
// Per-thread FIFO of pointers awaiting a grace period before being freed.
27 static fifo_t *pending_[MAX_NUM_THREADS] = {};
// Count of threads that have registered via rcu_thread_init().
28 static int num_threads_ = 0;
// Allocate a FIFO ring with (1 << scale) pointer slots.  Only the header
// struct is zeroed; the slot array is left uninitialized (presumably slots
// are always written before being read).  NOTE(review): the result of
// nbd_malloc is used without a NULL check -- assumes it aborts on OOM.
30 static fifo_t *fifo_alloc(int scale) {
31     fifo_t *q = (fifo_t *)nbd_malloc(sizeof(fifo_t) + (1 << scale) * sizeof(void *));
32     memset(q, 0, sizeof(fifo_t));
// Map a free-running 32-bit index onto a ring slot by masking with the
// queue size (capacity is a power of two, so this is i mod capacity).
39 static uint32_t fifo_index (fifo_t *q, uint32_t i) {
40     return i & MASK(q->scale);
// Append x at the head of the ring.  No locking -- the queue is only ever
// touched by its owning thread.  The assert fires if the ring is full
// (head would collide with tail after the increment).
43 static void fifo_enqueue (fifo_t *q, void *x) {
44     assert(fifo_index(q, q->head + 1) != fifo_index(q, q->tail));
45     uint32_t i = fifo_index(q, q->head++);
// Remove and return the oldest element (at tail).  Caller is responsible
// for ensuring the queue is non-empty; no emptiness check is visible here.
49 static void *fifo_dequeue (fifo_t *q) {
50     uint32_t i = fifo_index(q, q->tail++);
// Register the calling thread with the RCU subsystem: lazily allocate its
// pending-free queue and count it into the update ring.  id must be a
// unique integer below MAX_NUM_THREADS; calling twice with the same id is
// a no-op (queue already allocated).
54 void rcu_thread_init (int id) {
55     assert(id < MAX_NUM_THREADS);
56     if (pending_[id] == NULL) {
57         pending_[id] = fifo_alloc(RCU_QUEUE_SCALE);
58         SYNC_ADD(&num_threads_, 1);
// Publish this thread's clock value x to the next thread in the ring.
// Throttled: nothing is posted until x has advanced by at least
// RCU_POST_THRESHOLD past the value we last posted for ourselves.
62 static void rcu_post (uint64_t x) {
63     LOCALIZE_THREAD_LOCAL(tid_, int);
64     if (x - rcu_last_posted_[tid_][tid_] < RCU_POST_THRESHOLD)
67     int next_thread_id = (tid_ + 1) % num_threads_;
69     TRACE("r0", "rcu_post: %llu", x, 0);
    // Record x both as the value handed to our successor and as the last
    // value we posted, so the threshold test above stays accurate.
70     rcu_[next_thread_id][tid_] = rcu_last_posted_[tid_][tid_] = x;
// Announce a quiescent state: forward every peer's clock value one hop
// around the ring, then reclaim any deferred pointers whose clock value
// has completed a full round trip (meaning every registered thread has
// passed through a quiescent point since they were enqueued).
73 void rcu_update (void) {
74     LOCALIZE_THREAD_LOCAL(tid_, int);
75     assert(tid_ < num_threads_);
76     int next_thread_id = (tid_ + 1) % num_threads_;
78     for (i = 0; i < num_threads_; ++i) {
82         // No need to post an update if the value hasn't changed
83         if (rcu_[tid_][i] == rcu_last_posted_[tid_][i])
86         uint64_t x = rcu_[tid_][i];
87         rcu_[next_thread_id][i] = rcu_last_posted_[tid_][i] = x;
    // rcu_[tid_][tid_] is our own value after traveling the whole ring:
    // everything enqueued at or before that queue position is safe to free.
91     while (pending_[tid_]->tail != rcu_[tid_][tid_]) {
92         nbd_free(fifo_dequeue(pending_[tid_]));
// Defer freeing x until all threads have passed through a quiescent state.
// x goes on the caller's pending queue, and the queue's head index serves
// as this thread's monotonically increasing RCU clock value.
96 void nbd_defer_free (void *x) {
97     LOCALIZE_THREAD_LOCAL(tid_, int);
98     fifo_enqueue(pending_[tid_], x);
99     TRACE("r0", "nbd_defer_free: put %p on queue at position %llu", x, pending_[tid_]->head);
100     rcu_post(pending_[tid_]->head);
// ---------------------------------------------------------------------------
// Stress test: hammer an ABA-prone lock-free stack, relying on
// nbd_defer_free/rcu_update above for safe node reclamation.
108 #define NUM_ITERATIONS 10000000
// Singly linked stack node -- full member list not visible in this view.
110 typedef struct node {
// Lock-free stack with a CAS-updated head pointer.
114 typedef struct lifo {
// Countdown used as a crude start barrier for the worker threads.
118 static volatile int wait_;
// Allocate and zero an empty stack.  NOTE(review): result of nbd_malloc is
// used without a NULL check -- assumes it aborts on OOM.
121 static lifo_t *lifo_alloc (void) {
122     lifo_t *stk = (lifo_t *)nbd_malloc(sizeof(lifo_t));
123     memset(stk, 0, sizeof(lifo_t));
// Push x via a CAS retry loop.  The volatile casts force a fresh read of
// the head each iteration.  Named "aba" because this pattern is only safe
// when nodes are reclaimed through RCU -- otherwise a recycled node address
// could let the CAS succeed spuriously (the classic ABA problem).
127 static void lifo_aba_push (lifo_t *stk, node_t *x) {
130         head = ((volatile lifo_t *)stk)->head;
131         ((volatile node_t *)x)->next = head;
132     } while (__sync_val_compare_and_swap(&stk->head, head, x) != head);
// Pop via a CAS retry loop.  Dereferencing head->next without holding any
// lock is only safe against use-after-free because popped nodes are freed
// with nbd_defer_free rather than immediately.
135 node_t *lifo_aba_pop (lifo_t *stk) {
138         head = ((volatile lifo_t *)stk)->head;
141     } while (__sync_val_compare_and_swap(&stk->head, head, head->next) != head);
// Allocate a zeroed stack node.  NOTE(review): no NULL check on the
// nbd_malloc result -- assumes it aborts on OOM.
146 node_t *node_alloc (void) {
147     node_t *node = (node_t *)nbd_malloc(sizeof(node_t));
148     memset(node, 0, sizeof(node_t));
// Worker thread body: randomly push to / pop from the shared stack
// NUM_ITERATIONS times.  arg carries the thread id packed into a void *.
152 void *worker (void *arg) {
153     int id = (int)(size_t)arg;
    // Deterministic per-thread seed so rand_r gives reproducible sequences.
154     unsigned int rand_seed = (unsigned int)id + 1;
157     // Wait for all the worker threads to be ready.
158     __sync_fetch_and_add(&wait_, -1);
162     for (i = 0; i < NUM_ITERATIONS; ++ i) {
163         int n = rand_r(&rand_seed);
            // Presumably n selects between push and pop -- the branch
            // condition is not visible in this view.
165             lifo_aba_push(stk_, node_alloc());
167             node_t *x = lifo_aba_pop(stk_);
178 int main (int argc, char **argv) {
180 //lwt_set_trace_level("m0r0");
186 num_threads = strtol(argv[1], NULL, 10);
188 fprintf(stderr, "%s: Invalid argument for number of threads\n", argv[0]);
191 if (num_threads <= 0) {
192 fprintf(stderr, "%s: Number of threads must be at least 1\n", argv[0]);
200 pthread_t thread[num_threads];
201 for (int i = 0; i < num_threads; ++i) {
202 int rc = pthread_create(thread + i, NULL, worker, (void *)(size_t)i);
203 if (rc != 0) { perror("pthread_create"); return rc; }
205 for (int i = 0; i < num_threads; ++i) {
206 pthread_join(thread[i], NULL);