2 * Written by Josh Dybnis and released to the public domain, as explained at
3 * http://creativecommons.org/licenses/publicdomain
5 * safe memory reclamation using a simple technique from RCU
// Only post a new counter value to the next thread after this many deferred
// frees have accumulated (throttles cross-thread traffic; see rcu_post).
13 #define RCU_POST_THRESHOLD 10
// log2 of the per-thread pending-free queue capacity (1 << 20 slots).
14 #define RCU_QUEUE_SCALE 20
// rcu_[i][j]: thread i's most recently received copy of thread j's counter.
// Values are forwarded around a ring of threads (see rcu_update/rcu_post).
23 static uint64_t rcu_[MAX_NUM_THREADS][MAX_NUM_THREADS] = {};
// rcu_last_posted_[i][j]: the last value of rcu_[i][j] that thread i forwarded
// onward; used to suppress redundant posts.
24 static uint64_t rcu_last_posted_[MAX_NUM_THREADS][MAX_NUM_THREADS] = {};
// Per-thread FIFO of pointers whose free has been deferred until a full
// ring-trip of the counter proves no reader can still hold them.
25 static fifo_t *pending_[MAX_NUM_THREADS] = {};
// Number of threads registered via rcu_thread_init (updated with SYNC_ADD).
26 static int num_threads_ = 0;
// Allocate a FIFO with (1 << scale) pointer slots in a single allocation;
// the slot array follows the fifo_t header (trailing flexible storage).
// NOTE(review): lines initializing q->scale and returning q are missing from
// this dump — presumably they set q->scale = scale and return q; confirm.
28 static fifo_t *fifo_alloc(int scale) {
29     fifo_t *q = (fifo_t *)nbd_malloc(sizeof(fifo_t) + (1 << scale) * sizeof(void *));
30     memset(q, 0, sizeof(fifo_t)); // zero header only; slot contents left uninitialized
// Map a monotonically increasing position onto a slot index by masking with
// the queue capacity (capacity is a power of two, so MASK(scale) wraps it).
37 static uint32_t fifo_index (fifo_t *q, uint32_t i) {
38     return i & MASK(q->scale);
// Append <x> at the head of the queue. Single-producer use is assumed
// (head is incremented without synchronization — TODO confirm callers are
// always the owning thread). The assert catches a full queue: advancing
// head onto tail would overwrite the oldest unconsumed entry.
41 static void fifo_enqueue (fifo_t *q, void *x) {
42     assert(fifo_index(q, q->head + 1) != fifo_index(q, q->tail));
43     uint32_t i = fifo_index(q, q->head++);
// Remove and return the oldest entry (at tail). No empty-queue check is
// visible here — callers are expected to ensure tail < head before calling
// (rcu_update only dequeues while tail lags the completed counter).
47 static void *fifo_dequeue (fifo_t *q) {
48     uint32_t i = fifo_index(q, q->tail++);
// Register thread <id> with the RCU machinery: lazily allocate its
// pending-free queue and atomically bump the participant count.
// Idempotent — a second call for the same id finds pending_[id] non-NULL
// and does nothing.
52 void rcu_thread_init (int id) {
53     assert(id < MAX_NUM_THREADS);
54     if (pending_[id] == NULL) {
55         pending_[id] = fifo_alloc(RCU_QUEUE_SCALE);
56         SYNC_ADD(&num_threads_, 1); // atomic: other threads may init concurrently
// Publish this thread's own counter value <x> to the next thread in the
// ring. Throttled: skip the post unless the counter has advanced at least
// RCU_POST_THRESHOLD since the last post, so neighbors aren't flooded with
// near-identical updates.
60 static void rcu_post (uint64_t x) {
61     LOCALIZE_THREAD_LOCAL(tid_, int);
62     if (x - rcu_last_posted_[tid_][tid_] < RCU_POST_THRESHOLD)
// Threads form a ring ordered by id; each thread only ever writes to its
// immediate successor's row.
65     int next_thread_id = (tid_ + 1) % num_threads_;
67     TRACE("r0", "rcu_post: %llu", x, 0);
// Record both what the successor now sees and what we last posted.
68     rcu_[next_thread_id][tid_] = rcu_last_posted_[tid_][tid_] = x;
// Quiescent-state hook, called by each thread at a point where it holds no
// references to shared objects. Two jobs:
//   1. Forward every peer's counter value one hop around the ring (only if
//      it changed since our last forward).
//   2. When our own counter value has made it all the way around the ring
//      and come back (rcu_[tid_][tid_]), every thread has passed through a
//      quiescent state since those frees were deferred — so the entries up
//      to that counter can actually be freed.
71 void rcu_update (void) {
72     LOCALIZE_THREAD_LOCAL(tid_, int);
73     assert(tid_ < num_threads_);
74     int next_thread_id = (tid_ + 1) % num_threads_;
76     for (i = 0; i < num_threads_; ++i) {
80         // No need to post an update if the value hasn't changed
81         if (rcu_[tid_][i] == rcu_last_posted_[tid_][i])
84         uint64_t x = rcu_[tid_][i];
85         rcu_[next_thread_id][i] = rcu_last_posted_[tid_][i] = x;
// Reclaim: free deferred items whose ring-trip is complete. tail chases the
// completed counter; fifo positions double as counter values (see
// nbd_defer_free, which posts pending_[tid_]->head).
89     while (pending_[tid_]->tail != rcu_[tid_][tid_]) {
90         nbd_free(fifo_dequeue(pending_[tid_]));
// Defer freeing <x> until it is provably unreachable: enqueue it on this
// thread's pending queue, then (rate-limited) post the queue's head position
// as this thread's counter. When that value completes a ring-trip in
// rcu_update, everything enqueued before it is safe to free.
94 void nbd_defer_free (void *x) {
95     LOCALIZE_THREAD_LOCAL(tid_, int);
96     fifo_enqueue(pending_[tid_], x);
97     TRACE("r0", "nbd_defer_free: put %p on queue at position %llu", x, pending_[tid_]->head);
98     rcu_post(pending_[tid_]->head);
// ---- Stress test: concurrent push/pop on a Treiber-style stack, using
// ---- nbd_defer_free so popped nodes aren't reclaimed under other threads.
// Operations performed by each worker thread.
106 #define NUM_ITERATIONS 10000000
// Singly-linked stack node. NOTE(review): member lines are missing from this
// dump — presumably { struct node *next; } based on lifo_aba_push/pop usage.
108 typedef struct node {
// Stack header. NOTE(review): members missing — presumably { node_t *head; }.
112 typedef struct lifo {
// Start-line barrier: decremented by each worker; workers presumably spin
// until it reaches zero (decrement visible at worker(); wait loop not shown).
116 static volatile int wait_;
// Allocate and zero an empty stack (head == NULL after memset).
// NOTE(review): return statement is outside this dump — confirm it returns stk.
119 static lifo_t *lifo_alloc (void) {
120     lifo_t *stk = (lifo_t *)nbd_malloc(sizeof(lifo_t));
121     memset(stk, 0, sizeof(lifo_t));
// Lock-free push: classic CAS loop. Deliberately ABA-prone (no version tag)
// — the test relies on RCU deferral, not tagging, to make this safe.
// The volatile casts force a fresh read of head/next on every retry.
125 static void lifo_aba_push (lifo_t *stk, node_t *x) {
128         head = ((volatile lifo_t *)stk)->head;
129         ((volatile node_t *)x)->next = head;
130     } while (__sync_val_compare_and_swap(&stk->head, head, x) != head);
// Lock-free pop via CAS on head. Reading head->next is only safe because
// popped nodes are reclaimed through nbd_defer_free (RCU), so a node another
// thread is inspecting cannot be recycled mid-loop. An empty-stack check
// (head == NULL) exists in the lines missing from this dump — presumably
// between the head read and the CAS; confirm.
133 node_t *lifo_aba_pop (lifo_t *stk) {
136         head = ((volatile lifo_t *)stk)->head;
139     } while (__sync_val_compare_and_swap(&stk->head, head, head->next) != head);
// Allocate a zeroed stack node (next == NULL).
// NOTE(review): return statement is outside this dump — confirm it returns node.
144 node_t *node_alloc (void) {
145     node_t *node = (node_t *)nbd_malloc(sizeof(node_t));
146     memset(node, 0, sizeof(node_t));
// Worker thread body: after all threads rendezvous on wait_, randomly push
// freshly allocated nodes or pop (and defer-free — presumably, in the
// missing lines) nodes on the shared stack stk_ for NUM_ITERATIONS rounds.
150 void *worker (void *arg) {
// Thread id is smuggled through the void* argument.
151     int id = (int)(size_t)arg;
// Per-thread seed so rand_r() streams differ between threads; +1 avoids a
// zero seed for thread 0.
152     unsigned int rand_seed = (unsigned int)id + 1;
154     // Wait for all the worker threads to be ready.
155     __sync_fetch_and_add(&wait_, -1);
159     for (i = 0; i < NUM_ITERATIONS; ++ i) {
// n decides push vs pop; the branch condition is in the missing lines.
160         int n = rand_r(&rand_seed);
162             lifo_aba_push(stk_, node_alloc());
164             node_t *x = lifo_aba_pop(stk_);
// Test driver: parse the worker-thread count from argv[1], validate it,
// spawn the workers, and join them all before exiting.
175 int main (int argc, char **argv) {
177     //lwt_set_trace_level("m0r0");
// strtol (not atoi) so bad input is detectable; the error check using its
// end pointer / errno is in the missing lines before line 185.
183     num_threads = strtol(argv[1], NULL, 10);
185         fprintf(stderr, "%s: Invalid argument for number of threads\n", argv[0]);
188     if (num_threads <= 0) {
189         fprintf(stderr, "%s: Number of threads must be at least 1\n", argv[0]);
// VLA sized by the runtime thread count.
197     pthread_t thread[num_threads];
198     for (int i = 0; i < num_threads; ++i) {
// nbd_thread_create presumably wraps pthread_create and also performs
// per-thread init (rcu_thread_init) — confirm; the id is passed as both the
// registration id and the worker argument.
199         int rc = nbd_thread_create(thread + i, i, worker, (void *)(size_t)i);
200         if (rc != 0) { perror("pthread_create"); return rc; }
202     for (int i = 0; i < num_threads; ++i) {
203         pthread_join(thread[i], NULL);