1 | /* |
2 | * urcu-mb.c |
3 | * |
4 | * Userspace RCU library with explicit memory barriers |
5 | * |
6 | * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> |
7 | * Copyright (c) 2009 Paul E. McKenney, IBM Corporation. |
8 | * Copyright 2015 Red Hat, Inc. |
9 | * |
10 | * Ported to QEMU by Paolo Bonzini <pbonzini@redhat.com> |
11 | * |
12 | * This library is free software; you can redistribute it and/or |
13 | * modify it under the terms of the GNU Lesser General Public |
14 | * License as published by the Free Software Foundation; either |
15 | * version 2.1 of the License, or (at your option) any later version. |
16 | * |
17 | * This library is distributed in the hope that it will be useful, |
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
20 | * Lesser General Public License for more details. |
21 | * |
22 | * You should have received a copy of the GNU Lesser General Public |
23 | * License along with this library; if not, write to the Free Software |
24 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
25 | * |
26 | * IBM's contributions to this file may be relicensed under LGPLv2 or later. |
27 | */ |
28 | |
29 | #include "qemu/osdep.h" |
30 | #include "qemu/rcu.h" |
31 | #include "qemu/atomic.h" |
32 | #include "qemu/thread.h" |
33 | #include "qemu/main-loop.h" |
34 | #if defined(CONFIG_MALLOC_TRIM) |
35 | #include <malloc.h> |
36 | #endif |
37 | |
38 | /* |
39 | * Global grace period counter. Bit 0 is always one in rcu_gp_ctr. |
40 | * Bits 1 and above are defined in synchronize_rcu. |
41 | */ |
42 | #define RCU_GP_LOCKED (1UL << 0) |
43 | #define RCU_GP_CTR (1UL << 1) |
44 | |
45 | unsigned long rcu_gp_ctr = RCU_GP_LOCKED; |
46 | |
47 | QemuEvent rcu_gp_event; |
48 | static QemuMutex rcu_registry_lock; |
49 | static QemuMutex rcu_sync_lock; |
50 | |
51 | /* |
 * Check whether a quiescent state was crossed between the start of the
 * current grace period (the rcu_gp_ctr update in synchronize_rcu) and now.
54 | */ |
55 | static inline int rcu_gp_ongoing(unsigned long *ctr) |
56 | { |
57 | unsigned long v; |
58 | |
59 | v = atomic_read(ctr); |
60 | return v && (v != rcu_gp_ctr); |
61 | } |
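
/* Informal summary of the cases rcu_gp_ongoing() distinguishes, assuming the
 * reader side copies rcu_gp_ctr into its ctr at rcu_read_lock() and clears it
 * at rcu_read_unlock():
 *
 *     v == 0            the reader is outside any read-side critical section;
 *                       it cannot be holding stale pointers.
 *     v == rcu_gp_ctr   the reader entered its critical section after the
 *                       current grace period began, so it does not need to
 *                       be waited for.
 *     anything else     the reader entered before the counter changed and may
 *                       still hold old pointers; keep waiting for it.
 */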
62 | |
63 | /* Written to only by each individual reader. Read by both the reader and the |
64 | * writers. |
65 | */ |
66 | __thread struct rcu_reader_data rcu_reader; |
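
/* A rough sketch of the reader side (the authoritative definitions live in
 * include/qemu/rcu.h; details such as nesting via a depth counter are omitted
 * here):
 *
 *     rcu_read_lock():
 *         atomic_set(&rcu_reader.ctr, atomic_read(&rcu_gp_ctr));
 *         smp_mb_placeholder();      pairs with smp_mb_global() in
 *                                    synchronize_rcu()
 *
 *     rcu_read_unlock():
 *         atomic_set(&rcu_reader.ctr, 0);
 *         smp_mb_placeholder();      pairs with smp_mb_global() in
 *                                    wait_for_readers()
 *         if (atomic_read(&rcu_reader.waiting)) {
 *             atomic_set(&rcu_reader.waiting, false);
 *             qemu_event_set(&rcu_gp_event);
 *         }
 */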
67 | |
68 | /* Protected by rcu_registry_lock. */ |
69 | typedef QLIST_HEAD(, rcu_reader_data) ThreadList; |
70 | static ThreadList registry = QLIST_HEAD_INITIALIZER(registry); |
71 | |
72 | /* Wait for previous parity/grace period to be empty of readers. */ |
73 | static void wait_for_readers(void) |
74 | { |
75 | ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders); |
76 | struct rcu_reader_data *index, *tmp; |
77 | |
78 | for (;;) { |
79 | /* We want to be notified of changes made to rcu_gp_ongoing |
80 | * while we walk the list. |
81 | */ |
82 | qemu_event_reset(&rcu_gp_event); |
83 | |
84 | /* Instead of using atomic_mb_set for index->waiting, and |
85 | * atomic_mb_read for index->ctr, memory barriers are placed |
86 | * manually since writes to different threads are independent. |
87 | * qemu_event_reset has acquire semantics, so no memory barrier |
88 | * is needed here. |
89 | */ |
        QLIST_FOREACH(index, &registry, node) {
91 | atomic_set(&index->waiting, true); |
92 | } |
93 | |
94 | /* Here, order the stores to index->waiting before the loads of |
95 | * index->ctr. Pairs with smp_mb_placeholder() in rcu_read_unlock(), |
96 | * ensuring that the loads of index->ctr are sequentially consistent. |
97 | */ |
98 | smp_mb_global(); |
99 | |
        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
101 | if (!rcu_gp_ongoing(&index->ctr)) { |
102 | QLIST_REMOVE(index, node); |
103 | QLIST_INSERT_HEAD(&qsreaders, index, node); |
104 | |
                /* No need for an mb_set here; at worst we get some
                 * extra futex wakeups.
                 */
108 | atomic_set(&index->waiting, false); |
109 | } |
110 | } |
111 | |
        if (QLIST_EMPTY(&registry)) {
113 | break; |
114 | } |
115 | |
        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so rcu_(un)register_thread() does not
         * have to wait too long.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least another
         * thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of exactly one
         * list when rcu_registry_lock is released.
         */
133 | qemu_mutex_unlock(&rcu_registry_lock); |
134 | qemu_event_wait(&rcu_gp_event); |
135 | qemu_mutex_lock(&rcu_registry_lock); |
136 | } |
137 | |
138 | /* put back the reader list in the registry */ |
    QLIST_SWAP(&registry, &qsreaders, node);
140 | } |
141 | |
142 | void synchronize_rcu(void) |
143 | { |
144 | qemu_mutex_lock(&rcu_sync_lock); |
145 | |
146 | /* Write RCU-protected pointers before reading p_rcu_reader->ctr. |
147 | * Pairs with smp_mb_placeholder() in rcu_read_lock(). |
148 | */ |
149 | smp_mb_global(); |
150 | |
151 | qemu_mutex_lock(&rcu_registry_lock); |
    if (!QLIST_EMPTY(&registry)) {
153 | /* In either case, the atomic_mb_set below blocks stores that free |
154 | * old RCU-protected pointers. |
155 | */ |
156 | if (sizeof(rcu_gp_ctr) < 8) { |
            /* For architectures with 32-bit longs, a two-subphase algorithm
             * ensures we do not encounter overflow bugs.
159 | * |
160 | * Switch parity: 0 -> 1, 1 -> 0. |
161 | */ |
162 | atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR); |
163 | wait_for_readers(); |
164 | atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR); |
165 | } else { |
166 | /* Increment current grace period. */ |
167 | atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR); |
168 | } |
169 | |
170 | wait_for_readers(); |
171 | } |
172 | |
173 | qemu_mutex_unlock(&rcu_registry_lock); |
174 | qemu_mutex_unlock(&rcu_sync_lock); |
175 | } |
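
/* Worked example of the two-subphase scheme, with illustrative values:
 * rcu_gp_ctr starts at 1 (RCU_GP_LOCKED).  The first XOR makes it 3, and
 * wait_for_readers() then only waits for readers whose ctr is still 1;
 * readers that entered with ctr == 3 started after the grace period began
 * and need not be waited for.  The second XOR brings the counter back to 1,
 * and the final wait_for_readers() flushes the readers that entered with
 * ctr == 3.  Because a reader's ctr can only be 0 or one of the two parity
 * values, the comparison in rcu_gp_ongoing() is immune to counter wraparound
 * on 32-bit hosts.  On 64-bit hosts the counter simply grows by RCU_GP_CTR
 * each grace period (1, 3, 5, ...).
 */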
176 | |
177 | |
178 | #define RCU_CALL_MIN_SIZE 30 |
179 | |
180 | /* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h |
181 | * from liburcu. Note that head is only used by the consumer. |
182 | */ |
183 | static struct rcu_head dummy; |
184 | static struct rcu_head *head = &dummy, **tail = &dummy.next; |
185 | static int rcu_call_count; |
186 | static QemuEvent rcu_call_ready_event; |
187 | |
188 | static void enqueue(struct rcu_head *node) |
189 | { |
190 | struct rcu_head **old_tail; |
191 | |
192 | node->next = NULL; |
193 | old_tail = atomic_xchg(&tail, &node->next); |
194 | atomic_mb_set(old_tail, node); |
195 | } |
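
/* Informal sketch of why enqueue() is multi-producer safe: the atomic_xchg
 * serializes concurrent enqueuers.  If producer A's xchg runs first, tail
 * ends up pointing to &A->next, and a later producer B's xchg moves it to
 * &B->next; each producer then links its predecessor's next pointer with the
 * atomic_mb_set (so B stores into A->next).  Until that second store
 * completes, the consumer can observe a NULL next pointer, which
 * try_dequeue() below treats as "enqueue still in progress".
 */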
196 | |
197 | static struct rcu_head *try_dequeue(void) |
198 | { |
199 | struct rcu_head *node, *next; |
200 | |
201 | retry: |
    /* Test for an empty list, which we do not expect.  Note that for
     * the consumer, head and tail are always consistent.  The head is
     * consistent because only the consumer reads/writes it; the tail is
     * consistent because the atomic_xchg in enqueue() updates it as the
     * first visible step of an enqueue.  Only the next pointers might be
     * left temporarily inconsistent.
     */
208 | if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) { |
209 | abort(); |
210 | } |
211 | |
    /* If the head node still has NULL in its next pointer, its enqueuer
     * has not finished the update yet; return NULL and let the caller
     * wait and retry.
     */
215 | node = head; |
216 | next = atomic_mb_read(&head->next); |
217 | if (!next) { |
218 | return NULL; |
219 | } |
220 | |
221 | /* Since we are the sole consumer, and we excluded the empty case |
222 | * above, the queue will always have at least two nodes: the |
223 | * dummy node, and the one being removed. So we do not need to update |
224 | * the tail pointer. |
225 | */ |
226 | head = next; |
227 | |
228 | /* If we dequeued the dummy node, add it back at the end and retry. */ |
229 | if (node == &dummy) { |
230 | enqueue(node); |
231 | goto retry; |
232 | } |
233 | |
234 | return node; |
235 | } |
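
/* Worked example of the dummy-node dance above: suppose the queue holds the
 * dummy node and one real node A, i.e. head == &dummy, dummy.next == &A,
 * A.next == NULL and tail == &A.next.  try_dequeue() first pops the dummy,
 * re-enqueues it behind A (A.next = &dummy, tail = &dummy.next) and retries;
 * the retry then returns A and leaves head == &dummy with dummy.next == NULL.
 * The consumer only dequeues again once rcu_call_count shows new elements,
 * so the abort() at the top, which guards against dequeuing from a truly
 * empty queue, is not expected to fire.
 */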
236 | |
237 | static void *call_rcu_thread(void *opaque) |
238 | { |
239 | struct rcu_head *node; |
240 | |
241 | rcu_register_thread(); |
242 | |
243 | for (;;) { |
244 | int tries = 0; |
245 | int n = atomic_read(&rcu_call_count); |
246 | |
        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now; we need only process elements that were
         * added before synchronize_rcu() starts.
         */
251 | while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) { |
252 | g_usleep(10000); |
253 | if (n == 0) { |
254 | qemu_event_reset(&rcu_call_ready_event); |
255 | n = atomic_read(&rcu_call_count); |
256 | if (n == 0) { |
257 | #if defined(CONFIG_MALLOC_TRIM) |
258 | malloc_trim(4 * 1024 * 1024); |
259 | #endif |
260 | qemu_event_wait(&rcu_call_ready_event); |
261 | } |
262 | } |
263 | n = atomic_read(&rcu_call_count); |
264 | } |
265 | |
266 | atomic_sub(&rcu_call_count, n); |
267 | synchronize_rcu(); |
268 | qemu_mutex_lock_iothread(); |
269 | while (n > 0) { |
270 | node = try_dequeue(); |
271 | while (!node) { |
272 | qemu_mutex_unlock_iothread(); |
273 | qemu_event_reset(&rcu_call_ready_event); |
274 | node = try_dequeue(); |
275 | if (!node) { |
276 | qemu_event_wait(&rcu_call_ready_event); |
277 | node = try_dequeue(); |
278 | } |
279 | qemu_mutex_lock_iothread(); |
280 | } |
281 | |
282 | n--; |
283 | node->func(node); |
284 | } |
285 | qemu_mutex_unlock_iothread(); |
286 | } |
287 | abort(); |
288 | } |
289 | |
290 | void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node)) |
291 | { |
292 | node->func = func; |
293 | enqueue(node); |
294 | atomic_inc(&rcu_call_count); |
295 | qemu_event_set(&rcu_call_ready_event); |
296 | } |
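
/* Typical usage, as an illustrative sketch only (struct Foo and foo_free are
 * made-up names): embed a struct rcu_head in the RCU-protected object and
 * reclaim the object from the callback, which runs after a grace period has
 * elapsed:
 *
 *     struct Foo {
 *         struct rcu_head rcu;
 *         int datum;
 *     };
 *
 *     static void foo_free(struct rcu_head *head)
 *     {
 *         struct Foo *foo = container_of(head, struct Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     ...
 *     call_rcu1(&old_foo->rcu, foo_free);
 *
 * include/qemu/rcu.h also provides the call_rcu() and g_free_rcu()
 * convenience macros that wrap this function.
 */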
297 | |
298 | void rcu_register_thread(void) |
299 | { |
300 | assert(rcu_reader.ctr == 0); |
301 | qemu_mutex_lock(&rcu_registry_lock); |
    QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
303 | qemu_mutex_unlock(&rcu_registry_lock); |
304 | } |
305 | |
306 | void rcu_unregister_thread(void) |
307 | { |
308 | qemu_mutex_lock(&rcu_registry_lock); |
309 | QLIST_REMOVE(&rcu_reader, node); |
310 | qemu_mutex_unlock(&rcu_registry_lock); |
311 | } |
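
/* Sketch of how a long-lived thread that reads RCU-protected data would use
 * the registration API (worker() and the running flag are hypothetical):
 *
 *     static void *worker(void *opaque)
 *     {
 *         rcu_register_thread();
 *         while (atomic_read(&running)) {
 *             rcu_read_lock();
 *             ... dereference RCU-protected pointers ...
 *             rcu_read_unlock();
 *         }
 *         rcu_unregister_thread();
 *         return NULL;
 *     }
 *
 * call_rcu_thread() above registers itself the same way before entering its
 * main loop.
 */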
312 | |
313 | static void rcu_init_complete(void) |
314 | { |
315 | QemuThread thread; |
316 | |
317 | qemu_mutex_init(&rcu_registry_lock); |
318 | qemu_mutex_init(&rcu_sync_lock); |
319 | qemu_event_init(&rcu_gp_event, true); |
320 | |
321 | qemu_event_init(&rcu_call_ready_event, false); |
322 | |
    /* The caller is assumed to hold the iothread lock, so the call_rcu
     * thread must have been quiescent even after forking; just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
327 | NULL, QEMU_THREAD_DETACHED); |
328 | |
329 | rcu_register_thread(); |
330 | } |
331 | |
332 | static int atfork_depth = 1; |
333 | |
334 | void rcu_enable_atfork(void) |
335 | { |
336 | atfork_depth++; |
337 | } |
338 | |
339 | void rcu_disable_atfork(void) |
340 | { |
341 | atfork_depth--; |
342 | } |
343 | |
344 | #ifdef CONFIG_POSIX |
345 | static void rcu_init_lock(void) |
346 | { |
347 | if (atfork_depth < 1) { |
348 | return; |
349 | } |
350 | |
351 | qemu_mutex_lock(&rcu_sync_lock); |
352 | qemu_mutex_lock(&rcu_registry_lock); |
353 | } |
354 | |
355 | static void rcu_init_unlock(void) |
356 | { |
357 | if (atfork_depth < 1) { |
358 | return; |
359 | } |
360 | |
361 | qemu_mutex_unlock(&rcu_registry_lock); |
362 | qemu_mutex_unlock(&rcu_sync_lock); |
363 | } |
364 | |
365 | static void rcu_init_child(void) |
366 | { |
367 | if (atfork_depth < 1) { |
368 | return; |
369 | } |
370 | |
    memset(&registry, 0, sizeof(registry));
372 | rcu_init_complete(); |
373 | } |
374 | #endif |
375 | |
376 | static void __attribute__((__constructor__)) rcu_init(void) |
377 | { |
378 | smp_mb_global_init(); |
379 | #ifdef CONFIG_POSIX |
380 | pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child); |
381 | #endif |
382 | rcu_init_complete(); |
383 | } |
384 | |