/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#if defined(CONFIG_MALLOC_TRIM)
#include <malloc.h>
#endif

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)

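/* Illustrative values (a sketch, not code): on hosts with 64-bit longs the
 * counter advances by RCU_GP_CTR for each grace period, 1 -> 3 -> 5 -> ...;
 * a reader delays the grace period only while its private copy is nonzero
 * (it is inside a critical section) and different from the global value
 * (it entered before the counter moved).  On 32-bit hosts only bit 1 is
 * toggled, 1 <-> 3, so the counter cannot overflow; synchronize_rcu()
 * compensates by running two subphases there.
 */
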
unsigned long rcu_gp_ctr = RCU_GP_LOCKED;

QemuEvent rcu_gp_event;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;

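/* The read side lives in include/qemu/rcu.h.  As a rough sketch, ignoring
 * nesting (an illustration, not the exact header code):
 *
 *     static inline void rcu_read_lock(void)
 *     {
 *         atomic_set(&rcu_reader.ctr, atomic_read(&rcu_gp_ctr));
 *         smp_mb_placeholder();        // pairs with smp_mb_global() below
 *     }
 *
 *     static inline void rcu_read_unlock(void)
 *     {
 *         atomic_set(&rcu_reader.ctr, 0);
 *         smp_mb_placeholder();
 *         if (atomic_read(&rcu_reader.waiting)) {
 *             atomic_set(&rcu_reader.waiting, false);
 *             qemu_event_set(&rcu_gp_event);   // wake up synchronize_rcu()
 *         }
 *     }
 */
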
/*
 * Check whether a quiescent state was crossed between the beginning of
 * wait_for_readers() and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    v = atomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}

/* Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
__thread struct rcu_reader_data rcu_reader;

/* Protected by rcu_registry_lock. */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

/* Wait for previous parity/grace period to be empty of readers. */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        /* Instead of using atomic_mb_set for index->waiting, and
         * atomic_mb_read for index->ctr, memory barriers are placed
         * manually since writes to different threads are independent.
         * qemu_event_reset has acquire semantics, so no memory barrier
         * is needed here.
         */
        QLIST_FOREACH(index, &registry, node) {
            atomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the loads of
         * index->ctr.  Pairs with smp_mb_placeholder() in rcu_read_unlock(),
         * ensuring that the loads of index->ctr are sequentially consistent.
         */
        smp_mb_global();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for mb_set here, worst of all we
                 * get some extra futex wakeups.
                 */
                atomic_set(&index->waiting, false);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't
         * wait too much time.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least another
         * thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* put back the reader list in the registry */
    QLIST_SWAP(&registry, &qsreaders, node);
}

void synchronize_rcu(void)
{
    qemu_mutex_lock(&rcu_sync_lock);

    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
     * Pairs with smp_mb_placeholder() in rcu_read_lock().
     */
    smp_mb_global();

    qemu_mutex_lock(&rcu_registry_lock);
    if (!QLIST_EMPTY(&registry)) {
        /* In either case, the atomic_mb_set below blocks stores that free
         * old RCU-protected pointers.
         */
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, a two-subphases algorithm
             * ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period.  */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}

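/* Illustrative writer-side pattern (a sketch, not code from this file;
 * global_foo is hypothetical):
 *
 *     Foo *old = global_foo;
 *     atomic_rcu_set(&global_foo, new);   // publish the new version
 *     synchronize_rcu();                  // wait out all current readers
 *     g_free(old);                        // now no reader can still see it
 */
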
#define RCU_CALL_MIN_SIZE        30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;

static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;
    old_tail = atomic_xchg(&tail, &node->next);
    atomic_mb_set(old_tail, node);
}

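/* An illustrative interleaving (a sketch, not from liburcu): with two
 * producers A and B enqueuing nodes a and b,
 *
 *     A: old_tail = atomic_xchg(&tail, &a.next);  // A grabs its slot
 *     B: old_tail = atomic_xchg(&tail, &b.next);  // B grabs the next slot
 *     B: atomic_mb_set(old_tail, &b);             // a.next = &b
 *     A: atomic_mb_set(old_tail, &a);             // predecessor->next = &a
 *
 * Between a producer's xchg and its final store, the consumer can observe
 * a NULL next pointer; try_dequeue() below copes by returning NULL and
 * letting the caller wait for the enqueuer to finish.
 */
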
static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Test for an empty list, which we do not expect.  Note that for
     * the consumer head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it.
     * The tail is consistent because enqueuers update it atomically
     * as their first step.  It is only the next pointers that might
     * be inconsistent.
     */
    if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
        abort();
    }

    /* If the head node has NULL in its next pointer, the value is
     * wrong and we need to wait until its enqueuer finishes the update.
     */
    node = head;
    next = atomic_mb_read(&head->next);
    if (!next) {
        return NULL;
    }

    /* Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry.  */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}

static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = atomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now, we only must process elements that were
         * added before synchronize_rcu() starts.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = atomic_read(&rcu_call_count);
                if (n == 0) {
#if defined(CONFIG_MALLOC_TRIM)
                    malloc_trim(4 * 1024 * 1024);
#endif
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = atomic_read(&rcu_call_count);
        }

        atomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        qemu_mutex_lock_iothread();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
                qemu_mutex_unlock_iothread();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                qemu_mutex_lock_iothread();
            }

            n--;
            node->func(node);
        }
        qemu_mutex_unlock_iothread();
    }
    abort();
}

void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    atomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}

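/* Typical usage (a sketch; struct Foo and foo_free_rcu are hypothetical):
 *
 *     struct Foo {
 *         struct rcu_head rcu;
 *         int payload;
 *     };
 *
 *     static void foo_free_rcu(struct rcu_head *head)
 *     {
 *         struct Foo *foo = container_of(head, struct Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     ...
 *     call_rcu1(&foo->rcu, foo_free_rcu);  // freed after a grace period
 */
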
void rcu_register_thread(void)
{
    assert(rcu_reader.ctr == 0);
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(&rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

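/* Illustrative thread function (a sketch, not from this file): any thread
 * that enters RCU read-side critical sections must register itself first,
 * as call_rcu_thread() above does:
 *
 *     static void *reader_thread_fn(void *opaque)
 *     {
 *         rcu_register_thread();
 *         ...
 *         rcu_read_lock();
 *         Foo *foo = atomic_rcu_read(&global_foo);
 *         ... use foo without blocking ...
 *         rcu_read_unlock();
 *         ...
 *         rcu_unregister_thread();
 *         return NULL;
 *     }
 */
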
static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to have iothread lock, so the call_rcu thread
     * must have been quiescent even after forking, just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}

static int atfork_depth = 1;

void rcu_enable_atfork(void)
{
    atfork_depth++;
}

void rcu_disable_atfork(void)
{
    atfork_depth--;
}

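/* Illustrative use (a sketch, not from this file): a caller that forks a
 * child which will never use RCU can disable the atfork handlers around
 * the fork, so they skip taking the RCU locks:
 *
 *     rcu_disable_atfork();
 *     pid = fork();
 *     ...
 *     rcu_enable_atfork();
 */
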
#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}

static void rcu_init_unlock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}

static void rcu_init_child(void)
{
    if (atfork_depth < 1) {
        return;
    }

    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}
#endif

static void __attribute__((__constructor__)) rcu_init(void)
{
    smp_mb_global_init();
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
#endif
    rcu_init_complete();
}