/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <[email protected]>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini <[email protected]>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#if defined(CONFIG_MALLOC_TRIM)
#include <malloc.h>
#endif

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
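
/* How the counter is used (a sketch based on rcu_gp_ongoing() below and
 * the read side in include/qemu/rcu.h): rcu_read_lock() copies rcu_gp_ctr
 * into the per-thread rcu_reader.ctr, and rcu_read_unlock() resets it to
 * zero.  For the writer this yields three cases:
 *
 *     ctr == 0           thread is quiescent
 *     ctr == rcu_gp_ctr  read side began after the current grace period
 *                        started, so it cannot hold a stale pointer
 *     anything else      reader predates the grace period; wait for it
 */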

QemuEvent rcu_gp_event;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;

/*
 * Check whether a quiescent state was crossed between the beginning of
 * wait_for_readers() and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    v = atomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}

/* Written to only by each individual reader.  Read by both the reader
 * and the writers.
 */
__thread struct rcu_reader_data rcu_reader;

/* Protected by rcu_registry_lock. */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

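/* wait_for_readers() is entered with rcu_registry_lock held and returns
 * with it held, but temporarily releases it around qemu_event_wait()
 * below so that rcu_register_thread() and rcu_unregister_thread() are
 * not blocked for the whole grace period.
 */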
/* Wait for previous parity/grace period to be empty of readers. */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        /* Instead of using atomic_mb_set for index->waiting, and
         * atomic_mb_read for index->ctr, memory barriers are placed
         * manually since writes to different threads are independent.
         * qemu_event_reset has acquire semantics, so no memory barrier
         * is needed here.
         */
        QLIST_FOREACH(index, &registry, node) {
            atomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the loads of
         * index->ctr.
         */
        smp_mb();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for mb_set here; at worst we get some
                 * extra futex wakeups.
                 */
                atomic_set(&index->waiting, false);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't
         * wait too long.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least one
         * other thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* Put back the reader list in the registry. */
    QLIST_SWAP(&registry, &qsreaders, node);
}

void synchronize_rcu(void)
{
    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);

    if (!QLIST_EMPTY(&registry)) {
        /* In either case, the atomic_mb_set below blocks stores that free
         * old RCU-protected pointers.
         */
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, a two-subphase algorithm
             * ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period. */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}
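
/* A minimal writer-side usage sketch, assuming a global pointer that
 * readers dereference under rcu_read_lock()/rcu_read_unlock().  The
 * names global_ptr, new and old are illustrative, not part of this file:
 *
 *     old = global_ptr;
 *     atomic_rcu_set(&global_ptr, new);   // publish the replacement
 *     synchronize_rcu();                  // wait out pre-existing readers
 *     g_free(old);                        // now safe to reclaim
 */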


#define RCU_CALL_MIN_SIZE        30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;

static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;
    old_tail = atomic_xchg(&tail, &node->next);
    atomic_mb_set(old_tail, node);
}
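
/* Between the atomic_xchg and the atomic_mb_set above there is a window
 * in which the previous tail node's next pointer is still NULL even
 * though a successor has already claimed the tail.  try_dequeue() below
 * observes this as head->next == NULL and returns NULL, waiting for the
 * enqueuer to finish the second step.
 */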

static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Test for an empty list, which we do not expect.  Note that for
     * the consumer head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it.
     * The tail is consistent because writing it is the first step of
     * an enqueue.  It is only the next pointers that might be
     * inconsistent.
     */
    if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
        abort();
    }

    /* If the head node has NULL in its next pointer, the value is
     * wrong and we need to wait until its enqueuer finishes the update.
     */
    node = head;
    next = atomic_mb_read(&head->next);
    if (!next) {
        return NULL;
    }

    /* Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry. */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}
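
/* A worked example of the dummy-node retry: if the queue holds
 * [dummy, A], the first pass dequeues dummy, re-enqueues it to get
 * [A, dummy], and the retry returns A.  Keeping the dummy node in the
 * queue guarantees at least one node is always present, which is why
 * the consumer never has to write the tail pointer.
 */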

static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = atomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now; we must only process elements that
         * were added before synchronize_rcu() starts.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = atomic_read(&rcu_call_count);
                if (n == 0) {
#if defined(CONFIG_MALLOC_TRIM)
                    malloc_trim(4 * 1024 * 1024);
#endif
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = atomic_read(&rcu_call_count);
        }

        atomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        qemu_mutex_lock_iothread();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
                qemu_mutex_unlock_iothread();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                qemu_mutex_lock_iothread();
            }

            n--;
            node->func(node);
        }
        qemu_mutex_unlock_iothread();
    }
    abort();
}

void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    atomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
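
/* A minimal caller-side sketch, assuming a hypothetical struct Foo that
 * embeds the rcu_head (the names Foo, foo and foo_reclaim are
 * illustrative, not part of this file):
 *
 *     struct Foo { struct rcu_head rcu; int val; };
 *
 *     static void foo_reclaim(struct rcu_head *rcu)
 *     {
 *         g_free(container_of(rcu, struct Foo, rcu));
 *     }
 *     ...
 *     call_rcu1(&foo->rcu, foo_reclaim);
 *
 * foo_reclaim then runs in call_rcu_thread after a grace period, with
 * the iothread lock held.  include/qemu/rcu.h also provides a call_rcu()
 * macro that wraps this function and derives the rcu_head from a field
 * name.
 */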

void rcu_register_thread(void)
{
    assert(rcu_reader.ctr == 0);
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(&rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}
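
/* Every thread that runs RCU read-side critical sections must call
 * rcu_register_thread() before its first rcu_read_lock(), and
 * rcu_unregister_thread() before it exits: synchronize_rcu() only waits
 * for the threads in &registry.  The assert above also catches attempts
 * to register from inside a read-side critical section.
 */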

static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to hold the iothread lock, so the call_rcu
     * thread must have been quiescent even after forking; just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}
21b7cf9e | 326 | |
73c6e401 PB |
327 | static int atfork_depth = 1; |
328 | ||
329 | void rcu_enable_atfork(void) | |
330 | { | |
331 | atfork_depth++; | |
332 | } | |
333 | ||
334 | void rcu_disable_atfork(void) | |
335 | { | |
336 | atfork_depth--; | |
337 | } | |

#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}

static void rcu_init_unlock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}

static void rcu_init_child(void)
{
    if (atfork_depth < 1) {
        return;
    }

    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}
#endif

static void __attribute__((__constructor__)) rcu_init(void)
{
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
#endif
    rcu_init_complete();
}