/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <[email protected]>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini <[email protected]>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu-common.h"
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/*
 * Global grace period counter. Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
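
/* Concretely: on hosts with 64-bit longs, synchronize_rcu() adds RCU_GP_CTR
 * each grace period, so the counter takes the values 0x1, 0x3, 0x5, ...;
 * on hosts with 32-bit longs it XORs RCU_GP_CTR instead, so the counter
 * only alternates between 0x1 and 0x3 and cannot overflow.
 */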
48 | ||
49 | QemuEvent rcu_gp_event; | |
c097a60b WC |
50 | static QemuMutex rcu_registry_lock; |
51 | static QemuMutex rcu_sync_lock; | |
7911747b PB |
52 | |
53 | /* | |
54 | * Check whether a quiescent state was crossed between the beginning of | |
55 | * update_counter_and_wait and now. | |
56 | */ | |
57 | static inline int rcu_gp_ongoing(unsigned long *ctr) | |
58 | { | |
59 | unsigned long v; | |
60 | ||
61 | v = atomic_read(ctr); | |
62 | return v && (v != rcu_gp_ctr); | |
63 | } | |
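
/* Spelled out, the three cases the writer can observe are roughly:
 *
 *     v == 0            the reader is not in a read-side critical section;
 *     v == rcu_gp_ctr   the reader entered after the counter last changed,
 *                       so it cannot hold pre-existing pointers;
 *     anything else     the reader entered before the counter changed and
 *                       the grace period must still wait for it.
 */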
64 | ||
65 | /* Written to only by each individual reader. Read by both the reader and the | |
66 | * writers. | |
67 | */ | |
68 | __thread struct rcu_reader_data rcu_reader; | |
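
/* A reader-side sketch, using the rcu_read_lock()/rcu_read_unlock() inlines
 * from "qemu/rcu.h" that update rcu_reader.ctr. "Config" and "global_cfg"
 * are hypothetical names used only for illustration:
 *
 *     rcu_read_lock();
 *     Config *cfg = atomic_rcu_read(&global_cfg);
 *     use(cfg);          // safe: cfg cannot be reclaimed before
 *                        // rcu_read_unlock(), because our rcu_reader.ctr
 *                        // snapshot keeps rcu_gp_ongoing() returning true
 *     rcu_read_unlock();
 */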
69 | ||
c097a60b | 70 | /* Protected by rcu_registry_lock. */ |
7911747b PB |
71 | typedef QLIST_HEAD(, rcu_reader_data) ThreadList; |
72 | static ThreadList registry = QLIST_HEAD_INITIALIZER(registry); | |
73 | ||
74 | /* Wait for previous parity/grace period to be empty of readers. */ | |
75 | static void wait_for_readers(void) | |
76 | { | |
77 | ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders); | |
78 | struct rcu_reader_data *index, *tmp; | |
79 | ||
80 | for (;;) { | |
81 | /* We want to be notified of changes made to rcu_gp_ongoing | |
82 | * while we walk the list. | |
83 | */ | |
84 | qemu_event_reset(&rcu_gp_event); | |
85 | ||
86 | /* Instead of using atomic_mb_set for index->waiting, and | |
87 | * atomic_mb_read for index->ctr, memory barriers are placed | |
88 | * manually since writes to different threads are independent. | |
89 | * atomic_mb_set has a smp_wmb before... | |
90 | */ | |
91 | smp_wmb(); | |
92 | QLIST_FOREACH(index, ®istry, node) { | |
93 | atomic_set(&index->waiting, true); | |
94 | } | |
95 | ||
96 | /* ... and a smp_mb after. */ | |
97 | smp_mb(); | |
98 | ||
99 | QLIST_FOREACH_SAFE(index, ®istry, node, tmp) { | |
100 | if (!rcu_gp_ongoing(&index->ctr)) { | |
101 | QLIST_REMOVE(index, node); | |
102 | QLIST_INSERT_HEAD(&qsreaders, index, node); | |
103 | ||
104 | /* No need for mb_set here, worst of all we | |
105 | * get some extra futex wakeups. | |
106 | */ | |
107 | atomic_set(&index->waiting, false); | |
108 | } | |
109 | } | |
110 | ||
111 | /* atomic_mb_read has smp_rmb after. */ | |
112 | smp_rmb(); | |
113 | ||
114 | if (QLIST_EMPTY(®istry)) { | |
115 | break; | |
116 | } | |
117 | ||
c097a60b WC |
118 | /* Wait for one thread to report a quiescent state and try again. |
119 | * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't | |
120 | * wait too much time. | |
121 | * | |
122 | * rcu_register_thread() may add nodes to ®istry; it will not | |
123 | * wake up synchronize_rcu, but that is okay because at least another | |
124 | * thread must exit its RCU read-side critical section before | |
125 | * synchronize_rcu is done. The next iteration of the loop will | |
126 | * move the new thread's rcu_reader from ®istry to &qsreaders, | |
127 | * because rcu_gp_ongoing() will return false. | |
128 | * | |
129 | * rcu_unregister_thread() may remove nodes from &qsreaders instead | |
130 | * of ®istry if it runs during qemu_event_wait. That's okay; | |
131 | * the node then will not be added back to ®istry by QLIST_SWAP | |
132 | * below. The invariant is that the node is part of one list when | |
133 | * rcu_registry_lock is released. | |
7911747b | 134 | */ |
c097a60b | 135 | qemu_mutex_unlock(&rcu_registry_lock); |
7911747b | 136 | qemu_event_wait(&rcu_gp_event); |
c097a60b | 137 | qemu_mutex_lock(&rcu_registry_lock); |
7911747b PB |
138 | } |
139 | ||
140 | /* put back the reader list in the registry */ | |
141 | QLIST_SWAP(®istry, &qsreaders, node); | |
142 | } | |
143 | ||
144 | void synchronize_rcu(void) | |
145 | { | |
c097a60b WC |
146 | qemu_mutex_lock(&rcu_sync_lock); |
147 | qemu_mutex_lock(&rcu_registry_lock); | |
7911747b PB |
148 | |
149 | if (!QLIST_EMPTY(®istry)) { | |
150 | /* In either case, the atomic_mb_set below blocks stores that free | |
151 | * old RCU-protected pointers. | |
152 | */ | |
153 | if (sizeof(rcu_gp_ctr) < 8) { | |
154 | /* For architectures with 32-bit longs, a two-subphases algorithm | |
155 | * ensures we do not encounter overflow bugs. | |
156 | * | |
157 | * Switch parity: 0 -> 1, 1 -> 0. | |
158 | */ | |
159 | atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR); | |
160 | wait_for_readers(); | |
161 | atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR); | |
162 | } else { | |
163 | /* Increment current grace period. */ | |
164 | atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR); | |
165 | } | |
166 | ||
167 | wait_for_readers(); | |
168 | } | |
169 | ||
c097a60b WC |
170 | qemu_mutex_unlock(&rcu_registry_lock); |
171 | qemu_mutex_unlock(&rcu_sync_lock); | |
7911747b PB |
172 | } |
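
/* A writer-side sketch of the usual publish-then-reclaim pattern built on
 * synchronize_rcu(). "Config", "global_cfg" and "new_cfg" are hypothetical
 * names; atomic_rcu_set() is the publishing helper from "qemu/atomic.h":
 *
 *     Config *old = global_cfg;
 *     atomic_rcu_set(&global_cfg, new_cfg);   // publish the new version
 *     synchronize_rcu();                      // wait for pre-existing readers
 *     g_free(old);                            // no reader can reach old now
 */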
173 | ||
26387f86 PB |
174 | |
175 | #define RCU_CALL_MIN_SIZE 30 | |
176 | ||
177 | /* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h | |
178 | * from liburcu. Note that head is only used by the consumer. | |
179 | */ | |
180 | static struct rcu_head dummy; | |
181 | static struct rcu_head *head = &dummy, **tail = &dummy.next; | |
182 | static int rcu_call_count; | |
183 | static QemuEvent rcu_call_ready_event; | |
184 | ||
185 | static void enqueue(struct rcu_head *node) | |
186 | { | |
187 | struct rcu_head **old_tail; | |
188 | ||
189 | node->next = NULL; | |
190 | old_tail = atomic_xchg(&tail, &node->next); | |
191 | atomic_mb_set(old_tail, node); | |
192 | } | |
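
/* Between the atomic_xchg and the atomic_mb_set above, the chain is
 * transiently broken at the old tail:
 *
 *     head -> ... -> old last node -> NULL      (new node not yet linked)
 *     tail ---------------------------------> &node->next
 *
 * If the consumer has caught up to that node, it sees the NULL next pointer,
 * and try_dequeue() below returns NULL until the second store lands.
 */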
193 | ||
194 | static struct rcu_head *try_dequeue(void) | |
195 | { | |
196 | struct rcu_head *node, *next; | |
197 | ||
198 | retry: | |
199 | /* Test for an empty list, which we do not expect. Note that for | |
200 | * the consumer head and tail are always consistent. The head | |
201 | * is consistent because only the consumer reads/writes it. | |
202 | * The tail, because it is the first step in the enqueuing. | |
203 | * It is only the next pointers that might be inconsistent. | |
204 | */ | |
205 | if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) { | |
206 | abort(); | |
207 | } | |
208 | ||
209 | /* If the head node has NULL in its next pointer, the value is | |
210 | * wrong and we need to wait until its enqueuer finishes the update. | |
211 | */ | |
212 | node = head; | |
213 | next = atomic_mb_read(&head->next); | |
214 | if (!next) { | |
215 | return NULL; | |
216 | } | |
217 | ||
218 | /* Since we are the sole consumer, and we excluded the empty case | |
219 | * above, the queue will always have at least two nodes: the | |
220 | * dummy node, and the one being removed. So we do not need to update | |
221 | * the tail pointer. | |
222 | */ | |
223 | head = next; | |
224 | ||
225 | /* If we dequeued the dummy node, add it back at the end and retry. */ | |
226 | if (node == &dummy) { | |
227 | enqueue(node); | |
228 | goto retry; | |
229 | } | |
230 | ||
231 | return node; | |
232 | } | |
233 | ||
234 | static void *call_rcu_thread(void *opaque) | |
235 | { | |
236 | struct rcu_head *node; | |
237 | ||
ab28bd23 PB |
238 | rcu_register_thread(); |
239 | ||
26387f86 PB |
240 | for (;;) { |
241 | int tries = 0; | |
242 | int n = atomic_read(&rcu_call_count); | |
243 | ||
244 | /* Heuristically wait for a decent number of callbacks to pile up. | |
245 | * Fetch rcu_call_count now, we only must process elements that were | |
246 | * added before synchronize_rcu() starts. | |
247 | */ | |
a7d1d636 PB |
248 | while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) { |
249 | g_usleep(10000); | |
250 | if (n == 0) { | |
251 | qemu_event_reset(&rcu_call_ready_event); | |
26387f86 | 252 | n = atomic_read(&rcu_call_count); |
a7d1d636 PB |
253 | if (n == 0) { |
254 | qemu_event_wait(&rcu_call_ready_event); | |
255 | } | |
26387f86 | 256 | } |
a7d1d636 | 257 | n = atomic_read(&rcu_call_count); |
26387f86 PB |
258 | } |
259 | ||
260 | atomic_sub(&rcu_call_count, n); | |
261 | synchronize_rcu(); | |
a4649824 | 262 | qemu_mutex_lock_iothread(); |
26387f86 PB |
263 | while (n > 0) { |
264 | node = try_dequeue(); | |
265 | while (!node) { | |
a4649824 | 266 | qemu_mutex_unlock_iothread(); |
26387f86 PB |
267 | qemu_event_reset(&rcu_call_ready_event); |
268 | node = try_dequeue(); | |
269 | if (!node) { | |
270 | qemu_event_wait(&rcu_call_ready_event); | |
271 | node = try_dequeue(); | |
272 | } | |
a4649824 | 273 | qemu_mutex_lock_iothread(); |
26387f86 PB |
274 | } |
275 | ||
276 | n--; | |
277 | node->func(node); | |
278 | } | |
a4649824 | 279 | qemu_mutex_unlock_iothread(); |
26387f86 PB |
280 | } |
281 | abort(); | |
282 | } | |
283 | ||
284 | void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node)) | |
285 | { | |
286 | node->func = func; | |
287 | enqueue(node); | |
288 | atomic_inc(&rcu_call_count); | |
289 | qemu_event_set(&rcu_call_ready_event); | |
290 | } | |
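
/* An illustrative caller, with hypothetical type and function names:
 *
 *     struct Foo {
 *         struct rcu_head rcu;    // must stay allocated until the callback
 *         int payload;
 *     };
 *
 *     static void foo_free(struct rcu_head *rcu)
 *     {
 *         struct Foo *foo = container_of(rcu, struct Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     call_rcu1(&foo->rcu, foo_free);   // returns at once; foo_free runs
 *                                       // in call_rcu_thread after a full
 *                                       // grace period has elapsed
 */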
291 | ||
7911747b PB |
292 | void rcu_register_thread(void) |
293 | { | |
294 | assert(rcu_reader.ctr == 0); | |
c097a60b | 295 | qemu_mutex_lock(&rcu_registry_lock); |
7911747b | 296 | QLIST_INSERT_HEAD(®istry, &rcu_reader, node); |
c097a60b | 297 | qemu_mutex_unlock(&rcu_registry_lock); |
7911747b PB |
298 | } |
299 | ||
300 | void rcu_unregister_thread(void) | |
301 | { | |
c097a60b | 302 | qemu_mutex_lock(&rcu_registry_lock); |
7911747b | 303 | QLIST_REMOVE(&rcu_reader, node); |
c097a60b | 304 | qemu_mutex_unlock(&rcu_registry_lock); |
7911747b PB |
305 | } |
306 | ||
21b7cf9e | 307 | static void rcu_init_complete(void) |
7911747b | 308 | { |
26387f86 PB |
309 | QemuThread thread; |
310 | ||
c097a60b WC |
311 | qemu_mutex_init(&rcu_registry_lock); |
312 | qemu_mutex_init(&rcu_sync_lock); | |
7911747b | 313 | qemu_event_init(&rcu_gp_event, true); |
26387f86 PB |
314 | |
315 | qemu_event_init(&rcu_call_ready_event, false); | |
21b7cf9e PB |
316 | |
317 | /* The caller is assumed to have iothread lock, so the call_rcu thread | |
318 | * must have been quiescent even after forking, just recreate it. | |
319 | */ | |
26387f86 PB |
320 | qemu_thread_create(&thread, "call_rcu", call_rcu_thread, |
321 | NULL, QEMU_THREAD_DETACHED); | |
322 | ||
7911747b PB |
323 | rcu_register_thread(); |
324 | } | |

#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}

static void rcu_init_unlock(void)
{
    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}
#endif

void rcu_after_fork(void)
{
    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}

static void __attribute__((__constructor__)) rcu_init(void)
{
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_unlock);
#endif
    rcu_init_complete();
}