// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <[email protected]>
 *	   Lai Jiangshan <[email protected]>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

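/*
 * Both module parameters above are read-only at runtime (mode 0444), so
 * in practice they are set on the kernel command line.  Purely as an
 * illustrative sketch (assuming this file is built in, so the parameters
 * take the "srcutree." prefix), auto-expediting can be disabled at boot
 * with:
 *
 *	srcutree.exp_holdoff=0
 */
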
/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)					\
do {								\
	spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)				\
do {								\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));		\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irq_rcu_node(p)				\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)			\
do {								\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)		\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)	\

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_nodes(ssp, is_static);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

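/*
 * Illustrative initialization sketch (the "foo" names are assumptions,
 * not a real caller).  A statically allocated domain gets its combining
 * tree wired up lazily via check_init_srcu_struct() below, whereas a
 * dynamically initialized one must check init_srcu_struct()'s return
 * value because ->sda is allocated at that point:
 *
 *	DEFINE_STATIC_SRCU(foo_static_srcu);	// Usable immediately.
 *
 *	static struct srcu_struct foo_srcu;
 *
 *	static int __init foo_init(void)
 *	{
 *		return init_srcu_struct(&foo_srcu);	// May return -ENOMEM.
 *	}
 */
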
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

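/*
 * Illustrative sketch (hypothetical shared variable "x", not taken from
 * any real caller) of the store-buffering pattern that smp_mb()s A and B
 * guard against:
 *
 *	Reader (CPU 0)				Updater (CPU 1)
 *	--------------				---------------
 *	idx = __srcu_read_lock(ssp);		WRITE_ONCE(x, 1);
 *	  // count increment, then B		synchronize_srcu(ssp);
 *	r1 = READ_ONCE(x);			  // A, then reads the counts
 *
 * With full barriers on both sides, the outcome "the scan sees no reader
 * AND r1 == 0" is excluded: either the scan observes the reader's
 * increment (and the grace period waits), or the reader observes the
 * pre-grace-period store to x.
 */
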
/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

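/*
 * Illustrative teardown sketch for a hypothetical dynamically allocated
 * user (the "foo" names are assumptions, not a real caller): all readers
 * must be done, all call_srcu() callers must have stopped, and pending
 * callbacks must be flushed before the domain is cleaned up, consistent
 * with the WARN_ON()s above.
 *
 *	static void foo_exit(void)
 *	{
 *		// No new foo readers or call_srcu() calls past this point.
 *		srcu_barrier(&foo_srcu);	// Wait for in-flight callbacks.
 *		cleanup_srcu_struct(&foo_srcu);	// Now safe to deconstruct.
 *	}
 */
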
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

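/*
 * Canonical reader-side usage sketch.  Callers normally go through
 * srcu_read_lock()/srcu_read_unlock() rather than the __-prefixed
 * helpers above; the "foo" structure and pointer are assumptions used
 * only for illustration:
 *
 *	struct foo {
 *		int a;
 *	};
 *	static struct foo __rcu *foo_ptr;
 *	DEFINE_STATIC_SRCU(foo_srcu);
 *
 *	static int foo_read_a(void)
 *	{
 *		struct foo *p;
 *		int idx, ret = -1;
 *
 *		idx = srcu_read_lock(&foo_srcu);
 *		p = srcu_dereference(foo_ptr, &foo_srcu);
 *		if (p)
 *			ret = p->a;	// May sleep here, unlike plain RCU.
 *		srcu_read_unlock(&foo_srcu, idx);
 *		return ret;
 *	}
 */
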
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
						      ? SRCU_INTERVAL
						      : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	idx = srcu_read_lock(ssp);
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
	return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

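/*
 * Illustrative call_srcu() usage sketch, assuming a hypothetical "foo"
 * structure that embeds an rcu_head and is freed after a grace period
 * (none of these names come from a real caller):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int a;
 *	};
 *	DEFINE_STATIC_SRCU(foo_srcu);
 *
 *	static void foo_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	static void foo_retire(struct foo *p)
 *	{
 *		// Readers that already obtained p via srcu_dereference()
 *		// may still be using it, so defer the kfree().
 *		call_srcu(&foo_srcu, &p->rh, foo_free_cb);
 *	}
 */
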
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

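/*
 * Sketch of a plausible caller, purely for illustration: expediting is
 * typically reserved for rare, latency-sensitive reconfiguration paths,
 * because it spends more CPU than a normal grace period.
 *
 *	static void foo_reconfigure(void)	// hypothetical slow path
 *	{
 *		... unpublish the old configuration ...
 *		synchronize_srcu_expedited(&foo_srcu);
 *		... free the old configuration ...
 *	}
 */
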
/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both indexes to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the
 * count of index ((->srcu_idx & 1) ^ 1) to drain to zero, then flips
 * ->srcu_idx and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

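/*
 * Classic updater-side usage sketch, pairing with the reader sketch near
 * __srcu_read_lock() above (the "foo" names, including foo_mutex, remain
 * hypothetical):
 *
 *	static void foo_replace(struct foo *newp)
 *	{
 *		// Caller holds foo_mutex, which serializes updates.
 *		struct foo *oldp = rcu_dereference_protected(foo_ptr,
 *					lockdep_is_held(&foo_mutex));
 *
 *		rcu_assign_pointer(foo_ptr, newp);	// Publish new version.
 *		synchronize_srcu(&foo_srcu);		// Wait out old readers.
 *		kfree(oldp);				// Now safe to free.
 *	}
 */
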
/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	// Any prior manipulation of SRCU-protected data must happen
	// before the load from ->srcu_gp_seq.
	smp_mb();
	return rcu_seq_snap(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns @true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 *
 * Because cookies are finite in size, wrapping/overflow is possible.
 * This is more pronounced on 32-bit systems where cookies are 32 bits,
 * where in theory wrapping could happen in about 14 hours assuming
 * 25-microsecond expedited SRCU grace periods.  However, a more likely
 * overflow lower bound is on the order of 24 days in the case of
 * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
 * system requires geologic timespans, as in more than seven million years
 * even for expedited SRCU grace periods.
 *
 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
 * few minutes.  If this proves to be a problem, this counter will be
 * expanded to the same size as for Tree SRCU.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
		return false;
	// Ensure that the end of the SRCU grace period happens before
	// any subsequent code that the caller might execute.
	smp_mb(); // ^^^
	return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);

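/*
 * Illustrative polling sketch using the cookie API above (hypothetical
 * "foo" names again, and the caller is assumed to serialize these two
 * functions): start a grace period without blocking, stash the cookie,
 * and free the old object only once the cookie reports completion.
 *
 *	static unsigned long foo_cookie;
 *	static struct foo *foo_old;
 *
 *	static void foo_retire_nonblocking(struct foo *oldp)
 *	{
 *		foo_old = oldp;
 *		foo_cookie = start_poll_synchronize_srcu(&foo_srcu);
 *	}
 *
 *	static void foo_reclaim_if_ready(void)	// called periodically
 *	{
 *		if (foo_old &&
 *		    poll_state_synchronize_srcu(&foo_srcu, foo_cookie)) {
 *			kfree(foo_old);
 *			foo_old = NULL;
 *		}
 *	}
 */
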
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

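/*
 * Usage note, as a sketch: srcu_barrier() only guarantees that callbacks
 * already queued by call_srcu() have been invoked; if nothing is queued
 * it can return immediately, so it is not a substitute for
 * synchronize_srcu().  A hypothetical unload path that posted callbacks
 * therefore typically needs both steps, in this order:
 *
 *	... prevent new call_srcu() invocations ...
 *	srcu_barrier(&foo_srcu);	// Drain callbacks already posted.
 *	cleanup_srcu_struct(&foo_srcu);	// Then tear down the domain.
 */
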
1157 | /** | |
1158 | * srcu_batches_completed - return batches completed. | |
aacb5d91 | 1159 | * @ssp: srcu_struct on which to report batch completion. |
dad81a20 PM |
1160 | * |
1161 | * Report the number of batches, correlated with, but not necessarily | |
1162 | * precisely the same as, the number of grace periods that have elapsed. | |
1163 | */ | |
aacb5d91 | 1164 | unsigned long srcu_batches_completed(struct srcu_struct *ssp) |
dad81a20 | 1165 | { |
39f91504 | 1166 | return READ_ONCE(ssp->srcu_idx); |
dad81a20 PM |
1167 | } |
1168 | EXPORT_SYMBOL_GPL(srcu_batches_completed); | |
1169 | ||
1170 | /* | |
da915ad5 PM |
1171 | * Core SRCU state machine. Push state bits of ->srcu_gp_seq |
1172 | * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has | |
1173 | * completed in that state. | |
dad81a20 | 1174 | */ |
aacb5d91 | 1175 | static void srcu_advance_state(struct srcu_struct *ssp) |
dad81a20 PM |
1176 | { |
1177 | int idx; | |
1178 | ||
aacb5d91 | 1179 | mutex_lock(&ssp->srcu_gp_mutex); |
da915ad5 | 1180 | |
dad81a20 PM |
1181 | /* |
1182 | * Because readers might be delayed for an extended period after | |
da915ad5 | 1183 | * fetching ->srcu_idx for their index, at any point in time there |
dad81a20 PM |
1184 | * might well be readers using both idx=0 and idx=1. We therefore |
1185 | * need to wait for readers to clear from both index values before | |
1186 | * invoking a callback. | |
1187 | * | |
1188 | * The load-acquire ensures that we see the accesses performed | |
1189 | * by the prior grace period. | |
1190 | */ | |
aacb5d91 | 1191 | idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */ |
dad81a20 | 1192 | if (idx == SRCU_STATE_IDLE) { |
aacb5d91 PM |
1193 | spin_lock_irq_rcu_node(ssp); |
1194 | if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { | |
1195 | WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq)); | |
1196 | spin_unlock_irq_rcu_node(ssp); | |
1197 | mutex_unlock(&ssp->srcu_gp_mutex); | |
dad81a20 PM |
1198 | return; |
1199 | } | |
aacb5d91 | 1200 | idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); |
dad81a20 | 1201 | if (idx == SRCU_STATE_IDLE) |
aacb5d91 PM |
1202 | srcu_gp_start(ssp); |
1203 | spin_unlock_irq_rcu_node(ssp); | |
da915ad5 | 1204 | if (idx != SRCU_STATE_IDLE) { |
aacb5d91 | 1205 | mutex_unlock(&ssp->srcu_gp_mutex); |
dad81a20 | 1206 | return; /* Someone else started the grace period. */ |
da915ad5 | 1207 | } |
dad81a20 PM |
1208 | } |
1209 | ||
aacb5d91 PM |
1210 | if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { |
1211 | idx = 1 ^ (ssp->srcu_idx & 1); | |
1212 | if (!try_check_zero(ssp, idx, 1)) { | |
1213 | mutex_unlock(&ssp->srcu_gp_mutex); | |
dad81a20 | 1214 | return; /* readers present, retry later. */ |
da915ad5 | 1215 | } |
aacb5d91 | 1216 | srcu_flip(ssp); |
71042606 | 1217 | spin_lock_irq_rcu_node(ssp); |
aacb5d91 | 1218 | rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2); |
71042606 | 1219 | spin_unlock_irq_rcu_node(ssp); |
dad81a20 PM |
1220 | } |
1221 | ||
aacb5d91 | 1222 | if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { |
dad81a20 PM |
1223 | |
1224 | /* | |
1225 | * SRCU read-side critical sections are normally short, | |
1226 | * so check at least twice in quick succession after a flip. | |
1227 | */ | |
aacb5d91 PM |
1228 | idx = 1 ^ (ssp->srcu_idx & 1); |
1229 | if (!try_check_zero(ssp, idx, 2)) { | |
1230 | mutex_unlock(&ssp->srcu_gp_mutex); | |
da915ad5 PM |
1231 | return; /* readers present, retry later. */ |
1232 | } | |
aacb5d91 | 1233 | srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ |
dad81a20 PM |
1234 | } |
1235 | } | |
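To tie the two scans above back to the read side, a hedged reader sketch (reusing the hypothetical my_srcu and struct my_data from the earlier srcu_barrier() sketch; my_ptr and my_consume() are likewise hypothetical): srcu_read_lock() bumps the lock count for the then-current ->srcu_idx and srcu_read_unlock() bumps the matching unlock count, and it is precisely these per-index counts that try_check_zero() waits to see balance for idx 0 and idx 1 in turn.

	static struct my_data __rcu *my_ptr;
	static void my_consume(struct my_data *p);	/* hypothetical helper */

	static void my_reader(void)
	{
		struct my_data *p;
		int idx;

		idx = srcu_read_lock(&my_srcu);		/* counts into ->srcu_lock_count[idx] */
		p = srcu_dereference(my_ptr, &my_srcu);	/* pointer fetch under SRCU protection */
		if (p)
			my_consume(p);			/* may sleep, but must eventually finish */
		srcu_read_unlock(&my_srcu, idx);	/* counts into ->srcu_unlock_count[idx] */
	}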
1236 | ||
1237 | /* | |
1238 | * Invoke a limited number of SRCU callbacks that have passed through | |
1239 | * their grace period. If there are more to do, SRCU will reschedule | |
1240 | * the workqueue. Note that needed memory barriers have been executed | |
1241 | * in this task's context by srcu_readers_active_idx_check(). | |
1242 | */ | |
da915ad5 | 1243 | static void srcu_invoke_callbacks(struct work_struct *work) |
dad81a20 | 1244 | { |
ae5c2341 | 1245 | long len; |
da915ad5 | 1246 | bool more; |
dad81a20 PM |
1247 | struct rcu_cblist ready_cbs; |
1248 | struct rcu_head *rhp; | |
da915ad5 | 1249 | struct srcu_data *sdp; |
aacb5d91 | 1250 | struct srcu_struct *ssp; |
dad81a20 | 1251 | |
e81baf4c SAS |
1252 | sdp = container_of(work, struct srcu_data, work); |
1253 | ||
aacb5d91 | 1254 | ssp = sdp->ssp; |
dad81a20 | 1255 | rcu_cblist_init(&ready_cbs); |
d6331980 | 1256 | spin_lock_irq_rcu_node(sdp); |
da915ad5 | 1257 | rcu_segcblist_advance(&sdp->srcu_cblist, |
aacb5d91 | 1258 | rcu_seq_current(&ssp->srcu_gp_seq)); |
da915ad5 PM |
1259 | if (sdp->srcu_cblist_invoking || |
1260 | !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { | |
d6331980 | 1261 | spin_unlock_irq_rcu_node(sdp); |
da915ad5 PM |
1262 | return; /* Someone else on the job or nothing to do. */ |
1263 | } | |
1264 | ||
1265 | /* We are on the job! Extract and invoke ready callbacks. */ | |
1266 | sdp->srcu_cblist_invoking = true; | |
1267 | rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); | |
ae5c2341 | 1268 | len = ready_cbs.len; |
d6331980 | 1269 | spin_unlock_irq_rcu_node(sdp); |
dad81a20 PM |
1270 | rhp = rcu_cblist_dequeue(&ready_cbs); |
1271 | for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { | |
a602538e | 1272 | debug_rcu_head_unqueue(rhp); |
dad81a20 PM |
1273 | local_bh_disable(); |
1274 | rhp->func(rhp); | |
1275 | local_bh_enable(); | |
1276 | } | |
ae5c2341 | 1277 | WARN_ON_ONCE(ready_cbs.len); |
da915ad5 PM |
1278 | |
1279 | /* | |
1280 | * Update counts, accelerate new callbacks, and if needed, | |
1281 | * schedule another round of callback invocation. | |
1282 | */ | |
d6331980 | 1283 | spin_lock_irq_rcu_node(sdp); |
ae5c2341 | 1284 | rcu_segcblist_add_len(&sdp->srcu_cblist, -len); |
da915ad5 | 1285 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
aacb5d91 | 1286 | rcu_seq_snap(&ssp->srcu_gp_seq)); |
da915ad5 PM |
1287 | sdp->srcu_cblist_invoking = false; |
1288 | more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); | |
d6331980 | 1289 | spin_unlock_irq_rcu_node(sdp); |
da915ad5 PM |
1290 | if (more) |
1291 | srcu_schedule_cbs_sdp(sdp, 0); | |
dad81a20 PM |
1292 | } |
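A hedged note on callback context, using the hypothetical callback shape from the srcu_barrier() sketch: the loop above runs in workqueue (process) context but invokes each callback under local_bh_disable(), so an SRCU callback must not sleep.

	/* Shape of a well-behaved SRCU callback for the invocation context above. */
	static void my_cb(struct rcu_head *rhp)
	{
		struct my_data *p = container_of(rhp, struct my_data, rh);

		kfree(p);	/* fine: kfree() never sleeps */
		/* mutex_lock(), msleep(), or a GFP_KERNEL allocation here would be a bug. */
	}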
1293 | ||
1294 | /* | |
1295 | * Finished one round of SRCU grace period. Start another if there are | |
1296 | * more SRCU callbacks queued, otherwise put SRCU into not-running state. | |
1297 | */ | |
aacb5d91 | 1298 | static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) |
dad81a20 | 1299 | { |
da915ad5 | 1300 | bool pushgp = true; |
dad81a20 | 1301 | |
aacb5d91 PM |
1302 | spin_lock_irq_rcu_node(ssp); |
1303 | if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { | |
1304 | if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) { | |
da915ad5 PM |
1305 | /* All requests fulfilled, time to go idle. */ |
1306 | pushgp = false; | |
1307 | } | |
aacb5d91 | 1308 | } else if (!rcu_seq_state(ssp->srcu_gp_seq)) { |
da915ad5 | 1309 | /* Outstanding request and no GP. Start one. */ |
aacb5d91 | 1310 | srcu_gp_start(ssp); |
dad81a20 | 1311 | } |
aacb5d91 | 1312 | spin_unlock_irq_rcu_node(ssp); |
dad81a20 | 1313 | |
da915ad5 | 1314 | if (pushgp) |
aacb5d91 | 1315 | queue_delayed_work(rcu_gp_wq, &ssp->work, delay); |
dad81a20 PM |
1316 | } |
1317 | ||
1318 | /* | |
1319 | * This is the work-queue function that handles SRCU grace periods. | |
1320 | */ | |
0d8a1e83 | 1321 | static void process_srcu(struct work_struct *work) |
dad81a20 | 1322 | { |
aacb5d91 | 1323 | struct srcu_struct *ssp; |
dad81a20 | 1324 | |
aacb5d91 | 1325 | ssp = container_of(work, struct srcu_struct, work.work); |
dad81a20 | 1326 | |
aacb5d91 PM |
1327 | srcu_advance_state(ssp); |
1328 | srcu_reschedule(ssp, srcu_get_delay(ssp)); | |
dad81a20 | 1329 | } |
7f6733c3 PM |
1330 | |
1331 | void srcutorture_get_gp_data(enum rcutorture_type test_type, | |
aacb5d91 | 1332 | struct srcu_struct *ssp, int *flags, |
aebc8264 | 1333 | unsigned long *gp_seq) |
7f6733c3 PM |
1334 | { |
1335 | if (test_type != SRCU_FLAVOR) | |
1336 | return; | |
1337 | *flags = 0; | |
aacb5d91 | 1338 | *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq); |
7f6733c3 PM |
1339 | } |
1340 | EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); | |
1f4f6da1 | 1341 | |
aacb5d91 | 1342 | void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) |
115a1a52 PM |
1343 | { |
1344 | int cpu; | |
1345 | int idx; | |
ac3748c6 | 1346 | unsigned long s0 = 0, s1 = 0; |
115a1a52 | 1347 | |
aacb5d91 | 1348 | idx = ssp->srcu_idx & 0x1; |
52e17ba1 | 1349 | pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", |
aacb5d91 | 1350 | tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx); |
115a1a52 PM |
1351 | for_each_possible_cpu(cpu) { |
1352 | unsigned long l0, l1; | |
1353 | unsigned long u0, u1; | |
1354 | long c0, c1; | |
5ab07a8d | 1355 | struct srcu_data *sdp; |
115a1a52 | 1356 | |
aacb5d91 | 1357 | sdp = per_cpu_ptr(ssp->sda, cpu); |
b68c6146 PM |
1358 | u0 = data_race(sdp->srcu_unlock_count[!idx]); |
1359 | u1 = data_race(sdp->srcu_unlock_count[idx]); | |
115a1a52 PM |
1360 | |
1361 | /* | |
1362 | * Make sure that a lock is always counted if the corresponding | |
1363 | * unlock is counted. | |
1364 | */ | |
1365 | smp_rmb(); | |
1366 | ||
b68c6146 PM |
1367 | l0 = data_race(sdp->srcu_lock_count[!idx]); |
1368 | l1 = data_race(sdp->srcu_lock_count[idx]); | |
115a1a52 PM |
1369 | |
1370 | c0 = l0 - u0; | |
1371 | c1 = l1 - u1; | |
7e210a65 PM |
1372 | pr_cont(" %d(%ld,%ld %c)", |
1373 | cpu, c0, c1, | |
1374 | "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]); | |
ac3748c6 PM |
1375 | s0 += c0; |
1376 | s1 += c1; | |
115a1a52 | 1377 | } |
ac3748c6 | 1378 | pr_cont(" T(%ld,%ld)\n", s0, s1); |
115a1a52 PM |
1379 | } |
1380 | EXPORT_SYMBOL_GPL(srcu_torture_stats_print); | |
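A hedged note on reading the output: for each CPU, c0 and c1 are lock counts minus unlock counts for the two index values, so they approximate readers still inside a read-side critical section on that index; because a reader can migrate between srcu_read_lock() and srcu_read_unlock(), the per-CPU values can be skewed, and the final T(s0,s1) totals are the more trustworthy numbers. A hypothetical invocation, with arbitrarily chosen prefix strings:

	static void my_dump_srcu(void)
	{
		/* The two string arguments are only prefixes for the pr_alert() header line. */
		srcu_torture_stats_print(&my_srcu, "my_srcu", "-stats:");
	}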
1381 | ||
1f4f6da1 PM |
1382 | static int __init srcu_bootup_announce(void) |
1383 | { | |
1384 | pr_info("Hierarchical SRCU implementation.\n"); | |
0c8e0e3c PM |
1385 | if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) |
1386 | pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); | |
1f4f6da1 PM |
1387 | return 0; |
1388 | } | |
1389 | early_initcall(srcu_bootup_announce); | |
e0fcba9a PM |
1390 | |
1391 | void __init srcu_init(void) | |
1392 | { | |
aacb5d91 | 1393 | struct srcu_struct *ssp; |
e0fcba9a PM |
1394 | |
1395 | srcu_init_done = true; | |
1396 | while (!list_empty(&srcu_boot_list)) { | |
aacb5d91 | 1397 | ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, |
4e6ea4ef | 1398 | work.work.entry); |
aacb5d91 PM |
1399 | check_init_srcu_struct(ssp); |
1400 | list_del_init(&ssp->work.work.entry); | |
1401 | queue_work(rcu_gp_wq, &ssp->work.work); | |
e0fcba9a PM |
1402 | } |
1403 | } | |
fe15b50c PM |
1404 | |
1405 | #ifdef CONFIG_MODULES | |
1406 | ||
1407 | /* Initialize any global-scope srcu_struct structures used by this module. */ | |
1408 | static int srcu_module_coming(struct module *mod) | |
1409 | { | |
1410 | int i; | |
1411 | struct srcu_struct **sspp = mod->srcu_struct_ptrs; | |
1412 | int ret; | |
1413 | ||
1414 | for (i = 0; i < mod->num_srcu_structs; i++) { | |
1415 | ret = init_srcu_struct(*(sspp++)); | |
1416 | if (WARN_ON_ONCE(ret)) | |
1417 | return ret; | |
1418 | } | |
1419 | return 0; | |
1420 | } | |
1421 | ||
1422 | /* Clean up any global-scope srcu_struct structures used by this module. */ | |
1423 | static void srcu_module_going(struct module *mod) | |
1424 | { | |
1425 | int i; | |
1426 | struct srcu_struct **sspp = mod->srcu_struct_ptrs; | |
1427 | ||
1428 | for (i = 0; i < mod->num_srcu_structs; i++) | |
1429 | cleanup_srcu_struct(*(sspp++)); | |
1430 | } | |
1431 | ||
1432 | /* Handle one module, either coming or going. */ | |
1433 | static int srcu_module_notify(struct notifier_block *self, | |
1434 | unsigned long val, void *data) | |
1435 | { | |
1436 | struct module *mod = data; | |
1437 | int ret = 0; | |
1438 | ||
1439 | switch (val) { | |
1440 | case MODULE_STATE_COMING: | |
1441 | ret = srcu_module_coming(mod); | |
1442 | break; | |
1443 | case MODULE_STATE_GOING: | |
1444 | srcu_module_going(mod); | |
1445 | break; | |
1446 | default: | |
1447 | break; | |
1448 | } | |
1449 | return ret; | |
1450 | } | |
1451 | ||
1452 | static struct notifier_block srcu_module_nb = { | |
1453 | .notifier_call = srcu_module_notify, | |
1454 | .priority = 0, | |
1455 | }; | |
1456 | ||
1457 | static __init int init_srcu_module_notifier(void) | |
1458 | { | |
1459 | int ret; | |
1460 | ||
1461 | ret = register_module_notifier(&srcu_module_nb); | |
1462 | if (ret) | |
1463 | pr_warn("Failed to register srcu module notifier\n"); | |
1464 | return ret; | |
1465 | } | |
1466 | late_initcall(init_srcu_module_notifier); | |
1467 | ||
1468 | #endif /* #ifdef CONFIG_MODULES */ |
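To show what the notifier buys module authors, a hedged sketch of a module-scope SRCU domain (all names hypothetical): srcu_module_coming() runs init_srcu_struct() on my_modsrcu at MODULE_STATE_COMING, before the module's init function executes, and srcu_module_going() runs cleanup_srcu_struct() at MODULE_STATE_GOING, so the module itself never performs either step.

	#include <linux/module.h>
	#include <linux/srcu.h>

	DEFINE_STATIC_SRCU(my_modsrcu);	/* reaches mod->srcu_struct_ptrs via the ___srcu_struct_ptrs section */

	static int __init my_mod_init(void)
	{
		int idx;

		idx = srcu_read_lock(&my_modsrcu);	/* already initialized by srcu_module_coming() */
		srcu_read_unlock(&my_modsrcu, idx);
		return 0;
	}
	module_init(my_mod_init);

	MODULE_LICENSE("GPL");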