// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/*
 * Control conversion to SRCU_SIZE_BIG:
 * 0: Don't convert at all.
 * 1: Convert at init_srcu_struct() time.
 * 2: Convert when rcutorture invokes srcu_torture_stats_print().
 * 3: Decide at boot time based on system shape (default).
 * 0x1x: Convert when excessive contention encountered.
 */
#define SRCU_SIZING_NONE	0
#define SRCU_SIZING_INIT	1
#define SRCU_SIZING_TORTURE	2
#define SRCU_SIZING_AUTO	3
#define SRCU_SIZING_CONTEND	0x10
#define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
#define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
#define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
#define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
#define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
static int convert_to_big = SRCU_SIZING_AUTO;
module_param(convert_to_big, int, 0444);

/* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
static int big_cpu_lim __read_mostly = 128;
module_param(big_cpu_lim, int, 0444);

/* Contention events per jiffy to initiate transition to big. */
static int small_contention_lim __read_mostly = 100;
module_param(small_contention_lim, int, 0444);

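/*
 * For illustration only (not part of the original source): built-in module
 * parameters in this file take the srcutree. prefix on the kernel command
 * line, so a boot that enables contention-based conversion with a lower
 * threshold might hypothetically look like:
 *
 *	srcutree.convert_to_big=0x10 srcutree.small_contention_lim=50
 *
 * All of these parameters are mode 0444, hence boot-time only.
 */
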
/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)					\
do {								\
	spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)				\
do {								\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));		\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irq_rcu_node(p)				\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)			\
do {								\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_trylock_irqsave_rcu_node(p, flags)			\
({								\
	bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
								\
	if (___locked)						\
		smp_mb__after_unlock_lock();			\
	___locked;						\
})

#define spin_unlock_irqrestore_rcu_node(p, flags)		\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = NULL;
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
	}
}

/* Invalid seq state, used during snp node initialization */
#define SRCU_SNP_INIT_SEQ		0x2

/*
 * Check whether the sequence number corresponding to an snp node
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{
	return s == SRCU_SNP_INIT_SEQ;
}

/*
 * Allocate and initialize the SRCU combining tree.  Returns @true if
 * allocation succeeded and @false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();
	ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), gfp_flags);
	if (!ssp->node)
		return false;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
	return true;
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
	ssp->node = NULL;
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	ssp->sda_is_static = is_static;
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_data(ssp);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
			if (!ssp->sda_is_static) {
				free_percpu(ssp->sda);
				ssp->sda = NULL;
				return -ENOMEM;
			}
		} else {
			WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_BIG);
		}
	}
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

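/*
 * For illustration only (not part of the original source): a dynamically
 * allocated structure embedding an srcu_struct would typically pair the
 * above with cleanup_srcu_struct() below, along these lines (my_obj is a
 * hypothetical example type):
 *
 *	struct my_obj {
 *		struct srcu_struct srcu;
 *	};
 *
 *	ret = init_srcu_struct(&obj->srcu);
 *	if (ret)
 *		return ret;
 *	...
 *	cleanup_srcu_struct(&obj->srcu);
 *
 * Statically allocated domains can instead use DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU(), which need no runtime initialization.
 */
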
/*
 * Initiate a transition to SRCU_SIZE_BIG with lock held.
 */
static void __srcu_transition_to_big(struct srcu_struct *ssp)
{
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_ALLOC);
}

/*
 * Initiate an idempotent transition to SRCU_SIZE_BIG.
 */
static void srcu_transition_to_big(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* Double-checked locking on ->srcu_size_state. */
	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL)
		return;
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	__srcu_transition_to_big(ssp);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Check to see if the just-encountered contention event justifies
 * a transition to SRCU_SIZE_BIG.
 */
static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
{
	unsigned long j;

	if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_size_state)
		return;
	j = jiffies;
	if (ssp->srcu_size_jiffies != j) {
		ssp->srcu_size_jiffies = j;
		ssp->srcu_n_lock_retries = 0;
	}
	if (++ssp->srcu_n_lock_retries <= small_contention_lim)
		return;
	__srcu_transition_to_big(ssp);
}

/*
 * Acquire the specified srcu_data structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
{
	struct srcu_struct *ssp = sdp->ssp;

	if (spin_trylock_irqsave_rcu_node(sdp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp, *flags);
	spin_lock_irqsave_check_contention(ssp);
	spin_unlock_irqrestore_rcu_node(ssp, *flags);
	spin_lock_irqsave_rcu_node(sdp, *flags);
}

/*
 * Acquire the specified srcu_struct structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
{
	if (spin_trylock_irqsave_rcu_node(ssp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp, *flags);
	spin_lock_irqsave_check_contention(ssp);
}

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long mask = 0;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_unlock_count[idx]);
		if (IS_ENABLED(CONFIG_PROVE_RCU))
			mask = mask | READ_ONCE(cpuc->srcu_nmi_safety);
	}
	WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && (mask & (mask >> 1)),
		  "Mixed NMI-safe readers for srcu_struct at %ps.\n", ssp);
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some point in this function.
	 * But there might be more readers, as a task might have read
	 * the current ->srcu_idx but not yet have incremented its CPU's
	 * ->srcu_lock_count[idx] counter.  In fact, it is possible
	 * that most of the tasks have been preempted between fetching
	 * ->srcu_idx and incrementing ->srcu_lock_count[idx].  And there
	 * could be almost (ULONG_MAX / sizeof(struct task_struct)) tasks
	 * in a system whose address space was fully populated with memory.
	 * Call this quantity Nt.
	 *
	 * So suppose that the updater is preempted at this point in the
	 * code for a long time.  That now-preempted updater has already
	 * flipped ->srcu_idx (possibly during the preceding grace period),
	 * done an smp_mb() (again, possibly during the preceding grace
	 * period), and summed up the ->srcu_unlock_count[idx] counters.
	 * How many times can a given one of the aforementioned Nt tasks
	 * increment the old ->srcu_idx value's ->srcu_lock_count[idx]
	 * counter, in the absence of nesting?
	 *
	 * It can clearly do so once, given that it has already fetched
	 * the old value of ->srcu_idx and is just about to use that value
	 * to index its increment of ->srcu_lock_count[idx].  But as soon as
	 * it leaves that SRCU read-side critical section, it will increment
	 * ->srcu_unlock_count[idx], which must follow the updater's above
	 * read from that same value.  Thus, as soon as the reading task does
	 * an smp_mb() and a later fetch from ->srcu_idx, that task will be
	 * guaranteed to get the new index.  Except that the increment of
	 * ->srcu_unlock_count[idx] in __srcu_read_unlock() is after the
	 * smp_mb(), and the fetch from ->srcu_idx in __srcu_read_lock()
	 * is before the smp_mb().  Thus, that task might not see the new
	 * value of ->srcu_idx until the -second- __srcu_read_lock(),
	 * which in turn means that this task might well increment
	 * ->srcu_lock_count[idx] for the old value of ->srcu_idx twice,
	 * not just once.
	 *
	 * However, it is important to note that a given smp_mb() takes
	 * effect not just for the task executing it, but also for any
	 * later task running on that same CPU.
	 *
	 * That is, there can be almost Nt + Nc further increments of
	 * ->srcu_lock_count[idx] for the old index, where Nc is the number
	 * of CPUs.  But this is OK because the size of the task_struct
	 * structure limits the value of Nt and current systems limit Nc
	 * to a few thousand.
	 *
	 * OK, but what about nesting?  This does impose a limit on
	 * nesting of half of the size of the task_struct structure
	 * (measured in bytes), which should be sufficient.  A late 2022
	 * TREE01 rcutorture run reported this size to be no less than
	 * 9408 bytes, allowing up to 4704 levels of nesting, which is
	 * comfortably beyond excessive.  Especially on 64-bit systems,
	 * which are unlikely to be configured with an address space fully
	 * populated with memory, at least not anytime soon.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

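/*
 * Illustration (not from the original source) of the store-buffering
 * pattern mentioned above, using a hypothetical variable x and a
 * hypothetical srcu_struct my_srcu:
 *
 *	Reader				Updater
 *	------				-------
 *	idx = srcu_read_lock(&my_srcu);	WRITE_ONCE(x, 1);
 *	// smp_mb() B			synchronize_srcu(&my_srcu);
 *	r1 = READ_ONCE(x);		// smp_mb() A during counter scan
 *
 * The pairing of A and B is intended to guarantee that if the updater's
 * scan fails to observe the reader's ->srcu_lock_count[] increment, the
 * reader must observe x == 1, so the grace period cannot ignore a reader
 * that might have missed the update.
 */
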
/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += atomic_long_read(&cpuc->srcu_lock_count[0]);
		sum += atomic_long_read(&cpuc->srcu_lock_count[1]);
		sum -= atomic_long_read(&cpuc->srcu_unlock_count[0]);
		sum -= atomic_long_read(&cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below, boot time configurable) to allow SRCU readers to exit
 * their read-side critical sections.  If there are still some readers
 * after one jiffy, we repeatedly block for one-jiffy time periods.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */
#define SRCU_DEFAULT_RETRY_CHECK_DELAY		5

static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
module_param(srcu_retry_check_delay, ulong, 0444);

#define SRCU_INTERVAL		1		// Base delay if no expedited GPs pending.
#define SRCU_MAX_INTERVAL	10		// Maximum incremental delay from slow readers.

#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO	3UL	// Lowmark on default per-GP-phase
							// no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI	1000UL	// Highmark on default per-GP-phase
							// no-delay instances.

#define SRCU_UL_CLAMP_LO(val, low)	((val) > (low) ? (val) : (low))
#define SRCU_UL_CLAMP_HI(val, high)	((val) < (high) ? (val) : (high))
#define SRCU_UL_CLAMP(val, low, high)	SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
// per-GP-phase no-delay instances adjusted to allow non-sleeping poll up to
// one jiffy's time duration.  Mult by 2 is done to factor in the srcu_get_delay()
// called from process_srcu().
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED	\
	(2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)

// Maximum per-GP-phase consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE	\
	SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)

static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
module_param(srcu_max_nodelay_phase, ulong, 0444);

// Maximum consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY	(SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ?	\
					 SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)

static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
module_param(srcu_max_nodelay, ulong, 0444);

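// Worked example (illustrative only): with HZ=1000 and the default
// 5-microsecond retry delay, ADJUSTED = 2 * 1000000 / 1000 / 5 = 400,
// which already lies inside the [3, 1000] clamp window, so the default
// per-phase limit is 400 and srcu_max_nodelay then defaults to 400 as
// well.  With HZ=100, ADJUSTED = 4000 and the high clamp caps it at 1000.
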
/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	unsigned long gpstart;
	unsigned long j;
	unsigned long jbase = SRCU_INTERVAL;

	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		jbase = 0;
	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
		j = jiffies - 1;
		gpstart = READ_ONCE(ssp->srcu_gp_start);
		if (time_after(j, gpstart))
			jbase += j - gpstart;
		if (!jbase) {
			WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
			if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
				jbase = 1;
		}
	}
	return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
			rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	if (!ssp->sda_is_static) {
		free_percpu(ssp->sda);
		ssp->sda = NULL;
	}
	kfree(ssp->node);
	ssp->node = NULL;
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

#ifdef CONFIG_PROVE_RCU
/*
 * Check for consistent NMI safety.
 */
void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
{
	int nmi_safe_mask = 1 << nmi_safe;
	int old_nmi_safe_mask;
	struct srcu_data *sdp;

	/* NMI-unsafe use in NMI is a bad sign */
	WARN_ON_ONCE(!nmi_safe && in_nmi());
	sdp = raw_cpu_ptr(ssp->sda);
	old_nmi_safe_mask = READ_ONCE(sdp->srcu_nmi_safety);
	if (!old_nmi_safe_mask) {
		WRITE_ONCE(sdp->srcu_nmi_safety, nmi_safe_mask);
		return;
	}
	WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
}
EXPORT_SYMBOL_GPL(srcu_check_nmi_safety);
#endif /* CONFIG_PROVE_RCU */

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

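/*
 * For illustration only: callers normally reach the above through the
 * srcu_read_lock()/srcu_read_unlock() wrappers, as in this hypothetical
 * reader (my_srcu and my_read_data() are made-up names):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	my_read_data();		// May block, unlike with plain RCU.
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * The returned index must be passed back to the matching unlock, since
 * the grace-period machinery flips ->srcu_idx between counter ranks.
 */
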
#ifdef CONFIG_NEED_SRCU_NMI_SAFE

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct, but in an NMI-safe manner using RMW atomics.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
{
	int idx;
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	atomic_long_inc(&sdp->srcu_lock_count[idx]);
	smp_mb__after_atomic(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
	struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);

	smp_mb__before_atomic(); /* C */  /* Avoid leaking the critical section. */
	atomic_long_inc(&sdp->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);

#endif // CONFIG_NEED_SRCU_NMI_SAFE

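/*
 * For illustration only: NMI-context readers use the _nmisafe variants,
 * which must not be mixed with plain srcu_read_lock() on the same
 * srcu_struct (srcu_check_nmi_safety() above enforces this under
 * CONFIG_PROVE_RCU).  A hypothetical NMI handler might do:
 *
 *	idx = srcu_read_lock_nmisafe(&my_srcu);
 *	my_peek_data();		// Hypothetical NMI-safe read.
 *	srcu_read_unlock_nmisafe(&my_srcu, idx);
 */
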
/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp;
	int state;

	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
	else
		sdp = this_cpu_ptr(ssp->sda);
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	WRITE_ONCE(ssp->srcu_gp_start, jiffies);
	WRITE_ONCE(ssp->srcu_n_exp_nodelay, 0);
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay = 1;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	unsigned long sgsne;
	struct srcu_node *snp;
	int ss_state;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		cbdelay = 0;

	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	ss_state = smp_load_acquire(&ssp->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
				      cbdelay);
	} else {
		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
		srcu_for_each_node_breadth_first(ssp, snp) {
			spin_lock_irq_rcu_node(snp);
			cbs = false;
			last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
			if (last_lvl)
				cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
			snp->srcu_have_cbs[idx] = gpseq;
			rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
			if (ss_state < SRCU_SIZE_BIG)
				mask = ~0;
			else
				mask = snp->srcu_data_have_cbs[idx];
			snp->srcu_data_have_cbs[idx] = 0;
			spin_unlock_irq_rcu_node(snp);
			if (cbs)
				srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
		}
	}

	/* Occasionally prevent srcu_data counter wrap. */
	if (!(gpseq & counter_wrap_check))
		for_each_possible_cpu(cpu) {
			sdp = per_cpu_ptr(ssp->sda, cpu);
			spin_lock_irqsave_rcu_node(sdp, flags);
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
				sdp->srcu_gp_seq_needed = gpseq;
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
				sdp->srcu_gp_seq_needed_exp = gpseq;
			spin_unlock_irqrestore_rcu_node(sdp, flags);
		}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}

	/* Transition to big if needed. */
	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
		if (ss_state == SRCU_SIZE_ALLOC)
			init_srcu_struct_nodes(ssp, GFP_KERNEL);
		else
			smp_store_release(&ssp->srcu_size_state, ss_state + 1);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;
	unsigned long sgsne;

	for (; snp != NULL; snp = snp->srcu_parent) {
		sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
		if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_gp_seq, s)) ||
		    (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		sgsne = snp->srcu_gp_seq_needed_exp;
		if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_ssp_contention(ssp, &flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 *
 * The SRCU read lock should be held around this function, and s is
 * a sequence snapshot taken after acquiring that lock.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	unsigned long sgsne;
	struct srcu_node *snp;
	struct srcu_node *snp_leaf;
	unsigned long snp_seq;

	/* Ensure that snp node tree is fully initialized before traversing it */
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		snp_leaf = NULL;
	else
		snp_leaf = sdp->mynode;

	if (snp_leaf)
		/* Each pass through the loop does one level of the srcu_node tree. */
		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
			if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_gp_seq, s)) && snp != snp_leaf)
				return; /* GP already done and CBs recorded. */
			spin_lock_irqsave_rcu_node(snp, flags);
			snp_seq = snp->srcu_have_cbs[idx];
			if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
				if (snp == snp_leaf && snp_seq == s)
					snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
				spin_unlock_irqrestore_rcu_node(snp, flags);
				if (snp == snp_leaf && snp_seq != s) {
					srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
					return;
				}
				if (!do_norm)
					srcu_funnel_exp_start(ssp, snp, s);
				return;
			}
			snp->srcu_have_cbs[idx] = s;
			if (snp == snp_leaf)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_ssp_contention(ssp, &flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already in progress, start it. */
	if (!WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_gp_seq, s)) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);

		// And how can that list_add() in the "else" clause
		// possibly be safe for concurrent execution?  Well,
		// it isn't.  And it does not have to be.  After all, it
		// can only be executed during early boot when there is only
		// the one boot CPU running with interrupts still disabled.
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   !!srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	unsigned long curdelay;

	curdelay = !srcu_get_delay(ssp);

	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if ((--trycount + curdelay) <= 0)
			return false;
		udelay(srcu_retry_check_delay);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's __srcu_read_lock() following its next
	 * __srcu_read_lock() or __srcu_read_unlock() will see the above
	 * counter update.  Note that both this memory barrier and the
	 * one in srcu_readers_active_idx_check() provide the guarantee
	 * for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period from idle to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;
	struct srcu_node *sdp_mynode;
	int ss_state;

	check_init_srcu_struct(ssp);
	/*
	 * While starting a new grace period, make sure we are in an
	 * SRCU read-side critical section so that the grace-period
	 * sequence number cannot wrap around in the meantime.
	 */
	idx = __srcu_read_lock_nmisafe(ssp);
	ss_state = smp_load_acquire(&ssp->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_CALL)
		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
	else
		sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_sdp_contention(sdp, &flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/* Ensure that snp node tree is fully initialized before traversing it */
	if (ss_state < SRCU_SIZE_WAIT_BARRIER)
		sdp_mynode = NULL;
	else
		sdp_mynode = sdp->mynode;

	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp_mynode, s);
	__srcu_read_unlock_nmisafe(ssp, idx);
	return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

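/*
 * For illustration only: a typical caller embeds the rcu_head in its own
 * structure and recovers it with container_of() in the callback.  The
 * my_* names below are hypothetical:
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_obj *p = container_of(rhp, struct my_obj, rh);
 *
 *		kfree(p);
 *	}
 *
 *	list_del_rcu(&p->list);
 *	call_srcu(&my_srcu, &p->rh, my_free_cb);
 */
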
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the count
 * of the index=((->srcu_idx & 1) ^ 1) to drain to zero, and then flips
 * the srcu_idx and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * Implementation of these memory-ordering guarantees is similar to
 * that of synchronize_rcu().
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

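/*
 * For illustration only: a common update-side pattern removes an element
 * from an SRCU-protected list, waits for readers, then frees it (the
 * my_* names are hypothetical):
 *
 *	spin_lock(&my_lock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);	// Wait for pre-existing readers.
 *	kfree(p);			// Now safe: no reader can hold p.
 */
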
/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	// Any prior manipulation of SRCU-protected data must happen
	// before the load from ->srcu_gp_seq.
	smp_mb();
	return rcu_seq_snap(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns @true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 *
 * Because cookies are finite in size, wrapping/overflow is possible.
 * This is more pronounced on 32-bit systems where cookies are 32 bits,
 * where in theory wrapping could happen in about 14 hours assuming
 * 25-microsecond expedited SRCU grace periods.  However, a more likely
 * overflow lower bound is on the order of 24 days in the case of
 * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
 * system requires geologic timespans, as in more than seven million years
 * even for expedited SRCU grace periods.
 *
 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
 * few minutes.  If this proves to be a problem, this counter will be
 * expanded to the same size as for Tree SRCU.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
		return false;
	// Ensure that the end of the SRCU grace period happens before
	// any subsequent code that the caller might execute.
	smp_mb(); // ^^^
	return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);

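/*
 * For illustration only: a hypothetical non-blocking user of the polling
 * interfaces might look like this:
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_srcu(&my_srcu);	// Kicks off a GP.
 *	...
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *		kfree(p);		// Grace period elapsed; safe to free.
 *	else
 *		requeue_for_later(p);	// Hypothetical retry path.
 */
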
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}

/*
 * Enqueue an srcu_barrier() callback on the specified srcu_data
 * structure's ->cblist, but only if that ->cblist already has at least one
 * callback enqueued.  Note that if a CPU already has callbacks enqueued,
 * it must have already registered the need for a future grace period,
 * so all we need do is enqueue a callback that will use the same grace
 * period as the last callback already in the queue.
 */
static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
{
	spin_lock_irq_rcu_node(sdp);
	atomic_inc(&ssp->srcu_barrier_cpu_cnt);
	sdp->srcu_barrier_head.func = srcu_barrier_cb;
	debug_rcu_head_queue(&sdp->srcu_barrier_head);
	if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
				   &sdp->srcu_barrier_head)) {
		debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
		atomic_dec(&ssp->srcu_barrier_cpu_cnt);
	}
	spin_unlock_irq_rcu_node(sdp);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	int idx;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	idx = __srcu_read_lock_nmisafe(ssp);
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id()));
	else
		for_each_possible_cpu(cpu)
			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
	__srcu_read_unlock_nmisafe(ssp, idx);

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

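/*
 * For illustration only: a module that posts SRCU callbacks typically
 * runs srcu_barrier() on its exit path, before cleanup_srcu_struct(),
 * so that no callback can run after the domain is torn down:
 *
 *	srcu_barrier(&my_srcu);		// Flush in-flight callbacks.
 *	cleanup_srcu_struct(&my_srcu);	// Now safe to deconstruct.
 */
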
/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		spin_lock_irq_rcu_node(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
		ssp->srcu_n_exp_nodelay = 0;
		spin_unlock_irq_rcu_node(ssp);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		ssp->srcu_n_exp_nodelay = 0;
		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
	}
}


/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	long len;
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work);
	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return; /* Someone else on the job or nothing to do. */
	}
	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	len = ready_cbs.len;
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}
	WARN_ON_ONCE(ready_cbs.len);
	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}
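
/*
 * Sketch of the sort of callback that this function invokes (names
 * hypothetical): callbacks receive only the rcu_head, so they typically
 * recover the enclosing structure with container_of():
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_obj *p = container_of(rhp, struct my_obj, rh);
 *
 *		kfree(p);
 *	}
 *
 * Note that callbacks run with bottom halves disabled, as above, so they
 * must not sleep.
 */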

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	unsigned long curdelay;
	unsigned long j;
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(ssp);
	curdelay = srcu_get_delay(ssp);
	if (curdelay) {
		WRITE_ONCE(ssp->reschedule_count, 0);
	} else {
		j = jiffies;
		if (READ_ONCE(ssp->reschedule_jiffies) == j) {
			WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
			if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
				curdelay = 1;
		} else {
			WRITE_ONCE(ssp->reschedule_count, 1);
			WRITE_ONCE(ssp->reschedule_jiffies, j);
		}
	}
	srcu_reschedule(ssp, curdelay);
}
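
/*
 * Worked example of the throttling above (illustrative numbers only): if
 * srcu_get_delay() keeps returning 0, each zero-delay pass through this
 * function within a single jiffy increments ->reschedule_count; once that
 * count exceeds srcu_max_nodelay, the delay is forced to one jiffy so
 * that a storm of no-delay grace periods cannot monopolize the CPU
 * running the workqueue.
 */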

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

static const char * const srcu_size_state_name[] = {
	"SRCU_SIZE_SMALL",
	"SRCU_SIZE_ALLOC",
	"SRCU_SIZE_WAIT_BARRIER",
	"SRCU_SIZE_WAIT_CALL",
	"SRCU_SIZE_WAIT_CBS1",
	"SRCU_SIZE_WAIT_CBS2",
	"SRCU_SIZE_WAIT_CBS3",
	"SRCU_SIZE_WAIT_CBS4",
	"SRCU_SIZE_BIG",
	"SRCU_SIZE_???",
};

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;
	int ss_state = READ_ONCE(ssp->srcu_size_state);
	int ss_state_idx = ss_state;

	idx = ssp->srcu_idx & 0x1;
	if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
		ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
	pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), ss_state,
		 srcu_size_state_name[ss_state_idx]);
	if (!ssp->sda) {
		// Called after cleanup_srcu_struct(), perhaps.
		pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
	} else {
		pr_cont(" per-CPU(idx=%d):", idx);
		for_each_possible_cpu(cpu) {
			unsigned long l0, l1;
			unsigned long u0, u1;
			long c0, c1;
			struct srcu_data *sdp;

			sdp = per_cpu_ptr(ssp->sda, cpu);
			u0 = data_race(atomic_long_read(&sdp->srcu_unlock_count[!idx]));
			u1 = data_race(atomic_long_read(&sdp->srcu_unlock_count[idx]));

			/*
			 * Make sure that a lock is always counted if the corresponding
			 * unlock is counted.
			 */
			smp_rmb();

			l0 = data_race(atomic_long_read(&sdp->srcu_lock_count[!idx]));
			l1 = data_race(atomic_long_read(&sdp->srcu_lock_count[idx]));

			c0 = l0 - u0;
			c1 = l1 - u1;
			pr_cont(" %d(%ld,%ld %c)",
				cpu, c0, c1,
				"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
			s0 += c0;
			s1 += c1;
		}
		pr_cont(" T(%ld,%ld)\n", s0, s1);
	}
	if (SRCU_SIZING_IS_TORTURE())
		srcu_transition_to_big(ssp);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
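
/*
 * Illustrative reading of the per-CPU portion of the above output
 * (values made up): " 3(0,2 C)" says that CPU 3 has zero outstanding
 * readers on the idle index, two on the active index, and ('C' rather
 * than '.') a non-empty ->srcu_cblist.
 */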

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
		pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
	if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
		pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
	pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
	return 0;
}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	/* Decide on srcu_struct-size strategy. */
	if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
		if (nr_cpu_ids >= big_cpu_lim) {
			convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
			pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
		} else {
			convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
			pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
		}
	}

	/*
	 * Once that is set, call_srcu() can follow the normal path and
	 * queue delayed work.  This must follow RCU workqueues creation
	 * and timers initialization.
	 */
	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		list_del_init(&ssp->work.work.entry);
		if (SRCU_SIZING_IS(SRCU_SIZING_INIT) && ssp->srcu_size_state == SRCU_SIZE_SMALL)
			ssp->srcu_size_state = SRCU_SIZE_ALLOC;
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}
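
/*
 * Sketch of the early-boot case that the above loop resolves (names
 * hypothetical): a statically allocated srcu_struct may legitimately be
 * passed to call_srcu() before srcu_init() runs, in which case it is
 * parked on srcu_boot_list until workqueues are available:
 *
 *	DEFINE_STATIC_SRCU(my_early_srcu);
 *
 *	// Early in boot, before srcu_init():
 *	call_srcu(&my_early_srcu, &my_obj->rh, my_free_cb);
 *	// The callback's grace period is processed once srcu_init()
 *	// drains srcu_boot_list and queues the deferred work.
 */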

#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
	int ret;

	for (i = 0; i < mod->num_srcu_structs; i++) {
		ret = init_srcu_struct(*(sspp++));
		if (WARN_ON_ONCE(ret))
			return ret;
	}
	return 0;
}

/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;

	for (i = 0; i < mod->num_srcu_structs; i++)
		cleanup_srcu_struct(*(sspp++));
}

/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = srcu_module_coming(mod);
		break;
	case MODULE_STATE_GOING:
		srcu_module_going(mod);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block srcu_module_nb = {
	.notifier_call = srcu_module_notify,
	.priority = 0,
};

static __init int init_srcu_module_notifier(void)
{
	int ret;

	ret = register_module_notifier(&srcu_module_nb);
	if (ret)
		pr_warn("Failed to register srcu module notifier\n");
	return ret;
}
late_initcall(init_srcu_module_notifier);

#endif /* #ifdef CONFIG_MODULES */
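
/*
 * Sketch of what the above notifier automates (names hypothetical): a
 * module that declares a global srcu_struct gets init_srcu_struct() and
 * cleanup_srcu_struct() called on it at load and unload, respectively,
 * with no explicit action in the module's own init/exit functions:
 *
 *	// In some_module.c:
 *	DEFINE_SRCU(my_module_srcu);	// Recorded in ->srcu_struct_ptrs.
 *
 *	// my_module_srcu is usable as soon as module_init() runs, and is
 *	// cleaned up automatically at MODULE_STATE_GOING.
 */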