// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_nodes(ssp, is_static);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

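/*
 * Usage sketch (illustrative only, not part of this file's implementation):
 * typical dynamic setup and teardown of an SRCU domain.  The names my_srcu,
 * my_setup(), and my_teardown() are hypothetical.
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int my_setup(void)
 *	{
 *		return init_srcu_struct(&my_srcu);	// Returns 0 on success.
 *	}
 *
 *	static void my_teardown(void)
 *	{
 *		// All callbacks must have been invoked; see srcu_barrier().
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */
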
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

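/*
 * Usage sketch (illustrative only): statically allocated domains rely on
 * the first-use check above rather than on an explicit init_srcu_struct()
 * call.  The name my_srcu is hypothetical.
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	// No init_srcu_struct() needed: the first update-side primitive,
 *	// for example synchronize_srcu(&my_srcu), completes initialization.
 */
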
/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

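/*
 * A litmus-style sketch of the store-buffering pattern referenced in the
 * comment above (illustrative only; the access forms are simplified):
 *
 *	Reader				Updater
 *	------				-------
 *	srcu_lock_count[idx]++;		WRITE_ONCE(x, 1);
 *	smp_mb();  // B			smp_mb();  // A
 *	r1 = READ_ONCE(x);		r2 = srcu_readers_lock_idx(ssp, idx);
 *
 * Store buffering forbids the outcome in which r1 == 0 while r2 also
 * misses the reader's increment, so any reader that this scan fails to
 * count is guaranteed to see the updater's pre-grace-period stores.
 */
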
/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL 1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

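/*
 * Usage sketch (illustrative only): readers normally go through the
 * srcu_read_lock() and srcu_read_unlock() wrappers rather than calling
 * these __-prefixed primitives directly.  The names my_srcu, my_data_ptr,
 * struct my_data, and do_something() are hypothetical.
 *
 *	int idx;
 *	struct my_data *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_data_ptr, &my_srcu);
 *	if (p)
 *		do_something(p);	// SRCU readers are permitted to block.
 *	srcu_read_unlock(&my_srcu, idx);
 */
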
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	idx = srcu_read_lock(ssp);
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

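/*
 * Usage sketch (illustrative only): replacing and then freeing an
 * SRCU-protected structure with call_srcu().  The names my_srcu, my_lock,
 * my_data_ptr, and the ->rh field of struct my_data are hypothetical.
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_data, rh));
 *	}
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(my_data_ptr, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_data_ptr, new);
 *	spin_unlock(&my_lock);
 *	call_srcu(&my_srcu, &old->rh, my_free_cb);	// Does not block.
 */
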
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flip the srcu_idx and wait for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

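/*
 * Usage sketch (illustrative only): a blocking update using
 * synchronize_srcu().  The names my_srcu, my_lock, and my_data_ptr are
 * hypothetical.
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(my_data_ptr, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_data_ptr, new);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);	// Wait for pre-existing readers.
 *	kfree(old);			// No reader can still hold a reference.
 */
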
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

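/*
 * Usage sketch (illustrative only): draining callbacks before tearing down
 * a domain.  The names my_srcu and my_stop_new_callbacks() are hypothetical.
 *
 *	my_stop_new_callbacks();	// Prevent further call_srcu() calls.
 *	srcu_barrier(&my_srcu);		// Wait for all queued callbacks.
 *	cleanup_srcu_struct(&my_srcu);	// Now safe to deconstruct.
 */
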
/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		spin_lock_irq_rcu_node(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
		spin_unlock_irq_rcu_node(ssp);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work);

	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(ssp);
	srcu_reschedule(ssp, srcu_get_delay(ssp));
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = ssp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;

		sdp = per_cpu_ptr(ssp->sda, cpu);
		u0 = data_race(sdp->srcu_unlock_count[!idx]);
		u1 = data_race(sdp->srcu_unlock_count[idx]);

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = data_race(sdp->srcu_lock_count[!idx]);
		l1 = data_race(sdp->srcu_lock_count[idx]);

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld %c)",
			cpu, c0, c1,
			"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		check_init_srcu_struct(ssp);
		list_del_init(&ssp->work.work.entry);
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}

#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
	int ret;

	for (i = 0; i < mod->num_srcu_structs; i++) {
		ret = init_srcu_struct(*(sspp++));
		if (WARN_ON_ONCE(ret))
			return ret;
	}
	return 0;
}

/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;

	for (i = 0; i < mod->num_srcu_structs; i++)
		cleanup_srcu_struct(*(sspp++));
}

/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = srcu_module_coming(mod);
		break;
	case MODULE_STATE_GOING:
		srcu_module_going(mod);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block srcu_module_nb = {
	.notifier_call = srcu_module_notify,
	.priority = 0,
};

static __init int init_srcu_module_notifier(void)
{
	int ret;

	ret = register_module_notifier(&srcu_module_nb);
	if (ret)
		pr_warn("Failed to register srcu module notifier\n");
	return ret;
}
late_initcall(init_srcu_module_notifier);

#endif /* #ifdef CONFIG_MODULES */