// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/panic.h>
#include <linux/panic_notifier.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_RCU_NOCB_CPU
	.cblist.flags = SEGCBLIST_RCU_CORE,
#endif
};
static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
	.gp_state = RCU_GP_IDLE,
	.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
	.barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
	.barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
	.name = RCU_NAME,
	.abbr = RCU_ABBR,
	.exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
	.exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
	.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
};

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
#endif
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
			      unsigned long gps, unsigned long flags);
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);
static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);

/* rcuc/rcub/rcuop kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0444);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif

/*
 * This RCU parameter is runtime-read-only.  It reflects the minimum
 * number of objects that can be cached per CPU.  Object size is equal
 * to one page.  This value can be changed at boot time.
 */
static int rcu_min_cached_objs = 5;
module_param(rcu_min_cached_objs, int, 0444);

// A page shrinker can ask for pages to be freed to make them
// available for other parts of the system. This usually happens
// under low memory conditions, and in that case we should also
// defer page-cache filling for a short time period.
//
// The default value is 5 seconds, which is long enough to reduce
// interference with the shrinker while it asks other systems to
// drain their caches.
static int rcu_delay_page_cache_fill_msec = 5000;
module_param(rcu_delay_page_cache_fill_msec, int, 0444);

/* Retrieve RCU kthreads priority for rcutorture */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */
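
/*
 * Illustrative arithmetic (not in the original source): rcu_gp_slow()
 * below sleeps for @delay jiffies once every rcu_num_nodes *
 * PER_RCU_NODE_PERIOD * delay grace periods.  With a four-node tree and
 * a debug delay of 5 jiffies, that is one 5-jiffy sleep per 60 grace
 * periods, or 1/12 jiffy per grace period on average.  Doubling the
 * delay to 10 jiffies also doubles the spacing to 120 grace periods,
 * leaving the per-grace-period average unchanged, which is the
 * normalization described above.
 */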

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Is the CPU corresponding to the specified rcu_data structure online
 * from RCU's perspective?  This perspective is given by that structure's
 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
 */
static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
{
	return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
}
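
/*
 * Illustrative example (not in the original source): on a system whose
 * leaf rcu_node structure spans CPUs 0-15, CPU 3's rcu_data has
 * ->grpmask == 0x8.  If that node's ->qsmaskinitnext reads as 0xd
 * (CPUs 0, 2, and 3 online), then 0x8 & 0xd is nonzero and
 * rcu_rdp_cpu_online() returns true.
 */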

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}

/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}

void rcu_softirq_qs(void)
{
	rcu_qs();
	rcu_preempt_deferred_qs(current);
	rcu_tasks_qs(current, false);
}

/*
 * Increment the current CPU's rcu_data structure's ->dynticks field
 * with ordering.  Return the new value.
 */
static noinline noinstr unsigned long rcu_dynticks_inc(int incby)
{
	return arch_atomic_add_return(incby, this_cpu_ptr(&rcu_data.dynticks));
}
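
/*
 * Illustrative sketch (not in the original source) of the ->dynticks
 * parity convention used throughout this file: the counter starts at 1
 * (see the rcu_data initializer above), is odd while RCU is watching,
 * and is even while the CPU is in an extended quiescent state:
 *
 *	seq = rcu_dynticks_inc(1);	// enter EQS: odd -> even
 *	...				// CPU idle, RCU not watching
 *	seq = rcu_dynticks_inc(1);	// exit EQS: even -> odd
 *
 * rcu_momentary_dyntick_idle() instead increments by 2, preserving
 * parity while still advancing the counter that the grace-period
 * kthread snapshots.
 */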

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state, that is,
 * RCU is watching prior to the call to this function and is no longer
 * watching upon return.
 */
static noinstr void rcu_dynticks_eqs_enter(void)
{
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
	seq = rcu_dynticks_inc(1);
	// RCU is no longer watching.  Better be in extended quiescent state!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & 0x1));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state, that is, RCU is not watching
 * prior to the call to this function and is watching upon return.
 */
static noinstr void rcu_dynticks_eqs_exit(void)
{
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = rcu_dynticks_inc(1);
	// RCU is now watching.  Better not be in an extended quiescent state!
	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & 0x1));
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (atomic_read(&rdp->dynticks) & 0x1)
		return;
	rcu_dynticks_inc(1);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	return !(arch_atomic_read(this_cpu_ptr(&rcu_data.dynticks)) & 0x1);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
static int rcu_dynticks_snap(struct rcu_data *rdp)
{
	smp_mb();  // Fundamental RCU ordering guarantee.
	return atomic_read_acquire(&rdp->dynticks);
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & 0x1);
}

/* Return true if the specified CPU is currently idle from an RCU viewpoint. */
bool rcu_is_idle_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
}

/*
 * Return true if the CPU corresponding to the specified rcu_data
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
{
	return snap != rcu_dynticks_snap(rdp);
}
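
/*
 * Illustrative sketch (not in the original source) of how the
 * force-quiescent-state code below uses the snapshot helpers:
 *
 *	int snap = rcu_dynticks_snap(rdp);	// full ordering
 *	if (rcu_dynticks_in_eqs(snap))
 *		return 1;	// CPU is idle right now: quiescent state.
 *	...			// later, on a subsequent scan:
 *	if (rcu_dynticks_in_eqs_since(rdp, snap))
 *		return 1;	// counter moved, so the CPU passed through
 *				// (at least a momentary) quiescent state.
 *
 * Any change in ->dynticks implies an intervening quiescent state,
 * because every transition (and rcu_momentary_dyntick_idle()'s emulated
 * zero-duration one) increments the counter.
 */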

/*
 * Return true if the referenced integer is zero while the specified
 * CPU remains within a single extended quiescent state.
 */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int snap;

	// If not quiescent, force back to earlier extended quiescent state.
	snap = atomic_read(&rdp->dynticks) & ~0x1;

	smp_rmb(); // Order ->dynticks and *vp reads.
	if (READ_ONCE(*vp))
		return false;  // Non-zero, so report failure;
	smp_rmb(); // Order *vp read and ->dynticks re-read.

	// If still in the same extended quiescent state, we are good!
	return snap == atomic_read(&rdp->dynticks);
}

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_dyntick_idle(void)
{
	int seq;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	seq = rcu_dynticks_inc(2);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(seq & 0x1));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);

/**
 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 *
 * If the current CPU is idle and running at a first-level (not nested)
 * interrupt, or directly from idle, return true.
 *
 * The caller must have at least disabled IRQs.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	long nesting;

	/*
	 * Usually called from the tick; but also used from smp_function_call()
	 * for expedited grace periods. This latter can result in running from
	 * the idle task, instead of an actual IPI.
	 */
	lockdep_assert_irqs_disabled();

	/* Check for counter underflows */
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
			 "RCU dynticks_nesting counter underflow!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
			 "RCU dynticks_nmi_nesting counter underflow/zero!");

	/* Are we at first interrupt nesting level? */
	nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
	if (nesting > 1)
		return false;

	/*
	 * If we're not in an interrupt, we must be in the idle task!
	 */
	WARN_ON_ONCE(!nesting && !is_idle_task(current));

	/* Does CPU appear to be idle from an RCU standpoint? */
	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
}
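
/*
 * Illustrative values (not in the original source): with the CPU in the
 * idle loop, dynticks_nesting == 0 and dynticks_nmi_nesting == 0.  A
 * first-level interrupt from idle runs rcu_nmi_enter(), which sets
 * dynticks_nmi_nesting to 1, so this function returns true.  If that
 * handler is itself interrupted, dynticks_nmi_nesting becomes 3 (each
 * nested level adds 2), and this function returns false.
 */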

#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
// Maximum callbacks per rcu_do_batch ...
#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
static long qlowmark = DEFAULT_RCU_QLOMARK;
#define DEFAULT_RCU_QOVLD_MULT 2
#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
static long qovld_calc = -1;	  // No pre-initialization lock acquisitions!

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
module_param(qovld, long, 0444);
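
/*
 * Illustrative usage (not in the original source): because of the
 * MODULE_PARAM_PREFIX definition above, these appear on the kernel
 * command line with an "rcutree." prefix, for example:
 *
 *	rcutree.blimit=20 rcutree.qhimark=20000 rcutree.qovld=50000
 *
 * The 0444 permissions make them read-only at runtime under
 * /sys/module/rcutree/parameters/.
 */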

static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);

/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */

/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}
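
/*
 * Illustrative arithmetic (not in the original source), assuming
 * HZ == 1000 and RCU_JIFFIES_FQS_DIV's current value of 256: if
 * jiffies_till_first_fqs == 100 and jiffies_till_next_fqs == 100, then
 * j = 100 + 2 * 100 == 300 jiffies, the time of the third FQS scan.  On
 * a 256-CPU system the lower bound is HZ / 10 + 256 / 256 == 101
 * jiffies, so the computed 300 jiffies stands.
 */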

static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
{
	ulong j;
	int ret = kstrtoul(val, 0, &j);

	if (!ret) {
		WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
		adjust_jiffies_till_sched_qs();
	}
	return ret;
}

static const struct kernel_param_ops first_fqs_jiffies_ops = {
	.set = param_set_first_fqs_jiffies,
	.get = param_get_ulong,
};

static const struct kernel_param_ops next_fqs_jiffies_ops = {
	.set = param_set_next_fqs_jiffies,
	.get = param_get_ulong,
};

module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
module_param(rcu_kick_kthreads, bool, 0644);
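
/*
 * Illustrative usage (not in the original source): the 0644 permissions
 * on the module_param_cb() invocations above make the FQS timing
 * tunable at runtime, with the setters clamping the value and
 * recomputing jiffies_to_sched_qs, for example:
 *
 *	echo 50 > /sys/module/rcutree/parameters/jiffies_till_first_fqs
 */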

static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
static int rcu_pending(int user);

/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
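
/*
 * Illustrative note (not in the original source): the returned value is
 * an rcu_seq counter, whose low-order RCU_SEQ_CTR_SHIFT bits hold
 * grace-period state and whose upper bits count grace periods, so two
 * samples are compared with the rcu_seq helpers rather than directly,
 * for example:
 *
 *	unsigned long old = rcu_get_gp_seq();
 *	...
 *	if (rcu_seq_completed_gp(old, rcu_get_gp_seq()))
 *		;	// at least one full grace period has elapsed
 */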

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the root node of the rcu_state structure.
 */
static struct rcu_node *rcu_get_root(void)
{
	return &rcu_state.node[0];
}

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq)
{
	switch (test_type) {
	case RCU_FLAVOR:
		*flags = READ_ONCE(rcu_state.gp_flags);
		*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static noinstr void rcu_eqs_enter(bool user)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdp->dynticks_nesting == 0);
	if (rdp->dynticks_nesting != 1) {
		// RCU will still be watching, so just do accounting and leave.
		rdp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	instrumentation_begin();
	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	rcu_preempt_deferred_qs(current);

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.
	rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

#ifdef CONFIG_NO_HZ_FULL

#if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)
/*
 * An empty function that will trigger a reschedule on
 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
 */
static void late_wakeup_func(struct irq_work *work)
{
}

static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
	IRQ_WORK_INIT(late_wakeup_func);

/*
 * If either:
 *
 * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work
 * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry.
 *
 * In these cases the late RCU wake ups aren't supported in the resched loops and our
 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
 * get re-enabled again.
 */
noinstr static void rcu_irq_work_resched(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
		return;

	if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
		return;

	instrumentation_begin();
	if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
		irq_work_queue(this_cpu_ptr(&late_wakeup_work));
	}
	instrumentation_end();
}

#else
static inline void rcu_irq_work_resched(void) { }
#endif

/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * Other than generic entry implementation, we may be past the last
	 * rescheduling opportunity in the entry code. Trigger a self IPI
	 * that will fire and reschedule once we resume in user/guest mode.
	 */
	rcu_irq_work_resched();
	rcu_eqs_enter(true);
}

#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	instrumentation_begin();
	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
				  atomic_read(&rdp->dynticks));
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
		instrumentation_end();
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
	instrumentation_end();

	// RCU is watching here ...
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.

	if (!in_nmi())
		rcu_dynticks_task_enter();
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit();
}

#ifdef CONFIG_PROVE_RCU
/**
 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 */
void rcu_irq_exit_check_preempt(void)
{
	lockdep_assert_irqs_disabled();

	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
			 "RCU dynticks_nesting counter underflow/zero!");
	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
			 DYNTICK_IRQ_NONIDLE,
			 "Bad RCU dynticks_nmi_nesting counter\n");
	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "RCU in extended quiescent state!");
}
#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void noinstr rcu_eqs_exit(bool user)
{
	struct rcu_data *rdp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdp = this_cpu_ptr(&rcu_data);
	oldval = rdp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		// RCU was already watching, so just do accounting and leave.
		rdp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	// RCU is not watching here ...
	rcu_dynticks_eqs_exit();
	// ... but is watching here.
	instrumentation_begin();

	// instrumentation for the noinstr rcu_dynticks_eqs_exit()
	instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdp->dynticks_nesting, 1);
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
	instrumentation_end();
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run an RCU read-side critical section at any time.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_user_exit(void)
{
	rcu_eqs_exit(true);
}

/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, including RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
#endif /* CONFIG_NO_HZ_FULL */
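
/*
 * Illustrative sequence (not in the original source): a nohz_full CPU
 * loops in the kernel, the grace-period kthread sets that CPU's
 * ->rcu_urgent_qs, and the CPU's next interrupt passes through
 * rcu_nmi_enter(), which (not being in NMI context, with RCU already
 * watching) invokes rcu_irq_enter_check_tick() and lands in the
 * function above.  The TICK_DEP_BIT_RCU dependency it sets remains
 * until rcu_disable_urgency_upon_qs() clears it when the quiescent
 * state is finally reported.
 */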

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_nmi_enter(void)
{
	long incby = 2;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	/* Complain about underflow. */
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		rcu_dynticks_eqs_exit();
		// ... but is watching here.

		instrumentation_begin();
		// instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
		instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
		// instrumentation for the noinstr rcu_dynticks_eqs_exit()
		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));

		incby = 1;
	} else if (!in_nmi()) {
		instrumentation_begin();
		rcu_irq_enter_check_tick();
	} else {
		instrumentation_begin();
	}

	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdp->dynticks_nmi_nesting,
			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
	instrumentation_end();
	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdp->dynticks_nmi_nesting + incby);
	barrier();
}

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
noinstr void rcu_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_enter();
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/*
 * Check to see if any future non-offloaded RCU-related work will need
 * to be done by the current CPU, even if none need be done immediately,
 * returning 1 if so.  This function is part of the RCU implementation;
 * it is -not- an exported member of the RCU API.  This is used by
 * the idle-entry code to figure out whether it is safe to disable the
 * scheduler-clock interrupt.
 *
 * Just check whether or not this CPU has non-offloaded RCU callbacks
 * queued.
 */
int rcu_needs_cpu(void)
{
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}

/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion calls.
 */
notrace bool rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
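
/*
 * Illustrative usage (not in the original source), typical of tracing
 * or debugging code that might run from the idle loop:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		...	// safe to access RCU-protected pointers
 *		rcu_read_unlock();
 *	}
 *
 * When this returns false, the CPU is in an extended quiescent state
 * and RCU read-side critical sections are forbidden until something
 * like rcu_irq_enter() marks the CPU as watching again.
 */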

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online as far as RCU is concerned?
 *
 * Disable preemption to avoid false positives that could otherwise
 * happen due to the current CPU number being sampled, this task being
 * preempted, its old CPU being taken offline, resuming on some other CPU,
 * then determining that its old CPU is now offline.
 *
 * Disable checking if in an NMI handler because we cannot safely
 * report errors from NMI handlers anyway.  In addition, it is OK to use
 * RCU on an offline processor during initial boot, hence the check for
 * rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	bool ret = false;

	if (in_nmi() || !rcu_scheduler_fully_active)
		return true;
	preempt_disable_notrace();
	rdp = this_cpu_ptr(&rcu_data);
	/*
	 * Strictly, we care here about the case where the current CPU is
	 * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
	 * not being up to date.  So arch_spin_is_locked() might have a
	 * false positive if it's held by some *other* CPU, but that's
	 * OK because that just means a false *negative* on the warning.
	 */
	if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
		ret = true;
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * When trying to report a quiescent state on behalf of some other CPU,
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
			 rnp->gp_seq))
		WRITE_ONCE(rdp->gpwrap, true);
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
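
/*
 * Illustrative arithmetic (not in the original source): ULONG_CMP_LT()
 * is the wraparound-safe unsigned comparison, so the first test above
 * fires once rnp->gp_seq has raced more than ULONG_MAX / 4 counts ahead
 * of this CPU's rdp->gp_seq snapshot, even if the counter has wrapped
 * past zero in the meantime.  Setting ->gpwrap then forces the stale
 * per-CPU state to be resynchronized instead of being misread as
 * current.
 */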

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	struct rcu_node *rnp = rdp->mynode;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rnp, rdp);
		return 1;
	}

	/*
	 * Complain if a CPU that is considered to be offline from RCU's
	 * perspective has not yet reported a quiescent state.  After all,
	 * the offline CPU should have reported a quiescent state during
	 * the CPU-offline process, or, failing that, by rcu_gp_init()
	 * if it ran concurrently with either the CPU going offline or the
	 * last task on a leaf rcu_node structure exiting its RCU read-side
	 * critical section while all CPUs corresponding to that structure
	 * are offline.  This added warning detects bugs in any of these
	 * code paths.
	 *
	 * The rcu_node structure's ->lock is held here, which excludes
	 * the relevant portions of the CPU-hotplug code, the grace-period
	 * initialization code, and the rcu_read_unlock() code paths.
	 *
	 * For more detail, please refer to the "Hotplug CPU" section
	 * of RCU's Requirements documentation.
	 */
	if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
		struct rcu_node *rnp1;

		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
			__func__, rnp->grplo, rnp->grphi, rnp->level,
			(long)rnp->gp_seq, (long)rnp->completedqs);
		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
			__func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
		return 1; /* Break things loose after complaining. */
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
	 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
	 * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
	 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
	 * variable are safe because the assignments are repeated if this
	 * CPU failed to pass through a quiescent state.  This code
	 * also checks .jiffies_resched in case jiffies_to_sched_qs
	 * is set way high.
	 */
	jtsq = READ_ONCE(jiffies_to_sched_qs);
	if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
	     time_after(jiffies, rcu_state.jiffies_resched) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(&rdp->rcu_urgent_qs, true);
	} else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
	}

	/*
	 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
	 * The above code handles this, but only for straight cond_resched().
	 * And some in-kernel loops check need_resched() before calling
	 * cond_resched(), which defeats the above code for CPUs that are
	 * running in-kernel with scheduling-clock interrupts disabled.
	 * So hit them over the head with the resched_cpu() hammer!
	 */
	if (tick_nohz_full_cpu(rdp->cpu) &&
	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
	     rcu_state.cbovld)) {
		WRITE_ONCE(rdp->rcu_urgent_qs, true);
		resched_cpu(rdp->cpu);
		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, invoke
	 * resched_cpu() more frequently to try to loosen things up a bit.
	 * Also check to see if the CPU is getting hammered with interrupts,
	 * but only once per grace period, just to keep the IPIs down to
	 * a dull roar.
	 */
	if (time_after(jiffies, rcu_state.jiffies_resched)) {
		if (time_after(jiffies,
			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
			resched_cpu(rdp->cpu);
			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
		}
		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
		    (rnp->ffmask & rdp->grpmask)) {
			rdp->rcu_iw_pending = true;
			rdp->rcu_iw_gp_seq = rnp->gp_seq;
			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
		}
	}

	return 0;
}

/* Trace-event wrapper function for trace_rcu_future_grace_period. */
static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
			      unsigned long gp_seq_req, const char *s)
{
	trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
				      gp_seq_req, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * rcu_start_this_gp - Request the start of a particular grace period
 * @rnp_start: The leaf node of the CPU from which to start.
 * @rdp: The rcu_data corresponding to the CPU from which to start.
 * @gp_seq_req: The gp_seq of the grace period to start.
 *
 * Start the specified grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock, which
 * is why the caller is responsible for waking the grace-period kthread.
 *
 * Returns true if the GP thread needs to be awakened, else false.
 */
static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
			      unsigned long gp_seq_req)
{
	bool ret = false;
	struct rcu_node *rnp;

	/*
	 * Use funnel locking to either acquire the root rcu_node
	 * structure's lock or bail out if the need for this grace period
	 * has already been recorded -- or if that grace period has in
	 * fact already started.  If there is already a grace period in
	 * progress in a non-leaf node, no recording is needed because the
	 * end of the grace period will scan the leaf rcu_node structures.
	 * Note that rnp_start->lock must not be released.
	 */
	raw_lockdep_assert_held_rcu_node(rnp_start);
	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
	for (rnp = rnp_start; 1; rnp = rnp->parent) {
		if (rnp != rnp_start)
			raw_spin_lock_rcu_node(rnp);
		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
		    (rnp != rnp_start &&
		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
					  TPS("Prestarted"));
			goto unlock_out;
		}
		WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
			/*
			 * We just marked the leaf or internal node, and a
			 * grace period is in progress, which means that
			 * rcu_gp_cleanup() will see the marking.  Bail to
			 * reduce contention.
			 */
			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
					  TPS("Startedleaf"));
			goto unlock_out;
		}
		if (rnp != rnp_start && rnp->parent != NULL)
			raw_spin_unlock_rcu_node(rnp);
		if (!rnp->parent)
			break;  /* At root, and perhaps also leaf. */
	}

	/* If GP already in progress, just leave, otherwise start one. */
	if (rcu_gp_in_progress()) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
		goto unlock_out;
	}
	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
	WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
	WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
	if (!READ_ONCE(rcu_state.gp_kthread)) {
		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
		goto unlock_out;
	}
	trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
	ret = true;  /* Caller must wake GP kthread. */
unlock_out:
	/* Push furthest requested GP to leaf node and rcu_data structure. */
	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
		WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	}
	if (rnp != rnp_start)
		raw_spin_unlock_rcu_node(rnp);
	return ret;
}

/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
{
	bool needmore;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
	if (!needmore)
		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, and we lost), and finally don't try
 * to awaken a kthread that has not yet been created.  If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context?  Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition.  In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}

/*
 * If there is room, assign a ->gp_seq number to any callbacks on this
 * CPU that have not already been assigned.  Also accelerate any callbacks
 * that were previously assigned a ->gp_seq number that has since proven
 * to be too conservative, which can happen if callbacks get assigned a
 * ->gp_seq number while RCU is idle, but with reference to a non-root
 * rcu_node structure.  This function is idempotent, so it does not hurt
 * to call it repeatedly.  Returns a flag saying that we should awaken
 * the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	unsigned long gp_seq_req;
	bool ret = false;

	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));

	/*
	 * Callbacks are often registered with incomplete grace-period
	 * information.  Something about the fact that getting exact
	 * information requires acquiring a global lock...  RCU therefore
	 * makes a conservative estimate of the grace period number at which
	 * a given callback will become ready to invoke.  The following
	 * code checks this estimate and improves it when possible, thus
	 * accelerating callback invocation to an earlier grace-period
	 * number.
	 */
	gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);

	/* Trace depending on how much we were able to accelerate. */
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));

	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));

	return ret;
}
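
/*
 * Illustrative layout (not in the original source) of the segmented
 * callback list that rcu_accelerate_cbs() and rcu_advance_cbs() operate
 * on, using the segment names defined alongside struct rcu_segcblist:
 *
 *	RCU_DONE_TAIL		callbacks whose grace period has elapsed
 *	RCU_WAIT_TAIL		callbacks waiting on the current grace period
 *	RCU_NEXT_READY_TAIL	callbacks waiting on the next grace period
 *	RCU_NEXT_TAIL		callbacks not yet assigned a ->gp_seq
 *
 * Acceleration assigns the earliest safe ->gp_seq to unassigned
 * callbacks; advancing (below) moves callbacks whose ->gp_seq has
 * completed into RCU_DONE_TAIL for invocation.
 */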

/*
 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
 * rcu_node structure's ->lock be held.  It consults the cached value
 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
 * while holding the leaf rcu_node structure's ->lock.
 */
static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
					struct rcu_data *rdp)
{
	unsigned long c;
	bool needwake;

	rcu_lockdep_assert_cblist_protected(rdp);
	c = rcu_seq_snap(&rcu_state.gp_seq);
	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
		/* Old request still live, so mark recent callbacks. */
		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
		return;
	}
	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
	needwake = rcu_accelerate_cbs(rnp, rdp);
	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	if (needwake)
		rcu_gp_kthread_wake();
}

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist.  This function is idempotent, so it does not hurt to
 * invoke it repeatedly.  As long as it is not invoked -too- often...
 * Returns true if the RCU grace-period kthread needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	raw_lockdep_assert_held_rcu_node(rnp);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Find all callbacks whose ->gp_seq numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
	 */
	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);

	/* Classify any remaining callbacks. */
	return rcu_accelerate_cbs(rnp, rdp);
}

/*
 * Move and classify callbacks, but only if doing so won't require
 * that the RCU grace-period kthread be awakened.
 */
static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
						  struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
		return;
	// The grace period cannot end while we hold the rcu_node lock.
	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
	raw_spin_unlock_rcu_node(rnp);
}

/*
 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
 * quiescent state.  This is intended to be invoked when the CPU notices
 * a new grace period.
 */
static void rcu_strict_gp_check_qs(void)
{
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
{
	bool ret = false;
	bool need_qs;
	const bool offloaded = rcu_rdp_is_offloaded(rdp);

	raw_lockdep_assert_held_rcu_node(rnp);

	if (rdp->gp_seq == rnp->gp_seq)
		return false; /* Nothing to do. */

	/* Handle the ends of any preceding grace periods first. */
	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		if (!offloaded)
			ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
		rdp->core_needs_qs = false;
		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
	} else {
		if (!offloaded)
			ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
		if (rdp->core_needs_qs)
			rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
	}

	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
	    unlikely(READ_ONCE(rdp->gpwrap))) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
		need_qs = !!(rnp->qsmask & rdp->grpmask);
		rdp->cpu_no_qs.b.norm = need_qs;
		rdp->core_needs_qs = need_qs;
		zero_cpu_stall_ticks(rdp);
	}
	rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
	if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
		WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
	WRITE_ONCE(rdp->gpwrap, false);
	rcu_gpnum_ovf(rnp, rdp);
	return ret;
}
1687 static void note_gp_changes(struct rcu_data *rdp)
1689 unsigned long flags;
1691 struct rcu_node *rnp;
1693 local_irq_save(flags);
1695 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1696 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1697 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1698 local_irq_restore(flags);
1701 needwake = __note_gp_changes(rnp, rdp);
1702 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1703 rcu_strict_gp_check_qs();
1705 rcu_gp_kthread_wake();
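/*
 * Illustrative aside: note_gp_changes() above uses a common
 * opportunistic pattern: a racy lockless check first, then a trylock,
 * deferring the work if either step says "not now".  A generic sketch,
 * in which struct mystate, maybe_do_update(), and do_update() are
 * hypothetical names:
 *
 *	static void maybe_do_update(struct mystate *sp)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		if (likely(READ_ONCE(sp->seq) == sp->seq_snap) ||
 *		    !raw_spin_trylock(&sp->lock)) {
 *			local_irq_restore(flags);
 *			return;	// Nothing to do, or contended: retry later.
 *		}
 *		do_update(sp);	// Lock held, irqs disabled.
 *		raw_spin_unlock_irqrestore(&sp->lock, flags);
 *	}
 *
 * Deferring is safe only because this path is guaranteed to be
 * revisited, here from the scheduling-clock interrupt.
 */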
1708 static void rcu_gp_slow(int delay)
1711 !(rcu_seq_ctr(rcu_state.gp_seq) %
1712 (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1713 schedule_timeout_idle(delay);
1716 static unsigned long sleep_duration;
1718 /* Allow rcutorture to stall the grace-period kthread. */
1719 void rcu_gp_set_torture_wait(int duration)
1721 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1722 WRITE_ONCE(sleep_duration, duration);
1724 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1726 /* Actually implement the aforementioned wait. */
1727 static void rcu_gp_torture_wait(void)
1729 unsigned long duration;
1731 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1733 duration = xchg(&sleep_duration, 0UL);
1735 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1736 schedule_timeout_idle(duration);
1737 pr_alert("%s: Wait complete\n", __func__);
1742 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1745 static void rcu_strict_gp_boundary(void *unused)
1751 * Initialize a new grace period. Return false if no grace period required.
1753 static noinline_for_stack bool rcu_gp_init(void)
1755 unsigned long flags;
1756 unsigned long oldmask;
1758 struct rcu_data *rdp;
1759 struct rcu_node *rnp = rcu_get_root();
1761 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1762 raw_spin_lock_irq_rcu_node(rnp);
1763 if (!READ_ONCE(rcu_state.gp_flags)) {
1764 /* Spurious wakeup, tell caller to go back to sleep. */
1765 raw_spin_unlock_irq_rcu_node(rnp);
1768 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1770 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1772 * Grace period already in progress, don't start another.
1773 * Not supposed to be able to happen.
1775 raw_spin_unlock_irq_rcu_node(rnp);
1779 /* Advance to a new grace period and initialize state. */
1780 record_gp_stall_check_time();
1781 /* Record GP times before starting GP, hence rcu_seq_start(). */
1782 rcu_seq_start(&rcu_state.gp_seq);
1783 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1784 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1785 raw_spin_unlock_irq_rcu_node(rnp);
1788 * Apply per-leaf buffered online and offline operations to
1789 * the rcu_node tree. Note that this new grace period need not
1790 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1791 * offlining path, when combined with checks in this function,
1792 * will handle CPUs that are currently going offline or that will
1793 * go offline later. Please also refer to "Hotplug CPU" section
1794 * of RCU's Requirements documentation.
1796 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1797 /* Exclude CPU hotplug operations. */
1798 rcu_for_each_leaf_node(rnp) {
1799 local_irq_save(flags);
1800 arch_spin_lock(&rcu_state.ofl_lock);
1801 raw_spin_lock_rcu_node(rnp);
1802 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1803 !rnp->wait_blkd_tasks) {
1804 /* Nothing to do on this leaf rcu_node structure. */
1805 raw_spin_unlock_rcu_node(rnp);
1806 arch_spin_unlock(&rcu_state.ofl_lock);
1807 local_irq_restore(flags);
1811 /* Record old state, apply changes to ->qsmaskinit field. */
1812 oldmask = rnp->qsmaskinit;
1813 rnp->qsmaskinit = rnp->qsmaskinitnext;
1815 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1816 if (!oldmask != !rnp->qsmaskinit) {
1817 if (!oldmask) { /* First online CPU for rcu_node. */
1818 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1819 rcu_init_new_rnp(rnp);
1820 } else if (rcu_preempt_has_tasks(rnp)) {
1821 rnp->wait_blkd_tasks = true; /* blocked tasks */
1822 } else { /* Last offline CPU and can propagate. */
1823 rcu_cleanup_dead_rnp(rnp);
1828 * If all waited-on tasks from prior grace period are
1829 * done, and if all this rcu_node structure's CPUs are
1830 * still offline, propagate up the rcu_node tree and
1831 * clear ->wait_blkd_tasks. Otherwise, if one of this
1832 * rcu_node structure's CPUs has since come back online,
1833 * simply clear ->wait_blkd_tasks.
1835 if (rnp->wait_blkd_tasks &&
1836 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1837 rnp->wait_blkd_tasks = false;
1838 if (!rnp->qsmaskinit)
1839 rcu_cleanup_dead_rnp(rnp);
1842 raw_spin_unlock_rcu_node(rnp);
1843 arch_spin_unlock(&rcu_state.ofl_lock);
1844 local_irq_restore(flags);
1846 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1849 * Set the quiescent-state-needed bits in all the rcu_node
1850 * structures for all currently online CPUs in breadth-first
1851 * order, starting from the root rcu_node structure, relying on the
1852 * layout of the tree within the rcu_state.node[] array. Note that
1853 * other CPUs will access only the leaves of the hierarchy, thus
1854 * seeing that no grace period is in progress, at least until the
1855 * corresponding leaf node has been initialized.
1857 * The grace period cannot complete until the initialization
1858 * process finishes, because this kthread handles both.
1860 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1861 rcu_for_each_node_breadth_first(rnp) {
1862 rcu_gp_slow(gp_init_delay);
1863 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1864 rdp = this_cpu_ptr(&rcu_data);
1865 rcu_preempt_check_blocked_tasks(rnp);
1866 rnp->qsmask = rnp->qsmaskinit;
1867 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1868 if (rnp == rdp->mynode)
1869 (void)__note_gp_changes(rnp, rdp);
1870 rcu_preempt_boost_start_gp(rnp);
1871 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1872 rnp->level, rnp->grplo,
1873 rnp->grphi, rnp->qsmask);
1874 /* Quiescent states for tasks on any now-offline CPUs. */
1875 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1876 rnp->rcu_gp_init_mask = mask;
1877 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1878 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1880 raw_spin_unlock_irq_rcu_node(rnp);
1881 cond_resched_tasks_rcu_qs();
1882 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1885 // If strict, make all CPUs aware of new grace period.
1886 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1887 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1893 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1896 static bool rcu_gp_fqs_check_wake(int *gfp)
1898 struct rcu_node *rnp = rcu_get_root();
1900 // If under overload conditions, force an immediate FQS scan.
1901 if (*gfp & RCU_GP_FLAG_OVLD)
1904 // Someone like call_rcu() requested a force-quiescent-state scan.
1905 *gfp = READ_ONCE(rcu_state.gp_flags);
1906 if (*gfp & RCU_GP_FLAG_FQS)
1909 // The current grace period has completed.
1910 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1917 * Do one round of quiescent-state forcing.
1919 static void rcu_gp_fqs(bool first_time)
1921 struct rcu_node *rnp = rcu_get_root();
1923 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1924 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1926 /* Collect dyntick-idle snapshots. */
1927 force_qs_rnp(dyntick_save_progress_counter);
1929 /* Handle dyntick-idle and offline CPUs. */
1930 force_qs_rnp(rcu_implicit_dynticks_qs);
1932 /* Clear flag to prevent immediate re-entry. */
1933 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1934 raw_spin_lock_irq_rcu_node(rnp);
1935 WRITE_ONCE(rcu_state.gp_flags,
1936 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1937 raw_spin_unlock_irq_rcu_node(rnp);
1942 * Loop doing repeated quiescent-state forcing until the grace period ends.
1944 static noinline_for_stack void rcu_gp_fqs_loop(void)
1950 struct rcu_node *rnp = rcu_get_root();
1952 first_gp_fqs = true;
1953 j = READ_ONCE(jiffies_till_first_fqs);
1954 if (rcu_state.cbovld)
1955 gf = RCU_GP_FLAG_OVLD;
1959 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1961 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1962 * update; required for stall checks.
1965 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1966 jiffies + (j ? 3 * j : 2));
1968 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1970 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1971 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1972 rcu_gp_fqs_check_wake(&gf), j);
1973 rcu_gp_torture_wait();
1974 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1975 /* Locking provides needed memory barriers. */
1976 /* If grace period done, leave loop. */
1977 if (!READ_ONCE(rnp->qsmask) &&
1978 !rcu_preempt_blocked_readers_cgp(rnp))
1980 /* If time for quiescent-state forcing, do it. */
1981 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1982 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1983 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1985 rcu_gp_fqs(first_gp_fqs);
1988 first_gp_fqs = false;
1989 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1991 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1993 cond_resched_tasks_rcu_qs();
1994 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1995 ret = 0; /* Force full wait till next FQS. */
1996 j = READ_ONCE(jiffies_till_next_fqs);
1998 /* Deal with stray signal. */
1999 cond_resched_tasks_rcu_qs();
2000 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2001 WARN_ON(signal_pending(current));
2002 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2004 ret = 1; /* Keep old FQS timing. */
2006 if (time_after(jiffies, rcu_state.jiffies_force_qs))
2009 j = rcu_state.jiffies_force_qs - j;
2016 * Clean up after the old grace period.
2018 static noinline void rcu_gp_cleanup(void)
2021 bool needgp = false;
2022 unsigned long gp_duration;
2023 unsigned long new_gp_seq;
2025 struct rcu_data *rdp;
2026 struct rcu_node *rnp = rcu_get_root();
2027 struct swait_queue_head *sq;
2029 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2030 raw_spin_lock_irq_rcu_node(rnp);
2031 rcu_state.gp_end = jiffies;
2032 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2033 if (gp_duration > rcu_state.gp_max)
2034 rcu_state.gp_max = gp_duration;
2037 * We know the grace period is complete, but to everyone else
2038 * it appears to still be ongoing. But it is also the case
2039 * that to everyone else it looks like there is nothing that
2040 * they can do to advance the grace period. It is therefore
2041 * safe for us to drop the lock in order to mark the grace
2042 * period as completed in all of the rcu_node structures.
2044 raw_spin_unlock_irq_rcu_node(rnp);
2047 * Propagate new ->gp_seq value to rcu_node structures so that
2048 * other CPUs don't have to wait until the start of the next grace
2049 * period to process their callbacks. This also avoids some nasty
2050 * RCU grace-period initialization races by forcing the end of
2051 * the current grace period to be completely recorded in all of
2052 * the rcu_node structures before the beginning of the next grace
2053 * period is recorded in any of the rcu_node structures.
2055 new_gp_seq = rcu_state.gp_seq;
2056 rcu_seq_end(&new_gp_seq);
2057 rcu_for_each_node_breadth_first(rnp) {
2058 raw_spin_lock_irq_rcu_node(rnp);
2059 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2060 dump_blkd_tasks(rnp, 10);
2061 WARN_ON_ONCE(rnp->qsmask);
2062 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2063 rdp = this_cpu_ptr(&rcu_data);
2064 if (rnp == rdp->mynode)
2065 needgp = __note_gp_changes(rnp, rdp) || needgp;
2066 /* smp_mb() provided by prior unlock-lock pair. */
2067 needgp = rcu_future_gp_cleanup(rnp) || needgp;
2068 // Reset overload indication for CPUs no longer overloaded
2069 if (rcu_is_leaf_node(rnp))
2070 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2071 rdp = per_cpu_ptr(&rcu_data, cpu);
2072 check_cb_ovld_locked(rdp, rnp);
2074 sq = rcu_nocb_gp_get(rnp);
2075 raw_spin_unlock_irq_rcu_node(rnp);
2076 rcu_nocb_gp_cleanup(sq);
2077 cond_resched_tasks_rcu_qs();
2078 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2079 rcu_gp_slow(gp_cleanup_delay);
2081 rnp = rcu_get_root();
2082 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2084 /* Declare grace period done, trace first to use old GP number. */
2085 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2086 rcu_seq_end(&rcu_state.gp_seq);
2087 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2088 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2089 /* Check for GP requests since above loop. */
2090 rdp = this_cpu_ptr(&rcu_data);
2091 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2092 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2093 TPS("CleanupMore"));
2096 /* Advance CBs to reduce false positives below. */
2097 offloaded = rcu_rdp_is_offloaded(rdp);
2098 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2099 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2100 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2101 trace_rcu_grace_period(rcu_state.name,
2105 WRITE_ONCE(rcu_state.gp_flags,
2106 rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2108 raw_spin_unlock_irq_rcu_node(rnp);
2110 // If strict, make all CPUs aware of the end of the old grace period.
2111 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2112 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2116 * Body of kthread that handles grace periods.
2118 static int __noreturn rcu_gp_kthread(void *unused)
2120 rcu_bind_gp_kthread();
2123 /* Handle grace-period start. */
2125 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2127 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2128 swait_event_idle_exclusive(rcu_state.gp_wq,
2129 READ_ONCE(rcu_state.gp_flags) &
2131 rcu_gp_torture_wait();
2132 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2133 /* Locking provides needed memory barrier. */
2136 cond_resched_tasks_rcu_qs();
2137 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2138 WARN_ON(signal_pending(current));
2139 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2143 /* Handle quiescent-state forcing. */
2146 /* Handle grace-period end. */
2147 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2149 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
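/*
 * Illustrative aside: stripped of tracing and gp_state bookkeeping,
 * the grace-period kthread above is a simple three-phase event loop.
 * A hedged sketch of its shape:
 *
 *	for (;;) {
 *		// Phase 1: sleep until a grace period is requested,
 *		// retrying on spurious wakeups.
 *		for (;;) {
 *			swait_event_idle_exclusive(rcu_state.gp_wq,
 *				READ_ONCE(rcu_state.gp_flags) &
 *				RCU_GP_FLAG_INIT);
 *			if (rcu_gp_init())
 *				break;
 *		}
 *
 *		// Phase 2: force quiescent states until the GP ends.
 *		rcu_gp_fqs_loop();
 *
 *		// Phase 3: propagate completion to all rcu_node
 *		// structures and start a new GP if one is needed.
 *		rcu_gp_cleanup();
 *	}
 */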
2154 * Report a full set of quiescent states to the rcu_state data structure.
2155 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2156 * another grace period is required. Whether we wake the grace-period
2157 * kthread or it awakens itself for the next round of quiescent-state
2158 * forcing, that kthread will clean up after the just-completed grace
2159 * period. Note that the caller must hold rnp->lock, which is released
2162 static void rcu_report_qs_rsp(unsigned long flags)
2163 __releases(rcu_get_root()->lock)
2165 raw_lockdep_assert_held_rcu_node(rcu_get_root());
2166 WARN_ON_ONCE(!rcu_gp_in_progress());
2167 WRITE_ONCE(rcu_state.gp_flags,
2168 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2169 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2170 rcu_gp_kthread_wake();
2174 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2175 * Allows quiescent states for a group of CPUs to be reported at one go
2176 * to the specified rcu_node structure, though all the CPUs in the group
2177 * must be represented by the same rcu_node structure (which need not be a
2178 * leaf rcu_node structure, though it often will be). The gps parameter
2179 * is the grace-period snapshot, which means that the quiescent states
2180 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2181 * must be held upon entry, and it is released before return.
2183 * As a special case, if mask is zero, the bit-already-cleared check is
2184 * disabled. This allows propagating quiescent state due to resumed tasks
2185 * during grace-period initialization.
2187 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2188 unsigned long gps, unsigned long flags)
2189 __releases(rnp->lock)
2191 unsigned long oldmask = 0;
2192 struct rcu_node *rnp_c;
2194 raw_lockdep_assert_held_rcu_node(rnp);
2196 /* Walk up the rcu_node hierarchy. */
2198 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2201 * Our bit has already been cleared, or the
2202 * relevant grace period is already over, so done.
2204 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2207 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2208 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2209 rcu_preempt_blocked_readers_cgp(rnp));
2210 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2211 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2212 mask, rnp->qsmask, rnp->level,
2213 rnp->grplo, rnp->grphi,
2215 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2217 /* Other bits still set at this level, so done. */
2218 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2221 rnp->completedqs = rnp->gp_seq;
2222 mask = rnp->grpmask;
2223 if (rnp->parent == NULL) {
2225 /* No more levels. Exit loop holding root lock. */
2229 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2232 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2233 oldmask = READ_ONCE(rnp_c->qsmask);
2237 * Get here if we are the last CPU to pass through a quiescent
2238 * state for this grace period. Invoke rcu_report_qs_rsp()
2239 * to clean up and start the next grace period if one is needed.
2241 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
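/*
 * Illustrative aside: the walk above is the heart of RCU's combining
 * tree.  A minimal sketch with hypothetical types, ignoring locking,
 * ->gp_seq rechecks, and blocked readers, showing how clearing the
 * last bit at one level propagates one level up:
 *
 *	struct node {
 *		unsigned long qsmask;	// Children still owing a QS.
 *		unsigned long grpmask;	// This node's bit in its parent.
 *		struct node *parent;
 *	};
 *
 *	static void report_qs(struct node *np, unsigned long mask)
 *	{
 *		for (; np; np = np->parent) {
 *			np->qsmask &= ~mask;
 *			if (np->qsmask)
 *				return;		// Others still pending.
 *			mask = np->grpmask;	// Propagate upward.
 *		}
 *		// Root emptied: every CPU has reported, so the
 *		// grace period may end.
 *	}
 */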
2245 * Record a quiescent state for all tasks that were previously queued
2246 * on the specified rcu_node structure and that were blocking the current
2247 * RCU grace period. The caller must hold the corresponding rnp->lock with
2248 * irqs disabled, and this lock is released upon return, but irqs remain
2251 static void __maybe_unused
2252 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2253 __releases(rnp->lock)
2257 struct rcu_node *rnp_p;
2259 raw_lockdep_assert_held_rcu_node(rnp);
2260 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2261 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2263 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2264 return; /* Still need more quiescent states! */
2267 rnp->completedqs = rnp->gp_seq;
2268 rnp_p = rnp->parent;
2269 if (rnp_p == NULL) {
2271 * Only one rcu_node structure in the tree, so don't
2272 * try to report up to its nonexistent parent!
2274 rcu_report_qs_rsp(flags);
2278 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2280 mask = rnp->grpmask;
2281 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2282 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
2283 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2287 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2288 * structure. This must be called from the specified CPU.
2291 rcu_report_qs_rdp(struct rcu_data *rdp)
2293 unsigned long flags;
2295 bool needwake = false;
2296 bool needacc = false;
2297 struct rcu_node *rnp;
2299 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2301 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2302 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2306 * The grace period in which this quiescent state was
2307 * recorded has ended, so don't report it upwards.
2308 * We will instead need a new quiescent state that lies
2309 * within the current grace period.
2311 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2312 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2315 mask = rdp->grpmask;
2316 rdp->core_needs_qs = false;
2317 if ((rnp->qsmask & mask) == 0) {
2318 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2321 * This GP can't end until this CPU checks in, so all of our
2322 * callbacks can be processed during the next GP.
2324 * NOCB kthreads have their own way to deal with that...
2326 if (!rcu_rdp_is_offloaded(rdp)) {
2327 needwake = rcu_accelerate_cbs(rnp, rdp);
2328 } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
2330 * ...but NOCB kthreads may miss or delay the acceleration of callbacks
2331 * if in the middle of a (de-)offloading process.
2336 rcu_disable_urgency_upon_qs(rdp);
2337 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2338 /* ^^^ Released rnp->lock */
2340 rcu_gp_kthread_wake();
2343 rcu_nocb_lock_irqsave(rdp, flags);
2344 rcu_accelerate_cbs_unlocked(rnp, rdp);
2345 rcu_nocb_unlock_irqrestore(rdp, flags);
2351 * Check to see if there is a new grace period of which this CPU
2352 * is not yet aware, and if so, set up local rcu_data state for it.
2353 * Otherwise, see if this CPU has just passed through its first
2354 * quiescent state for this grace period, and record that fact if so.
2357 rcu_check_quiescent_state(struct rcu_data *rdp)
2359 /* Check for grace-period ends and beginnings. */
2360 note_gp_changes(rdp);
2363 * Does this CPU still need to do its part for current grace period?
2364 * If no, return and let the other CPUs do their part as well.
2366 if (!rdp->core_needs_qs)
2370 * Was there a quiescent state since the beginning of the grace
2371 * period? If no, then exit and wait for the next call.
2373 if (rdp->cpu_no_qs.b.norm)
2377 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2380 rcu_report_qs_rdp(rdp);
2384 * Near the end of the offline process. Trace the fact that this CPU
2387 int rcutree_dying_cpu(unsigned int cpu)
2390 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2391 struct rcu_node *rnp = rdp->mynode;
2393 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2396 blkd = !!(rnp->qsmask & rdp->grpmask);
2397 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2398 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
2403 * All CPUs for the specified rcu_node structure have gone offline,
2404 * and all tasks that were preempted within an RCU read-side critical
2405 * section while running on one of those CPUs have since exited their RCU
2406 * read-side critical section. Some other CPU is reporting this fact with
2407 * the specified rcu_node structure's ->lock held and interrupts disabled.
2408 * This function therefore goes up the tree of rcu_node structures,
2409 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2410 * the leaf rcu_node structure's ->qsmaskinit field has already been
2413 * This function does check that the specified rcu_node structure has
2414 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2415 * prematurely. That said, invoking it after the fact will cost you
2416 * a needless lock acquisition. So once it has done its work, don't
2419 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2422 struct rcu_node *rnp = rnp_leaf;
2424 raw_lockdep_assert_held_rcu_node(rnp_leaf);
2425 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2426 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2427 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2430 mask = rnp->grpmask;
2434 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2435 rnp->qsmaskinit &= ~mask;
2436 /* Between grace periods, so better already be zero! */
2437 WARN_ON_ONCE(rnp->qsmask);
2438 if (rnp->qsmaskinit) {
2439 raw_spin_unlock_rcu_node(rnp);
2440 /* irqs remain disabled. */
2443 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2448 * The CPU has been completely removed, and some other CPU is reporting
2449 * this fact from process context. Do the remainder of the cleanup.
2450 * There can only be one CPU hotplug operation at a time, so no need for
2453 int rcutree_dead_cpu(unsigned int cpu)
2455 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2456 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2458 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2461 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2462 /* Adjust any no-longer-needed kthreads. */
2463 rcu_boost_kthread_setaffinity(rnp, -1);
2464 // Stop-machine done, so allow nohz_full to disable tick.
2465 tick_dep_clear(TICK_DEP_BIT_RCU);
2470 * Invoke any RCU callbacks that have made it to the end of their grace
2471 * period. Throttle as specified by rdp->blimit.
2473 static void rcu_do_batch(struct rcu_data *rdp)
2476 bool __maybe_unused empty;
2477 unsigned long flags;
2478 struct rcu_head *rhp;
2479 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2481 long pending, tlimit = 0;
2483 /* If no callbacks are ready, just return. */
2484 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2485 trace_rcu_batch_start(rcu_state.name,
2486 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2487 trace_rcu_batch_end(rcu_state.name, 0,
2488 !rcu_segcblist_empty(&rdp->cblist),
2489 need_resched(), is_idle_task(current),
2490 rcu_is_callbacks_kthread());
2495 * Extract the list of ready callbacks, disabling IRQs to prevent
2496 * races with call_rcu() from interrupt handlers. Leave the
2497 * callback counts, as rcu_barrier() needs to be conservative.
2499 rcu_nocb_lock_irqsave(rdp, flags);
2500 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2501 pending = rcu_segcblist_n_cbs(&rdp->cblist);
2502 div = READ_ONCE(rcu_divisor);
2503 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2504 bl = max(rdp->blimit, pending >> div);
2505 if (in_serving_softirq() && unlikely(bl > 100)) {
2506 long rrn = READ_ONCE(rcu_resched_ns);
2508 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2509 tlimit = local_clock() + rrn;
2511 trace_rcu_batch_start(rcu_state.name,
2512 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2513 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2514 if (rcu_rdp_is_offloaded(rdp))
2515 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2517 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2518 rcu_nocb_unlock_irqrestore(rdp, flags);
2520 /* Invoke callbacks. */
2521 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2522 rhp = rcu_cblist_dequeue(&rcl);
2524 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2528 debug_rcu_head_unqueue(rhp);
2530 rcu_lock_acquire(&rcu_callback_map);
2531 trace_rcu_invoke_callback(rcu_state.name, rhp);
2534 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2537 rcu_lock_release(&rcu_callback_map);
2540 * Stop only if limit reached and CPU has something to do.
2542 if (in_serving_softirq()) {
2543 if (count >= bl && (need_resched() || !is_idle_task(current)))
2546 * Make sure we don't spend too much time here and deprive other
2547 * softirq vectors of CPU cycles.
2549 if (unlikely(tlimit)) {
2550 /* only call local_clock() every 32 callbacks */
2551 if (likely((count & 31) || local_clock() < tlimit))
2553 /* Exceeded the time limit, so leave. */
2558 lockdep_assert_irqs_enabled();
2559 cond_resched_tasks_rcu_qs();
2560 lockdep_assert_irqs_enabled();
2565 rcu_nocb_lock_irqsave(rdp, flags);
2566 rdp->n_cbs_invoked += count;
2567 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2568 is_idle_task(current), rcu_is_callbacks_kthread());
2570 /* Update counts and requeue any remaining callbacks. */
2571 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2572 rcu_segcblist_add_len(&rdp->cblist, -count);
2574 /* Reinstate batch limit if we have worked down the excess. */
2575 count = rcu_segcblist_n_cbs(&rdp->cblist);
2576 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2577 rdp->blimit = blimit;
2579 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2580 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2581 rdp->qlen_last_fqs_check = 0;
2582 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2583 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2584 rdp->qlen_last_fqs_check = count;
2587 * The following usually indicates a double call_rcu(). To track
2588 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2590 empty = rcu_segcblist_empty(&rdp->cblist);
2591 WARN_ON_ONCE(count == 0 && !empty);
2592 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2593 count != 0 && empty);
2594 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2595 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2597 rcu_nocb_unlock_irqrestore(rdp, flags);
2599 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
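/*
 * Illustrative aside: the time-limit check in rcu_do_batch() above
 * amortizes the cost of local_clock() by consulting it only once per
 * 32 callbacks.  A generic sketch of the pattern, in which dequeue(),
 * process(), and budget_ns are hypothetical names:
 *
 *	u64 deadline = local_clock() + budget_ns;
 *	long count = 0;
 *
 *	while ((item = dequeue(&list)) != NULL) {
 *		process(item);
 *		count++;
 *		// "count & 31" is nonzero 31 times out of 32, so the
 *		// relatively expensive clock read is usually skipped.
 *		if (unlikely(!(count & 31)) && local_clock() >= deadline)
 *			break;	// Over budget: requeue the rest for later.
 *	}
 */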
2603 * This function is invoked from each scheduling-clock interrupt,
2604 * and checks to see if this CPU is in a non-context-switch quiescent
2605 * state, for example, user mode or idle loop. It also schedules RCU
2606 * core processing. If the current grace period has gone on too long,
2607 * it will ask the scheduler to manufacture a context switch for the sole
2608 * purpose of providing the needed quiescent state.
2610 void rcu_sched_clock_irq(int user)
2612 trace_rcu_utilization(TPS("Start scheduler-tick"));
2613 lockdep_assert_irqs_disabled();
2614 raw_cpu_inc(rcu_data.ticks_this_gp);
2615 /* The load-acquire pairs with the store-release setting to true. */
2616 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2617 /* Idle and userspace execution already are quiescent states. */
2618 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2619 set_tsk_need_resched(current);
2620 set_preempt_need_resched();
2622 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2624 rcu_flavor_sched_clock_irq(user);
2625 if (rcu_pending(user))
2627 lockdep_assert_irqs_disabled();
2629 trace_rcu_utilization(TPS("End scheduler-tick"));
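/*
 * Illustrative aside: the smp_load_acquire() in rcu_sched_clock_irq()
 * pairs with an smp_store_release() on the CPU requesting an urgent
 * quiescent state, so everything written before the release is visible
 * after the acquire.  A minimal sketch, in which urgent_flag and
 * request_info are hypothetical names:
 *
 *	// CPU 0 (requester):
 *	WRITE_ONCE(request_info, 42);			// A
 *	smp_store_release(&urgent_flag, true);		// B: release
 *
 *	// CPU 1 (in its scheduling-clock interrupt):
 *	if (smp_load_acquire(&urgent_flag))		// Pairs with B.
 *		use(READ_ONCE(request_info));		// A is visible.
 *
 * In the real code, the flag is the per-CPU rcu_data.rcu_urgent_qs.
 */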
2633 * Scan the leaf rcu_node structures. For each structure on which all
2634 * CPUs have reported a quiescent state and on which there are tasks
2635 * blocking the current grace period, initiate RCU priority boosting.
2636 * Otherwise, invoke the specified function to check dyntick state for
2637 * each CPU that has not yet reported a quiescent state.
2639 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2642 unsigned long flags;
2644 struct rcu_data *rdp;
2645 struct rcu_node *rnp;
2647 rcu_state.cbovld = rcu_state.cbovldnext;
2648 rcu_state.cbovldnext = false;
2649 rcu_for_each_leaf_node(rnp) {
2650 cond_resched_tasks_rcu_qs();
2652 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2653 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2654 if (rnp->qsmask == 0) {
2655 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2657 * No point in scanning bits because they
2658 * are all zero. But we might need to
2659 * priority-boost blocked readers.
2661 rcu_initiate_boost(rnp, flags);
2662 /* rcu_initiate_boost() releases rnp->lock */
2665 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2668 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2669 rdp = per_cpu_ptr(&rcu_data, cpu);
2671 mask |= rdp->grpmask;
2672 rcu_disable_urgency_upon_qs(rdp);
2676 /* Idle/offline CPUs, report (releases rnp->lock). */
2677 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2679 /* Nothing to do here, so just drop the lock. */
2680 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2686 * Force quiescent states on reluctant CPUs, and also detect which
2687 * CPUs are in dyntick-idle mode.
2689 void rcu_force_quiescent_state(void)
2691 unsigned long flags;
2693 struct rcu_node *rnp;
2694 struct rcu_node *rnp_old = NULL;
2696 /* Funnel through hierarchy to reduce memory contention. */
2697 rnp = __this_cpu_read(rcu_data.mynode);
2698 for (; rnp != NULL; rnp = rnp->parent) {
2699 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2700 !raw_spin_trylock(&rnp->fqslock);
2701 if (rnp_old != NULL)
2702 raw_spin_unlock(&rnp_old->fqslock);
2707 /* rnp_old == rcu_get_root(), rnp == NULL. */
2709 /* Reached the root of the rcu_node tree, acquire lock. */
2710 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2711 raw_spin_unlock(&rnp_old->fqslock);
2712 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2713 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2714 return; /* Someone beat us to it. */
2716 WRITE_ONCE(rcu_state.gp_flags,
2717 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2718 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2719 rcu_gp_kthread_wake();
2721 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
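/*
 * Illustrative aside: the loop in rcu_force_quiescent_state() above is
 * "funnel locking": each contender trylocks its leaf's ->fqslock and
 * climbs toward the root, dropping the lower lock once the higher one
 * is held; a contender that loses a trylock simply gives up, because
 * some other CPU is already on its way up to do the same work.  A
 * hedged sketch, in which flag_already_set() is a hypothetical
 * stand-in for the gp_flags test:
 *
 *	struct rcu_node *rnp_old = NULL;
 *
 *	for (rnp = my_leaf; rnp; rnp = rnp->parent) {
 *		done = flag_already_set() ||
 *		       !raw_spin_trylock(&rnp->fqslock);
 *		if (rnp_old)
 *			raw_spin_unlock(&rnp_old->fqslock);
 *		if (done)
 *			return;	// Someone else will set the flag.
 *		rnp_old = rnp;
 *	}
 *	// Reached the root: set RCU_GP_FLAG_FQS and wake the kthread.
 *
 * This bounds contention on any one lock to the tree's fanout, no
 * matter how many CPUs request a forced quiescent state at once.
 */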
2723 // Workqueue handler for an RCU reader for kernels enforcing struct RCU
2725 static void strict_work_handler(struct work_struct *work)
2731 /* Perform RCU core processing work for the current CPU. */
2732 static __latent_entropy void rcu_core(void)
2734 unsigned long flags;
2735 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2736 struct rcu_node *rnp = rdp->mynode;
2738 * On RT rcu_core() can be preempted when IRQs aren't disabled.
2739 * Therefore this function can race with concurrent NOCB (de-)offloading
2740 * on this CPU and the below condition must be considered volatile.
2741 * However if we race with:
2743 * _ Offloading: In the worst case we accelerate or process callbacks
2744 * concurrently with NOCB kthreads. We are guaranteed to
2745 * call rcu_nocb_lock() if that happens.
2747 * _ Deoffloading: In the worst case we miss callbacks acceleration or
2748 * processing. This is fine because the early stage
2749 * of deoffloading invokes rcu_core() after setting
2750 * SEGCBLIST_RCU_CORE. So we are guaranteed to process
2751 * anything that would otherwise have been dismissed, without
2752 * needing to wait for the next rcu_pending() check in the next jiffy.
2754 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2756 if (cpu_is_offline(smp_processor_id()))
2758 trace_rcu_utilization(TPS("Start RCU core"));
2759 WARN_ON_ONCE(!rdp->beenonline);
2761 /* Report any deferred quiescent states if preemption enabled. */
2762 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2763 rcu_preempt_deferred_qs(current);
2764 } else if (rcu_preempt_need_deferred_qs(current)) {
2765 set_tsk_need_resched(current);
2766 set_preempt_need_resched();
2769 /* Update RCU state based on any recent quiescent states. */
2770 rcu_check_quiescent_state(rdp);
2772 /* No grace period and unregistered callbacks? */
2773 if (!rcu_gp_in_progress() &&
2774 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2775 rcu_nocb_lock_irqsave(rdp, flags);
2776 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2777 rcu_accelerate_cbs_unlocked(rnp, rdp);
2778 rcu_nocb_unlock_irqrestore(rdp, flags);
2781 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2783 /* If there are callbacks ready, invoke them. */
2784 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2785 likely(READ_ONCE(rcu_scheduler_fully_active))) {
2787 /* Re-invoke RCU core processing if there are callbacks remaining. */
2788 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2792 /* Do any needed deferred wakeups of rcuo kthreads. */
2793 do_nocb_deferred_wakeup(rdp);
2794 trace_rcu_utilization(TPS("End RCU core"));
2796 // If strict GPs, schedule an RCU reader in a clean environment.
2797 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2798 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2801 static void rcu_core_si(struct softirq_action *h)
2806 static void rcu_wake_cond(struct task_struct *t, int status)
2809 * If the thread is yielding, only wake it when this
2810 * is invoked from the idle loop.
2812 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2816 static void invoke_rcu_core_kthread(void)
2818 struct task_struct *t;
2819 unsigned long flags;
2821 local_irq_save(flags);
2822 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2823 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2824 if (t != NULL && t != current)
2825 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2826 local_irq_restore(flags);
2830 * Wake up this CPU's rcuc kthread to do RCU core processing.
2832 static void invoke_rcu_core(void)
2834 if (!cpu_online(smp_processor_id()))
2837 raise_softirq(RCU_SOFTIRQ);
2839 invoke_rcu_core_kthread();
2842 static void rcu_cpu_kthread_park(unsigned int cpu)
2844 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2847 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2849 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2853 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2854 * the RCU softirq used in configurations of RCU that do not support RCU
2855 * priority boosting.
2857 static void rcu_cpu_kthread(unsigned int cpu)
2859 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2860 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2861 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2864 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2865 for (spincnt = 0; spincnt < 10; spincnt++) {
2866 WRITE_ONCE(*j, jiffies);
2868 *statusp = RCU_KTHREAD_RUNNING;
2869 local_irq_disable();
2877 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2878 *statusp = RCU_KTHREAD_WAITING;
2882 *statusp = RCU_KTHREAD_YIELDING;
2883 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2884 schedule_timeout_idle(2);
2885 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2886 *statusp = RCU_KTHREAD_WAITING;
2887 WRITE_ONCE(*j, jiffies);
2890 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2891 .store = &rcu_data.rcu_cpu_kthread_task,
2892 .thread_should_run = rcu_cpu_kthread_should_run,
2893 .thread_fn = rcu_cpu_kthread,
2894 .thread_comm = "rcuc/%u",
2895 .setup = rcu_cpu_kthread_setup,
2896 .park = rcu_cpu_kthread_park,
2900 * Spawn per-CPU RCU core processing kthreads.
2902 static int __init rcu_spawn_core_kthreads(void)
2906 for_each_possible_cpu(cpu)
2907 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2910 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2911 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2916 * Handle any core-RCU processing required by a call_rcu() invocation.
2918 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2919 unsigned long flags)
2922 * If called from an extended quiescent state, invoke the RCU
2923 * core in order to force a re-evaluation of RCU's idleness.
2925 if (!rcu_is_watching())
2928 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2929 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2933 * Force the grace period if too many callbacks or too long waiting.
2934 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2935 * if some other CPU has recently done so. Also, don't bother
2936 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2937 * is the only one waiting for a grace period to complete.
2939 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2940 rdp->qlen_last_fqs_check + qhimark)) {
2942 /* Are we ignoring a completed grace period? */
2943 note_gp_changes(rdp);
2945 /* Start a new grace period if one not already started. */
2946 if (!rcu_gp_in_progress()) {
2947 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2949 /* Give the grace period a kick. */
2950 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2951 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2952 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2953 rcu_force_quiescent_state();
2954 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2955 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2961 * RCU callback function to leak a callback.
2963 static void rcu_leak_callback(struct rcu_head *rhp)
2968 * Check and if necessary update the leaf rcu_node structure's
2969 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2970 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
2971 * structure's ->lock.
2973 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2975 raw_lockdep_assert_held_rcu_node(rnp);
2976 if (qovld_calc <= 0)
2977 return; // Early boot and wildcard value set.
2978 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2979 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2981 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2985 * Check and if necessary update the leaf rcu_node structure's
2986 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2987 * number of queued RCU callbacks. No locks need be held, but the
2988 * caller must have disabled interrupts.
2990 * Note that this function ignores the possibility that there are a lot
2991 * of callbacks all of which have already seen the end of their respective
2992 * grace periods. This omission is due to the need for no-CBs CPUs to
2993 * be holding ->nocb_lock to do this check, which is too heavy for a
2994 * common-case operation.
2996 static void check_cb_ovld(struct rcu_data *rdp)
2998 struct rcu_node *const rnp = rdp->mynode;
3000 if (qovld_calc <= 0 ||
3001 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
3002 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
3003 return; // Early boot wildcard value or already set correctly.
3004 raw_spin_lock_rcu_node(rnp);
3005 check_cb_ovld_locked(rdp, rnp);
3006 raw_spin_unlock_rcu_node(rnp);
3010 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3011 * @head: structure to be used for queueing the RCU updates.
3012 * @func: actual callback function to be invoked after the grace period
3014 * The callback function will be invoked some time after a full grace
3015 * period elapses, in other words after all pre-existing RCU read-side
3016 * critical sections have completed. However, the callback function
3017 * might well execute concurrently with RCU read-side critical sections
3018 * that started after call_rcu() was invoked.
3020 * RCU read-side critical sections are delimited by rcu_read_lock()
3021 * and rcu_read_unlock(), and may be nested. In addition, but only in
3022 * v5.0 and later, regions of code across which interrupts, preemption,
3023 * or softirqs have been disabled also serve as RCU read-side critical
3024 * sections. This includes hardware interrupt handlers, softirq handlers,
3027 * Note that all CPUs must agree that the grace period extended beyond
3028 * all pre-existing RCU read-side critical sections. On systems with more
3029 * than one CPU, this means that when "func()" is invoked, each CPU is
3030 * guaranteed to have executed a full memory barrier since the end of its
3031 * last RCU read-side critical section whose beginning preceded the call
3032 * to call_rcu(). It also means that each CPU executing an RCU read-side
3033 * critical section that continues beyond the start of "func()" must have
3034 * executed a memory barrier after the call_rcu() but before the beginning
3035 * of that RCU read-side critical section. Note that these guarantees
3036 * include CPUs that are offline, idle, or executing in user mode, as
3037 * well as CPUs that are executing in the kernel.
3039 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3040 * resulting RCU callback function "func()", then both CPU A and CPU B are
3041 * guaranteed to execute a full memory barrier during the time interval
3042 * between the call to call_rcu() and the invocation of "func()" -- even
3043 * if CPU A and CPU B are the same CPU (but again only if the system has
3044 * more than one CPU).
3046 * Implementation of these memory-ordering guarantees is described here:
3047 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3049 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3051 static atomic_t doublefrees;
3052 unsigned long flags;
3053 struct rcu_data *rdp;
3056 /* Misaligned rcu_head! */
3057 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3059 if (debug_rcu_head_queue(head)) {
3061 * Probable double call_rcu(), so leak the callback.
3062 * Use rcu:rcu_callback trace event to find the previous
3063 * time this callback was passed to call_rcu().
3065 if (atomic_inc_return(&doublefrees) < 4) {
3066 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
3069 WRITE_ONCE(head->func, rcu_leak_callback);
3074 kasan_record_aux_stack_noalloc(head);
3075 local_irq_save(flags);
3076 rdp = this_cpu_ptr(&rcu_data);
3078 /* Add the callback to our list. */
3079 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3080 // This can trigger due to call_rcu() from offline CPU:
3081 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
3082 WARN_ON_ONCE(!rcu_is_watching());
3083 // Very early boot, before rcu_init(). Initialize if needed
3084 // and then drop through to queue the callback.
3085 if (rcu_segcblist_empty(&rdp->cblist))
3086 rcu_segcblist_init(&rdp->cblist);
3090 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
3091 return; // Enqueued onto ->nocb_bypass, so just leave.
3092 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
3093 rcu_segcblist_enqueue(&rdp->cblist, head);
3094 if (__is_kvfree_rcu_offset((unsigned long)func))
3095 trace_rcu_kvfree_callback(rcu_state.name, head,
3096 (unsigned long)func,
3097 rcu_segcblist_n_cbs(&rdp->cblist));
3099 trace_rcu_callback(rcu_state.name, head,
3100 rcu_segcblist_n_cbs(&rdp->cblist));
3102 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
3104 /* Go handle any RCU core processing required. */
3105 if (unlikely(rcu_rdp_is_offloaded(rdp))) {
3106 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
3108 __call_rcu_core(rdp, head, flags);
3109 local_irq_restore(flags);
3112 EXPORT_SYMBOL_GPL(call_rcu);
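/*
 * Illustrative aside: canonical call_rcu() usage with a hypothetical
 * struct foo.  The rcu_head is embedded in the protected object, and
 * the callback uses container_of() to recover the enclosing structure:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);	// Safe: all pre-existing readers are done.
 *	}
 *
 *	// Updater, after removing fp from all reader-visible paths:
 *	call_rcu(&fp->rcu, foo_reclaim);
 *
 * Because this particular callback does nothing but kfree(), the
 * updater could instead use kfree_rcu(fp, rcu), which needs no
 * callback and feeds the batching machinery defined below.
 */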
3115 /* Maximum number of jiffies to wait before draining a batch. */
3116 #define KFREE_DRAIN_JIFFIES (HZ / 50)
3117 #define KFREE_N_BATCHES 2
3118 #define FREE_N_CHANNELS 2
3121 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3122 * @nr_records: Number of active pointers in the array
3123 * @next: Next bulk object in the block chain
3124 * @records: Array of the kvfree_rcu() pointers
3126 struct kvfree_rcu_bulk_data {
3127 unsigned long nr_records;
3128 struct kvfree_rcu_bulk_data *next;
3133 * This macro defines how many entries the "records" array
3134 * will contain. It is chosen so that the size of a
3135 * kvfree_rcu_bulk_data structure is exactly one page.
3137 #define KVFREE_BULK_MAX_ENTR \
3138 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
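/*
 * Worked example (illustrative, assuming a 64-bit kernel with 4 KiB
 * pages): sizeof(struct kvfree_rcu_bulk_data) is 16 bytes (one
 * unsigned long plus one pointer), so KVFREE_BULK_MAX_ENTR works out
 * to (4096 - 16) / 8 = 510 pointers per page-sized block.
 */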
3141 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3142 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3143 * @head_free: List of kfree_rcu() objects waiting for a grace period
3144 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3145 * @krcp: Pointer to @kfree_rcu_cpu structure
3148 struct kfree_rcu_cpu_work {
3149 struct rcu_work rcu_work;
3150 struct rcu_head *head_free;
3151 struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3152 struct kfree_rcu_cpu *krcp;
3156 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3157 * @head: List of kfree_rcu() objects not yet waiting for a grace period
3158 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3159 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3160 * @lock: Synchronize access to this structure
3161 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3162 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3163 * @initialized: The @rcu_work fields have been initialized
3164 * @count: Number of objects for which GP not started
3166 * A simple cache list that holds objects for later reuse.
3167 * To save some per-CPU space, the list is singly linked.
3168 * Even though it is lockless, access must be protected by @lock.
3170 * @page_cache_work: A work to refill the cache when it is empty
3171 * @backoff_page_cache_fill: Delay cache refills
3172 * @work_in_progress: Indicates that page_cache_work is running
3173 * @hrtimer: A hrtimer for scheduling a page_cache_work
3174 * @nr_bkv_objs: number of allocated objects at @bkvcache.
3176 * This is a per-CPU structure. The reason that it is not included in
3177 * the rcu_data structure is to permit this code to be extracted from
3178 * the RCU files. Such extraction could allow further optimization of
3179 * the interactions with the slab allocators.
3181 struct kfree_rcu_cpu {
3182 struct rcu_head *head;
3183 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3184 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3185 raw_spinlock_t lock;
3186 struct delayed_work monitor_work;
3191 struct delayed_work page_cache_work;
3192 atomic_t backoff_page_cache_fill;
3193 atomic_t work_in_progress;
3194 struct hrtimer hrtimer;
3196 struct llist_head bkvcache;
3200 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3201 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3204 static __always_inline void
3205 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3207 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3210 for (i = 0; i < bhead->nr_records; i++)
3211 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3215 static inline struct kfree_rcu_cpu *
3216 krc_this_cpu_lock(unsigned long *flags)
3218 struct kfree_rcu_cpu *krcp;
3220 local_irq_save(*flags); // For safely calling this_cpu_ptr().
3221 krcp = this_cpu_ptr(&krc);
3222 raw_spin_lock(&krcp->lock);
3228 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3230 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3233 static inline struct kvfree_rcu_bulk_data *
3234 get_cached_bnode(struct kfree_rcu_cpu *krcp)
3236 if (!krcp->nr_bkv_objs)
3239 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
3240 return (struct kvfree_rcu_bulk_data *)
3241 llist_del_first(&krcp->bkvcache);
3245 put_cached_bnode(struct kfree_rcu_cpu *krcp,
3246 struct kvfree_rcu_bulk_data *bnode)
3249 if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3252 llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3253 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
3258 drain_page_cache(struct kfree_rcu_cpu *krcp)
3260 unsigned long flags;
3261 struct llist_node *page_list, *pos, *n;
3264 raw_spin_lock_irqsave(&krcp->lock, flags);
3265 page_list = llist_del_all(&krcp->bkvcache);
3266 WRITE_ONCE(krcp->nr_bkv_objs, 0);
3267 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3269 llist_for_each_safe(pos, n, page_list) {
3270 free_page((unsigned long)pos);
3278 * This function is invoked in workqueue context after a grace period.
3279 * It frees all the objects queued on ->bkvhead_free or ->head_free.
3281 static void kfree_rcu_work(struct work_struct *work)
3283 unsigned long flags;
3284 struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3285 struct rcu_head *head, *next;
3286 struct kfree_rcu_cpu *krcp;
3287 struct kfree_rcu_cpu_work *krwp;
3290 krwp = container_of(to_rcu_work(work),
3291 struct kfree_rcu_cpu_work, rcu_work);
3294 raw_spin_lock_irqsave(&krcp->lock, flags);
3295 // Channels 1 and 2.
3296 for (i = 0; i < FREE_N_CHANNELS; i++) {
3297 bkvhead[i] = krwp->bkvhead_free[i];
3298 krwp->bkvhead_free[i] = NULL;
3302 head = krwp->head_free;
3303 krwp->head_free = NULL;
3304 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3306 // Handle the first two channels.
3307 for (i = 0; i < FREE_N_CHANNELS; i++) {
3308 for (; bkvhead[i]; bkvhead[i] = bnext) {
3309 bnext = bkvhead[i]->next;
3310 debug_rcu_bhead_unqueue(bkvhead[i]);
3312 rcu_lock_acquire(&rcu_callback_map);
3313 if (i == 0) { // kmalloc() / kfree().
3314 trace_rcu_invoke_kfree_bulk_callback(
3315 rcu_state.name, bkvhead[i]->nr_records,
3316 bkvhead[i]->records);
3318 kfree_bulk(bkvhead[i]->nr_records,
3319 bkvhead[i]->records);
3320 } else { // vmalloc() / vfree().
3321 for (j = 0; j < bkvhead[i]->nr_records; j++) {
3322 trace_rcu_invoke_kvfree_callback(
3324 bkvhead[i]->records[j], 0);
3326 vfree(bkvhead[i]->records[j]);
3329 rcu_lock_release(&rcu_callback_map);
3331 raw_spin_lock_irqsave(&krcp->lock, flags);
3332 if (put_cached_bnode(krcp, bkvhead[i]))
3334 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3337 free_page((unsigned long) bkvhead[i]);
3339 cond_resched_tasks_rcu_qs();
3344 * This is used when the "bulk" path cannot be used for the
3345 * double-argument variant of kvfree_rcu(). This happens when the
3346 * page-cache is empty, which means that objects are instead
3347 * queued on a linked list through their rcu_head structures.
3348 * This list is named "Channel 3".
3350 for (; head; head = next) {
3351 unsigned long offset = (unsigned long)head->func;
3352 void *ptr = (void *)head - offset;
3355 debug_rcu_head_unqueue((struct rcu_head *)ptr);
3356 rcu_lock_acquire(&rcu_callback_map);
3357 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3359 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3362 rcu_lock_release(&rcu_callback_map);
3363 cond_resched_tasks_rcu_qs();
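/*
 * Illustrative aside: the pointer arithmetic in the "Channel 3" loop
 * above relies on kvfree_rcu() storing, in place of a real callback,
 * the byte offset of the rcu_head within the enclosing object.  A
 * hedged sketch of the round trip, with a hypothetical struct foo:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	// Enqueue side: the "function" is really offsetof(struct foo, rcu).
 *	head->func = (rcu_callback_t)(unsigned long)offsetof(struct foo, rcu);
 *
 *	// Dequeue side, as in the loop above:
 *	unsigned long offset = (unsigned long)head->func;
 *	void *ptr = (void *)head - offset;	// Recovers the original foo.
 *	kvfree(ptr);
 *
 * __is_kvfree_rcu_offset() merely checks that the "pointer" is smaller
 * than 4096, and thus an offset rather than actual code.
 */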
3368 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3370 static void kfree_rcu_monitor(struct work_struct *work)
3372 struct kfree_rcu_cpu *krcp = container_of(work,
3373 struct kfree_rcu_cpu, monitor_work.work);
3374 unsigned long flags;
3377 raw_spin_lock_irqsave(&krcp->lock, flags);
3379 // Attempt to start a new batch.
3380 for (i = 0; i < KFREE_N_BATCHES; i++) {
3381 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3383 // Try to detach a bkvhead or head and attach it to a
3384 // corresponding free channel if one is available. It may
3385 // be that a previous RCU batch is still in progress, in
3386 // which case another one cannot be queued immediately,
3387 // so the monitor work is rearmed instead.
3388 if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3389 (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3390 (krcp->head && !krwp->head_free)) {
3391 // Channel 1 corresponds to the SLAB-pointer bulk path.
3392 // Channel 2 corresponds to vmalloc-pointer bulk path.
3393 for (j = 0; j < FREE_N_CHANNELS; j++) {
3394 if (!krwp->bkvhead_free[j]) {
3395 krwp->bkvhead_free[j] = krcp->bkvhead[j];
3396 krcp->bkvhead[j] = NULL;
3400 // Channel 3 corresponds to both SLAB and vmalloc
3401 // objects queued on the linked list.
3402 if (!krwp->head_free) {
3403 krwp->head_free = krcp->head;
3407 WRITE_ONCE(krcp->count, 0);
3409 // There is one work item per batch, and each batch
3410 // handles all three "free channels". The work may
3411 // already be pending when the channels have been
3412 // detached one after another.
3414 queue_rcu_work(system_wq, &krwp->rcu_work);
3418 // If there is nothing to detach, our job here is
3419 // successfully done. If at least one of the channels
3420 // is still busy, rearm the work to repeat the attempt
3421 // later, because the previous batches for those
3422 // channels are still in progress.
3423 if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
3424 krcp->monitor_todo = false;
3426 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3428 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3431 static enum hrtimer_restart
3432 schedule_page_work_fn(struct hrtimer *t)
3434 struct kfree_rcu_cpu *krcp =
3435 container_of(t, struct kfree_rcu_cpu, hrtimer);
3437 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3438 return HRTIMER_NORESTART;
3441 static void fill_page_cache_func(struct work_struct *work)
3443 struct kvfree_rcu_bulk_data *bnode;
3444 struct kfree_rcu_cpu *krcp =
3445 container_of(work, struct kfree_rcu_cpu,
3446 page_cache_work.work);
3447 unsigned long flags;
3452 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3453 1 : rcu_min_cached_objs;
3455 for (i = 0; i < nr_pages; i++) {
3456 bnode = (struct kvfree_rcu_bulk_data *)
3457 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3460 raw_spin_lock_irqsave(&krcp->lock, flags);
3461 pushed = put_cached_bnode(krcp, bnode);
3462 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3465 free_page((unsigned long) bnode);
3471 atomic_set(&krcp->work_in_progress, 0);
3472 atomic_set(&krcp->backoff_page_cache_fill, 0);
3476 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3478 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3479 !atomic_xchg(&krcp->work_in_progress, 1)) {
3480 if (atomic_read(&krcp->backoff_page_cache_fill)) {
3481 queue_delayed_work(system_wq,
3482 &krcp->page_cache_work,
3483 msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3484 } else {
3485 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3486 krcp->hrtimer.function = schedule_page_work_fn;
3487 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3488 }
3489 }
3492 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3493 // state specified by flags. If can_alloc is true, the caller must
3494 // be schedulable and not be holding any locks or mutexes that might be
3495 // acquired by the memory allocator or anything that it might invoke.
3496 // Returns true if ptr was successfully recorded, else the caller must
3497 // use a fallback.
3498 static bool
3499 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3500 unsigned long *flags, void *ptr, bool can_alloc)
3502 struct kvfree_rcu_bulk_data *bnode;
3503 int idx;
3505 *krcp = krc_this_cpu_lock(flags);
3506 if (unlikely(!(*krcp)->initialized))
3507 return false;
3509 idx = !!is_vmalloc_addr(ptr);
3511 /* Check if a new block is required. */
3512 if (!(*krcp)->bkvhead[idx] ||
3513 (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3514 bnode = get_cached_bnode(*krcp);
3515 if (!bnode && can_alloc) {
3516 krc_this_cpu_unlock(*krcp, *flags);
3518 // __GFP_NORETRY - allows a light-weight direct reclaim,
3519 // which helps minimize how often the fallback path is hit.
3520 // It also forbids invoking the OOM killer, which is
3521 // beneficial since we are about to release memory soon.
3523 // __GFP_NOMEMALLOC - prevents consuming all of the
3524 // memory reserves. Note that there is a fallback path.
3526 // __GFP_NOWARN - allocation failure is expected under low
3527 // memory or high memory pressure, so do not warn about it.
3529 bnode = (struct kvfree_rcu_bulk_data *)
3530 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3531 *krcp = krc_this_cpu_lock(flags);
3532 }
3534 if (!bnode)
3535 return false;
3537 /* Initialize the new block. */
3538 bnode->nr_records = 0;
3539 bnode->next = (*krcp)->bkvhead[idx];
3541 /* Attach it to the head. */
3542 (*krcp)->bkvhead[idx] = bnode;
3545 /* Finally insert. */
3546 (*krcp)->bkvhead[idx]->records
3547 [(*krcp)->bkvhead[idx]->nr_records++] = ptr;
3549 return true;
3553 * Queue a request for lazy invocation of the appropriate free routine
3554 * after a grace period. Please note that three paths are maintained:
3555 * two for the common case using arrays of pointers, and a third that is
3556 * used only when the main paths cannot be used, e.g., under memory pressure.
3559 * Each kvfree_call_rcu() request is added to a batch. The batch is drained
3560 * every KFREE_DRAIN_JIFFIES. All objects in the batch are freed in
3561 * workqueue context. Batching requests together reduces the number of
3562 * grace periods needed during heavy kfree_rcu()/kvfree_rcu() load.
3564 void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3566 unsigned long flags;
3567 struct kfree_rcu_cpu *krcp;
3568 bool success;
3569 void *ptr;
3571 if (head) {
3572 ptr = (void *) head - (unsigned long) func;
3573 } else {
3574 /*
3575 * Please note that the head-less variant has a limitation,
3576 * hence the clear rule for such objects: it may be used
3577 * only from might_sleep() context. Everywhere else, please
3578 * embed an rcu_head within your data structure.
3580 */
3581 might_sleep();
3582 ptr = (unsigned long *) func;
3583 }
3585 // Queue the object but don't yet schedule the batch.
3586 if (debug_rcu_head_queue(ptr)) {
3587 // Probable double kfree_rcu(), just leak.
3588 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3589 __func__, head);
3591 // Mark as success and leave.
3592 success = true;
3593 goto unlock_return;
3594 }
3595 kasan_record_aux_stack_noalloc(ptr);
3596 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3597 if (!success) {
3598 run_page_cache_worker(krcp);
3600 if (head == NULL)
3601 // Inline if kvfree_rcu(one_arg) call.
3602 goto unlock_return;
3604 head->func = func;
3605 head->next = krcp->head;
3606 WRITE_ONCE(krcp->head, head);
3607 success = true;
3608 }
3610 WRITE_ONCE(krcp->count, krcp->count + 1);
3612 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3613 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3614 !krcp->monitor_todo) {
3615 krcp->monitor_todo = true;
3616 schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3619 unlock_return:
3620 krc_this_cpu_unlock(krcp, flags);
3623 * Inline kvfree() after synchronize_rcu(). We can do
3624 * it from might_sleep() context only, so the current
3625 * CPU can pass through a quiescent state.
3626 */
3627 if (!success) {
3628 debug_rcu_head_unqueue((struct rcu_head *) ptr);
3629 synchronize_rcu();
3630 kvfree(ptr);
3631 }
3633 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
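/*
 * Illustrative sketch (editorial, not part of this file): how callers
 * normally reach kvfree_call_rcu() via the kfree_rcu()/kvfree_rcu()
 * wrappers. The struct and functions below (my_node, my_node_release,
 * my_buf_release) are hypothetical.
 */
struct my_node {
	int key;
	struct rcu_head rh;	/* Embedded for the double-argument form. */
};

static void my_node_release(struct my_node *p)
{
	/* Double-argument form: safe from any context that may post a callback. */
	kfree_rcu(p, rh);
}

static void my_buf_release(void *buf)
{
	/*
	 * Single-argument (head-less) form: may allocate or fall back on
	 * synchronize_rcu(), so it is restricted to might_sleep() context,
	 * as enforced in kvfree_call_rcu() above.
	 */
	kvfree_rcu(buf);
}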
3635 static unsigned long
3636 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3638 int cpu;
3639 unsigned long count = 0;
3641 /* Snapshot count of all CPUs */
3642 for_each_possible_cpu(cpu) {
3643 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3645 count += READ_ONCE(krcp->count);
3646 count += READ_ONCE(krcp->nr_bkv_objs);
3647 atomic_set(&krcp->backoff_page_cache_fill, 1);
3648 }
3650 return count == 0 ? SHRINK_EMPTY : count;
3653 static unsigned long
3654 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3656 int cpu, freed = 0;
3658 for_each_possible_cpu(cpu) {
3659 int count;
3660 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3662 count = krcp->count;
3663 count += drain_page_cache(krcp);
3664 kfree_rcu_monitor(&krcp->monitor_work.work);
3666 sc->nr_to_scan -= count;
3667 freed += count;
3669 if (sc->nr_to_scan <= 0)
3670 break;
3671 }
3673 return freed == 0 ? SHRINK_STOP : freed;
3676 static struct shrinker kfree_rcu_shrinker = {
3677 .count_objects = kfree_rcu_shrink_count,
3678 .scan_objects = kfree_rcu_shrink_scan,
3680 .seeks = DEFAULT_SEEKS,
3683 void __init kfree_rcu_scheduler_running(void)
3685 int cpu;
3686 unsigned long flags;
3688 for_each_possible_cpu(cpu) {
3689 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3691 raw_spin_lock_irqsave(&krcp->lock, flags);
3692 if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
3693 krcp->monitor_todo) {
3694 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3695 continue;
3696 }
3697 krcp->monitor_todo = true;
3698 schedule_delayed_work_on(cpu, &krcp->monitor_work,
3699 KFREE_DRAIN_JIFFIES);
3700 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3705 * During early boot, any blocking grace-period wait automatically
3706 * implies a grace period. Later on, this is never the case for PREEMPTION.
3708 * However, because a context switch is a grace period for !PREEMPTION, any
3709 * blocking grace-period wait automatically implies a grace period if
3710 * there is only one CPU online at any point in time during execution of
3711 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
3712 * occasionally incorrectly indicate that there are multiple CPUs online
3713 * when there was in fact only one the whole time, as this just adds some
3714 * overhead: RCU still operates correctly.
3716 static int rcu_blocking_is_gp(void)
3718 int ret;
3720 if (IS_ENABLED(CONFIG_PREEMPTION))
3721 return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3722 might_sleep(); /* Check for RCU read-side critical section. */
3723 preempt_disable();
3725 * If the rcu_state.n_online_cpus counter is equal to one,
3726 * there is only one CPU, and that CPU sees all prior accesses
3727 * made by any CPU that was online at the time of its access.
3728 * Furthermore, if this counter is equal to one, its value cannot
3729 * change until after the preempt_enable() below.
3731 * Furthermore, if rcu_state.n_online_cpus is equal to one here,
3732 * all later CPUs (both this one and any that come online later
3733 * on) are guaranteed to see all accesses prior to this point
3734 * in the code, without the need for additional memory barriers.
3735 * Those memory barriers are provided by CPU-hotplug code.
3737 ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
3738 preempt_enable();
3739 return ret;
3743 * synchronize_rcu - wait until a grace period has elapsed.
3745 * Control will return to the caller some time after a full grace
3746 * period has elapsed, in other words after all currently executing RCU
3747 * read-side critical sections have completed. Note, however, that
3748 * upon return from synchronize_rcu(), the caller might well be executing
3749 * concurrently with new RCU read-side critical sections that began while
3750 * synchronize_rcu() was waiting.
3752 * RCU read-side critical sections are delimited by rcu_read_lock()
3753 * and rcu_read_unlock(), and may be nested. In addition, but only in
3754 * v5.0 and later, regions of code across which interrupts, preemption,
3755 * or softirqs have been disabled also serve as RCU read-side critical
3756 * sections. This includes hardware interrupt handlers, softirq handlers,
3757 * and NMI handlers.
3759 * Note that this guarantee implies further memory-ordering guarantees.
3760 * On systems with more than one CPU, when synchronize_rcu() returns,
3761 * each CPU is guaranteed to have executed a full memory barrier since
3762 * the end of its last RCU read-side critical section whose beginning
3763 * preceded the call to synchronize_rcu(). In addition, each CPU having
3764 * an RCU read-side critical section that extends beyond the return from
3765 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3766 * after the beginning of synchronize_rcu() and before the beginning of
3767 * that RCU read-side critical section. Note that these guarantees include
3768 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3769 * that are executing in the kernel.
3771 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3772 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3773 * to have executed a full memory barrier during the execution of
3774 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3775 * again only if the system has more than one CPU).
3777 * Implementation of these memory-ordering guarantees is described here:
3778 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3780 void synchronize_rcu(void)
3782 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3783 lock_is_held(&rcu_lock_map) ||
3784 lock_is_held(&rcu_sched_lock_map),
3785 "Illegal synchronize_rcu() in RCU read-side critical section");
3786 if (rcu_blocking_is_gp())
3787 return; // Context allows vacuous grace periods.
3788 if (rcu_gp_is_expedited())
3789 synchronize_rcu_expedited();
3790 else
3791 wait_rcu_gp(call_rcu);
3793 EXPORT_SYMBOL_GPL(synchronize_rcu);
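/*
 * Illustrative sketch (editorial, not part of this file): the classic
 * update-side pattern that synchronize_rcu() enables. All identifiers
 * here (struct my_obj, my_gp, my_lock, my_update) are hypothetical.
 */
struct my_obj {
	int a;
};

static struct my_obj __rcu *my_gp;
static DEFINE_SPINLOCK(my_lock);

static void my_update(struct my_obj *newp)
{
	struct my_obj *oldp;

	spin_lock(&my_lock);
	oldp = rcu_dereference_protected(my_gp, lockdep_is_held(&my_lock));
	rcu_assign_pointer(my_gp, newp);	/* Publish the new version. */
	spin_unlock(&my_lock);

	synchronize_rcu();	/* Wait for pre-existing readers to finish. */
	kfree(oldp);		/* Now safe: no reader can still see oldp. */
}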
3796 * get_state_synchronize_rcu - Snapshot current RCU state
3798 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3799 * or poll_state_synchronize_rcu() to determine whether or not a full
3800 * grace period has elapsed in the meantime.
3802 unsigned long get_state_synchronize_rcu(void)
3805 * Any prior manipulation of RCU-protected data must happen
3806 * before the load from ->gp_seq.
3809 return rcu_seq_snap(&rcu_state.gp_seq);
3811 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3814 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3816 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3817 * or poll_state_synchronize_rcu() to determine whether or not a full
3818 * grace period has elapsed in the meantime. If the needed grace period
3819 * is not already slated to start, notifies RCU core of the need for that
3820 * grace period.
3822 * Interrupts must be enabled for the case where it is necessary to awaken
3823 * the grace-period kthread.
3825 unsigned long start_poll_synchronize_rcu(void)
3827 unsigned long flags;
3828 unsigned long gp_seq = get_state_synchronize_rcu();
3829 bool needwake;
3830 struct rcu_data *rdp;
3831 struct rcu_node *rnp;
3833 lockdep_assert_irqs_enabled();
3834 local_irq_save(flags);
3835 rdp = this_cpu_ptr(&rcu_data);
3836 rnp = rdp->mynode;
3837 raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3838 needwake = rcu_start_this_gp(rnp, rdp, gp_seq);
3839 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3840 if (needwake)
3841 rcu_gp_kthread_wake();
3842 return gp_seq;
3844 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3847 * poll_state_synchronize_rcu - Conditionally wait for an RCU grace period
3849 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3851 * If a full RCU grace period has elapsed since the earlier call from
3852 * which oldstate was obtained, return @true, otherwise return @false.
3853 * If @false is returned, it is the caller's responsibility to invoke this
3854 * function later on until it does return @true. Alternatively, the caller
3855 * can explicitly wait for a grace period, for example, by passing @oldstate
3856 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3858 * Yes, this function does not take counter wrap into account.
3859 * But counter wrap is harmless. If the counter wraps, we have waited for
3860 * more than 2 billion grace periods (and way more on a 64-bit system!).
3861 * Those needing to keep oldstate values for very long time periods
3862 * (many hours even on 32-bit systems) should check them occasionally
3863 * and either refresh them or set a flag indicating that the grace period
3864 * has completed.
3866 * This function provides the same memory-ordering guarantees that
3867 * would be provided by a synchronize_rcu() that was invoked at the call
3868 * to the function that provided @oldstate, and that returned at the end
3869 * of this function.
3871 bool poll_state_synchronize_rcu(unsigned long oldstate)
3873 if (rcu_seq_done(&rcu_state.gp_seq, oldstate)) {
3874 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3875 return true;
3876 }
3878 return false;
3879 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
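/*
 * Illustrative sketch (editorial): non-blocking grace-period polling.
 * The cookie from start_poll_synchronize_rcu() is later checked with
 * poll_state_synchronize_rcu(); the names below are hypothetical.
 */
static unsigned long my_cookie;

static void my_begin_removal(void)
{
	/* Snapshot, and also nudge RCU to start the needed grace period. */
	my_cookie = start_poll_synchronize_rcu();
}

static bool my_try_reclaim(void)
{
	if (!poll_state_synchronize_rcu(my_cookie))
		return false;	/* Not yet; the caller should retry later. */
	/* A full grace period has elapsed since my_begin_removal(). */
	return true;
}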
3882 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3884 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3886 * If a full RCU grace period has elapsed since the earlier call to
3887 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3888 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3890 * Yes, this function does not take counter wrap into account. But
3891 * counter wrap is harmless. If the counter wraps, we have waited for
3892 * more than 2 billion grace periods (and way more on a 64-bit system!),
3893 * so waiting for one additional grace period should be just fine.
3895 * This function provides the same memory-ordering guarantees that
3896 * would be provided by a synchronize_rcu() that was invoked at the call
3897 * to the function that provided @oldstate, and that returned at the end
3898 * of this function.
3900 void cond_synchronize_rcu(unsigned long oldstate)
3902 if (!poll_state_synchronize_rcu(oldstate))
3903 synchronize_rcu();
3905 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
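/*
 * Illustrative sketch (editorial): cond_synchronize_rcu() lets a slow
 * path piggyback on any grace period that elapsed while unrelated work
 * was being done. The helper below is hypothetical and reuses the
 * hypothetical struct my_obj from the earlier sketch.
 */
static void my_slow_teardown(struct my_obj *p)
{
	unsigned long oldstate = get_state_synchronize_rcu();

	/* ... lengthy teardown work, possibly spanning a grace period ... */

	cond_synchronize_rcu(oldstate);	/* Blocks only if still needed. */
	kfree(p);
}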
3908 * Check to see if there is any immediate RCU-related work to be done by
3909 * the current CPU, returning 1 if so and zero otherwise. The checks are
3910 * in order of increasing expense: checks that can be carried out against
3911 * CPU-local state are performed first. However, we must check for CPU
3912 * stalls first, else we might not get a chance.
3914 static int rcu_pending(int user)
3916 bool gp_in_progress;
3917 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3918 struct rcu_node *rnp = rdp->mynode;
3920 lockdep_assert_irqs_disabled();
3922 /* Check for CPU stalls, if enabled. */
3923 check_cpu_stall(rdp);
3925 /* Does this CPU need a deferred NOCB wakeup? */
3926 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3927 return 1;
3929 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3930 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3931 return 0;
3933 /* Is the RCU core waiting for a quiescent state from this CPU? */
3934 gp_in_progress = rcu_gp_in_progress();
3935 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3936 return 1;
3938 /* Does this CPU have callbacks ready to invoke? */
3939 if (!rcu_rdp_is_offloaded(rdp) &&
3940 rcu_segcblist_ready_cbs(&rdp->cblist))
3941 return 1;
3943 /* Has RCU gone idle with this CPU needing another grace period? */
3944 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3945 !rcu_rdp_is_offloaded(rdp) &&
3946 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3947 return 1;
3949 /* Have RCU grace period completed or started? */
3950 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3951 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3952 return 1;
3954 /* nothing to do */
3955 return 0;
3959 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3960 * the compiler is expected to optimize this away.
3962 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3964 trace_rcu_barrier(rcu_state.name, s, cpu,
3965 atomic_read(&rcu_state.barrier_cpu_count), done);
3969 * RCU callback function for rcu_barrier(). If we are last, wake
3970 * up the task executing rcu_barrier().
3972 * Note that the value of rcu_state.barrier_sequence must be captured
3973 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
3974 * other CPUs might count the value down to zero before this CPU gets
3975 * around to invoking rcu_barrier_trace(), which might result in bogus
3976 * data from the next instance of rcu_barrier().
3978 static void rcu_barrier_callback(struct rcu_head *rhp)
3980 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3982 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3983 rcu_barrier_trace(TPS("LastCB"), -1, s);
3984 complete(&rcu_state.barrier_completion);
3986 rcu_barrier_trace(TPS("CB"), -1, s);
3991 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3993 static void rcu_barrier_entrain(struct rcu_data *rdp)
3995 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3996 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3998 lockdep_assert_held(&rcu_state.barrier_lock);
3999 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
4000 return;
4001 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
4002 rdp->barrier_head.func = rcu_barrier_callback;
4003 debug_rcu_head_queue(&rdp->barrier_head);
4004 rcu_nocb_lock(rdp);
4005 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
4006 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
4007 atomic_inc(&rcu_state.barrier_cpu_count);
4008 } else {
4009 debug_rcu_head_unqueue(&rdp->barrier_head);
4010 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
4011 }
4012 rcu_nocb_unlock(rdp);
4013 smp_store_release(&rdp->barrier_seq_snap, gseq);
4017 * Called with preemption disabled, and from cross-cpu IRQ context.
4019 static void rcu_barrier_handler(void *cpu_in)
4021 uintptr_t cpu = (uintptr_t)cpu_in;
4022 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4024 lockdep_assert_irqs_disabled();
4025 WARN_ON_ONCE(cpu != rdp->cpu);
4026 WARN_ON_ONCE(cpu != smp_processor_id());
4027 raw_spin_lock(&rcu_state.barrier_lock);
4028 rcu_barrier_entrain(rdp);
4029 raw_spin_unlock(&rcu_state.barrier_lock);
4033 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
4035 * Note that this primitive does not necessarily wait for an RCU grace period
4036 * to complete. For example, if there are no RCU callbacks queued anywhere
4037 * in the system, then rcu_barrier() is within its rights to return
4038 * immediately, without waiting for anything, much less an RCU grace period.
4040 void rcu_barrier(void)
4042 uintptr_t cpu;
4043 unsigned long flags;
4044 unsigned long gseq;
4045 struct rcu_data *rdp;
4046 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
4048 rcu_barrier_trace(TPS("Begin"), -1, s);
4050 /* Take mutex to serialize concurrent rcu_barrier() requests. */
4051 mutex_lock(&rcu_state.barrier_mutex);
4053 /* Did someone else do our work for us? */
4054 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4055 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
4056 smp_mb(); /* caller's subsequent code after above check. */
4057 mutex_unlock(&rcu_state.barrier_mutex);
4061 /* Mark the start of the barrier operation. */
4062 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4063 rcu_seq_start(&rcu_state.barrier_sequence);
4064 gseq = rcu_state.barrier_sequence;
4065 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4068 * Initialize the count to two rather than to zero in order
4069 * to avoid a too-soon return to zero in case of an immediate
4070 * invocation of the just-enqueued callback (or preemption of
4071 * this task). Exclude CPU-hotplug operations to ensure that no
4072 * offline non-offloaded CPU has callbacks queued.
4074 init_completion(&rcu_state.barrier_completion);
4075 atomic_set(&rcu_state.barrier_cpu_count, 2);
4076 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4079 * Force each CPU with callbacks to register a new callback.
4080 * When that callback is invoked, we will know that all of the
4081 * corresponding CPU's preceding callbacks have been invoked.
4083 for_each_possible_cpu(cpu) {
4084 rdp = per_cpu_ptr(&rcu_data, cpu);
4085 retry:
4086 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
4087 continue;
4088 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4089 if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
4090 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4091 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4092 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
4093 continue;
4094 }
4095 if (!rcu_rdp_cpu_online(rdp)) {
4096 rcu_barrier_entrain(rdp);
4097 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4098 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4099 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
4100 continue;
4101 }
4102 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4103 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
4104 schedule_timeout_uninterruptible(1);
4105 goto retry;
4106 }
4107 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4108 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
4112 * Now that we have an rcu_barrier_callback() callback on each
4113 * CPU, and thus each counted, remove the initial count.
4115 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4116 complete(&rcu_state.barrier_completion);
4118 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4119 wait_for_completion(&rcu_state.barrier_completion);
4121 /* Mark the end of the barrier operation. */
4122 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4123 rcu_seq_end(&rcu_state.barrier_sequence);
4124 gseq = rcu_state.barrier_sequence;
4125 for_each_possible_cpu(cpu) {
4126 rdp = per_cpu_ptr(&rcu_data, cpu);
4128 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4131 /* Other rcu_barrier() invocations can now safely proceed. */
4132 mutex_unlock(&rcu_state.barrier_mutex);
4134 EXPORT_SYMBOL_GPL(rcu_barrier);
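/*
 * Illustrative sketch (editorial): the canonical rcu_barrier() use case,
 * namely making sure that all of a module's outstanding call_rcu()
 * callbacks have been invoked before the module text is freed. The
 * function name is hypothetical.
 */
static void my_module_exit(void)
{
	/* First prevent any new call_rcu() invocations by this module... */
	/* ...then wait until all previously queued callbacks have run. */
	rcu_barrier();
}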
4137 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4138 * first CPU in a given leaf rcu_node structure coming online. The caller
4139 * must hold the corresponding leaf rcu_node ->lock with interrupts
4140 * disabled.
4142 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4144 long mask;
4145 long oldmask;
4146 struct rcu_node *rnp = rnp_leaf;
4148 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4149 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4150 for (;;) {
4151 mask = rnp->grpmask;
4152 rnp = rnp->parent;
4153 if (rnp == NULL)
4154 return;
4155 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4156 oldmask = rnp->qsmaskinit;
4157 rnp->qsmaskinit |= mask;
4158 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4159 if (oldmask)
4160 return;
4161 }
4165 * Do boot-time initialization of a CPU's per-CPU RCU data.
4167 static void __init
4168 rcu_boot_init_percpu_data(int cpu)
4170 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4172 /* Set up local state, ensuring consistent view of global state. */
4173 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4174 INIT_WORK(&rdp->strict_work, strict_work_handler);
4175 WARN_ON_ONCE(rdp->dynticks_nesting != 1);
4176 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
4177 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4178 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4179 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4180 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4181 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4183 rcu_boot_init_nocb_percpu_data(rdp);
4187 * Invoked early in the CPU-online process, when pretty much all services
4188 * are available. The incoming CPU is not present.
4190 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4191 * offline event can be happening at a given time. Note also that we can
4192 * accept some slop in the rcu_state.gp_seq access due to the fact that this
4193 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4194 * And any offloaded callbacks are being numbered elsewhere.
4196 int rcutree_prepare_cpu(unsigned int cpu)
4198 unsigned long flags;
4199 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4200 struct rcu_node *rnp = rcu_get_root();
4202 /* Set up local state, ensuring consistent view of global state. */
4203 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4204 rdp->qlen_last_fqs_check = 0;
4205 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4206 rdp->blimit = blimit;
4207 rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */
4208 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4211 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4212 * (re-)initialized here.
4214 if (!rcu_segcblist_is_enabled(&rdp->cblist))
4215 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
4218 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4219 * propagation up the rcu_node tree will happen at the beginning
4220 * of the next grace period.
4223 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4224 rdp->beenonline = true; /* We have now been online. */
4225 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4226 rdp->gp_seq_needed = rdp->gp_seq;
4227 rdp->cpu_no_qs.b.norm = true;
4228 rdp->core_needs_qs = false;
4229 rdp->rcu_iw_pending = false;
4230 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4231 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4232 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4233 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4234 rcu_spawn_one_boost_kthread(rnp);
4235 rcu_spawn_cpu_nocb_kthread(cpu);
4236 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4238 return 0;
4242 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4244 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4246 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4248 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4252 * Near the end of the CPU-online process. Pretty much all services
4253 * enabled, and the CPU is now very much alive.
4255 int rcutree_online_cpu(unsigned int cpu)
4257 unsigned long flags;
4258 struct rcu_data *rdp;
4259 struct rcu_node *rnp;
4261 rdp = per_cpu_ptr(&rcu_data, cpu);
4262 rnp = rdp->mynode;
4263 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4264 rnp->ffmask |= rdp->grpmask;
4265 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4266 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4267 return 0; /* Too early in boot for scheduler work. */
4268 sync_sched_exp_online_cleanup(cpu);
4269 rcutree_affinity_setting(cpu, -1);
4271 // Stop-machine done, so allow nohz_full to disable tick.
4272 tick_dep_clear(TICK_DEP_BIT_RCU);
4273 return 0;
4277 * Near the beginning of the process. The CPU is still very much alive
4278 * with pretty much all services enabled.
4280 int rcutree_offline_cpu(unsigned int cpu)
4282 unsigned long flags;
4283 struct rcu_data *rdp;
4284 struct rcu_node *rnp;
4286 rdp = per_cpu_ptr(&rcu_data, cpu);
4287 rnp = rdp->mynode;
4288 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4289 rnp->ffmask &= ~rdp->grpmask;
4290 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4292 rcutree_affinity_setting(cpu, cpu);
4294 // nohz_full CPUs need the tick for stop-machine to work quickly
4295 tick_dep_set(TICK_DEP_BIT_RCU);
4297 return 0;
4300 * Mark the specified CPU as being online so that subsequent grace periods
4301 * (both expedited and normal) will wait on it. Note that this means that
4302 * incoming CPUs are not allowed to use RCU read-side critical sections
4303 * until this function is called. Failing to observe this restriction
4304 * will result in lockdep splats.
4306 * Note that this function is special in that it is invoked directly
4307 * from the incoming CPU rather than from the cpuhp_step mechanism.
4308 * This is because this function must be invoked at a precise location.
4310 void rcu_cpu_starting(unsigned int cpu)
4312 unsigned long flags;
4313 unsigned long mask;
4314 struct rcu_data *rdp;
4315 struct rcu_node *rnp;
4316 bool newcpu;
4318 rdp = per_cpu_ptr(&rcu_data, cpu);
4319 if (rdp->cpu_started)
4320 return;
4321 rdp->cpu_started = true;
4323 rnp = rdp->mynode;
4324 mask = rdp->grpmask;
4325 local_irq_save(flags);
4326 arch_spin_lock(&rcu_state.ofl_lock);
4327 rcu_dynticks_eqs_online();
4328 raw_spin_lock(&rcu_state.barrier_lock);
4329 raw_spin_lock_rcu_node(rnp);
4330 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4331 raw_spin_unlock(&rcu_state.barrier_lock);
4332 newcpu = !(rnp->expmaskinitnext & mask);
4333 rnp->expmaskinitnext |= mask;
4334 /* Allow lockless access for expedited grace periods. */
4335 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4336 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4337 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4338 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4339 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4341 /* An incoming CPU should never be blocking a grace period. */
4342 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4343 /* rcu_report_qs_rnp() *really* wants some flags to restore */
4344 unsigned long flags2;
4346 local_irq_save(flags2);
4347 rcu_disable_urgency_upon_qs(rdp);
4348 /* Report QS -after- changing ->qsmaskinitnext! */
4349 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags2);
4351 raw_spin_unlock_rcu_node(rnp);
4353 arch_spin_unlock(&rcu_state.ofl_lock);
4354 local_irq_restore(flags);
4355 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4359 * The outgoing CPU has no further need of RCU, so remove it from
4360 * the rcu_node tree's ->qsmaskinitnext bit masks.
4362 * Note that this function is special in that it is invoked directly
4363 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4364 * This is because this function must be invoked at a precise location.
4366 void rcu_report_dead(unsigned int cpu)
4368 unsigned long flags, seq_flags;
4369 unsigned long mask;
4370 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4371 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4373 // Do any dangling deferred wakeups.
4374 do_nocb_deferred_wakeup(rdp);
4376 /* QS for any half-done expedited grace period. */
4377 rcu_report_exp_rdp(rdp);
4378 rcu_preempt_deferred_qs(current);
4380 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4381 mask = rdp->grpmask;
4382 local_irq_save(seq_flags);
4383 arch_spin_lock(&rcu_state.ofl_lock);
4384 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4385 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4386 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4387 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4388 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4389 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4390 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4391 }
4392 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4393 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4394 arch_spin_unlock(&rcu_state.ofl_lock);
4395 local_irq_restore(seq_flags);
4397 rdp->cpu_started = false;
4400 #ifdef CONFIG_HOTPLUG_CPU
4402 * The outgoing CPU has just passed through the dying-idle state, and we
4403 * are being invoked from the CPU that was IPIed to continue the offline
4404 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
4406 void rcutree_migrate_callbacks(int cpu)
4408 unsigned long flags;
4409 struct rcu_data *my_rdp;
4410 struct rcu_node *my_rnp;
4411 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4412 bool needwake;
4414 if (rcu_rdp_is_offloaded(rdp) ||
4415 rcu_segcblist_empty(&rdp->cblist))
4416 return; /* No callbacks to migrate. */
4418 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4419 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4420 rcu_barrier_entrain(rdp);
4421 my_rdp = this_cpu_ptr(&rcu_data);
4422 my_rnp = my_rdp->mynode;
4423 rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4424 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4425 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4426 /* Leverage recent GPs and set GP for new callbacks. */
4427 needwake = rcu_advance_cbs(my_rnp, rdp) ||
4428 rcu_advance_cbs(my_rnp, my_rdp);
4429 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4430 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4431 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4432 rcu_segcblist_disable(&rdp->cblist);
4433 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4434 if (rcu_rdp_is_offloaded(my_rdp)) {
4435 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4436 __call_rcu_nocb_wake(my_rdp, true, flags);
4437 } else {
4438 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4439 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4440 }
4441 if (needwake)
4442 rcu_gp_kthread_wake();
4443 lockdep_assert_irqs_enabled();
4444 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4445 !rcu_segcblist_empty(&rdp->cblist),
4446 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4447 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4448 rcu_segcblist_first_cb(&rdp->cblist));
4453 * On non-huge systems, use expedited RCU grace periods to make suspend
4454 * and hibernation run faster.
4456 static int rcu_pm_notify(struct notifier_block *self,
4457 unsigned long action, void *hcpu)
4459 switch (action) {
4460 case PM_HIBERNATION_PREPARE:
4461 case PM_SUSPEND_PREPARE:
4462 rcu_expedite_gp();
4463 break;
4464 case PM_POST_HIBERNATION:
4465 case PM_POST_SUSPEND:
4466 rcu_unexpedite_gp();
4467 break;
4468 default:
4469 break;
4470 }
4471 return NOTIFY_OK;
4475 * Spawn the kthreads that handle RCU's grace periods.
4477 static int __init rcu_spawn_gp_kthread(void)
4479 unsigned long flags;
4480 struct rcu_node *rnp;
4481 struct sched_param sp;
4482 struct task_struct *t;
4484 rcu_scheduler_fully_active = 1;
4485 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4486 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4487 return 0;
4488 if (kthread_prio) {
4489 sp.sched_priority = kthread_prio;
4490 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4491 }
4492 rnp = rcu_get_root();
4493 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4494 WRITE_ONCE(rcu_state.gp_activity, jiffies);
4495 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4496 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4497 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
4498 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4500 rcu_spawn_nocb_kthreads();
4501 rcu_spawn_boost_kthreads();
4502 rcu_spawn_core_kthreads();
4503 return 0;
4505 early_initcall(rcu_spawn_gp_kthread);
4508 * This function is invoked towards the end of the scheduler's
4509 * initialization process. Before this is called, the idle task might
4510 * contain synchronous grace-period primitives (during which time, this idle
4511 * task is booting the system, and such primitives are no-ops). After this
4512 * function is called, any synchronous grace-period primitives are run as
4513 * expedited, with the requesting task driving the grace period forward.
4514 * A later core_initcall() rcu_set_runtime_mode() will switch to full
4515 * runtime RCU functionality.
4517 void rcu_scheduler_starting(void)
4519 WARN_ON(num_online_cpus() != 1);
4520 WARN_ON(nr_context_switches() > 0);
4521 rcu_test_sync_prims();
4522 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4523 rcu_test_sync_prims();
4527 * Helper function for rcu_init() that initializes the rcu_state structure.
4529 static void __init rcu_init_one(void)
4531 static const char * const buf[] = RCU_NODE_NAME_INIT;
4532 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4533 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4534 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4536 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
4537 int cpustride = 1;
4538 int i;
4539 int j;
4540 struct rcu_node *rnp;
4542 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
4544 /* Silence gcc 4.8 false positive about array index out of range. */
4545 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4546 panic("rcu_init_one: rcu_num_lvls out of range");
4548 /* Initialize the level-tracking arrays. */
4550 for (i = 1; i < rcu_num_lvls; i++)
4551 rcu_state.level[i] =
4552 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4553 rcu_init_levelspread(levelspread, num_rcu_lvl);
4555 /* Initialize the elements themselves, starting from the leaves. */
4557 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4558 cpustride *= levelspread[i];
4559 rnp = rcu_state.level[i];
4560 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4561 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4562 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4563 &rcu_node_class[i], buf[i]);
4564 raw_spin_lock_init(&rnp->fqslock);
4565 lockdep_set_class_and_name(&rnp->fqslock,
4566 &rcu_fqs_class[i], fqs[i]);
4567 rnp->gp_seq = rcu_state.gp_seq;
4568 rnp->gp_seq_needed = rcu_state.gp_seq;
4569 rnp->completedqs = rcu_state.gp_seq;
4571 rnp->qsmaskinit = 0;
4572 rnp->grplo = j * cpustride;
4573 rnp->grphi = (j + 1) * cpustride - 1;
4574 if (rnp->grphi >= nr_cpu_ids)
4575 rnp->grphi = nr_cpu_ids - 1;
4576 if (i == 0) {
4577 rnp->grpnum = 0;
4578 rnp->grpmask = 0;
4579 rnp->parent = NULL;
4580 } else {
4581 rnp->grpnum = j % levelspread[i - 1];
4582 rnp->grpmask = BIT(rnp->grpnum);
4583 rnp->parent = rcu_state.level[i - 1] +
4584 j / levelspread[i - 1];
4585 }
4587 INIT_LIST_HEAD(&rnp->blkd_tasks);
4588 rcu_init_one_nocb(rnp);
4589 init_waitqueue_head(&rnp->exp_wq[0]);
4590 init_waitqueue_head(&rnp->exp_wq[1]);
4591 init_waitqueue_head(&rnp->exp_wq[2]);
4592 init_waitqueue_head(&rnp->exp_wq[3]);
4593 spin_lock_init(&rnp->exp_lock);
4594 mutex_init(&rnp->boost_kthread_mutex);
4598 init_swait_queue_head(&rcu_state.gp_wq);
4599 init_swait_queue_head(&rcu_state.expedited_wq);
4600 rnp = rcu_first_leaf_node();
4601 for_each_possible_cpu(i) {
4602 while (i > rnp->grphi)
4603 rnp++;
4604 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4605 rcu_boot_init_percpu_data(i);
4610 * Force priority from the kernel command-line into range.
4612 static void __init sanitize_kthread_prio(void)
4614 int kthread_prio_in = kthread_prio;
4616 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4617 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4618 kthread_prio = 2;
4619 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4620 kthread_prio = 1;
4621 else if (kthread_prio < 0)
4622 kthread_prio = 0;
4623 else if (kthread_prio > 99)
4624 kthread_prio = 99;
4626 if (kthread_prio != kthread_prio_in)
4627 pr_alert("%s: Limited prio to %d from %d\n",
4628 __func__, kthread_prio, kthread_prio_in);
4632 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4633 * replace the definitions in tree.h because those are needed to size
4634 * the ->node array in the rcu_state structure.
4636 void rcu_init_geometry(void)
4638 ulong d;
4639 int i;
4640 static unsigned long old_nr_cpu_ids;
4641 int rcu_capacity[RCU_NUM_LVLS];
4642 static bool initialized;
4644 if (initialized) {
4645 /*
4646 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4647 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4648 */
4649 WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4650 return;
4651 }
4653 old_nr_cpu_ids = nr_cpu_ids;
4654 initialized = true;
4657 * Initialize any unspecified boot parameters.
4658 * The default values of jiffies_till_first_fqs and
4659 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4660 * value, which is a function of HZ, plus one for each
4661 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4663 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4664 if (jiffies_till_first_fqs == ULONG_MAX)
4665 jiffies_till_first_fqs = d;
4666 if (jiffies_till_next_fqs == ULONG_MAX)
4667 jiffies_till_next_fqs = d;
4668 adjust_jiffies_till_sched_qs();
4670 /* If the compile-time values are accurate, just leave. */
4671 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4672 nr_cpu_ids == NR_CPUS)
4673 return;
4674 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4675 rcu_fanout_leaf, nr_cpu_ids);
4678 * The boot-time rcu_fanout_leaf parameter must be at least two
4679 * and cannot exceed the number of bits in the rcu_node masks.
4680 * Complain and fall back to the compile-time values if this
4681 * limit is exceeded.
4683 if (rcu_fanout_leaf < 2 ||
4684 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4685 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4686 WARN_ON_ONCE(1);
4687 return;
4688 }
4691 * Compute the number of nodes that can be handled by an rcu_node
4692 * tree with the given number of levels.
4694 rcu_capacity[0] = rcu_fanout_leaf;
4695 for (i = 1; i < RCU_NUM_LVLS; i++)
4696 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4699 * The tree must be able to accommodate the configured number of CPUs.
4700 * If this limit is exceeded, fall back to the compile-time values.
4702 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4703 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4704 WARN_ON_ONCE(1);
4705 return;
4706 }
4708 /* Calculate the number of levels in the tree. */
4709 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4710 }
4711 rcu_num_lvls = i + 1;
4713 /* Calculate the number of rcu_nodes at each level of the tree. */
4714 for (i = 0; i < rcu_num_lvls; i++) {
4715 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4716 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4719 /* Calculate the total number of rcu_node structures. */
4720 rcu_num_nodes = 0;
4721 for (i = 0; i < rcu_num_lvls; i++)
4722 rcu_num_nodes += num_rcu_lvl[i];
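/*
 * Worked example (editorial; the configuration values are illustrative
 * assumptions): with RCU_FANOUT = 64, RCU_NUM_LVLS = 3,
 * rcu_fanout_leaf = 16, and nr_cpu_ids = 6000:
 *
 *   rcu_capacity[] = { 16, 1024, 65536 }
 *   6000 > 1024 but 6000 <= 65536, so rcu_num_lvls = 3
 *   num_rcu_lvl[] = { DIV_ROUND_UP(6000, 65536) = 1,     // root
 *                     DIV_ROUND_UP(6000, 1024)  = 6,     // middle
 *                     DIV_ROUND_UP(6000, 16)    = 375 }  // leaves
 *   rcu_num_nodes = 1 + 6 + 375 = 382
 */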
4726 * Dump out the structure of the rcu_node combining tree associated
4727 * with the rcu_state structure.
4729 static void __init rcu_dump_rcu_node_tree(void)
4731 int level = 0;
4732 struct rcu_node *rnp;
4734 pr_info("rcu_node tree layout dump\n");
4736 rcu_for_each_node_breadth_first(rnp) {
4737 if (rnp->level != level) {
4738 pr_cont("\n");
4739 level = rnp->level;
4740 }
4742 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4747 struct workqueue_struct *rcu_gp_wq;
4748 struct workqueue_struct *rcu_par_gp_wq;
4750 static void __init kfree_rcu_batch_init(void)
4752 int cpu;
4753 int i;
4755 /* Clamp it to the [0:100] seconds interval. */
4756 if (rcu_delay_page_cache_fill_msec < 0 ||
4757 rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
4759 rcu_delay_page_cache_fill_msec =
4760 clamp(rcu_delay_page_cache_fill_msec, 0,
4761 (int) (100 * MSEC_PER_SEC));
4763 pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
4764 rcu_delay_page_cache_fill_msec);
4767 for_each_possible_cpu(cpu) {
4768 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4770 for (i = 0; i < KFREE_N_BATCHES; i++) {
4771 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4772 krcp->krw_arr[i].krcp = krcp;
4775 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4776 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
4777 krcp->initialized = true;
4779 if (register_shrinker(&kfree_rcu_shrinker))
4780 pr_err("Failed to register kfree_rcu() shrinker!\n");
4783 void __init rcu_init(void)
4785 int cpu;
4787 rcu_early_boot_tests();
4789 kfree_rcu_batch_init();
4790 rcu_bootup_announce();
4791 sanitize_kthread_prio();
4792 rcu_init_geometry();
4793 rcu_init_one();
4794 if (dump_tree)
4795 rcu_dump_rcu_node_tree();
4796 if (use_softirq)
4797 open_softirq(RCU_SOFTIRQ, rcu_core_si);
4800 * We don't need protection against CPU-hotplug here because
4801 * this is called early in boot, before either interrupts
4802 * or the scheduler are operational.
4804 pm_notifier(rcu_pm_notify, 0);
4805 for_each_online_cpu(cpu) {
4806 rcutree_prepare_cpu(cpu);
4807 rcu_cpu_starting(cpu);
4808 rcutree_online_cpu(cpu);
4811 /* Create workqueue for Tree SRCU and for expedited GPs. */
4812 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4813 WARN_ON(!rcu_gp_wq);
4814 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4815 WARN_ON(!rcu_par_gp_wq);
4817 /* Fill in default value for rcutree.qovld boot parameter. */
4818 /* -After- the rcu_node ->lock fields are initialized! */
4819 if (qovld < 0)
4820 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4821 else
4822 qovld_calc = qovld;
4825 #include "tree_stall.h"
4826 #include "tree_exp.h"
4827 #include "tree_nocb.h"
4828 #include "tree_plugin.h"