1 // SPDX-License-Identifier: GPL-2.0+
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
5 * Copyright IBM Corporation, 2008
12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
14 * For detailed explanation of Read-Copy Update mechanism see -
18 #define pr_fmt(fmt) "rcu: " fmt
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/moduleparam.h>
35 #include <linux/panic.h>
36 #include <linux/panic_notifier.h>
37 #include <linux/percpu.h>
38 #include <linux/notifier.h>
39 #include <linux/cpu.h>
40 #include <linux/mutex.h>
41 #include <linux/time.h>
42 #include <linux/kernel_stat.h>
43 #include <linux/wait.h>
44 #include <linux/kthread.h>
45 #include <uapi/linux/sched/types.h>
46 #include <linux/prefetch.h>
47 #include <linux/delay.h>
48 #include <linux/random.h>
49 #include <linux/trace_events.h>
50 #include <linux/suspend.h>
51 #include <linux/ftrace.h>
52 #include <linux/tick.h>
53 #include <linux/sysrq.h>
54 #include <linux/kprobes.h>
55 #include <linux/gfp.h>
56 #include <linux/oom.h>
57 #include <linux/smpboot.h>
58 #include <linux/jiffies.h>
59 #include <linux/slab.h>
60 #include <linux/sched/isolation.h>
61 #include <linux/sched/clock.h>
62 #include <linux/vmalloc.h>
64 #include <linux/kasan.h>
65 #include <linux/context_tracking.h>
66 #include "../time/tick-internal.h"
71 #ifdef MODULE_PARAM_PREFIX
72 #undef MODULE_PARAM_PREFIX
74 #define MODULE_PARAM_PREFIX "rcutree."
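/*
 * Illustrative note (not part of the original source): because of the
 * prefix above, the boot-time parameters declared throughout this file
 * are named on the kernel command line as, for example,
 * "rcutree.dump_tree=1" or "rcutree.use_softirq=0", rather than by
 * their bare variable names.
 */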
76 /* Data structures. */
78 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
80 #ifdef CONFIG_RCU_NOCB_CPU
81 .cblist.flags = SEGCBLIST_RCU_CORE,
84 static struct rcu_state rcu_state = {
85 .level = { &rcu_state.node[0] },
86 .gp_state = RCU_GP_IDLE,
87 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
88 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
89 .barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
92 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
93 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
94 .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
97 /* Dump rcu_node combining tree at boot to verify correct setup. */
98 static bool dump_tree;
99 module_param(dump_tree, bool, 0444);
100 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
101 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
102 #ifndef CONFIG_PREEMPT_RT
103 module_param(use_softirq, bool, 0444);
105 /* Control rcu_node-tree auto-balancing at boot time. */
106 static bool rcu_fanout_exact;
107 module_param(rcu_fanout_exact, bool, 0444);
108 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
109 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
110 module_param(rcu_fanout_leaf, int, 0444);
111 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
112 /* Number of rcu_nodes at specified level. */
113 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
114 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
117 * The rcu_scheduler_active variable is initialized to the value
118 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
119 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
120 * RCU can assume that there is but one task, allowing RCU to (for example)
121 * optimize synchronize_rcu() to a simple barrier(). When this variable
122 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
123 * to detect real grace periods. This variable is also used to suppress
124 * boot-time false positives from lockdep-RCU error checking. Finally, it
125 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
126 * is fully initialized, including all of its kthreads having been spawned.
128 int rcu_scheduler_active __read_mostly;
129 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
132 * The rcu_scheduler_fully_active variable transitions from zero to one
133 * during the early_initcall() processing, which is after the scheduler
134 * is capable of creating new tasks. So RCU processing (for example,
135 * creating tasks for RCU priority boosting) must be delayed until after
136 * rcu_scheduler_fully_active transitions from zero to one. We also
137 * currently delay invocation of any RCU callbacks until after this point.
139 * It might later prove better for people registering RCU callbacks during
140 * early boot to take responsibility for these callbacks, but one step at
143 static int rcu_scheduler_fully_active __read_mostly;
145 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
146 unsigned long gps, unsigned long flags);
147 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
148 static void invoke_rcu_core(void);
149 static void rcu_report_exp_rdp(struct rcu_data *rdp);
150 static void sync_sched_exp_online_cleanup(int cpu);
151 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
152 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
153 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
154 static bool rcu_init_invoked(void);
155 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
156 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
159 * rcuc/rcub/rcuop kthread real-time priority. Whether the "rcuop"
160 * kthreads actually run at this real-time priority is controlled by
161 * the CONFIG_RCU_NOCB_CPU_CB_BOOST Kconfig option.
163 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
164 module_param(kthread_prio, int, 0444);
166 /* Delay in jiffies for grace-period initialization delays, debug only. */
168 static int gp_preinit_delay;
169 module_param(gp_preinit_delay, int, 0444);
170 static int gp_init_delay;
171 module_param(gp_init_delay, int, 0444);
172 static int gp_cleanup_delay;
173 module_param(gp_cleanup_delay, int, 0444);
175 // Add delay to rcu_read_unlock() for strict grace periods.
176 static int rcu_unlock_delay;
177 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
178 module_param(rcu_unlock_delay, int, 0444);
182 * This RCU parameter is read-only at runtime but can be set at
183 * boot time. It specifies the minimum number of objects that may
184 * be cached per CPU. Each cached object is one page in size.
187 static int rcu_min_cached_objs = 5;
188 module_param(rcu_min_cached_objs, int, 0444);
190 // A page shrinker can ask for pages to be freed to make them
191 // available for other parts of the system. This usually happens
192 // under low memory conditions, and in that case we should also
193 // defer page-cache filling for a short time period.
195 // The default value is 5 seconds, which is long enough to reduce
196 // interference with the shrinker while it asks other systems to
197 // drain their caches.
198 static int rcu_delay_page_cache_fill_msec = 5000;
199 module_param(rcu_delay_page_cache_fill_msec, int, 0444);
201 /* Retrieve RCU kthreads priority for rcutorture */
202 int rcu_get_gp_kthreads_prio(void)
206 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
209 * Number of grace periods between delays, normalized by the duration of
210 * the delay. The longer the delay, the more the grace periods between
211 * each delay. The reason for this normalization is that it means that,
212 * for non-zero delays, the overall slowdown of grace periods is constant
213 * regardless of the duration of the delay. This arrangement balances
214 * the need for long delays to increase some race probabilities with the
215 * need for fast grace periods to increase other race probabilities.
217 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */
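/*
 * Worked example (illustrative, not in the original source): rcu_gp_slow()
 * below sleeps for "delay" jiffies once every
 * rcu_num_nodes * PER_RCU_NODE_PERIOD * delay grace periods, so the average
 * added cost per grace period is 1 / (rcu_num_nodes * PER_RCU_NODE_PERIOD)
 * jiffies no matter which delay is chosen.  For example, with three rcu_node
 * structures, a 20-jiffy delay fires once per 180 grace periods and a
 * 5-jiffy delay once per 45, both averaging about 0.11 jiffy per grace
 * period.
 */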
220 * Return true if an RCU grace period is in progress. The READ_ONCE()s
221 * permit this function to be invoked without holding the root rcu_node
222 * structure's ->lock, but of course results can be subject to change.
224 static int rcu_gp_in_progress(void)
226 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
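/*
 * Illustrative sketch (not part of the original source): a ->gp_seq value
 * packs a grace-period counter into its upper bits and a small state field
 * into its RCU_SEQ_CTR_SHIFT low-order bits, so the single load above
 * answers "is a grace period in progress?".  Conceptually:
 *
 *	state   = gp_seq & RCU_SEQ_STATE_MASK;	// nonzero: GP in progress
 *	counter = gp_seq >> RCU_SEQ_CTR_SHIFT;	// grace-period count
 *
 * The real helpers (rcu_seq_start(), rcu_seq_end(), rcu_seq_snap(), ...)
 * live in kernel/rcu/rcu.h.
 */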
230 * Return the number of callbacks queued on the specified CPU.
231 * Handles both the nocbs and normal cases.
233 static long rcu_get_n_cbs_cpu(int cpu)
235 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
237 if (rcu_segcblist_is_enabled(&rdp->cblist))
238 return rcu_segcblist_n_cbs(&rdp->cblist);
242 void rcu_softirq_qs(void)
245 rcu_preempt_deferred_qs(current);
246 rcu_tasks_qs(current, false);
250 * Reset the current CPU's ->dynticks counter to indicate that the
251 * newly onlined CPU is no longer in an extended quiescent state.
252 * This will either leave the counter unchanged, or increment it
253 * to the next non-quiescent value.
255 * The non-atomic test/increment sequence works because the upper bits
256 * of the ->dynticks counter are manipulated only by the corresponding CPU,
257 * or when the corresponding CPU is offline.
259 static void rcu_dynticks_eqs_online(void)
261 if (ct_dynticks() & RCU_DYNTICKS_IDX)
263 ct_state_inc(RCU_DYNTICKS_IDX);
267 * Snapshot the ->dynticks counter with full ordering so as to allow
268 * stable comparison of this counter with past and future snapshots.
270 static int rcu_dynticks_snap(int cpu)
272 smp_mb(); // Fundamental RCU ordering guarantee.
273 return ct_dynticks_cpu_acquire(cpu);
277 * Return true if the snapshot returned from rcu_dynticks_snap()
278 * indicates that RCU is in an extended quiescent state.
280 static bool rcu_dynticks_in_eqs(int snap)
282 return !(snap & RCU_DYNTICKS_IDX);
286 * Return true if the CPU corresponding to the specified rcu_data
287 * structure has spent some time in an extended quiescent state since
288 * rcu_dynticks_snap() returned the specified snapshot.
290 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
292 return snap != rcu_dynticks_snap(rdp->cpu);
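/*
 * Illustrative sketch (not part of the original source): the dynticks
 * counter changes on every idle<->non-idle transition, with RCU_DYNTICKS_IDX
 * set while the CPU is non-idle.  A later counter value that differs from an
 * earlier snapshot therefore shows that the CPU was in, or passed through,
 * an extended quiescent state in the meantime, even if it is busy again by
 * the time the grace-period kthread looks:
 *
 *	snap = rcu_dynticks_snap(cpu);		// e.g. as the GP starts
 *	...
 *	if (rcu_dynticks_snap(cpu) != snap)	// any movement implies an EQS
 *		report_quiescent_state(cpu);	// hypothetical helper
 */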
296 * Return true if the referenced integer is zero while the specified
297 * CPU remains within a single extended quiescent state.
299 bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
303 // If not quiescent, force back to earlier extended quiescent state.
304 snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
305 smp_rmb(); // Order ->dynticks and *vp reads.
307 return false; // Non-zero, so report failure.
308 smp_rmb(); // Order *vp read and ->dynticks re-read.
310 // If still in the same extended quiescent state, we are good!
311 return snap == ct_dynticks_cpu(cpu);
315 * Let the RCU core know that this CPU has gone through the scheduler,
316 * which is a quiescent state. This is called when the need for a
317 * quiescent state is urgent, so we burn an atomic operation and full
318 * memory barriers to let the RCU core know about it, regardless of what
319 * this CPU might (or might not) do in the near future.
321 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
323 * The caller must have disabled interrupts and must not be idle.
325 notrace void rcu_momentary_dyntick_idle(void)
329 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
330 seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
331 /* It is illegal to call this from idle state. */
332 WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
333 rcu_preempt_deferred_qs(current);
335 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
338 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
340 * If the current CPU is idle and running at a first-level (not nested)
341 * interrupt, or directly from idle, return true.
343 * The caller must have at least disabled IRQs.
345 static int rcu_is_cpu_rrupt_from_idle(void)
350 * Usually called from the tick, but also used from smp_call_function_single()
351 * for expedited grace periods. This latter can result in running from
352 * the idle task, instead of an actual IPI.
354 lockdep_assert_irqs_disabled();
356 /* Check for counter underflows */
357 RCU_LOCKDEP_WARN(ct_dynticks_nesting() < 0,
358 "RCU dynticks_nesting counter underflow!");
359 RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
360 "RCU dynticks_nmi_nesting counter underflow/zero!");
362 /* Are we at first interrupt nesting level? */
363 nesting = ct_dynticks_nmi_nesting();
368 * If we're not in an interrupt, we must be in the idle task!
370 WARN_ON_ONCE(!nesting && !is_idle_task(current));
372 /* Does CPU appear to be idle from an RCU standpoint? */
373 return ct_dynticks_nesting() == 0;
376 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
377 // Maximum callbacks per rcu_do_batch ...
378 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
379 static long blimit = DEFAULT_RCU_BLIMIT;
380 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
381 static long qhimark = DEFAULT_RCU_QHIMARK;
382 #define DEFAULT_RCU_QLOMARK 100 // Once only this many pending, use blimit.
383 static long qlowmark = DEFAULT_RCU_QLOMARK;
384 #define DEFAULT_RCU_QOVLD_MULT 2
385 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
386 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
387 static long qovld_calc = -1; // No pre-initialization lock acquisitions!
389 module_param(blimit, long, 0444);
390 module_param(qhimark, long, 0444);
391 module_param(qlowmark, long, 0444);
392 module_param(qovld, long, 0444);
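/*
 * Worked example (illustrative, not in the original source): with the
 * defaults above, rcu_do_batch() invokes at most blimit callbacks per pass
 * (10, or 1000 in CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels).  Once more than
 * qhimark = 10000 callbacks are queued on a CPU, blimit is ignored so the
 * backlog can drain; once the backlog falls back to qlowmark = 100, blimit
 * is honored again.  At qovld = 20000 queued callbacks, RCU additionally
 * starts hammering that CPU for quiescent states.
 */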
394 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
395 static ulong jiffies_till_next_fqs = ULONG_MAX;
396 static bool rcu_kick_kthreads;
397 static int rcu_divisor = 7;
398 module_param(rcu_divisor, int, 0644);
400 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
401 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
402 module_param(rcu_resched_ns, long, 0644);
405 * How long the grace period must be before we start recruiting
406 * quiescent-state help from rcu_note_context_switch().
408 static ulong jiffies_till_sched_qs = ULONG_MAX;
409 module_param(jiffies_till_sched_qs, ulong, 0444);
410 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
411 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
414 * Make sure that we give the grace-period kthread time to detect any
415 * idle CPUs before taking active measures to force quiescent states.
416 * However, don't go below 100 milliseconds, adjusted upwards for really
419 static void adjust_jiffies_till_sched_qs(void)
423 /* If jiffies_till_sched_qs was specified, respect the request. */
424 if (jiffies_till_sched_qs != ULONG_MAX) {
425 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
428 /* Otherwise, set to third fqs scan, but bound below on large systems. */
429 j = READ_ONCE(jiffies_till_first_fqs) +
430 2 * READ_ONCE(jiffies_till_next_fqs);
431 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
432 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
433 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
434 WRITE_ONCE(jiffies_to_sched_qs, j);
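/*
 * Worked example (illustrative, hypothetical values): if jiffies_till_sched_qs
 * is left at its default, the delay is computed as
 * jiffies_till_first_fqs + 2 * jiffies_till_next_fqs.  With, say, 100-jiffy
 * settings for both, that is 300 jiffies, unless the
 * HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV floor is larger on a very
 * large system.
 */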
437 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
440 int ret = kstrtoul(val, 0, &j);
443 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
444 adjust_jiffies_till_sched_qs();
449 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
452 int ret = kstrtoul(val, 0, &j);
455 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
456 adjust_jiffies_till_sched_qs();
461 static const struct kernel_param_ops first_fqs_jiffies_ops = {
462 .set = param_set_first_fqs_jiffies,
463 .get = param_get_ulong,
466 static const struct kernel_param_ops next_fqs_jiffies_ops = {
467 .set = param_set_next_fqs_jiffies,
468 .get = param_get_ulong,
471 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
472 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
473 module_param(rcu_kick_kthreads, bool, 0644);
475 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
476 static int rcu_pending(int user);
479 * Return the number of RCU GPs completed thus far for debug & stats.
481 unsigned long rcu_get_gp_seq(void)
483 return READ_ONCE(rcu_state.gp_seq);
485 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
488 * Return the number of RCU expedited batches completed thus far for
489 * debug & stats. Odd numbers mean that a batch is in progress, even
490 * numbers mean idle. The value returned will thus be roughly double
491 * the cumulative batches since boot.
493 unsigned long rcu_exp_batches_completed(void)
495 return rcu_state.expedited_sequence;
497 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
500 * Return the root node of the rcu_state structure.
502 static struct rcu_node *rcu_get_root(void)
504 return &rcu_state.node[0];
508 * Send along grace-period-related data for rcutorture diagnostics.
510 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
511 unsigned long *gp_seq)
515 *flags = READ_ONCE(rcu_state.gp_flags);
516 *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
522 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
524 #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
526 * An empty function that will trigger a reschedule on
527 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
529 static void late_wakeup_func(struct irq_work *work)
533 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
534 IRQ_WORK_INIT(late_wakeup_func);
539 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
540 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
542 * In these cases the late RCU wake ups aren't supported in the resched loops and our
543 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
544 * get re-enabled again.
546 noinstr void rcu_irq_work_resched(void)
548 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
550 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
553 if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
556 instrumentation_begin();
557 if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
558 irq_work_queue(this_cpu_ptr(&late_wakeup_work));
560 instrumentation_end();
562 #endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
564 #ifdef CONFIG_PROVE_RCU
566 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
568 void rcu_irq_exit_check_preempt(void)
570 lockdep_assert_irqs_disabled();
572 RCU_LOCKDEP_WARN(ct_dynticks_nesting() <= 0,
573 "RCU dynticks_nesting counter underflow/zero!");
574 RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
576 "Bad RCU dynticks_nmi_nesting counter\n");
577 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
578 "RCU in extended quiescent state!");
580 #endif /* #ifdef CONFIG_PROVE_RCU */
582 #ifdef CONFIG_NO_HZ_FULL
584 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
586 * The scheduler tick is not normally enabled when CPUs enter the kernel
587 * from nohz_full userspace execution. After all, nohz_full userspace
588 * execution is an RCU quiescent state and the time executing in the kernel
589 * is quite short. Except of course when it isn't. And it is not hard to
590 * cause a large system to spend tens of seconds or even minutes looping
591 * in the kernel, which can cause a number of problems, including RCU CPU stall warnings.
594 * Therefore, if a nohz_full CPU fails to report a quiescent state
595 * in a timely manner, the RCU grace-period kthread sets that CPU's
596 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
597 * exception will invoke this function, which will turn on the scheduler
598 * tick, which will enable RCU to detect that CPU's quiescent states,
599 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
600 * The tick will be disabled once a quiescent state is reported for
603 * Of course, in carefully tuned systems, there might never be an
604 * interrupt or exception. In that case, the RCU grace-period kthread
605 * will eventually cause one to happen. However, in less carefully
606 * controlled environments, this function allows RCU to get what it
607 * needs without creating otherwise useless interruptions.
609 void __rcu_irq_enter_check_tick(void)
611 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
613 // If we're here from NMI there's nothing to do.
617 RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
618 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
620 if (!tick_nohz_full_cpu(rdp->cpu) ||
621 !READ_ONCE(rdp->rcu_urgent_qs) ||
622 READ_ONCE(rdp->rcu_forced_tick)) {
623 // RCU doesn't need nohz_full help from this CPU, or it is
624 // already getting that help.
628 // We get here only when not in an extended quiescent state and
629 // from interrupts (as opposed to NMIs). Therefore, (1) RCU is
630 // already watching and (2) The fact that we are in an interrupt
631 // handler and that the rcu_node lock is an irq-disabled lock
632 // prevents self-deadlock. So we can safely recheck under the lock.
633 // Note that the nohz_full state currently cannot change.
634 raw_spin_lock_rcu_node(rdp->mynode);
635 if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
636 // A nohz_full CPU is in the kernel and RCU needs a
637 // quiescent state. Turn on the tick!
638 WRITE_ONCE(rdp->rcu_forced_tick, true);
639 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
641 raw_spin_unlock_rcu_node(rdp->mynode);
643 NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
644 #endif /* CONFIG_NO_HZ_FULL */
647 * Check to see if any future non-offloaded RCU-related work will need
648 * to be done by the current CPU, even if none need be done immediately,
649 * returning 1 if so. This function is part of the RCU implementation;
650 * it is -not- an exported member of the RCU API. This is used by
651 * the idle-entry code to figure out whether it is safe to disable the
652 * scheduler-clock interrupt.
654 * Just check whether or not this CPU has non-offloaded RCU callbacks
657 int rcu_needs_cpu(void)
659 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
660 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
664 * If any sort of urgency was applied to the current CPU (for example,
665 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
666 * to get to a quiescent state, disable it.
668 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
670 raw_lockdep_assert_held_rcu_node(rdp->mynode);
671 WRITE_ONCE(rdp->rcu_urgent_qs, false);
672 WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
673 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
674 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
675 WRITE_ONCE(rdp->rcu_forced_tick, false);
680 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
682 * Return true if RCU is watching the running CPU, which means that this
683 * CPU can safely enter RCU read-side critical sections. In other words,
684 * if the current CPU is not in its idle loop or is in an interrupt or
685 * NMI handler, return true.
687 * Make notrace because it can be called by the internal functions of
688 * ftrace; making it notrace avoids unnecessary recursive calls.
690 notrace bool rcu_is_watching(void)
694 preempt_disable_notrace();
695 ret = !rcu_dynticks_curr_cpu_in_eqs();
696 preempt_enable_notrace();
699 EXPORT_SYMBOL_GPL(rcu_is_watching);
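/*
 * Illustrative usage sketch (not part of the original source): code that
 * might run from idle or other extended-quiescent-state contexts (for
 * example, tracing callbacks) can guard its RCU usage:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		// ... dereference RCU-protected data ...
 *		rcu_read_unlock();
 *	}
 */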
702 * If a holdout task is actually running, request an urgent quiescent
703 * state from its CPU. This is unsynchronized, so migrations can cause
704 * the request to go to the wrong CPU. Which is OK, all that will happen
705 * is that the CPU's next context switch will be a bit slower and next
706 * time around this task will generate another request.
708 void rcu_request_urgent_qs_task(struct task_struct *t)
715 return; /* This task is not running on that CPU. */
716 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
720 * When trying to report a quiescent state on behalf of some other CPU,
721 * it is our responsibility to check for and handle potential overflow
722 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
723 * After all, the CPU might be in deep idle state, and thus executing no
726 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
728 raw_lockdep_assert_held_rcu_node(rnp);
729 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
731 WRITE_ONCE(rdp->gpwrap, true);
732 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
733 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
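/*
 * Illustrative sketch (not part of the original source): the ULONG_CMP_LT()
 * comparison above is wrap-tolerant because it looks at the unsigned
 * difference, conceptually:
 *
 *	ULONG_CMP_LT(a, b) is true when (a - b) > ULONG_MAX / 2,
 *
 * that is, when "a" is behind "b" modulo wraparound.  Adding ULONG_MAX / 4
 * to the rcu_data value therefore flags ->gpwrap once the rcu_node counter
 * has run a quarter of the counter space ahead of this CPU's view.
 */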
737 * Snapshot the specified CPU's dynticks counter so that we can later
738 * credit them with an implicit quiescent state. Return 1 if this CPU
739 * is in dynticks idle mode, which is an extended quiescent state.
741 static int dyntick_save_progress_counter(struct rcu_data *rdp)
743 rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
744 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
745 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
746 rcu_gpnum_ovf(rdp->mynode, rdp);
753 * Return true if the specified CPU has passed through a quiescent
754 * state by virtue of being in or having passed through a dynticks
755 * idle state since the last call to dyntick_save_progress_counter()
756 * for this same CPU, or by virtue of having been offline.
758 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
761 struct rcu_node *rnp = rdp->mynode;
764 * If the CPU passed through or entered a dynticks idle phase with
765 * no active irq/NMI handlers, then we can safely pretend that the CPU
766 * already acknowledged the request to pass through a quiescent
767 * state. Either way, that CPU cannot possibly be in an RCU
768 * read-side critical section that started before the beginning
769 * of the current RCU grace period.
771 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
772 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
773 rcu_gpnum_ovf(rnp, rdp);
778 * Complain if a CPU that is considered to be offline from RCU's
779 * perspective has not yet reported a quiescent state. After all,
780 * the offline CPU should have reported a quiescent state during
781 * the CPU-offline process, or, failing that, by rcu_gp_init()
782 * if it ran concurrently with either the CPU going offline or the
783 * last task on a leaf rcu_node structure exiting its RCU read-side
784 * critical section while all CPUs corresponding to that structure
785 * are offline. This added warning detects bugs in any of these
788 * The rcu_node structure's ->lock is held here, which excludes
789 * the relevant portions of the CPU-hotplug code, the grace-period
790 * initialization code, and the rcu_read_unlock() code paths.
792 * For more detail, please refer to the "Hotplug CPU" section
793 * of RCU's Requirements documentation.
795 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
796 struct rcu_node *rnp1;
798 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
799 __func__, rnp->grplo, rnp->grphi, rnp->level,
800 (long)rnp->gp_seq, (long)rnp->completedqs);
801 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
802 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
803 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
804 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
805 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
806 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
807 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
808 return 1; /* Break things loose after complaining. */
812 * A CPU running for an extended time within the kernel can
813 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
814 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
815 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
816 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
817 * variable are safe because the assignments are repeated if this
818 * CPU failed to pass through a quiescent state. This code
819 * also checks .jiffies_resched in case jiffies_to_sched_qs
822 jtsq = READ_ONCE(jiffies_to_sched_qs);
823 if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
824 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
825 time_after(jiffies, rcu_state.jiffies_resched) ||
827 WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
828 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
829 smp_store_release(&rdp->rcu_urgent_qs, true);
830 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
831 WRITE_ONCE(rdp->rcu_urgent_qs, true);
835 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
836 * The above code handles this, but only for straight cond_resched().
837 * And some in-kernel loops check need_resched() before calling
838 * cond_resched(), which defeats the above code for CPUs that are
839 * running in-kernel with scheduling-clock interrupts disabled.
840 * So hit them over the head with the resched_cpu() hammer!
842 if (tick_nohz_full_cpu(rdp->cpu) &&
843 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
845 WRITE_ONCE(rdp->rcu_urgent_qs, true);
846 resched_cpu(rdp->cpu);
847 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
851 * If more than halfway to RCU CPU stall-warning time, invoke
852 * resched_cpu() more frequently to try to loosen things up a bit.
853 * Also check to see if the CPU is getting hammered with interrupts,
854 * but only once per grace period, just to keep the IPIs down to
857 if (time_after(jiffies, rcu_state.jiffies_resched)) {
858 if (time_after(jiffies,
859 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
860 resched_cpu(rdp->cpu);
861 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
863 if (IS_ENABLED(CONFIG_IRQ_WORK) &&
864 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
865 (rnp->ffmask & rdp->grpmask)) {
866 rdp->rcu_iw_pending = true;
867 rdp->rcu_iw_gp_seq = rnp->gp_seq;
868 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
871 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
873 struct rcu_snap_record *rsrp;
874 struct kernel_cpustat *kcsp;
876 kcsp = &kcpustat_cpu(cpu);
878 rsrp = &rdp->snap_record;
879 rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
880 rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
881 rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
882 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
883 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
884 rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
885 rsrp->jiffies = jiffies;
886 rsrp->gp_seq = rdp->gp_seq;
893 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
894 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
895 unsigned long gp_seq_req, const char *s)
897 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
898 gp_seq_req, rnp->level,
899 rnp->grplo, rnp->grphi, s);
903 * rcu_start_this_gp - Request the start of a particular grace period
904 * @rnp_start: The leaf node of the CPU from which to start.
905 * @rdp: The rcu_data corresponding to the CPU from which to start.
906 * @gp_seq_req: The gp_seq of the grace period to start.
908 * Start the specified grace period, as needed to handle newly arrived
909 * callbacks. The required future grace periods are recorded in each
910 * rcu_node structure's ->gp_seq_needed field. Returns true if there
911 * is reason to awaken the grace-period kthread.
913 * The caller must hold the specified rcu_node structure's ->lock, which
914 * is why the caller is responsible for waking the grace-period kthread.
916 * Returns true if the GP thread needs to be awakened else false.
918 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
919 unsigned long gp_seq_req)
922 struct rcu_node *rnp;
925 * Use funnel locking to either acquire the root rcu_node
926 * structure's lock or bail out if the need for this grace period
927 * has already been recorded -- or if that grace period has in
928 * fact already started. If there is already a grace period in
929 * progress in a non-leaf node, no recording is needed because the
930 * end of the grace period will scan the leaf rcu_node structures.
931 * Note that rnp_start->lock must not be released.
933 raw_lockdep_assert_held_rcu_node(rnp_start);
934 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
935 for (rnp = rnp_start; 1; rnp = rnp->parent) {
936 if (rnp != rnp_start)
937 raw_spin_lock_rcu_node(rnp);
938 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
939 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
941 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
942 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
946 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
947 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
949 * We just marked the leaf or internal node, and a
950 * grace period is in progress, which means that
951 * rcu_gp_cleanup() will see the marking. Bail to
954 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
958 if (rnp != rnp_start && rnp->parent != NULL)
959 raw_spin_unlock_rcu_node(rnp);
961 break; /* At root, and perhaps also leaf. */
964 /* If GP already in progress, just leave, otherwise start one. */
965 if (rcu_gp_in_progress()) {
966 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
969 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
970 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
971 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
972 if (!READ_ONCE(rcu_state.gp_kthread)) {
973 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
976 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
977 ret = true; /* Caller must wake GP kthread. */
979 /* Push furthest requested GP to leaf node and rcu_data structure. */
980 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
981 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
982 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
984 if (rnp != rnp_start)
985 raw_spin_unlock_rcu_node(rnp);
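/*
 * Worked example (illustrative, not in the original source): if many CPUs
 * sharing one leaf rcu_node all need the same future grace period, only the
 * first request walks up toward the root; later requests find
 * ->gp_seq_needed already recorded at the leaf and bail out there.  The
 * funnel therefore bounds contention on the root rcu_node's ->lock no
 * matter how many CPUs make simultaneous requests.
 */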
990 * Clean up any old requests for the just-ended grace period. Also return
991 * whether any additional grace periods have been requested.
993 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
996 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
998 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1000 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1001 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1002 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1007 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1008 * interrupt or softirq handler, in which case we just might immediately
1009 * sleep upon return, resulting in a grace-period hang), and don't bother
1010 * awakening when there is nothing for the grace-period kthread to do
1011 * (as in several CPUs raced to awaken, we lost), and finally don't try
1012 * to awaken a kthread that has not yet been created. If all those checks
1013 * are passed, track some debug information and awaken.
1015 * So why do the self-wakeup when in an interrupt or softirq handler
1016 * in the grace-period kthread's context? Because the kthread might have
1017 * been interrupted just as it was going to sleep, and just after the final
1018 * pre-sleep check of the awaken condition. In this case, a wakeup really
1019 * is required, and is therefore supplied.
1021 static void rcu_gp_kthread_wake(void)
1023 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1025 if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
1026 !READ_ONCE(rcu_state.gp_flags) || !t)
1028 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1029 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1030 swake_up_one(&rcu_state.gp_wq);
1034 * If there is room, assign a ->gp_seq number to any callbacks on this
1035 * CPU that have not already been assigned. Also accelerate any callbacks
1036 * that were previously assigned a ->gp_seq number that has since proven
1037 * to be too conservative, which can happen if callbacks get assigned a
1038 * ->gp_seq number while RCU is idle, but with reference to a non-root
1039 * rcu_node structure. This function is idempotent, so it does not hurt
1040 * to call it repeatedly. Returns a flag saying that we should awaken
1041 * the RCU grace-period kthread.
1043 * The caller must hold rnp->lock with interrupts disabled.
1045 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1047 unsigned long gp_seq_req;
1050 rcu_lockdep_assert_cblist_protected(rdp);
1051 raw_lockdep_assert_held_rcu_node(rnp);
1053 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1054 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1057 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1060 * Callbacks are often registered with incomplete grace-period
1061 * information. Something about the fact that getting exact
1062 * information requires acquiring a global lock... RCU therefore
1063 * makes a conservative estimate of the grace period number at which
1064 * a given callback will become ready to invoke. The following
1065 * code checks this estimate and improves it when possible, thus
1066 * accelerating callback invocation to an earlier grace-period
1069 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1070 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1071 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1073 /* Trace depending on how much we were able to accelerate. */
1074 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1075 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1077 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1079 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1085 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1086 * rcu_node structure's ->lock be held. It consults the cached value
1087 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1088 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1089 * while holding the leaf rcu_node structure's ->lock.
1091 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1092 struct rcu_data *rdp)
1097 rcu_lockdep_assert_cblist_protected(rdp);
1098 c = rcu_seq_snap(&rcu_state.gp_seq);
1099 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1100 /* Old request still live, so mark recent callbacks. */
1101 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1104 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1105 needwake = rcu_accelerate_cbs(rnp, rdp);
1106 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1108 rcu_gp_kthread_wake();
1112 * Move any callbacks whose grace period has completed to the
1113 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1114 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1115 * sublist. This function is idempotent, so it does not hurt to
1116 * invoke it repeatedly. As long as it is not invoked -too- often...
1117 * Returns true if the RCU grace-period kthread needs to be awakened.
1119 * The caller must hold rnp->lock with interrupts disabled.
1121 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1123 rcu_lockdep_assert_cblist_protected(rdp);
1124 raw_lockdep_assert_held_rcu_node(rnp);
1126 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1127 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1131 * Find all callbacks whose ->gp_seq numbers indicate that they
1132 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1134 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1136 /* Classify any remaining callbacks. */
1137 return rcu_accelerate_cbs(rnp, rdp);
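/*
 * Illustrative sketch (not part of the original source): ->cblist is a
 * segmented list.  Callbacks flow from RCU_NEXT_TAIL through
 * RCU_NEXT_READY_TAIL and RCU_WAIT_TAIL to RCU_DONE_TAIL as grace periods
 * complete.  "Accelerating" (rcu_accelerate_cbs()) assigns the earliest safe
 * ->gp_seq to segments that do not yet have one; "advancing"
 * (rcu_advance_cbs()) moves callbacks whose assigned grace period has
 * completed into the RCU_DONE_TAIL segment, ready for invocation.
 */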
1141 * Move and classify callbacks, but only if doing so won't require
1142 * that the RCU grace-period kthread be awakened.
1144 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1145 struct rcu_data *rdp)
1147 rcu_lockdep_assert_cblist_protected(rdp);
1148 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1150 // The grace period cannot end while we hold the rcu_node lock.
1151 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1152 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1153 raw_spin_unlock_rcu_node(rnp);
1157 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1158 * quiescent state. This is intended to be invoked when the CPU notices
1159 * a new grace period.
1161 static void rcu_strict_gp_check_qs(void)
1163 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1170 * Update CPU-local rcu_data state to record the beginnings and ends of
1171 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1172 * structure corresponding to the current CPU, and must have irqs disabled.
1173 * Returns true if the grace-period kthread needs to be awakened.
1175 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1179 const bool offloaded = rcu_rdp_is_offloaded(rdp);
1181 raw_lockdep_assert_held_rcu_node(rnp);
1183 if (rdp->gp_seq == rnp->gp_seq)
1184 return false; /* Nothing to do. */
1186 /* Handle the ends of any preceding grace periods first. */
1187 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1188 unlikely(READ_ONCE(rdp->gpwrap))) {
1190 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1191 rdp->core_needs_qs = false;
1192 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1195 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1196 if (rdp->core_needs_qs)
1197 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1200 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1201 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1202 unlikely(READ_ONCE(rdp->gpwrap))) {
1204 * If the current grace period is waiting for this CPU,
1205 * set up to detect a quiescent state, otherwise don't
1206 * go looking for one.
1208 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1209 need_qs = !!(rnp->qsmask & rdp->grpmask);
1210 rdp->cpu_no_qs.b.norm = need_qs;
1211 rdp->core_needs_qs = need_qs;
1212 zero_cpu_stall_ticks(rdp);
1214 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1215 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1216 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1217 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1218 WRITE_ONCE(rdp->last_sched_clock, jiffies);
1219 WRITE_ONCE(rdp->gpwrap, false);
1220 rcu_gpnum_ovf(rnp, rdp);
1224 static void note_gp_changes(struct rcu_data *rdp)
1226 unsigned long flags;
1228 struct rcu_node *rnp;
1230 local_irq_save(flags);
1232 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1233 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1234 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1235 local_irq_restore(flags);
1238 needwake = __note_gp_changes(rnp, rdp);
1239 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1240 rcu_strict_gp_check_qs();
1242 rcu_gp_kthread_wake();
1245 static atomic_t *rcu_gp_slow_suppress;
1247 /* Register a counter to suppress debugging grace-period delays. */
1248 void rcu_gp_slow_register(atomic_t *rgssp)
1250 WARN_ON_ONCE(rcu_gp_slow_suppress);
1252 WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1254 EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1256 /* Unregister a counter, with NULL for not caring which. */
1257 void rcu_gp_slow_unregister(atomic_t *rgssp)
1259 WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress);
1261 WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1263 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1265 static bool rcu_gp_slow_is_suppressed(void)
1267 atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1269 return rgssp && atomic_read(rgssp);
1272 static void rcu_gp_slow(int delay)
1274 if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1275 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1276 schedule_timeout_idle(delay);
1279 static unsigned long sleep_duration;
1281 /* Allow rcutorture to stall the grace-period kthread. */
1282 void rcu_gp_set_torture_wait(int duration)
1284 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1285 WRITE_ONCE(sleep_duration, duration);
1287 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1289 /* Actually implement the aforementioned wait. */
1290 static void rcu_gp_torture_wait(void)
1292 unsigned long duration;
1294 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1296 duration = xchg(&sleep_duration, 0UL);
1298 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1299 schedule_timeout_idle(duration);
1300 pr_alert("%s: Wait complete\n", __func__);
1305 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1308 static void rcu_strict_gp_boundary(void *unused)
1313 // Make the polled API aware of the beginning of a grace period.
1314 static void rcu_poll_gp_seq_start(unsigned long *snap)
1316 struct rcu_node *rnp = rcu_get_root();
1318 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1319 raw_lockdep_assert_held_rcu_node(rnp);
1321 // If RCU was idle, note beginning of GP.
1322 if (!rcu_seq_state(rcu_state.gp_seq_polled))
1323 rcu_seq_start(&rcu_state.gp_seq_polled);
1325 // Either way, record current state.
1326 *snap = rcu_state.gp_seq_polled;
1329 // Make the polled API aware of the end of a grace period.
1330 static void rcu_poll_gp_seq_end(unsigned long *snap)
1332 struct rcu_node *rnp = rcu_get_root();
1334 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1335 raw_lockdep_assert_held_rcu_node(rnp);
1337 // If the previously noted GP is still in effect, record the
1338 // end of that GP. Either way, zero counter to avoid counter-wrap
1340 if (*snap && *snap == rcu_state.gp_seq_polled) {
1341 rcu_seq_end(&rcu_state.gp_seq_polled);
1342 rcu_state.gp_seq_polled_snap = 0;
1343 rcu_state.gp_seq_polled_exp_snap = 0;
1349 // Make the polled API aware of the beginning of a grace period, but
1350 // where caller does not hold the root rcu_node structure's lock.
1351 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1353 unsigned long flags;
1354 struct rcu_node *rnp = rcu_get_root();
1356 if (rcu_init_invoked()) {
1357 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1358 lockdep_assert_irqs_enabled();
1359 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1361 rcu_poll_gp_seq_start(snap);
1362 if (rcu_init_invoked())
1363 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1366 // Make the polled API aware of the end of a grace period, but where
1367 // caller does not hold the root rcu_node structure's lock.
1368 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1370 unsigned long flags;
1371 struct rcu_node *rnp = rcu_get_root();
1373 if (rcu_init_invoked()) {
1374 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1375 lockdep_assert_irqs_enabled();
1376 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1378 rcu_poll_gp_seq_end(snap);
1379 if (rcu_init_invoked())
1380 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
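/*
 * Illustrative usage sketch (not part of the original source): the polled
 * grace-period API that these gp_seq_polled helpers support lets callers
 * avoid blocking:
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *	...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(old_ptr);			// a grace period has elapsed
 *	else
 *		call_rcu(&old_ptr->rcu, free_cb);	// otherwise defer
 *
 * Here old_ptr, its ->rcu rcu_head field, and free_cb() are hypothetical.
 */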
1384 * Initialize a new grace period. Return false if no grace period required.
1386 static noinline_for_stack bool rcu_gp_init(void)
1388 unsigned long flags;
1389 unsigned long oldmask;
1391 struct rcu_data *rdp;
1392 struct rcu_node *rnp = rcu_get_root();
1394 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1395 raw_spin_lock_irq_rcu_node(rnp);
1396 if (!READ_ONCE(rcu_state.gp_flags)) {
1397 /* Spurious wakeup, tell caller to go back to sleep. */
1398 raw_spin_unlock_irq_rcu_node(rnp);
1401 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1403 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1405 * Grace period already in progress, don't start another.
1406 * Not supposed to be able to happen.
1408 raw_spin_unlock_irq_rcu_node(rnp);
1412 /* Advance to a new grace period and initialize state. */
1413 record_gp_stall_check_time();
1414 /* Record GP times before starting GP, hence rcu_seq_start(). */
1415 rcu_seq_start(&rcu_state.gp_seq);
1416 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1417 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1418 rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1419 raw_spin_unlock_irq_rcu_node(rnp);
1422 * Apply per-leaf buffered online and offline operations to
1423 * the rcu_node tree. Note that this new grace period need not
1424 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1425 * offlining path, when combined with checks in this function,
1426 * will handle CPUs that are currently going offline or that will
1427 * go offline later. Please also refer to "Hotplug CPU" section
1428 * of RCU's Requirements documentation.
1430 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1431 /* Exclude CPU hotplug operations. */
1432 rcu_for_each_leaf_node(rnp) {
1433 local_irq_save(flags);
1434 arch_spin_lock(&rcu_state.ofl_lock);
1435 raw_spin_lock_rcu_node(rnp);
1436 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1437 !rnp->wait_blkd_tasks) {
1438 /* Nothing to do on this leaf rcu_node structure. */
1439 raw_spin_unlock_rcu_node(rnp);
1440 arch_spin_unlock(&rcu_state.ofl_lock);
1441 local_irq_restore(flags);
1445 /* Record old state, apply changes to ->qsmaskinit field. */
1446 oldmask = rnp->qsmaskinit;
1447 rnp->qsmaskinit = rnp->qsmaskinitnext;
1449 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1450 if (!oldmask != !rnp->qsmaskinit) {
1451 if (!oldmask) { /* First online CPU for rcu_node. */
1452 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1453 rcu_init_new_rnp(rnp);
1454 } else if (rcu_preempt_has_tasks(rnp)) {
1455 rnp->wait_blkd_tasks = true; /* blocked tasks */
1456 } else { /* Last offline CPU and can propagate. */
1457 rcu_cleanup_dead_rnp(rnp);
1462 * If all waited-on tasks from prior grace period are
1463 * done, and if all this rcu_node structure's CPUs are
1464 * still offline, propagate up the rcu_node tree and
1465 * clear ->wait_blkd_tasks. Otherwise, if one of this
1466 * rcu_node structure's CPUs has since come back online,
1467 * simply clear ->wait_blkd_tasks.
1469 if (rnp->wait_blkd_tasks &&
1470 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1471 rnp->wait_blkd_tasks = false;
1472 if (!rnp->qsmaskinit)
1473 rcu_cleanup_dead_rnp(rnp);
1476 raw_spin_unlock_rcu_node(rnp);
1477 arch_spin_unlock(&rcu_state.ofl_lock);
1478 local_irq_restore(flags);
1480 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1483 * Set the quiescent-state-needed bits in all the rcu_node
1484 * structures for all currently online CPUs in breadth-first
1485 * order, starting from the root rcu_node structure, relying on the
1486 * layout of the tree within the rcu_state.node[] array. Note that
1487 * other CPUs will access only the leaves of the hierarchy, thus
1488 * seeing that no grace period is in progress, at least until the
1489 * corresponding leaf node has been initialized.
1491 * The grace period cannot complete until the initialization
1492 * process finishes, because this kthread handles both.
1494 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1495 rcu_for_each_node_breadth_first(rnp) {
1496 rcu_gp_slow(gp_init_delay);
1497 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1498 rdp = this_cpu_ptr(&rcu_data);
1499 rcu_preempt_check_blocked_tasks(rnp);
1500 rnp->qsmask = rnp->qsmaskinit;
1501 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1502 if (rnp == rdp->mynode)
1503 (void)__note_gp_changes(rnp, rdp);
1504 rcu_preempt_boost_start_gp(rnp);
1505 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1506 rnp->level, rnp->grplo,
1507 rnp->grphi, rnp->qsmask);
1508 /* Quiescent states for tasks on any now-offline CPUs. */
1509 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1510 rnp->rcu_gp_init_mask = mask;
1511 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1512 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1514 raw_spin_unlock_irq_rcu_node(rnp);
1515 cond_resched_tasks_rcu_qs();
1516 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1519 // If strict, make all CPUs aware of new grace period.
1520 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1521 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
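/*
 * Illustrative sketch (not part of the original source): the combining tree
 * is laid out breadth-first in the flat rcu_state.node[] array, with
 * ->level[i] pointing at the first rcu_node of level i.  The
 * rcu_for_each_node_breadth_first() traversal above is therefore just a
 * linear walk, conceptually:
 *
 *	for (rnp = &rcu_state.node[0];
 *	     rnp < &rcu_state.node[rcu_num_nodes]; rnp++)
 *		// visit rnp: root first, then its children, then the leaves
 */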
1527 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1530 static bool rcu_gp_fqs_check_wake(int *gfp)
1532 struct rcu_node *rnp = rcu_get_root();
1534 // If under overload conditions, force an immediate FQS scan.
1535 if (*gfp & RCU_GP_FLAG_OVLD)
1538 // Someone like call_rcu() requested a force-quiescent-state scan.
1539 *gfp = READ_ONCE(rcu_state.gp_flags);
1540 if (*gfp & RCU_GP_FLAG_FQS)
1543 // The current grace period has completed.
1544 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1551 * Do one round of quiescent-state forcing.
1553 static void rcu_gp_fqs(bool first_time)
1555 struct rcu_node *rnp = rcu_get_root();
1557 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1558 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1560 /* Collect dyntick-idle snapshots. */
1561 force_qs_rnp(dyntick_save_progress_counter);
1563 /* Handle dyntick-idle and offline CPUs. */
1564 force_qs_rnp(rcu_implicit_dynticks_qs);
1566 /* Clear flag to prevent immediate re-entry. */
1567 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1568 raw_spin_lock_irq_rcu_node(rnp);
1569 WRITE_ONCE(rcu_state.gp_flags,
1570 READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1571 raw_spin_unlock_irq_rcu_node(rnp);
1576 * Loop doing repeated quiescent-state forcing until the grace period ends.
1578 static noinline_for_stack void rcu_gp_fqs_loop(void)
1580 bool first_gp_fqs = true;
1584 struct rcu_node *rnp = rcu_get_root();
1586 j = READ_ONCE(jiffies_till_first_fqs);
1587 if (rcu_state.cbovld)
1588 gf = RCU_GP_FLAG_OVLD;
1591 if (rcu_state.cbovld) {
1596 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
1597 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1599 * jiffies_force_qs before RCU_GP_WAIT_FQS state
1600 * update; required for stall checks.
1603 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1604 jiffies + (j ? 3 * j : 2));
1606 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1608 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1609 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
1610 rcu_gp_fqs_check_wake(&gf), j);
1611 rcu_gp_torture_wait();
1612 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
1613 /* Locking provides needed memory barriers. */
1615 * Exit the loop if the root rcu_node structure indicates that the grace
1616 * period has ended. The rcu_preempt_blocked_readers_cgp(rnp) check
1617 * is required only for single-node rcu_node trees because readers blocking
1618 * the current grace period are queued only on leaf rcu_node structures.
1619 * For multi-node trees, checking the root node's ->qsmask suffices, because a
1620 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
1621 * the corresponding leaf nodes have passed through their quiescent state.
1623 if (!READ_ONCE(rnp->qsmask) &&
1624 !rcu_preempt_blocked_readers_cgp(rnp))
1626 /* If time for quiescent-state forcing, do it. */
1627 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
1628 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
1629 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1631 rcu_gp_fqs(first_gp_fqs);
1634 first_gp_fqs = false;
1635 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
1637 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1639 cond_resched_tasks_rcu_qs();
1640 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1641 ret = 0; /* Force full wait till next FQS. */
1642 j = READ_ONCE(jiffies_till_next_fqs);
1644 /* Deal with stray signal. */
1645 cond_resched_tasks_rcu_qs();
1646 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1647 WARN_ON(signal_pending(current));
1648 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1650 ret = 1; /* Keep old FQS timing. */
1652 if (time_after(jiffies, rcu_state.jiffies_force_qs))
1655 j = rcu_state.jiffies_force_qs - j;
1662 * Clean up after the old grace period.
1664 static noinline void rcu_gp_cleanup(void)
1667 bool needgp = false;
1668 unsigned long gp_duration;
1669 unsigned long new_gp_seq;
1671 struct rcu_data *rdp;
1672 struct rcu_node *rnp = rcu_get_root();
1673 struct swait_queue_head *sq;
1675 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1676 raw_spin_lock_irq_rcu_node(rnp);
1677 rcu_state.gp_end = jiffies;
1678 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1679 if (gp_duration > rcu_state.gp_max)
1680 rcu_state.gp_max = gp_duration;
1683 * We know the grace period is complete, but to everyone else
1684 * it appears to still be ongoing. But it is also the case
1685 * that to everyone else it looks like there is nothing that
1686 * they can do to advance the grace period. It is therefore
1687 * safe for us to drop the lock in order to mark the grace
1688 * period as completed in all of the rcu_node structures.
1690 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
1691 raw_spin_unlock_irq_rcu_node(rnp);
1694 * Propagate new ->gp_seq value to rcu_node structures so that
1695 * other CPUs don't have to wait until the start of the next grace
1696 * period to process their callbacks. This also avoids some nasty
1697 * RCU grace-period initialization races by forcing the end of
1698 * the current grace period to be completely recorded in all of
1699 * the rcu_node structures before the beginning of the next grace
1700 * period is recorded in any of the rcu_node structures.
1702 new_gp_seq = rcu_state.gp_seq;
1703 rcu_seq_end(&new_gp_seq);
1704 rcu_for_each_node_breadth_first(rnp) {
1705 raw_spin_lock_irq_rcu_node(rnp);
1706 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1707 dump_blkd_tasks(rnp, 10);
1708 WARN_ON_ONCE(rnp->qsmask);
1709 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1711 smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
1712 rdp = this_cpu_ptr(&rcu_data);
1713 if (rnp == rdp->mynode)
1714 needgp = __note_gp_changes(rnp, rdp) || needgp;
1715 /* smp_mb() provided by prior unlock-lock pair. */
1716 needgp = rcu_future_gp_cleanup(rnp) || needgp;
1717 // Reset overload indication for CPUs no longer overloaded
1718 if (rcu_is_leaf_node(rnp))
1719 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
1720 rdp = per_cpu_ptr(&rcu_data, cpu);
1721 check_cb_ovld_locked(rdp, rnp);
1723 sq = rcu_nocb_gp_get(rnp);
1724 raw_spin_unlock_irq_rcu_node(rnp);
1725 rcu_nocb_gp_cleanup(sq);
1726 cond_resched_tasks_rcu_qs();
1727 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1728 rcu_gp_slow(gp_cleanup_delay);
1730 rnp = rcu_get_root();
1731 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1733 /* Declare grace period done, trace first to use old GP number. */
1734 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1735 rcu_seq_end(&rcu_state.gp_seq);
1736 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1737 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
1738 /* Check for GP requests since above loop. */
1739 rdp = this_cpu_ptr(&rcu_data);
1740 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1741 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1742 TPS("CleanupMore"));
1745 /* Advance CBs to reduce false positives below. */
1746 offloaded = rcu_rdp_is_offloaded(rdp);
1747 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1749 // We get here if a grace period was needed ("needgp")
1750 // and the above call to rcu_accelerate_cbs() did not set
1751 // the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
1752 // the need for another grace period). The purpose
1753 // of the "offloaded" check is to avoid invoking
1754 // rcu_accelerate_cbs() on an offloaded CPU because we do not
1755 // hold the ->nocb_lock needed to safely access an offloaded
1756 // ->cblist. We do not want to acquire that lock because
1757 // it can be heavily contended during callback floods.
1759 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1760 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1761 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
1764 // We get here either if there is no need for an
1765 // additional grace period or if rcu_accelerate_cbs() has
1766 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
1767 // So all we need to do is to clear all of the other ->gp_flags bits.
1770 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1772 raw_spin_unlock_irq_rcu_node(rnp);
1774 // If strict, make all CPUs aware of the end of the old grace period.
1775 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1776 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1780 * Body of kthread that handles grace periods.
1782 static int __noreturn rcu_gp_kthread(void *unused)
1784 rcu_bind_gp_kthread();
1787 /* Handle grace-period start. */
1789 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1791 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
1792 swait_event_idle_exclusive(rcu_state.gp_wq,
1793 READ_ONCE(rcu_state.gp_flags) &
1795 rcu_gp_torture_wait();
1796 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
1797 /* Locking provides needed memory barrier. */
1800 cond_resched_tasks_rcu_qs();
1801 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1802 WARN_ON(signal_pending(current));
1803 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1807 /* Handle quiescent-state forcing. */
1810 /* Handle grace-period end. */
1811 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
1813 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
1818 * Report a full set of quiescent states to the rcu_state data structure.
1819 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1820 * another grace period is required. Whether we wake the grace-period
1821 * kthread or it awakens itself for the next round of quiescent-state
1822 * forcing, that kthread will clean up after the just-completed grace
1823 * period. Note that the caller must hold rnp->lock, which is released before return.
1826 static void rcu_report_qs_rsp(unsigned long flags)
1827 __releases(rcu_get_root()->lock)
1829 raw_lockdep_assert_held_rcu_node(rcu_get_root());
1830 WARN_ON_ONCE(!rcu_gp_in_progress());
1831 WRITE_ONCE(rcu_state.gp_flags,
1832 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1833 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1834 rcu_gp_kthread_wake();
1838 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1839 * Allows quiescent states for a group of CPUs to be reported at one go
1840 * to the specified rcu_node structure, though all the CPUs in the group
1841 * must be represented by the same rcu_node structure (which need not be a
1842 * leaf rcu_node structure, though it often will be). The gps parameter
1843 * is the grace-period snapshot, which means that the quiescent states
1844 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
1845 * must be held upon entry, and it is released before return.
1847 * As a special case, if mask is zero, the bit-already-cleared check is
1848 * disabled. This allows propagating quiescent state due to resumed tasks
1849 * during grace-period initialization.
1851 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1852 unsigned long gps, unsigned long flags)
1853 __releases(rnp->lock)
1855 unsigned long oldmask = 0;
1856 struct rcu_node *rnp_c;
1858 raw_lockdep_assert_held_rcu_node(rnp);
1860 /* Walk up the rcu_node hierarchy. */
1862 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
1865 * Our bit has already been cleared, or the
1866 * relevant grace period is already over, so done.
1868 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1871 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1872 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1873 rcu_preempt_blocked_readers_cgp(rnp));
1874 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
1875 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1876 mask, rnp->qsmask, rnp->level,
1877 rnp->grplo, rnp->grphi,
1879 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1881 /* Other bits still set at this level, so done. */
1882 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1885 rnp->completedqs = rnp->gp_seq;
1886 mask = rnp->grpmask;
1887 if (rnp->parent == NULL) {
1889 /* No more levels. Exit loop holding root lock. */
1893 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1896 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1897 oldmask = READ_ONCE(rnp_c->qsmask);
1901 * Get here if we are the last CPU to pass through a quiescent
1902 * state for this grace period. Invoke rcu_report_qs_rsp()
1903 * to clean up and start the next grace period if one is needed.
1905 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
1909 * Record a quiescent state for all tasks that were previously queued
1910 * on the specified rcu_node structure and that were blocking the current
1911 * RCU grace period. The caller must hold the corresponding rnp->lock with
1912 * irqs disabled, and this lock is released upon return, but irqs remain disabled.
1915 static void __maybe_unused
1916 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1917 __releases(rnp->lock)
1921 struct rcu_node *rnp_p;
1923 raw_lockdep_assert_held_rcu_node(rnp);
1924 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
1925 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
1927 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1928 return; /* Still need more quiescent states! */
1931 rnp->completedqs = rnp->gp_seq;
1932 rnp_p = rnp->parent;
1933 if (rnp_p == NULL) {
1935 * Only one rcu_node structure in the tree, so don't
1936 * try to report up to its nonexistent parent!
1938 rcu_report_qs_rsp(flags);
1942 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
1944 mask = rnp->grpmask;
1945 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1946 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
1947 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
1951 * Record a quiescent state for the specified CPU to that CPU's rcu_data
1952 * structure. This must be called from the specified CPU.
1955 rcu_report_qs_rdp(struct rcu_data *rdp)
1957 unsigned long flags;
1959 bool needacc = false;
1960 struct rcu_node *rnp;
1962 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
1964 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1965 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
1969 * The grace period in which this quiescent state was
1970 * recorded has ended, so don't report it upwards.
1971 * We will instead need a new quiescent state that lies
1972 * within the current grace period.
1974 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
1975 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1978 mask = rdp->grpmask;
1979 rdp->core_needs_qs = false;
1980 if ((rnp->qsmask & mask) == 0) {
1981 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1984 * This GP can't end until this CPU checks in, so all of our
1985 * callbacks can be processed during the next GP.
1987 * NOCB kthreads have their own way to deal with that...
1989 if (!rcu_rdp_is_offloaded(rdp)) {
1991 * The current GP has not yet ended, so it
1992 * should not be possible for rcu_accelerate_cbs()
1993 * to return true. So complain, but don't awaken.
1995 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
1996 } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
1998 * ...but NOCB kthreads may miss or delay callback acceleration
1999 * if in the middle of a (de-)offloading process.
2004 rcu_disable_urgency_upon_qs(rdp);
2005 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2006 /* ^^^ Released rnp->lock */
2009 rcu_nocb_lock_irqsave(rdp, flags);
2010 rcu_accelerate_cbs_unlocked(rnp, rdp);
2011 rcu_nocb_unlock_irqrestore(rdp, flags);
2017 * Check to see if there is a new grace period of which this CPU
2018 * is not yet aware, and if so, set up local rcu_data state for it.
2019 * Otherwise, see if this CPU has just passed through its first
2020 * quiescent state for this grace period, and record that fact if so.
2023 rcu_check_quiescent_state(struct rcu_data *rdp)
2025 /* Check for grace-period ends and beginnings. */
2026 note_gp_changes(rdp);
2029 * Does this CPU still need to do its part for current grace period?
2030 * If no, return and let the other CPUs do their part as well.
2032 if (!rdp->core_needs_qs)
2036 * Was there a quiescent state since the beginning of the grace
2037 * period? If no, then exit and wait for the next call.
2039 if (rdp->cpu_no_qs.b.norm)
2043 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2046 rcu_report_qs_rdp(rdp);
2049 /* Return true if callback-invocation time limit exceeded. */
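/*
 * Note the short-circuit ordering below: tlimit == 0 disables the time
 * limit entirely, "!likely(count & 31)" normally restricts the comparatively
 * expensive local_clock() read to every 32nd callback, and the jiffies-based
 * comparison (available only with CONFIG_RCU_DOUBLE_CHECK_CB_TIME) is a
 * cheaper cross-check that can trigger the local_clock() read between those
 * every-32nd samples.
 */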
2050 static bool rcu_do_batch_check_time(long count, long tlimit,
2051 bool jlimit_check, unsigned long jlimit)
2053 // Invoke local_clock() only once per 32 consecutive callbacks.
2054 return unlikely(tlimit) &&
2055 (!likely(count & 31) ||
2056 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) &&
2057 jlimit_check && time_after(jiffies, jlimit))) &&
2058 local_clock() >= tlimit;
2062 * Invoke any RCU callbacks that have made it to the end of their grace
2063 * period. Throttle as specified by rdp->blimit.
2065 static void rcu_do_batch(struct rcu_data *rdp)
2070 bool __maybe_unused empty;
2071 unsigned long flags;
2072 unsigned long jlimit;
2073 bool jlimit_check = false;
2075 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2076 struct rcu_head *rhp;
2079 /* If no callbacks are ready, just return. */
2080 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2081 trace_rcu_batch_start(rcu_state.name,
2082 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2083 trace_rcu_batch_end(rcu_state.name, 0,
2084 !rcu_segcblist_empty(&rdp->cblist),
2085 need_resched(), is_idle_task(current),
2086 rcu_is_callbacks_kthread(rdp));
2091 * Extract the list of ready callbacks, disabling IRQs to prevent
2092 * races with call_rcu() from interrupt handlers. Leave the
2093 * callback counts, as rcu_barrier() needs to be conservative.
2095 rcu_nocb_lock_irqsave(rdp, flags);
2096 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2097 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2098 div = READ_ONCE(rcu_divisor);
2099 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2100 bl = max(rdp->blimit, pending >> div);
2101 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2102 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) {
2103 const long npj = NSEC_PER_SEC / HZ;
2104 long rrn = READ_ONCE(rcu_resched_ns);
2106 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2107 tlimit = local_clock() + rrn;
2108 jlimit = jiffies + (rrn + npj + 1) / npj;
2109 jlimit_check = true;
2111 trace_rcu_batch_start(rcu_state.name,
2112 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2113 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2114 if (rcu_rdp_is_offloaded(rdp))
2115 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2117 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2118 rcu_nocb_unlock_irqrestore(rdp, flags);
2120 /* Invoke callbacks. */
2121 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2122 rhp = rcu_cblist_dequeue(&rcl);
2124 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2128 debug_rcu_head_unqueue(rhp);
2130 rcu_lock_acquire(&rcu_callback_map);
2131 trace_rcu_invoke_callback(rcu_state.name, rhp);
2134 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2137 rcu_lock_release(&rcu_callback_map);
2140 * Stop only if limit reached and CPU has something to do.
2142 if (in_serving_softirq()) {
2143 if (count >= bl && (need_resched() || !is_idle_task(current)))
2146 * Make sure we don't spend too much time here and deprive other
2147 * softirq vectors of CPU cycles.
2149 if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit))
2152 // In rcuc/rcuoc context, so no worries about
2153 // depriving other softirq vectors of CPU cycles.
2155 lockdep_assert_irqs_enabled();
2156 cond_resched_tasks_rcu_qs();
2157 lockdep_assert_irqs_enabled();
2159 // But rcuc kthreads can delay quiescent-state
2160 // reporting, so check time limits for them.
2161 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2162 rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) {
2163 rdp->rcu_cpu_has_work = 1;
2169 rcu_nocb_lock_irqsave(rdp, flags);
2170 rdp->n_cbs_invoked += count;
2171 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2172 is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2174 /* Update counts and requeue any remaining callbacks. */
2175 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2176 rcu_segcblist_add_len(&rdp->cblist, -count);
2178 /* Reinstate batch limit if we have worked down the excess. */
2179 count = rcu_segcblist_n_cbs(&rdp->cblist);
2180 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2181 rdp->blimit = blimit;
2183 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2184 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2185 rdp->qlen_last_fqs_check = 0;
2186 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2187 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2188 rdp->qlen_last_fqs_check = count;
2191 * The following usually indicates a double call_rcu(). To track
2192 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2194 empty = rcu_segcblist_empty(&rdp->cblist);
2195 WARN_ON_ONCE(count == 0 && !empty);
2196 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2197 count != 0 && empty);
2198 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2199 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2201 rcu_nocb_unlock_irqrestore(rdp, flags);
2203 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2207 * This function is invoked from each scheduling-clock interrupt,
2208 * and checks to see if this CPU is in a non-context-switch quiescent
2209 * state, for example, user mode or idle loop. It also schedules RCU
2210 * core processing. If the current grace period has gone on too long,
2211 * it will ask the scheduler to manufacture a context switch for the sole
2212 * purpose of providing the needed quiescent state.
2214 void rcu_sched_clock_irq(int user)
2218 if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2220 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2221 __this_cpu_write(rcu_data.last_sched_clock, j);
2223 trace_rcu_utilization(TPS("Start scheduler-tick"));
2224 lockdep_assert_irqs_disabled();
2225 raw_cpu_inc(rcu_data.ticks_this_gp);
2226 /* The load-acquire pairs with the store-release setting to true. */
2227 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2228 /* Idle and userspace execution already are quiescent states. */
2229 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2230 set_tsk_need_resched(current);
2231 set_preempt_need_resched();
2233 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2235 rcu_flavor_sched_clock_irq(user);
2236 if (rcu_pending(user))
2238 if (user || rcu_is_cpu_rrupt_from_idle())
2239 rcu_note_voluntary_context_switch(current);
2240 lockdep_assert_irqs_disabled();
2242 trace_rcu_utilization(TPS("End scheduler-tick"));
2246 * Scan the leaf rcu_node structures. For each structure on which all
2247 * CPUs have reported a quiescent state and on which there are tasks
2248 * blocking the current grace period, initiate RCU priority boosting.
2249 * Otherwise, invoke the specified function to check dyntick state for
2250 * each CPU that has not yet reported a quiescent state.
2252 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2255 unsigned long flags;
2257 struct rcu_data *rdp;
2258 struct rcu_node *rnp;
2260 rcu_state.cbovld = rcu_state.cbovldnext;
2261 rcu_state.cbovldnext = false;
2262 rcu_for_each_leaf_node(rnp) {
2263 cond_resched_tasks_rcu_qs();
2265 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2266 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2267 if (rnp->qsmask == 0) {
2268 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2270 * No point in scanning bits because they
2271 * are all zero. But we might need to
2272 * priority-boost blocked readers.
2274 rcu_initiate_boost(rnp, flags);
2275 /* rcu_initiate_boost() releases rnp->lock */
2278 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2281 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2282 rdp = per_cpu_ptr(&rcu_data, cpu);
2284 mask |= rdp->grpmask;
2285 rcu_disable_urgency_upon_qs(rdp);
2289 /* Idle/offline CPUs, report (releases rnp->lock). */
2290 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2292 /* Nothing to do here, so just drop the lock. */
2293 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2299 * Force quiescent states on reluctant CPUs, and also detect which
2300 * CPUs are in dyntick-idle mode.
2302 void rcu_force_quiescent_state(void)
2304 unsigned long flags;
2306 struct rcu_node *rnp;
2307 struct rcu_node *rnp_old = NULL;
2309 /* Funnel through hierarchy to reduce memory contention. */
2310 rnp = raw_cpu_read(rcu_data.mynode);
2311 for (; rnp != NULL; rnp = rnp->parent) {
2312 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2313 !raw_spin_trylock(&rnp->fqslock);
2314 if (rnp_old != NULL)
2315 raw_spin_unlock(&rnp_old->fqslock);
2320 /* rnp_old == rcu_get_root(), rnp == NULL. */
2322 /* Reached the root of the rcu_node tree, acquire lock. */
2323 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2324 raw_spin_unlock(&rnp_old->fqslock);
2325 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2326 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2327 return; /* Someone beat us to it. */
2329 WRITE_ONCE(rcu_state.gp_flags,
2330 READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2331 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2332 rcu_gp_kthread_wake();
2334 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
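/*
 * Illustrative sketch only, not part of the RCU implementation: a
 * hypothetical diagnostic helper that nudges the grace-period machinery
 * when a wait seems slow.  rcu_force_quiescent_state() is exported, so
 * module code may call it, though doing so is rarely necessary.
 */
static void __maybe_unused example_nudge_rcu(unsigned long wait_started)
{
	/* After ten seconds of waiting, ask RCU to scan for holdout CPUs. */
	if (time_after(jiffies, wait_started + 10 * HZ))
		rcu_force_quiescent_state();
}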
2336 // Workqueue handler for an RCU reader for kernels enforcing strict RCU grace periods.
2338 static void strict_work_handler(struct work_struct *work)
2344 /* Perform RCU core processing work for the current CPU. */
2345 static __latent_entropy void rcu_core(void)
2347 unsigned long flags;
2348 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2349 struct rcu_node *rnp = rdp->mynode;
2351 * On RT rcu_core() can be preempted when IRQs aren't disabled.
2352 * Therefore this function can race with concurrent NOCB (de-)offloading
2353 * on this CPU and the below condition must be considered volatile.
2354 * However if we race with:
2356 * _ Offloading: In the worst case we accelerate or process callbacks
2357 * concurrently with NOCB kthreads. We are guaranteed to
2358 * call rcu_nocb_lock() if that happens.
2360 * _ Deoffloading: In the worst case we miss callbacks acceleration or
2361 * processing. This is fine because the early stage
2362 * of deoffloading invokes rcu_core() after setting
2363 * SEGCBLIST_RCU_CORE. So we guarantee that we'll process
2364 * what could have been dismissed without the need to wait
2365 * for the next rcu_pending() check in the next jiffy.
2367 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2369 if (cpu_is_offline(smp_processor_id()))
2371 trace_rcu_utilization(TPS("Start RCU core"));
2372 WARN_ON_ONCE(!rdp->beenonline);
2374 /* Report any deferred quiescent states if preemption enabled. */
2375 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2376 rcu_preempt_deferred_qs(current);
2377 } else if (rcu_preempt_need_deferred_qs(current)) {
2378 set_tsk_need_resched(current);
2379 set_preempt_need_resched();
2382 /* Update RCU state based on any recent quiescent states. */
2383 rcu_check_quiescent_state(rdp);
2385 /* No grace period and unregistered callbacks? */
2386 if (!rcu_gp_in_progress() &&
2387 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2388 rcu_nocb_lock_irqsave(rdp, flags);
2389 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2390 rcu_accelerate_cbs_unlocked(rnp, rdp);
2391 rcu_nocb_unlock_irqrestore(rdp, flags);
2394 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2396 /* If there are callbacks ready, invoke them. */
2397 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2398 likely(READ_ONCE(rcu_scheduler_fully_active))) {
2400 /* Re-invoke RCU core processing if there are callbacks remaining. */
2401 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2405 /* Do any needed deferred wakeups of rcuo kthreads. */
2406 do_nocb_deferred_wakeup(rdp);
2407 trace_rcu_utilization(TPS("End RCU core"));
2409 // If strict GPs, schedule an RCU reader in a clean environment.
2410 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2411 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2414 static void rcu_core_si(struct softirq_action *h)
2419 static void rcu_wake_cond(struct task_struct *t, int status)
2422 * If the thread is yielding, only wake it when this
2423 * is invoked from idle
2425 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2429 static void invoke_rcu_core_kthread(void)
2431 struct task_struct *t;
2432 unsigned long flags;
2434 local_irq_save(flags);
2435 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2436 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2437 if (t != NULL && t != current)
2438 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2439 local_irq_restore(flags);
2443 * Wake up this CPU's rcuc kthread to do RCU core processing.
2445 static void invoke_rcu_core(void)
2447 if (!cpu_online(smp_processor_id()))
2450 raise_softirq(RCU_SOFTIRQ);
2452 invoke_rcu_core_kthread();
2455 static void rcu_cpu_kthread_park(unsigned int cpu)
2457 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2460 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2462 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2466 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2467 * the RCU softirq used in configurations of RCU that do not support RCU
2468 * priority boosting.
2470 static void rcu_cpu_kthread(unsigned int cpu)
2472 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2473 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2474 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2477 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2478 for (spincnt = 0; spincnt < 10; spincnt++) {
2479 WRITE_ONCE(*j, jiffies);
2481 *statusp = RCU_KTHREAD_RUNNING;
2482 local_irq_disable();
2484 WRITE_ONCE(*workp, 0);
2489 if (!READ_ONCE(*workp)) {
2490 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2491 *statusp = RCU_KTHREAD_WAITING;
2495 *statusp = RCU_KTHREAD_YIELDING;
2496 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2497 schedule_timeout_idle(2);
2498 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2499 *statusp = RCU_KTHREAD_WAITING;
2500 WRITE_ONCE(*j, jiffies);
2503 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2504 .store = &rcu_data.rcu_cpu_kthread_task,
2505 .thread_should_run = rcu_cpu_kthread_should_run,
2506 .thread_fn = rcu_cpu_kthread,
2507 .thread_comm = "rcuc/%u",
2508 .setup = rcu_cpu_kthread_setup,
2509 .park = rcu_cpu_kthread_park,
2513 * Spawn per-CPU RCU core processing kthreads.
2515 static int __init rcu_spawn_core_kthreads(void)
2519 for_each_possible_cpu(cpu)
2520 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2523 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2524 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2529 * Handle any core-RCU processing required by a call_rcu() invocation.
2531 static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2532 unsigned long flags)
2535 * If called from an extended quiescent state, invoke the RCU
2536 * core in order to force a re-evaluation of RCU's idleness.
2538 if (!rcu_is_watching())
2541 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2542 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2546 * Force the grace period if too many callbacks or too long waiting.
2547 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2548 * if some other CPU has recently done so. Also, don't bother
2549 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2550 * is the only one waiting for a grace period to complete.
2552 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2553 rdp->qlen_last_fqs_check + qhimark)) {
2555 /* Are we ignoring a completed grace period? */
2556 note_gp_changes(rdp);
2558 /* Start a new grace period if one not already started. */
2559 if (!rcu_gp_in_progress()) {
2560 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2562 /* Give the grace period a kick. */
2563 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2564 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2565 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2566 rcu_force_quiescent_state();
2567 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2568 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2574 * RCU callback function to leak a callback.
2576 static void rcu_leak_callback(struct rcu_head *rhp)
2581 * Check and if necessary update the leaf rcu_node structure's
2582 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2583 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
2584 * structure's ->lock.
2586 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2588 raw_lockdep_assert_held_rcu_node(rnp);
2589 if (qovld_calc <= 0)
2590 return; // Early boot and wildcard value set.
2591 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2592 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2594 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2598 * Check and if necessary update the leaf rcu_node structure's
2599 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2600 * number of queued RCU callbacks. No locks need be held, but the
2601 * caller must have disabled interrupts.
2603 * Note that this function ignores the possibility that there are a lot
2604 * of callbacks all of which have already seen the end of their respective
2605 * grace periods. This omission is due to the need for no-CBs CPUs to
2606 * be holding ->nocb_lock to do this check, which is too heavy for a
2607 * common-case operation.
2609 static void check_cb_ovld(struct rcu_data *rdp)
2611 struct rcu_node *const rnp = rdp->mynode;
2613 if (qovld_calc <= 0 ||
2614 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2615 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2616 return; // Early boot wildcard value or already set correctly.
2617 raw_spin_lock_rcu_node(rnp);
2618 check_cb_ovld_locked(rdp, rnp);
2619 raw_spin_unlock_rcu_node(rnp);
2623 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
2625 static atomic_t doublefrees;
2626 unsigned long flags;
2628 struct rcu_data *rdp;
2631 /* Misaligned rcu_head! */
2632 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2634 if (debug_rcu_head_queue(head)) {
2636 * Probable double call_rcu(), so leak the callback.
2637 * Use rcu:rcu_callback trace event to find the previous
2638 * time callback was passed to call_rcu().
2640 if (atomic_inc_return(&doublefrees) < 4) {
2641 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
2644 WRITE_ONCE(head->func, rcu_leak_callback);
2649 kasan_record_aux_stack_noalloc(head);
2650 local_irq_save(flags);
2651 rdp = this_cpu_ptr(&rcu_data);
2652 lazy = lazy_in && !rcu_async_should_hurry();
2654 /* Add the callback to our list. */
2655 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2656 // This can trigger due to call_rcu() from offline CPU:
2657 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2658 WARN_ON_ONCE(!rcu_is_watching());
2659 // Very early boot, before rcu_init(). Initialize if needed
2660 // and then drop through to queue the callback.
2661 if (rcu_segcblist_empty(&rdp->cblist))
2662 rcu_segcblist_init(&rdp->cblist);
2666 if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy))
2667 return; // Enqueued onto ->nocb_bypass, so just leave.
2668 // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
2669 rcu_segcblist_enqueue(&rdp->cblist, head);
2670 if (__is_kvfree_rcu_offset((unsigned long)func))
2671 trace_rcu_kvfree_callback(rcu_state.name, head,
2672 (unsigned long)func,
2673 rcu_segcblist_n_cbs(&rdp->cblist));
2675 trace_rcu_callback(rcu_state.name, head,
2676 rcu_segcblist_n_cbs(&rdp->cblist));
2678 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2680 /* Go handle any RCU core processing required. */
2681 if (unlikely(rcu_rdp_is_offloaded(rdp))) {
2682 __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2684 __call_rcu_core(rdp, head, flags);
2685 local_irq_restore(flags);
2689 #ifdef CONFIG_RCU_LAZY
2691 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
2692 * flush all lazy callbacks (including the new one) to the main ->cblist while doing so.
2695 * @head: structure to be used for queueing the RCU updates.
2696 * @func: actual callback function to be invoked after the grace period
2698 * The callback function will be invoked some time after a full grace
2699 * period elapses, in other words after all pre-existing RCU read-side
2700 * critical sections have completed.
2702 * Use this API instead of call_rcu() if you don't want the callback to be
2703 * invoked after very long periods of time, which can happen on systems without
2704 * memory pressure and on systems which are lightly loaded or mostly idle.
2705 * This function will cause callbacks to be invoked sooner rather than later, at the
2706 * expense of extra power. Other than that, this function is identical to, and
2707 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
2708 * ordering and other functionality.
2710 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
2712 return __call_rcu_common(head, func, false);
2714 EXPORT_SYMBOL_GPL(call_rcu_hurry);
2718 * call_rcu() - Queue an RCU callback for invocation after a grace period.
2719 * By default the callbacks are 'lazy' and are kept hidden from the main
2720 * ->cblist to prevent starting of grace periods too soon.
2721 * If you desire grace periods to start very soon, use call_rcu_hurry().
2723 * @head: structure to be used for queueing the RCU updates.
2724 * @func: actual callback function to be invoked after the grace period
2726 * The callback function will be invoked some time after a full grace
2727 * period elapses, in other words after all pre-existing RCU read-side
2728 * critical sections have completed. However, the callback function
2729 * might well execute concurrently with RCU read-side critical sections
2730 * that started after call_rcu() was invoked.
2732 * RCU read-side critical sections are delimited by rcu_read_lock()
2733 * and rcu_read_unlock(), and may be nested. In addition, but only in
2734 * v5.0 and later, regions of code across which interrupts, preemption,
2735 * or softirqs have been disabled also serve as RCU read-side critical
2736 * sections. This includes hardware interrupt handlers, softirq handlers, and NMI handlers.
2739 * Note that all CPUs must agree that the grace period extended beyond
2740 * all pre-existing RCU read-side critical sections. On systems with more
2741 * than one CPU, this means that when "func()" is invoked, each CPU is
2742 * guaranteed to have executed a full memory barrier since the end of its
2743 * last RCU read-side critical section whose beginning preceded the call
2744 * to call_rcu(). It also means that each CPU executing an RCU read-side
2745 * critical section that continues beyond the start of "func()" must have
2746 * executed a memory barrier after the call_rcu() but before the beginning
2747 * of that RCU read-side critical section. Note that these guarantees
2748 * include CPUs that are offline, idle, or executing in user mode, as
2749 * well as CPUs that are executing in the kernel.
2751 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2752 * resulting RCU callback function "func()", then both CPU A and CPU B are
2753 * guaranteed to execute a full memory barrier during the time interval
2754 * between the call to call_rcu() and the invocation of "func()" -- even
2755 * if CPU A and CPU B are the same CPU (but again only if the system has
2756 * more than one CPU).
2758 * Implementation of these memory-ordering guarantees is described here:
2759 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
2761 void call_rcu(struct rcu_head *head, rcu_callback_t func)
2763 return __call_rcu_common(head, func, IS_ENABLED(CONFIG_RCU_LAZY));
2765 EXPORT_SYMBOL_GPL(call_rcu);
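/*
 * Illustrative sketch only, not part of the RCU implementation: a typical
 * (hypothetical) call_rcu() user replaces an RCU-protected pointer and then
 * defers freeing of the old version until all pre-existing readers are done.
 * "struct example_cfg" and the helpers below are made up for this example;
 * call_rcu_hurry() could be substituted where the laziness of call_rcu()
 * is undesirable.
 */
struct example_cfg {
	int threshold;
	struct rcu_head rcu;
};

static void example_cfg_free_cb(struct rcu_head *rhp)
{
	/* Runs after a grace period, so no reader can still see the object. */
	kfree(container_of(rhp, struct example_cfg, rcu));
}

static void __maybe_unused example_cfg_replace(struct example_cfg __rcu **slot,
					       struct example_cfg *newcfg)
{
	struct example_cfg *oldcfg;

	/* The caller is assumed to hold the update-side lock for *slot. */
	oldcfg = rcu_dereference_protected(*slot, 1);
	rcu_assign_pointer(*slot, newcfg);
	if (oldcfg)
		call_rcu(&oldcfg->rcu, example_cfg_free_cb);
}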
2767 /* Maximum number of jiffies to wait before draining a batch. */
2768 #define KFREE_DRAIN_JIFFIES (5 * HZ)
2769 #define KFREE_N_BATCHES 2
2770 #define FREE_N_CHANNELS 2
2773 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
2774 * @list: List node. All blocks are linked between each other
2775 * @gp_snap: Snapshot of RCU state for objects placed to this bulk
2776 * @nr_records: Number of active pointers in the array
2777 * @records: Array of the kvfree_rcu() pointers
2779 struct kvfree_rcu_bulk_data {
2780 struct list_head list;
2781 struct rcu_gp_oldstate gp_snap;
2782 unsigned long nr_records;
2787 * This macro defines how many entries the "records" array
2788 * will contain. It is sized so that the
2789 * kvfree_rcu_bulk_data structure occupies exactly one page.
2791 #define KVFREE_BULK_MAX_ENTR \
2792 ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
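/*
 * For a rough sense of scale (an estimate, since both quantities are
 * architecture-dependent): with 4 KiB pages and 8-byte pointers, the
 * header fields of kvfree_rcu_bulk_data occupy a few dozen bytes, so
 * each block holds roughly 500 kvfree_rcu() pointers.
 */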
2795 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
2796 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
2797 * @head_free: List of kfree_rcu() objects waiting for a grace period
2798 * @head_free_gp_snap: Grace-period snapshot to check for attempted premature frees.
2799 * @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
2800 * @krcp: Pointer to @kfree_rcu_cpu structure
2803 struct kfree_rcu_cpu_work {
2804 struct rcu_work rcu_work;
2805 struct rcu_head *head_free;
2806 struct rcu_gp_oldstate head_free_gp_snap;
2807 struct list_head bulk_head_free[FREE_N_CHANNELS];
2808 struct kfree_rcu_cpu *krcp;
2812 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
2813 * @head: List of kfree_rcu() objects not yet waiting for a grace period
2814 * @head_gp_snap: Snapshot of RCU state for objects placed to "@head"
2815 * @bulk_head: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
2816 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
2817 * @lock: Synchronize access to this structure
2818 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
2819 * @initialized: The @rcu_work fields have been initialized
2820 * @head_count: Number of objects in rcu_head singular list
2821 * @bulk_count: Number of objects in bulk-list
2823 * @bkvcache: A simple cache list that holds objects for reuse.
2824 * In order to save some per-CPU space the list is singly linked.
2825 * Even though it is lockless, access to it must be protected by @lock.
2827 * @page_cache_work: A work to refill the cache when it is empty
2828 * @backoff_page_cache_fill: Delay cache refills
2829 * @work_in_progress: Indicates that page_cache_work is running
2830 * @hrtimer: A hrtimer for scheduling a page_cache_work
2831 * @nr_bkv_objs: number of allocated objects at @bkvcache.
2833 * This is a per-CPU structure. The reason that it is not included in
2834 * the rcu_data structure is to permit this code to be extracted from
2835 * the RCU files. Such extraction could allow further optimization of
2836 * the interactions with the slab allocators.
2838 struct kfree_rcu_cpu {
2839 // Objects queued on a linked list
2840 // through their rcu_head structures.
2841 struct rcu_head *head;
2842 unsigned long head_gp_snap;
2843 atomic_t head_count;
2845 // Objects queued on a bulk-list.
2846 struct list_head bulk_head[FREE_N_CHANNELS];
2847 atomic_t bulk_count[FREE_N_CHANNELS];
2849 struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
2850 raw_spinlock_t lock;
2851 struct delayed_work monitor_work;
2854 struct delayed_work page_cache_work;
2855 atomic_t backoff_page_cache_fill;
2856 atomic_t work_in_progress;
2857 struct hrtimer hrtimer;
2859 struct llist_head bkvcache;
2863 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
2864 .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
2867 static __always_inline void
2868 debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
2870 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2873 for (i = 0; i < bhead->nr_records; i++)
2874 debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
2878 static inline struct kfree_rcu_cpu *
2879 krc_this_cpu_lock(unsigned long *flags)
2881 struct kfree_rcu_cpu *krcp;
2883 local_irq_save(*flags); // For safely calling this_cpu_ptr().
2884 krcp = this_cpu_ptr(&krc);
2885 raw_spin_lock(&krcp->lock);
2891 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
2893 raw_spin_unlock_irqrestore(&krcp->lock, flags);
2896 static inline struct kvfree_rcu_bulk_data *
2897 get_cached_bnode(struct kfree_rcu_cpu *krcp)
2899 if (!krcp->nr_bkv_objs)
2902 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
2903 return (struct kvfree_rcu_bulk_data *)
2904 llist_del_first(&krcp->bkvcache);
2908 put_cached_bnode(struct kfree_rcu_cpu *krcp,
2909 struct kvfree_rcu_bulk_data *bnode)
2912 if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
2915 llist_add((struct llist_node *) bnode, &krcp->bkvcache);
2916 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
2921 drain_page_cache(struct kfree_rcu_cpu *krcp)
2923 unsigned long flags;
2924 struct llist_node *page_list, *pos, *n;
2927 if (!rcu_min_cached_objs)
2930 raw_spin_lock_irqsave(&krcp->lock, flags);
2931 page_list = llist_del_all(&krcp->bkvcache);
2932 WRITE_ONCE(krcp->nr_bkv_objs, 0);
2933 raw_spin_unlock_irqrestore(&krcp->lock, flags);
2935 llist_for_each_safe(pos, n, page_list) {
2936 free_page((unsigned long)pos);
2944 kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
2945 struct kvfree_rcu_bulk_data *bnode, int idx)
2947 unsigned long flags;
2950 if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
2951 debug_rcu_bhead_unqueue(bnode);
2952 rcu_lock_acquire(&rcu_callback_map);
2953 if (idx == 0) { // kmalloc() / kfree().
2954 trace_rcu_invoke_kfree_bulk_callback(
2955 rcu_state.name, bnode->nr_records,
2958 kfree_bulk(bnode->nr_records, bnode->records);
2959 } else { // vmalloc() / vfree().
2960 for (i = 0; i < bnode->nr_records; i++) {
2961 trace_rcu_invoke_kvfree_callback(
2962 rcu_state.name, bnode->records[i], 0);
2964 vfree(bnode->records[i]);
2967 rcu_lock_release(&rcu_callback_map);
2970 raw_spin_lock_irqsave(&krcp->lock, flags);
2971 if (put_cached_bnode(krcp, bnode))
2973 raw_spin_unlock_irqrestore(&krcp->lock, flags);
2976 free_page((unsigned long) bnode);
2978 cond_resched_tasks_rcu_qs();
2982 kvfree_rcu_list(struct rcu_head *head)
2984 struct rcu_head *next;
2986 for (; head; head = next) {
2987 void *ptr = (void *) head->func;
2988 unsigned long offset = (void *) head - ptr;
2991 debug_rcu_head_unqueue((struct rcu_head *)ptr);
2992 rcu_lock_acquire(&rcu_callback_map);
2993 trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
2995 if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
2998 rcu_lock_release(&rcu_callback_map);
2999 cond_resched_tasks_rcu_qs();
3004 * This function is invoked in workqueue context after a grace period.
3005 * It frees all the objects queued on ->bulk_head_free or ->head_free.
3007 static void kfree_rcu_work(struct work_struct *work)
3009 unsigned long flags;
3010 struct kvfree_rcu_bulk_data *bnode, *n;
3011 struct list_head bulk_head[FREE_N_CHANNELS];
3012 struct rcu_head *head;
3013 struct kfree_rcu_cpu *krcp;
3014 struct kfree_rcu_cpu_work *krwp;
3015 struct rcu_gp_oldstate head_gp_snap;
3018 krwp = container_of(to_rcu_work(work),
3019 struct kfree_rcu_cpu_work, rcu_work);
3022 raw_spin_lock_irqsave(&krcp->lock, flags);
3023 // Channels 1 and 2.
3024 for (i = 0; i < FREE_N_CHANNELS; i++)
3025 list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]);
3028 head = krwp->head_free;
3029 krwp->head_free = NULL;
3030 head_gp_snap = krwp->head_free_gp_snap;
3031 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3033 // Handle the first two channels.
3034 for (i = 0; i < FREE_N_CHANNELS; i++) {
3035 // Start from the tail page, so a GP is likely passed for it.
3036 list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
3037 kvfree_rcu_bulk(krcp, bnode, i);
3041 * This is used when the "bulk" path cannot be used for the
3042 * double-argument variant of kvfree_rcu(). This happens when the
3043 * page-cache is empty, which means that objects are instead
3044 * queued on a linked list through their rcu_head structures.
3045 * This list is named "Channel 3".
3047 if (head && !WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&head_gp_snap)))
3048 kvfree_rcu_list(head);
3052 need_offload_krc(struct kfree_rcu_cpu *krcp)
3056 for (i = 0; i < FREE_N_CHANNELS; i++)
3057 if (!list_empty(&krcp->bulk_head[i]))
3060 return !!READ_ONCE(krcp->head);
3064 need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
3068 for (i = 0; i < FREE_N_CHANNELS; i++)
3069 if (!list_empty(&krwp->bulk_head_free[i]))
3072 return !!krwp->head_free;
3075 static int krc_count(struct kfree_rcu_cpu *krcp)
3077 int sum = atomic_read(&krcp->head_count);
3080 for (i = 0; i < FREE_N_CHANNELS; i++)
3081 sum += atomic_read(&krcp->bulk_count[i]);
3087 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
3089 long delay, delay_left;
3091 delay = krc_count(krcp) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES;
3092 if (delayed_work_pending(&krcp->monitor_work)) {
3093 delay_left = krcp->monitor_work.timer.expires - jiffies;
3094 if (delay < delay_left)
3095 mod_delayed_work(system_wq, &krcp->monitor_work, delay);
3098 queue_delayed_work(system_wq, &krcp->monitor_work, delay);
3102 kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
3104 struct list_head bulk_ready[FREE_N_CHANNELS];
3105 struct kvfree_rcu_bulk_data *bnode, *n;
3106 struct rcu_head *head_ready = NULL;
3107 unsigned long flags;
3110 raw_spin_lock_irqsave(&krcp->lock, flags);
3111 for (i = 0; i < FREE_N_CHANNELS; i++) {
3112 INIT_LIST_HEAD(&bulk_ready[i]);
3114 list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
3115 if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
3118 atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
3119 list_move(&bnode->list, &bulk_ready[i]);
3123 if (krcp->head && poll_state_synchronize_rcu(krcp->head_gp_snap)) {
3124 head_ready = krcp->head;
3125 atomic_set(&krcp->head_count, 0);
3126 WRITE_ONCE(krcp->head, NULL);
3128 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3130 for (i = 0; i < FREE_N_CHANNELS; i++) {
3131 list_for_each_entry_safe(bnode, n, &bulk_ready[i], list)
3132 kvfree_rcu_bulk(krcp, bnode, i);
3136 kvfree_rcu_list(head_ready);
3140 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3142 static void kfree_rcu_monitor(struct work_struct *work)
3144 struct kfree_rcu_cpu *krcp = container_of(work,
3145 struct kfree_rcu_cpu, monitor_work.work);
3146 unsigned long flags;
3149 // Drain ready for reclaim.
3150 kvfree_rcu_drain_ready(krcp);
3152 raw_spin_lock_irqsave(&krcp->lock, flags);
3154 // Attempt to start a new batch.
3155 for (i = 0; i < KFREE_N_BATCHES; i++) {
3156 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3158 // Try to detach bulk_head or head and attach it to krwp, but
3159 // only when all channels are free. If any channel is busy,
3160 // krwp still has in-flight RCU work handling its previous batch.
3161 if (need_wait_for_krwp_work(krwp))
3164 // kvfree_rcu_drain_ready() might handle this krcp, if so give up.
3165 if (need_offload_krc(krcp)) {
3166 // Channel 1 corresponds to the SLAB-pointer bulk path.
3167 // Channel 2 corresponds to vmalloc-pointer bulk path.
3168 for (j = 0; j < FREE_N_CHANNELS; j++) {
3169 if (list_empty(&krwp->bulk_head_free[j])) {
3170 atomic_set(&krcp->bulk_count[j], 0);
3171 list_replace_init(&krcp->bulk_head[j],
3172 &krwp->bulk_head_free[j]);
3176 // Channel 3 corresponds to both SLAB and vmalloc
3177 // objects queued on the linked list.
3178 if (!krwp->head_free) {
3179 krwp->head_free = krcp->head;
3180 get_state_synchronize_rcu_full(&krwp->head_free_gp_snap);
3181 atomic_set(&krcp->head_count, 0);
3182 WRITE_ONCE(krcp->head, NULL);
3185 // One work item serves one batch, and each batch can handle
3186 // all three "free channels". The work may already be
3187 // pending while the channels are being detached one
3188 // after another.
3190 queue_rcu_work(system_wq, &krwp->rcu_work);
3194 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3196 // If there is nothing to detach, our job is done. If at least
3197 // one of the channels is still busy, rearm the work to
3198 // repeat the attempt later, because previous batches are
3199 // still in progress.
3201 if (need_offload_krc(krcp))
3202 schedule_delayed_monitor_work(krcp);
3205 static enum hrtimer_restart
3206 schedule_page_work_fn(struct hrtimer *t)
3208 struct kfree_rcu_cpu *krcp =
3209 container_of(t, struct kfree_rcu_cpu, hrtimer);
3211 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3212 return HRTIMER_NORESTART;
3215 static void fill_page_cache_func(struct work_struct *work)
3217 struct kvfree_rcu_bulk_data *bnode;
3218 struct kfree_rcu_cpu *krcp =
3219 container_of(work, struct kfree_rcu_cpu,
3220 page_cache_work.work);
3221 unsigned long flags;
3226 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3227 1 : rcu_min_cached_objs;
3229 for (i = READ_ONCE(krcp->nr_bkv_objs); i < nr_pages; i++) {
3230 bnode = (struct kvfree_rcu_bulk_data *)
3231 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3236 raw_spin_lock_irqsave(&krcp->lock, flags);
3237 pushed = put_cached_bnode(krcp, bnode);
3238 raw_spin_unlock_irqrestore(&krcp->lock, flags);
3241 free_page((unsigned long) bnode);
3246 atomic_set(&krcp->work_in_progress, 0);
3247 atomic_set(&krcp->backoff_page_cache_fill, 0);
3251 run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3253 // If cache disabled, bail out.
3254 if (!rcu_min_cached_objs)
3257 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3258 !atomic_xchg(&krcp->work_in_progress, 1)) {
3259 if (atomic_read(&krcp->backoff_page_cache_fill)) {
3260 queue_delayed_work(system_wq,
3261 &krcp->page_cache_work,
3262 msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3264 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3265 krcp->hrtimer.function = schedule_page_work_fn;
3266 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3271 // Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3272 // state specified by flags. If can_alloc is true, the caller must
3273 // be schedulable and not be holding any locks or mutexes that might be
3274 // acquired by the memory allocator or anything that it might invoke.
3275 // Returns true if ptr was successfully recorded, else the caller must
3278 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3279 unsigned long *flags, void *ptr, bool can_alloc)
3281 struct kvfree_rcu_bulk_data *bnode;
3284 *krcp = krc_this_cpu_lock(flags);
3285 if (unlikely(!(*krcp)->initialized))
3288 idx = !!is_vmalloc_addr(ptr);
3289 bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],
3290 struct kvfree_rcu_bulk_data, list);
3292 /* Check if a new block is required. */
3293 if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {
3294 bnode = get_cached_bnode(*krcp);
3295 if (!bnode && can_alloc) {
3296 krc_this_cpu_unlock(*krcp, *flags);
3298 // __GFP_NORETRY - allows a light-weight direct reclaim, which is
3299 // acceptable because it helps minimize how often we hit the
3300 // fallback path. Besides that, it forbids invoking the OOM killer,
3301 // which is also beneficial since we are about to release memory soon.
3303 // __GFP_NOMEMALLOC - prevents consuming all of the memory
3304 // reserves. Note that we have a fallback path anyway.
3306 // __GFP_NOWARN - an allocation is allowed to fail under low-memory
3307 // or high-memory-pressure scenarios, so don't warn about it.
3309 bnode = (struct kvfree_rcu_bulk_data *)
3310 __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3311 raw_spin_lock_irqsave(&(*krcp)->lock, *flags);
3317 // Initialize the new block and attach it.
3318 bnode->nr_records = 0;
3319 list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
3322 // Finally insert and update the GP for this page.
3323 bnode->records[bnode->nr_records++] = ptr;
3324 get_state_synchronize_rcu_full(&bnode->gp_snap);
3325 atomic_inc(&(*krcp)->bulk_count[idx]);
3331 * Queue a request for lazy invocation of the appropriate free routine
3332 * after a grace period. Please note that three paths are maintained,
3333 * two for the common case using arrays of pointers and a third one that
3334 * is used only when the main paths cannot be used, for example, due to memory pressure.
3337 * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
3338 * every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will
3339 * be freed in workqueue context. This allows us to batch requests together to
3340 * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
3342 void kvfree_call_rcu(struct rcu_head *head, void *ptr)
3344 unsigned long flags;
3345 struct kfree_rcu_cpu *krcp;
3349 * Please note that there is a limitation for the head-less
3350 * variant, which is why there is a clear rule for such
3351 * objects: they may be used only from a context in which
3352 * might_sleep() is legal. Everywhere else, please embed an rcu_head into your data.
3358 // Queue the object but don't yet schedule the batch.
3359 if (debug_rcu_head_queue(ptr)) {
3360 // Probable double kfree_rcu(), just leak.
3361 WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3364 // Mark as success and leave.
3368 kasan_record_aux_stack_noalloc(ptr);
3369 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3371 run_page_cache_worker(krcp);
3374 // Inline if kvfree_rcu(one_arg) call.
3378 head->next = krcp->head;
3379 WRITE_ONCE(krcp->head, head);
3380 atomic_inc(&krcp->head_count);
3382 // Take a snapshot for this krcp.
3383 krcp->head_gp_snap = get_state_synchronize_rcu();
3387 // Set timer to drain after KFREE_DRAIN_JIFFIES.
3388 if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
3389 schedule_delayed_monitor_work(krcp);
3392 krc_this_cpu_unlock(krcp, flags);
3395 * Inline kvfree() after synchronize_rcu(). We can do
3396 * this only from a might_sleep() context, so the current
3397 * CPU can pass through a quiescent state while we wait.
3400 debug_rcu_head_unqueue((struct rcu_head *) ptr);
3405 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
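/*
 * Illustrative sketch only, not part of the RCU implementation: callers do
 * not normally invoke kvfree_call_rcu() directly but instead go through the
 * kfree_rcu()/kvfree_rcu() wrappers in <linux/rcupdate.h>.  The structure
 * and helper below are hypothetical.
 */
struct example_blob {
	size_t len;
	struct rcu_head rcu;
	u8 data[];
};

static void __maybe_unused example_blob_retire(struct example_blob __rcu **slot)
{
	struct example_blob *old;

	/* The caller is assumed to hold the update-side lock for *slot. */
	old = rcu_dereference_protected(*slot, 1);
	rcu_assign_pointer(*slot, NULL);
	if (old)
		kfree_rcu(old, rcu);	/* Eventually queued via kvfree_call_rcu(). */
}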
3407 static unsigned long
3408 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3411 unsigned long count = 0;
3413 /* Snapshot count of all CPUs */
3414 for_each_possible_cpu(cpu) {
3415 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3417 count += krc_count(krcp);
3418 count += READ_ONCE(krcp->nr_bkv_objs);
3419 atomic_set(&krcp->backoff_page_cache_fill, 1);
3422 return count == 0 ? SHRINK_EMPTY : count;
3425 static unsigned long
3426 kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3430 for_each_possible_cpu(cpu) {
3432 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3434 count = krc_count(krcp);
3435 count += drain_page_cache(krcp);
3436 kfree_rcu_monitor(&krcp->monitor_work.work);
3438 sc->nr_to_scan -= count;
3441 if (sc->nr_to_scan <= 0)
3445 return freed == 0 ? SHRINK_STOP : freed;
3448 static struct shrinker kfree_rcu_shrinker = {
3449 .count_objects = kfree_rcu_shrink_count,
3450 .scan_objects = kfree_rcu_shrink_scan,
3452 .seeks = DEFAULT_SEEKS,
3455 void __init kfree_rcu_scheduler_running(void)
3459 for_each_possible_cpu(cpu) {
3460 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3462 if (need_offload_krc(krcp))
3463 schedule_delayed_monitor_work(krcp);
3468 * During early boot, any blocking grace-period wait automatically
3469 * implies a grace period.
3471 * Later on, this could in theory be the case for kernels built with
3472 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3473 * is not a common case. Furthermore, this optimization would cause
3474 * the rcu_gp_oldstate structure to expand by 50%, so this potential
3475 * grace-period optimization is ignored once the scheduler is running.
3477 static int rcu_blocking_is_gp(void)
3479 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) {
3487 * synchronize_rcu - wait until a grace period has elapsed.
3489 * Control will return to the caller some time after a full grace
3490 * period has elapsed, in other words after all currently executing RCU
3491 * read-side critical sections have completed. Note, however, that
3492 * upon return from synchronize_rcu(), the caller might well be executing
3493 * concurrently with new RCU read-side critical sections that began while
3494 * synchronize_rcu() was waiting.
3496 * RCU read-side critical sections are delimited by rcu_read_lock()
3497 * and rcu_read_unlock(), and may be nested. In addition, but only in
3498 * v5.0 and later, regions of code across which interrupts, preemption,
3499 * or softirqs have been disabled also serve as RCU read-side critical
3500 * sections. This includes hardware interrupt handlers, softirq handlers, and NMI handlers.
3503 * Note that this guarantee implies further memory-ordering guarantees.
3504 * On systems with more than one CPU, when synchronize_rcu() returns,
3505 * each CPU is guaranteed to have executed a full memory barrier since
3506 * the end of its last RCU read-side critical section whose beginning
3507 * preceded the call to synchronize_rcu(). In addition, each CPU having
3508 * an RCU read-side critical section that extends beyond the return from
3509 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3510 * after the beginning of synchronize_rcu() and before the beginning of
3511 * that RCU read-side critical section. Note that these guarantees include
3512 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3513 * that are executing in the kernel.
3515 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3516 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3517 * to have executed a full memory barrier during the execution of
3518 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3519 * again only if the system has more than one CPU).
3521 * Implementation of these memory-ordering guarantees is described here:
3522 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3524 void synchronize_rcu(void)
3526 unsigned long flags;
3527 struct rcu_node *rnp;
3529 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3530 lock_is_held(&rcu_lock_map) ||
3531 lock_is_held(&rcu_sched_lock_map),
3532 "Illegal synchronize_rcu() in RCU read-side critical section");
3533 if (!rcu_blocking_is_gp()) {
3534 if (rcu_gp_is_expedited())
3535 synchronize_rcu_expedited();
3537 wait_rcu_gp(call_rcu_hurry);
3541 // Context allows vacuous grace periods.
3542 // Note well that this code runs with !PREEMPT && !SMP.
3543 // In addition, all code that advances grace periods runs at
3544 // process level. Therefore, this normal GP overlaps with other
3545 // normal GPs only by being fully nested within them, which allows
3546 // reuse of ->gp_seq_polled_snap.
3547 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3548 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3550 // Update the normal grace-period counters to record
3551 // this grace period, but only those used by the boot CPU.
3552 // The rcu_scheduler_starting() will take care of the rest of these counters.
3554 local_irq_save(flags);
3555 WARN_ON_ONCE(num_online_cpus() > 1);
3556 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3557 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3558 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3559 local_irq_restore(flags);
3561 EXPORT_SYMBOL_GPL(synchronize_rcu);
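/*
 * Illustrative sketch: the classic updater/reader pattern that relies on
 * the guarantees documented above.  "struct foo", foo_global and
 * foo_update_lock are hypothetical names used only for this example.
 *
 *	struct foo {
 *		int a;
 *	};
 *	static struct foo __rcu *foo_global;
 *	static DEFINE_SPINLOCK(foo_update_lock);
 *
 *	static int foo_read_a(void)
 *	{
 *		struct foo *p;
 *		int ret;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(foo_global);
 *		ret = p ? p->a : -1;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 *
 *	static void foo_replace(struct foo *newp)
 *	{
 *		struct foo *oldp;
 *
 *		spin_lock(&foo_update_lock);
 *		oldp = rcu_dereference_protected(foo_global,
 *				lockdep_is_held(&foo_update_lock));
 *		rcu_assign_pointer(foo_global, newp);
 *		spin_unlock(&foo_update_lock);
 *		synchronize_rcu();	// Wait for pre-existing readers.
 *		kfree(oldp);		// No reader can now hold oldp.
 *	}
 */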
3564 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3565 * @rgosp: Place to put state cookie
3567 * Stores into @rgosp a value that will always be treated by functions
3568 * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3569 * has already completed.
3571 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3573 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3574 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3576 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
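/*
 * Illustrative sketch: initializing a cached full-state cookie so that the
 * very first poll reports the grace period as already completed, avoiding a
 * needless wait before any real snapshot has been taken.  foo_gp_cookie is
 * a hypothetical variable used only for this example.
 *
 *	static struct rcu_gp_oldstate foo_gp_cookie;
 *
 *	static void foo_gp_cookie_init(void)
 *	{
 *		get_completed_synchronize_rcu_full(&foo_gp_cookie);
 *		// A later poll_state_synchronize_rcu_full(&foo_gp_cookie)
 *		// is now guaranteed to return true.
 *	}
 */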
3579 * get_state_synchronize_rcu - Snapshot current RCU state
3581 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3582 * or poll_state_synchronize_rcu() to determine whether or not a full
3583 * grace period has elapsed in the meantime.
3585 unsigned long get_state_synchronize_rcu(void)
3588 * Any prior manipulation of RCU-protected data must happen
3589 * before the load from ->gp_seq.
3592 return rcu_seq_snap(&rcu_state.gp_seq_polled);
3594 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
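/*
 * Illustrative sketch: caching a cookie at "retire" time and polling it at
 * "reap" time with poll_state_synchronize_rcu(), which is defined later in
 * this file.  "struct foo" and its helpers are hypothetical names used only
 * for this example.
 *
 *	struct foo {
 *		unsigned long gp_cookie;
 *		void *payload;
 *	};
 *
 *	static void foo_retire(struct foo *fp)
 *	{
 *		fp->gp_cookie = get_state_synchronize_rcu();
 *	}
 *
 *	// Returns true if the payload was freed, false if the caller must
 *	// retry later (or fall back to a blocking wait).
 *	static bool foo_try_reap(struct foo *fp)
 *	{
 *		if (!poll_state_synchronize_rcu(fp->gp_cookie))
 *			return false;
 *		kfree(fp->payload);
 *		fp->payload = NULL;
 *		return true;
 *	}
 */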
3597 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3598 * @rgosp: location to place combined normal/expedited grace-period state
3600 * Places the normal and expedited grace-period states in @rgosp. This
3601 * state value can be passed to a later call to cond_synchronize_rcu_full()
3602 * or poll_state_synchronize_rcu_full() to determine whether or not a
3603 * grace period (whether normal or expedited) has elapsed in the meantime.
3604 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3605 * long, but is guaranteed to see all grace periods. In contrast, the
3606 * combined state occupies less memory, but can sometimes fail to take
3607 * grace periods into account.
3609 * This does not guarantee that the needed grace period will actually start.
3612 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3614 struct rcu_node *rnp = rcu_get_root();
3617 * Any prior manipulation of RCU-protected data must happen
3618 * before the loads from ->gp_seq and ->expedited_sequence.
3621 rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3622 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3624 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
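/*
 * Illustrative sketch: the same retire/reap pattern as above, but using the
 * full-sized cookie so that concurrent expedited grace periods are never
 * missed, at the cost of two unsigned longs of storage.  Names are
 * hypothetical and used only for this example.
 *
 *	struct foo_full {
 *		struct rcu_gp_oldstate gp_cookie;
 *		void *payload;
 *	};
 *
 *	static void foo_full_retire(struct foo_full *fp)
 *	{
 *		get_state_synchronize_rcu_full(&fp->gp_cookie);
 *	}
 *
 *	static bool foo_full_try_reap(struct foo_full *fp)
 *	{
 *		if (!poll_state_synchronize_rcu_full(&fp->gp_cookie))
 *			return false;
 *		kfree(fp->payload);
 *		fp->payload = NULL;
 *		return true;
 *	}
 */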
3627 * Helper function for start_poll_synchronize_rcu() and
3628 * start_poll_synchronize_rcu_full().
3630 static void start_poll_synchronize_rcu_common(void)
3632 unsigned long flags;
3634 struct rcu_data *rdp;
3635 struct rcu_node *rnp;
3637 lockdep_assert_irqs_enabled();
3638 local_irq_save(flags);
3639 rdp = this_cpu_ptr(&rcu_data);
3641 raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3642 // Note it is possible for a grace period to have elapsed between
3643 // the above call to get_state_synchronize_rcu() and the below call
3644 // to rcu_seq_snap. This is OK, the worst that happens is that we
3645 // get a grace period that no one needed. These accesses are ordered
3646 // by smp_mb(), and we are accessing them in the opposite order
3647 // from which they are updated at grace-period start, as required.
3648 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3649 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3651 rcu_gp_kthread_wake();
3655 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3657 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3658 * or poll_state_synchronize_rcu() to determine whether or not a full
3659 * grace period has elapsed in the meantime. If the needed grace period
3660 * is not already slated to start, notifies RCU core of the need for that
3663 * Interrupts must be enabled for the case where it is necessary to awaken
3664 * the grace-period kthread.
3666 unsigned long start_poll_synchronize_rcu(void)
3668 unsigned long gp_seq = get_state_synchronize_rcu();
3670 start_poll_synchronize_rcu_common();
3673 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
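/*
 * Illustrative sketch: start_poll_synchronize_rcu() is a drop-in for
 * get_state_synchronize_rcu() when the caller also wants the needed grace
 * period to be started, so that a later poll is more likely to succeed
 * without blocking.  Interrupts must be enabled at the call site.
 * "struct foo" and foo_retire_and_kick() are hypothetical names used only
 * for this example.
 *
 *	static void foo_retire_and_kick(struct foo *fp)
 *	{
 *		fp->gp_cookie = start_poll_synchronize_rcu();
 *	}
 */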
3676 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3677 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3679 * Places the normal and expedited grace-period states in @rgosp. This
3680 * state value can be passed to a later call to cond_synchronize_rcu_full()
3681 * or poll_state_synchronize_rcu_full() to determine whether or not a
3682 * grace period (whether normal or expedited) has elapsed in the meantime.
3683 * If the needed grace period is not already slated to start, notifies
3684 * RCU core of the need for that grace period.
3686 * Interrupts must be enabled for the case where it is necessary to awaken
3687 * the grace-period kthread.
3689 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3691 get_state_synchronize_rcu_full(rgosp);
3693 start_poll_synchronize_rcu_common();
3695 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3698 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3699 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3701 * If a full RCU grace period has elapsed since the earlier call from
3702 * which @oldstate was obtained, return @true, otherwise return @false.
3703 * If @false is returned, it is the caller's responsibility to invoke this
3704 * function later on until it does return @true. Alternatively, the caller
3705 * can explicitly wait for a grace period, for example, by passing @oldstate
3706 * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited()
3707 * on the one hand or by directly invoking either synchronize_rcu() or
3708 * synchronize_rcu_expedited() on the other.
3710 * Yes, this function does not take counter wrap into account.
3711 * But counter wrap is harmless. If the counter wraps, we have waited for
3712 * more than a billion grace periods (and way more on a 64-bit system!).
3713 * Those needing to keep old state values for very long time periods
3714 * (many hours even on 32-bit systems) should check them occasionally and
3715 * either refresh them or set a flag indicating that the grace period has
3716 * completed. Alternatively, they can use get_completed_synchronize_rcu()
3717 * to get a guaranteed-completed grace-period state.
3719 * In addition, because oldstate compresses the grace-period state for
3720 * both normal and expedited grace periods into a single unsigned long,
3721 * it can miss a grace period when synchronize_rcu() runs concurrently
3722 * with synchronize_rcu_expedited(). If this is unacceptable, please
3723 * instead use the _full() variant of these polling APIs.
3725 * This function provides the same memory-ordering guarantees that
3726 * would be provided by a synchronize_rcu() that was invoked at the call
3727 * to the function that provided @oldstate, and that returned at the end of this function.
3730 bool poll_state_synchronize_rcu(unsigned long oldstate)
3732 if (oldstate == RCU_GET_STATE_COMPLETED ||
3733 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3734 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3739 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3742 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3743 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3745 * If a full RCU grace period has elapsed since the earlier call from
3746 * which *rgosp was obtained, return @true, otherwise return @false.
3747 * If @false is returned, it is the caller's responsibility to invoke this
3748 * function later on until it does return @true. Alternatively, the caller
3749 * can explicitly wait for a grace period, for example, by passing @rgosp
3750 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3752 * Yes, this function does not take counter wrap into account.
3753 * But counter wrap is harmless. If the counter wraps, we have waited
3754 * for more than a billion grace periods (and way more on a 64-bit
3755 * system!). Those needing to keep rcu_gp_oldstate values for very
3756 * long time periods (many hours even on 32-bit systems) should check
3757 * them occasionally and either refresh them or set a flag indicating
3758 * that the grace period has completed. Alternatively, they can use
3759 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3760 * grace-period state.
3762 * This function provides the same memory-ordering guarantees that would
3763 * be provided by a synchronize_rcu() that was invoked at the call to
3764 * the function that provided @rgosp, and that returned at the end of this
3765 * function. And this guarantee requires that the root rcu_node structure's
3766 * ->gp_seq field be checked instead of that of the rcu_state structure.
3767 * The problem is that the just-ending grace-period's callbacks can be
3768 * invoked between the time that the root rcu_node structure's ->gp_seq
3769 * field is updated and the time that the rcu_state structure's ->gp_seq
3770 * field is updated. Therefore, if a single synchronize_rcu() is to
3771 * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3772 * then the root rcu_node structure is the one that needs to be polled.
3774 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3776 struct rcu_node *rnp = rcu_get_root();
3778 smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3779 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3780 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3781 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3782 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3783 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3788 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
3791 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3792 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3794 * If a full RCU grace period has elapsed since the earlier call to
3795 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3796 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3798 * Yes, this function does not take counter wrap into account.
3799 * But counter wrap is harmless. If the counter wraps, we have waited for
3800 * more than 2 billion grace periods (and way more on a 64-bit system!),
3801 * so waiting for a couple of additional grace periods should be just fine.
3803 * This function provides the same memory-ordering guarantees that
3804 * would be provided by a synchronize_rcu() that was invoked at the call
3805 * to the function that provided @oldstate and that returned at the end of this function.
3808 void cond_synchronize_rcu(unsigned long oldstate)
3810 if (!poll_state_synchronize_rcu(oldstate))
3813 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
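/*
 * Illustrative sketch: using a previously obtained cookie to skip the
 * grace-period wait when it has already elapsed.  "struct foo" and
 * foo_reap_blocking() are hypothetical names used only for this example.
 *
 *	static void foo_reap_blocking(struct foo *fp)
 *	{
 *		cond_synchronize_rcu(fp->gp_cookie);	// Waits only if needed.
 *		kfree(fp->payload);
 *	}
 */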
3816 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3817 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3819 * If a full RCU grace period has elapsed since the call to
3820 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3821 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3822 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait
3823 * for a full grace period.
3825 * Yes, this function does not take counter wrap into account.
3826 * But counter wrap is harmless. If the counter wraps, we have waited for
3827 * more than 2 billion grace periods (and way more on a 64-bit system!),
3828 * so waiting for a couple of additional grace periods should be just fine.
3830 * This function provides the same memory-ordering guarantees that
3831 * would be provided by a synchronize_rcu() that was invoked at the call
3832 * to the function that provided @rgosp and that returned at the end of this function.
3835 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3837 if (!poll_state_synchronize_rcu_full(rgosp))
3840 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
3843 * Check to see if there is any immediate RCU-related work to be done by
3844 * the current CPU, returning 1 if so and zero otherwise. The checks are
3845 * in order of increasing expense: checks that can be carried out against
3846 * CPU-local state are performed first. However, we must check for CPU
3847 * stalls first, else we might not get a chance.
3849 static int rcu_pending(int user)
3851 bool gp_in_progress;
3852 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3853 struct rcu_node *rnp = rdp->mynode;
3855 lockdep_assert_irqs_disabled();
3857 /* Check for CPU stalls, if enabled. */
3858 check_cpu_stall(rdp);
3860 /* Does this CPU need a deferred NOCB wakeup? */
3861 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3864 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3865 if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3868 /* Is the RCU core waiting for a quiescent state from this CPU? */
3869 gp_in_progress = rcu_gp_in_progress();
3870 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3873 /* Does this CPU have callbacks ready to invoke? */
3874 if (!rcu_rdp_is_offloaded(rdp) &&
3875 rcu_segcblist_ready_cbs(&rdp->cblist))
3878 /* Has RCU gone idle with this CPU needing another grace period? */
3879 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3880 !rcu_rdp_is_offloaded(rdp) &&
3881 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3884 /* Have RCU grace period completed or started? */
3885 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3886 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3894 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3895 * the compiler is expected to optimize this away.
3897 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3899 trace_rcu_barrier(rcu_state.name, s, cpu,
3900 atomic_read(&rcu_state.barrier_cpu_count), done);
3904 * RCU callback function for rcu_barrier(). If we are last, wake
3905 * up the task executing rcu_barrier().
3907 * Note that the value of rcu_state.barrier_sequence must be captured
3908 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
3909 * other CPUs might count the value down to zero before this CPU gets
3910 * around to invoking rcu_barrier_trace(), which might result in bogus
3911 * data from the next instance of rcu_barrier().
3913 static void rcu_barrier_callback(struct rcu_head *rhp)
3915 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3917 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3918 rcu_barrier_trace(TPS("LastCB"), -1, s);
3919 complete(&rcu_state.barrier_completion);
3921 rcu_barrier_trace(TPS("CB"), -1, s);
3926 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3928 static void rcu_barrier_entrain(struct rcu_data *rdp)
3930 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3931 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3932 bool wake_nocb = false;
3933 bool was_alldone = false;
3935 lockdep_assert_held(&rcu_state.barrier_lock);
3936 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3938 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3939 rdp->barrier_head.func = rcu_barrier_callback;
3940 debug_rcu_head_queue(&rdp->barrier_head);
3943 * Flush the bypass queue and wake up rcuog if we add callbacks to an empty
3944 * regular queue. This way we don't wait for the bypass timer, which can take
3945 * seconds to fire if the queue is fully lazy.
3947 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3948 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
3949 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3950 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3951 atomic_inc(&rcu_state.barrier_cpu_count);
3953 debug_rcu_head_unqueue(&rdp->barrier_head);
3954 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3956 rcu_nocb_unlock(rdp);
3958 wake_nocb_gp(rdp, false);
3959 smp_store_release(&rdp->barrier_seq_snap, gseq);
3963 * Called with preemption disabled, and from cross-cpu IRQ context.
3965 static void rcu_barrier_handler(void *cpu_in)
3967 uintptr_t cpu = (uintptr_t)cpu_in;
3968 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3970 lockdep_assert_irqs_disabled();
3971 WARN_ON_ONCE(cpu != rdp->cpu);
3972 WARN_ON_ONCE(cpu != smp_processor_id());
3973 raw_spin_lock(&rcu_state.barrier_lock);
3974 rcu_barrier_entrain(rdp);
3975 raw_spin_unlock(&rcu_state.barrier_lock);
3979 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3981 * Note that this primitive does not necessarily wait for an RCU grace period
3982 * to complete. For example, if there are no RCU callbacks queued anywhere
3983 * in the system, then rcu_barrier() is within its rights to return
3984 * immediately, without waiting for anything, much less an RCU grace period.
3986 void rcu_barrier(void)
3989 unsigned long flags;
3991 struct rcu_data *rdp;
3992 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3994 rcu_barrier_trace(TPS("Begin"), -1, s);
3996 /* Take mutex to serialize concurrent rcu_barrier() requests. */
3997 mutex_lock(&rcu_state.barrier_mutex);
3999 /* Did someone else do our work for us? */
4000 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4001 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
4002 smp_mb(); /* caller's subsequent code after above check. */
4003 mutex_unlock(&rcu_state.barrier_mutex);
4007 /* Mark the start of the barrier operation. */
4008 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4009 rcu_seq_start(&rcu_state.barrier_sequence);
4010 gseq = rcu_state.barrier_sequence;
4011 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4014 * Initialize the count to two rather than to zero in order
4015 * to avoid a too-soon return to zero in case of an immediate
4016 * invocation of the just-enqueued callback (or preemption of
4017 * this task). Exclude CPU-hotplug operations to ensure that no
4018 * offline non-offloaded CPU has callbacks queued.
4020 init_completion(&rcu_state.barrier_completion);
4021 atomic_set(&rcu_state.barrier_cpu_count, 2);
4022 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4025 * Force each CPU with callbacks to register a new callback.
4026 * When that callback is invoked, we will know that all of the
4027 * corresponding CPU's preceding callbacks have been invoked.
4029 for_each_possible_cpu(cpu) {
4030 rdp = per_cpu_ptr(&rcu_data, cpu);
4032 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
4034 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4035 if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
4036 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4037 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4038 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
4041 if (!rcu_rdp_cpu_online(rdp)) {
4042 rcu_barrier_entrain(rdp);
4043 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4044 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4045 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
4048 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4049 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
4050 schedule_timeout_uninterruptible(1);
4053 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4054 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
4058 * Now that we have an rcu_barrier_callback() callback on each
4059 * CPU, and each of those callbacks has been counted, remove the initial count.
4061 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4062 complete(&rcu_state.barrier_completion);
4064 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4065 wait_for_completion(&rcu_state.barrier_completion);
4067 /* Mark the end of the barrier operation. */
4068 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4069 rcu_seq_end(&rcu_state.barrier_sequence);
4070 gseq = rcu_state.barrier_sequence;
4071 for_each_possible_cpu(cpu) {
4072 rdp = per_cpu_ptr(&rcu_data, cpu);
4074 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4077 /* Other rcu_barrier() invocations can now safely proceed. */
4078 mutex_unlock(&rcu_state.barrier_mutex);
4080 EXPORT_SYMBOL_GPL(rcu_barrier);
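/*
 * Illustrative sketch: the typical module-unload use of rcu_barrier().
 * call_rcu() callbacks posted by a module may still be pending when the
 * module is removed, so the exit path must wait for them to be invoked
 * before the module's code and data disappear.  foo_unregister_everything()
 * is a hypothetical helper used only for this example.
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		foo_unregister_everything();	// No new call_rcu() after this.
 *		rcu_barrier();			// Wait for pending callbacks.
 *		// Only now may the module's callback functions and
 *		// rcu_head-bearing objects vanish.
 *	}
 */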
4083 * Compute the mask of online CPUs for the specified rcu_node structure.
4084 * This will not be stable unless the rcu_node structure's ->lock is
4085 * held, but the bit corresponding to the current CPU will be stable in most contexts.
4088 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
4090 return READ_ONCE(rnp->qsmaskinitnext);
4094 * Is the CPU corresponding to the specified rcu_data structure online
4095 * from RCU's perspective? This perspective is given by that structure's
4096 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
4098 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
4100 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
4103 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
4106 * Is the current CPU online as far as RCU is concerned?
4108 * Disable preemption to avoid false positives that could otherwise
4109 * happen due to the current CPU number being sampled, this task being
4110 * preempted, its old CPU being taken offline, resuming on some other CPU,
4111 * then determining that its old CPU is now offline.
4113 * Disable checking if in an NMI handler because we cannot safely
4114 * report errors from NMI handlers anyway. In addition, it is OK to use
4115 * RCU on an offline processor during initial boot, hence the check for
4116 * rcu_scheduler_fully_active.
4118 bool rcu_lockdep_current_cpu_online(void)
4120 struct rcu_data *rdp;
4123 if (in_nmi() || !rcu_scheduler_fully_active)
4125 preempt_disable_notrace();
4126 rdp = this_cpu_ptr(&rcu_data);
4128 * Strictly, we care here about the case where the current CPU is
4129 * in rcu_cpu_starting() and thus has an excuse for rdp->grpmask
4130 * not being up to date. So arch_spin_is_locked() might have a
4131 * false positive if it's held by some *other* CPU, but that's
4132 * OK because that just means a false *negative* on the warning.
4134 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
4136 preempt_enable_notrace();
4139 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
4141 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
4143 // Has rcu_init() been invoked? This is used (for example) to determine
4144 // whether spinlocks may be acquired safely.
4145 static bool rcu_init_invoked(void)
4147 return !!rcu_state.n_online_cpus;
4151 * Near the end of the offline process. Trace the fact that this CPU is dying.
4154 int rcutree_dying_cpu(unsigned int cpu)
4157 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4158 struct rcu_node *rnp = rdp->mynode;
4160 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
4163 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
4164 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4165 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
4170 * All CPUs for the specified rcu_node structure have gone offline,
4171 * and all tasks that were preempted within an RCU read-side critical
4172 * section while running on one of those CPUs have since exited their RCU
4173 * read-side critical section. Some other CPU is reporting this fact with
4174 * the specified rcu_node structure's ->lock held and interrupts disabled.
4175 * This function therefore goes up the tree of rcu_node structures,
4176 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
4177 * the leaf rcu_node structure's ->qsmaskinit field has already been
4180 * This function does check that the specified rcu_node structure has
4181 * all CPUs offline and no blocked tasks, so it is OK to invoke it
4182 * prematurely. That said, invoking it after the fact will cost you
4183 * a needless lock acquisition. So once it has done its work, don't invoke it again.
4186 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
4189 struct rcu_node *rnp = rnp_leaf;
4191 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4192 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
4193 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4194 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
4197 mask = rnp->grpmask;
4201 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4202 rnp->qsmaskinit &= ~mask;
4203 /* Between grace periods, so better already be zero! */
4204 WARN_ON_ONCE(rnp->qsmask);
4205 if (rnp->qsmaskinit) {
4206 raw_spin_unlock_rcu_node(rnp);
4207 /* irqs remain disabled. */
4210 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4215 * The CPU has been completely removed, and some other CPU is reporting
4216 * this fact from process context. Do the remainder of the cleanup.
4217 * There can only be one CPU hotplug operation at a time, so no need for
4220 int rcutree_dead_cpu(unsigned int cpu)
4222 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
4225 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4226 // Stop-machine done, so allow nohz_full to disable tick.
4227 tick_dep_clear(TICK_DEP_BIT_RCU);
4232 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4233 * first CPU in a given leaf rcu_node structure coming online. The caller
4234 * must hold the corresponding leaf rcu_node ->lock with interrupts disabled.
4237 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4241 struct rcu_node *rnp = rnp_leaf;
4243 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4244 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4246 mask = rnp->grpmask;
4250 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4251 oldmask = rnp->qsmaskinit;
4252 rnp->qsmaskinit |= mask;
4253 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4260 * Do boot-time initialization of a CPU's per-CPU RCU data.
4263 rcu_boot_init_percpu_data(int cpu)
4265 struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4266 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4268 /* Set up local state, ensuring consistent view of global state. */
4269 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4270 INIT_WORK(&rdp->strict_work, strict_work_handler);
4271 WARN_ON_ONCE(ct->dynticks_nesting != 1);
4272 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu)));
4273 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4274 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4275 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4276 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4277 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4278 rdp->last_sched_clock = jiffies;
4280 rcu_boot_init_nocb_percpu_data(rdp);
4284 * Invoked early in the CPU-online process, when pretty much all services
4285 * are available. The incoming CPU is not present.
4287 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4288 * offline event can be happening at a given time. Note also that we can
4289 * accept some slop in the rsp->gp_seq access due to the fact that this
4290 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4291 * And any offloaded callbacks are being numbered elsewhere.
4293 int rcutree_prepare_cpu(unsigned int cpu)
4295 unsigned long flags;
4296 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4297 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4298 struct rcu_node *rnp = rcu_get_root();
4300 /* Set up local state, ensuring consistent view of global state. */
4301 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4302 rdp->qlen_last_fqs_check = 0;
4303 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4304 rdp->blimit = blimit;
4305 ct->dynticks_nesting = 1; /* CPU not up, no tearing. */
4306 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4309 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4312 if (!rcu_segcblist_is_enabled(&rdp->cblist))
4313 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
4316 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4317 * propagation up the rcu_node tree will happen at the beginning
4318 * of the next grace period.
4321 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4322 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4323 rdp->gp_seq_needed = rdp->gp_seq;
4324 rdp->cpu_no_qs.b.norm = true;
4325 rdp->core_needs_qs = false;
4326 rdp->rcu_iw_pending = false;
4327 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4328 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4329 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4330 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4331 rcu_spawn_one_boost_kthread(rnp);
4332 rcu_spawn_cpu_nocb_kthread(cpu);
4333 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4339 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
4341 static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4343 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4345 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4349 * Has the specified (known valid) CPU ever been fully online?
4351 bool rcu_cpu_beenfullyonline(int cpu)
4353 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4355 return smp_load_acquire(&rdp->beenonline);
4359 * Near the end of the CPU-online process. Pretty much all services
4360 * enabled, and the CPU is now very much alive.
4362 int rcutree_online_cpu(unsigned int cpu)
4364 unsigned long flags;
4365 struct rcu_data *rdp;
4366 struct rcu_node *rnp;
4368 rdp = per_cpu_ptr(&rcu_data, cpu);
4370 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4371 rnp->ffmask |= rdp->grpmask;
4372 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4373 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4374 return 0; /* Too early in boot for scheduler work. */
4375 sync_sched_exp_online_cleanup(cpu);
4376 rcutree_affinity_setting(cpu, -1);
4378 // Stop-machine done, so allow nohz_full to disable tick.
4379 tick_dep_clear(TICK_DEP_BIT_RCU);
4384 * Near the beginning of the process. The CPU is still very much alive
4385 * with pretty much all services enabled.
4387 int rcutree_offline_cpu(unsigned int cpu)
4389 unsigned long flags;
4390 struct rcu_data *rdp;
4391 struct rcu_node *rnp;
4393 rdp = per_cpu_ptr(&rcu_data, cpu);
4395 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4396 rnp->ffmask &= ~rdp->grpmask;
4397 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4399 rcutree_affinity_setting(cpu, cpu);
4401 // nohz_full CPUs need the tick for stop-machine to work quickly
4402 tick_dep_set(TICK_DEP_BIT_RCU);
4407 * Mark the specified CPU as being online so that subsequent grace periods
4408 * (both expedited and normal) will wait on it. Note that this means that
4409 * incoming CPUs are not allowed to use RCU read-side critical sections
4410 * until this function is called. Failing to observe this restriction
4411 * will result in lockdep splats.
4413 * Note that this function is special in that it is invoked directly
4414 * from the incoming CPU rather than from the cpuhp_step mechanism.
4415 * This is because this function must be invoked at a precise location.
4416 * This incoming CPU must not have enabled interrupts yet.
4418 void rcu_cpu_starting(unsigned int cpu)
4421 struct rcu_data *rdp;
4422 struct rcu_node *rnp;
4425 lockdep_assert_irqs_disabled();
4426 rdp = per_cpu_ptr(&rcu_data, cpu);
4427 if (rdp->cpu_started)
4429 rdp->cpu_started = true;
4432 mask = rdp->grpmask;
4433 arch_spin_lock(&rcu_state.ofl_lock);
4434 rcu_dynticks_eqs_online();
4435 raw_spin_lock(&rcu_state.barrier_lock);
4436 raw_spin_lock_rcu_node(rnp);
4437 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4438 raw_spin_unlock(&rcu_state.barrier_lock);
4439 newcpu = !(rnp->expmaskinitnext & mask);
4440 rnp->expmaskinitnext |= mask;
4441 /* Allow lockless access for expedited grace periods. */
4442 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4443 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4444 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4445 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4446 rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4448 /* An incoming CPU should never be blocking a grace period. */
4449 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4450 /* rcu_report_qs_rnp() *really* wants some flags to restore */
4451 unsigned long flags;
4453 local_irq_save(flags);
4454 rcu_disable_urgency_upon_qs(rdp);
4455 /* Report QS -after- changing ->qsmaskinitnext! */
4456 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4458 raw_spin_unlock_rcu_node(rnp);
4460 arch_spin_unlock(&rcu_state.ofl_lock);
4461 smp_store_release(&rdp->beenonline, true);
4462 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4466 * The outgoing CPU has no further need of RCU, so remove it from
4467 * the rcu_node tree's ->qsmaskinitnext bit masks.
4469 * Note that this function is special in that it is invoked directly
4470 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4471 * This is because this function must be invoked at a precise location.
4473 void rcu_report_dead(unsigned int cpu)
4475 unsigned long flags, seq_flags;
4477 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4478 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4480 // Do any dangling deferred wakeups.
4481 do_nocb_deferred_wakeup(rdp);
4483 rcu_preempt_deferred_qs(current);
4485 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4486 mask = rdp->grpmask;
4487 local_irq_save(seq_flags);
4488 arch_spin_lock(&rcu_state.ofl_lock);
4489 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4490 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4491 rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4492 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4493 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4494 rcu_disable_urgency_upon_qs(rdp);
4495 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4496 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4498 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4499 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4500 arch_spin_unlock(&rcu_state.ofl_lock);
4501 local_irq_restore(seq_flags);
4503 rdp->cpu_started = false;
4506 #ifdef CONFIG_HOTPLUG_CPU
4508 * The outgoing CPU has just passed through the dying-idle state, and we
4509 * are being invoked from the CPU that was IPIed to continue the offline
4510 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
4512 void rcutree_migrate_callbacks(int cpu)
4514 unsigned long flags;
4515 struct rcu_data *my_rdp;
4516 struct rcu_node *my_rnp;
4517 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4520 if (rcu_rdp_is_offloaded(rdp) ||
4521 rcu_segcblist_empty(&rdp->cblist))
4522 return; /* No callbacks to migrate. */
4524 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4525 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4526 rcu_barrier_entrain(rdp);
4527 my_rdp = this_cpu_ptr(&rcu_data);
4528 my_rnp = my_rdp->mynode;
4529 rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4530 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4531 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4532 /* Leverage recent GPs and set GP for new callbacks. */
4533 needwake = rcu_advance_cbs(my_rnp, rdp) ||
4534 rcu_advance_cbs(my_rnp, my_rdp);
4535 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4536 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4537 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4538 rcu_segcblist_disable(&rdp->cblist);
4539 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4540 check_cb_ovld_locked(my_rdp, my_rnp);
4541 if (rcu_rdp_is_offloaded(my_rdp)) {
4542 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4543 __call_rcu_nocb_wake(my_rdp, true, flags);
4545 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4546 raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4549 rcu_gp_kthread_wake();
4550 lockdep_assert_irqs_enabled();
4551 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4552 !rcu_segcblist_empty(&rdp->cblist),
4553 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4554 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4555 rcu_segcblist_first_cb(&rdp->cblist));
4560 * On non-huge systems, use expedited RCU grace periods to make suspend
4561 * and hibernation run faster.
4563 static int rcu_pm_notify(struct notifier_block *self,
4564 unsigned long action, void *hcpu)
4567 case PM_HIBERNATION_PREPARE:
4568 case PM_SUSPEND_PREPARE:
4572 case PM_POST_HIBERNATION:
4573 case PM_POST_SUSPEND:
4574 rcu_unexpedite_gp();
4583 #ifdef CONFIG_RCU_EXP_KTHREAD
4584 struct kthread_worker *rcu_exp_gp_kworker;
4585 struct kthread_worker *rcu_exp_par_gp_kworker;
4587 static void __init rcu_start_exp_gp_kworkers(void)
4589 const char *par_gp_kworker_name = "rcu_exp_par_gp_kthread_worker";
4590 const char *gp_kworker_name = "rcu_exp_gp_kthread_worker";
4591 struct sched_param param = { .sched_priority = kthread_prio };
4593 rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
4594 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4595 pr_err("Failed to create %s!\n", gp_kworker_name);
4599 rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
4600 if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
4601 pr_err("Failed to create %s!\n", par_gp_kworker_name);
4602 kthread_destroy_worker(rcu_exp_gp_kworker);
4606 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4607 sched_setscheduler_nocheck(rcu_exp_par_gp_kworker->task, SCHED_FIFO,
4611 static inline void rcu_alloc_par_gp_wq(void)
4614 #else /* !CONFIG_RCU_EXP_KTHREAD */
4615 struct workqueue_struct *rcu_par_gp_wq;
4617 static void __init rcu_start_exp_gp_kworkers(void)
4621 static inline void rcu_alloc_par_gp_wq(void)
4623 rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4624 WARN_ON(!rcu_par_gp_wq);
4626 #endif /* CONFIG_RCU_EXP_KTHREAD */
4629 * Spawn the kthreads that handle RCU's grace periods.
4631 static int __init rcu_spawn_gp_kthread(void)
4633 unsigned long flags;
4634 struct rcu_node *rnp;
4635 struct sched_param sp;
4636 struct task_struct *t;
4637 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4639 rcu_scheduler_fully_active = 1;
4640 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4641 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4644 sp.sched_priority = kthread_prio;
4645 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4647 rnp = rcu_get_root();
4648 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4649 WRITE_ONCE(rcu_state.gp_activity, jiffies);
4650 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4651 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4652 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
4653 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4655 /* This is a pre-SMP initcall, we expect a single CPU */
4656 WARN_ON(num_online_cpus() > 1);
4658 * Those kthreads couldn't be created on rcu_init() -> rcutree_prepare_cpu()
4659 * due to rcu_scheduler_fully_active.
4661 rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4662 rcu_spawn_one_boost_kthread(rdp->mynode);
4663 rcu_spawn_core_kthreads();
4664 /* Create kthread worker for expedited GPs */
4665 rcu_start_exp_gp_kworkers();
4668 early_initcall(rcu_spawn_gp_kthread);
4671 * This function is invoked towards the end of the scheduler's
4672 * initialization process. Before this is called, the idle task might
4673 * contain synchronous grace-period primitives (during which time, this idle
4674 * task is booting the system, and such primitives are no-ops). After this
4675 * function is called, any synchronous grace-period primitives are run as
4676 * expedited, with the requesting task driving the grace period forward.
4677 * A later core_initcall() rcu_set_runtime_mode() will switch to full
4678 * runtime RCU functionality.
4680 void rcu_scheduler_starting(void)
4682 unsigned long flags;
4683 struct rcu_node *rnp;
4685 WARN_ON(num_online_cpus() != 1);
4686 WARN_ON(nr_context_switches() > 0);
4687 rcu_test_sync_prims();
4689 // Fix up the ->gp_seq counters.
4690 local_irq_save(flags);
4691 rcu_for_each_node_breadth_first(rnp)
4692 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4693 local_irq_restore(flags);
4695 // Switch out of early boot mode.
4696 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4697 rcu_test_sync_prims();
4701 * Helper function for rcu_init() that initializes the rcu_state structure.
4703 static void __init rcu_init_one(void)
4705 static const char * const buf[] = RCU_NODE_NAME_INIT;
4706 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4707 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4708 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4710 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
4714 struct rcu_node *rnp;
4716 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
4718 /* Silence gcc 4.8 false positive about array index out of range. */
4719 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4720 panic("rcu_init_one: rcu_num_lvls out of range");
4722 /* Initialize the level-tracking arrays. */
4724 for (i = 1; i < rcu_num_lvls; i++)
4725 rcu_state.level[i] =
4726 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4727 rcu_init_levelspread(levelspread, num_rcu_lvl);
4729 /* Initialize the elements themselves, starting from the leaves. */
4731 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4732 cpustride *= levelspread[i];
4733 rnp = rcu_state.level[i];
4734 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4735 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4736 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4737 &rcu_node_class[i], buf[i]);
4738 raw_spin_lock_init(&rnp->fqslock);
4739 lockdep_set_class_and_name(&rnp->fqslock,
4740 &rcu_fqs_class[i], fqs[i]);
4741 rnp->gp_seq = rcu_state.gp_seq;
4742 rnp->gp_seq_needed = rcu_state.gp_seq;
4743 rnp->completedqs = rcu_state.gp_seq;
4745 rnp->qsmaskinit = 0;
4746 rnp->grplo = j * cpustride;
4747 rnp->grphi = (j + 1) * cpustride - 1;
4748 if (rnp->grphi >= nr_cpu_ids)
4749 rnp->grphi = nr_cpu_ids - 1;
4755 rnp->grpnum = j % levelspread[i - 1];
4756 rnp->grpmask = BIT(rnp->grpnum);
4757 rnp->parent = rcu_state.level[i - 1] +
4758 j / levelspread[i - 1];
4761 INIT_LIST_HEAD(&rnp->blkd_tasks);
4762 rcu_init_one_nocb(rnp);
4763 init_waitqueue_head(&rnp->exp_wq[0]);
4764 init_waitqueue_head(&rnp->exp_wq[1]);
4765 init_waitqueue_head(&rnp->exp_wq[2]);
4766 init_waitqueue_head(&rnp->exp_wq[3]);
4767 spin_lock_init(&rnp->exp_lock);
4768 mutex_init(&rnp->boost_kthread_mutex);
4769 raw_spin_lock_init(&rnp->exp_poll_lock);
4770 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4771 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4775 init_swait_queue_head(&rcu_state.gp_wq);
4776 init_swait_queue_head(&rcu_state.expedited_wq);
4777 rnp = rcu_first_leaf_node();
4778 for_each_possible_cpu(i) {
4779 while (i > rnp->grphi)
4781 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4782 rcu_boot_init_percpu_data(i);
4787 * Force priority from the kernel command-line into range.
4789 static void __init sanitize_kthread_prio(void)
4791 int kthread_prio_in = kthread_prio;
4793 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4794 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4796 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4798 else if (kthread_prio < 0)
4800 else if (kthread_prio > 99)
4803 if (kthread_prio != kthread_prio_in)
4804 pr_alert("%s: Limited prio to %d from %d\n",
4805 __func__, kthread_prio, kthread_prio_in);
4809 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4810 * replace the definitions in tree.h because those are needed to size
4811 * the ->node array in the rcu_state structure.
4813 void rcu_init_geometry(void)
4817 static unsigned long old_nr_cpu_ids;
4818 int rcu_capacity[RCU_NUM_LVLS];
4819 static bool initialized;
4823 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4824 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4826 WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4830 old_nr_cpu_ids = nr_cpu_ids;
4834 * Initialize any unspecified boot parameters.
4835 * The default values of jiffies_till_first_fqs and
4836 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4837 * value, which is a function of HZ, then adding one for each
4838 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4840 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4841 if (jiffies_till_first_fqs == ULONG_MAX)
4842 jiffies_till_first_fqs = d;
4843 if (jiffies_till_next_fqs == ULONG_MAX)
4844 jiffies_till_next_fqs = d;
4845 adjust_jiffies_till_sched_qs();
4847 /* If the compile-time values are accurate, just leave. */
4848 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4849 nr_cpu_ids == NR_CPUS)
4851 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4852 rcu_fanout_leaf, nr_cpu_ids);
4855 * The boot-time rcu_fanout_leaf parameter must be at least two
4856 * and cannot exceed the number of bits in the rcu_node masks.
4857 * Complain and fall back to the compile-time values if this
4858 * limit is exceeded.
4860 if (rcu_fanout_leaf < 2 ||
4861 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4862 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4868 * Compute the number of nodes that can be handled by an rcu_node tree
4869 * with the given number of levels.
4871 rcu_capacity[0] = rcu_fanout_leaf;
4872 for (i = 1; i < RCU_NUM_LVLS; i++)
4873 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4876 * The tree must be able to accommodate the configured number of CPUs.
4877 * If this limit is exceeded, fall back to the compile-time values.
4879 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4880 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4885 /* Calculate the number of levels in the tree. */
4886 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4888 rcu_num_lvls = i + 1;
4890 /* Calculate the number of rcu_nodes at each level of the tree. */
4891 for (i = 0; i < rcu_num_lvls; i++) {
4892 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4893 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4896 /* Calculate the total number of rcu_node structures. */
4898 for (i = 0; i < rcu_num_lvls; i++)
4899 rcu_num_nodes += num_rcu_lvl[i];
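/*
 * Illustrative worked example (assumed parameters, not from any particular
 * configuration): with RCU_FANOUT_LEAF=16, RCU_FANOUT=64 and
 * nr_cpu_ids=1000, rcu_capacity[] starts {16, 1024, 65536, ...}, so two
 * levels suffice (1000 <= 1024) and rcu_num_lvls = 2.  The root level then
 * needs DIV_ROUND_UP(1000, 1024) = 1 rcu_node structure and the leaf level
 * DIV_ROUND_UP(1000, 16) = 63, for rcu_num_nodes = 64.
 */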
4903 * Dump out the structure of the rcu_node combining tree associated
4904 * with the rcu_state structure.
4906 static void __init rcu_dump_rcu_node_tree(void)
4909 struct rcu_node *rnp;
4911 pr_info("rcu_node tree layout dump\n");
4913 rcu_for_each_node_breadth_first(rnp) {
4914 if (rnp->level != level) {
4919 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4924 struct workqueue_struct *rcu_gp_wq;
4926 static void __init kfree_rcu_batch_init(void)
4931 /* Clamp it to the [0:100] seconds interval. */
4932 if (rcu_delay_page_cache_fill_msec < 0 ||
4933 rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
4935 rcu_delay_page_cache_fill_msec =
4936 clamp(rcu_delay_page_cache_fill_msec, 0,
4937 (int) (100 * MSEC_PER_SEC));
4939 pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
4940 rcu_delay_page_cache_fill_msec);
4943 for_each_possible_cpu(cpu) {
4944 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4946 for (i = 0; i < KFREE_N_BATCHES; i++) {
4947 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4948 krcp->krw_arr[i].krcp = krcp;
4950 for (j = 0; j < FREE_N_CHANNELS; j++)
4951 INIT_LIST_HEAD(&krcp->krw_arr[i].bulk_head_free[j]);
4954 for (i = 0; i < FREE_N_CHANNELS; i++)
4955 INIT_LIST_HEAD(&krcp->bulk_head[i]);
4957 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4958 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
4959 krcp->initialized = true;
4961 if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
4962 pr_err("Failed to register kfree_rcu() shrinker!\n");
4965 void __init rcu_init(void)
4967 int cpu = smp_processor_id();
4969 rcu_early_boot_tests();
4971 kfree_rcu_batch_init();
4972 rcu_bootup_announce();
4973 sanitize_kthread_prio();
4974 rcu_init_geometry();
4977 rcu_dump_rcu_node_tree();
4979 open_softirq(RCU_SOFTIRQ, rcu_core_si);
4982 * We don't need protection against CPU-hotplug here because
4983 * this is called early in boot, before either interrupts
4984 * or the scheduler are operational.
4986 pm_notifier(rcu_pm_notify, 0);
4987 WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
4988 rcutree_prepare_cpu(cpu);
4989 rcu_cpu_starting(cpu);
4990 rcutree_online_cpu(cpu);
4992 /* Create workqueue for Tree SRCU and for expedited GPs. */
4993 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4994 WARN_ON(!rcu_gp_wq);
4995 rcu_alloc_par_gp_wq();
4997 /* Fill in default value for rcutree.qovld boot parameter. */
4998 /* -After- the rcu_node ->lock fields are initialized! */
5000 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
5004 // Kick-start in case any polled grace periods started early.
5005 (void)start_poll_synchronize_rcu_expedited();
5007 rcu_test_sync_prims();
5010 #include "tree_stall.h"
5011 #include "tree_exp.h"
5012 #include "tree_nocb.h"
5013 #include "tree_plugin.h"