/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);
/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}
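/*
 * Note: as with the other rcu_seq counters, the low-order RCU_SEQ_CTR_SHIFT
 * bits of ->expedited_sequence encode the phase of the current expedited
 * grace period and the upper bits count completed grace periods, which is
 * why rcu_seq_start() and rcu_seq_end() bracket each expedited grace period
 * above, and why the vacuous-GP path in synchronize_rcu_expedited() below
 * advances the counter by (1 << RCU_SEQ_CTR_SHIFT).
 */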
/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot,
 * and false otherwise.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
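/*
 * Typical use of the above counter primitives (illustrative sketch only,
 * mirroring exp_funnel_lock() and synchronize_rcu_expedited() below):
 *
 *	s = rcu_exp_gp_seq_snap();	// Earliest value meaning "done".
 *	if (!rcu_exp_gp_seq_done(s)) {
 *		// ... start and drive an expedited grace period,
 *		// ... or wait for someone else's to complete ...
 *	}
 *	// A full expedited grace period has now elapsed since the snapshot.
 */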
/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}
/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}
/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}
/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure test happens before caller kfree(). */
		return true;
	}
	return false;
}
/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
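/*
 * Note on the wait-queue indexing above: each rcu_node structure provides
 * several ->exp_wq[] wait queues, and the wait_event() selects one using
 * the low-order bits of the grace-period counter (rcu_seq_ctr(s) & 0x3).
 * The intent is that tasks waiting for distinct (consecutive) expedited
 * grace periods sleep on distinct queues, so that the wake_up_all() at the
 * end of one grace period need not repeatedly wake tasks that must still
 * wait for a later one.
 */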
/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(cpu);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}
static void rcu_exp_sel_wait_wake(unsigned long s);
#ifdef CONFIG_RCU_EXP_KTHREAD
static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_exp_par_gp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/*
	 * Use rcu_exp_par_gp_kworker, because flushing a work item from
	 * another work item on the same kthread worker can result in
	 * deadlock.
	 */
	kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	kthread_flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
	kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
}
#else /* !CONFIG_RCU_EXP_KTHREAD */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_par_gp_wq);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);

	INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/* If all offline, queue the work on an unbound CPU. */
	if (unlikely(cpu > rnp->grphi - rnp->grplo))
		cpu = WORK_CPU_UNBOUND;
	else
		cpu += rnp->grplo;
	queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp);
	queue_work(rcu_gp_wq, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
	destroy_work_on_stack(&rew->rew_work);
}
#endif /* CONFIG_RCU_EXP_KTHREAD */
/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!rcu_gp_par_worker_started() ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No worker started yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		sync_rcu_exp_select_cpus_queue_work(rnp);
		rnp->exp_need_flush = true;
	}

	/* Wait for jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			sync_rcu_exp_select_cpus_flush_work(rnp);
}
/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	// Workqueues should not be signaled.
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0);
	return false;
}
/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();
	unsigned long flags;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_exp_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
		if (synchronize_rcu_expedited_wait_once(1))
			return;
		rcu_for_each_leaf_node(rnp) {
			raw_spin_lock_irqsave_rcu_node(rnp, flags);
			mask = READ_ONCE(rnp->expmask);
			for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				if (cpu_online(cpu))
					tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
			}
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
	}

	for (;;) {
		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
					"D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			data_race(rnp_root->expmask),
			".T"[!!data_race(rnp_root->exp_tasks)]);
		if (ndetected) {
			pr_err("blocking rcu_node structures (internal RCU debug):");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					data_race(rnp->expmask),
					".T"[!!data_race(rnp->exp_tasks)]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				preempt_disable(); // For smp_processor_id() in dump_cpu_task().
				dump_cpu_task(cpu);
				preempt_enable();
			}
			rcu_exp_print_detail_task_stall_rnp(rnp);
		}
		jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
		panic_on_rcu_stall();
	}
}
/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}
/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}
#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If preemption and bottom halves are also
	 * enabled, or if this is an interrupt from idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_is_cpu_rrupt_from_idle()) {
			rcu_report_exp_rdp(rdp);
		} else {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->cpu_no_qs.b.exp
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}
/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rnp->exp_tasks) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}
/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, dumping the stack of each that is blocking the current
 * expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_exp_stall_task_details)
		return;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!READ_ONCE(rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle() ||
	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    READ_ONCE(rdp->cpu_no_qs.b.exp)) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}
/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, we never have to print out
 * tasks blocked within RCU read-side critical sections that are blocking
 * the current expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU read-side critical section, and if so, it sets a
 * flag that causes the outermost rcu_read_unlock() to report the
 * quiescent state for RCU-preempt or asks the scheduler for help for
 * RCU-sched.  On the other hand, if the CPU is not in an RCU read-side
 * critical section, the IPI handler reports the quiescent state
 * immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
	unsigned long flags;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that this call is already a grace period? */
	if (rcu_blocking_is_gp()) {
		// Note well that this code runs with !PREEMPT && !SMP.
		// In addition, all code that advances grace periods runs
		// at process level.  Therefore, this expedited GP overlaps
		// with other expedited GPs only by being fully nested within
		// them, which allows reuse of ->gp_seq_polled_exp_snap.
		rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
		rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);

		local_irq_save(flags);
		WARN_ON_ONCE(num_online_cpus() > 1);
		rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
		local_irq_restore(flags);
		return;  // Context allows vacuous grace periods.
	}

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu_hurry);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(boottime)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		synchronize_rcu_expedited_queue_work(&rew);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Work actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);

	if (likely(!boottime))
		synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
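/*
 * Example update-side usage (illustrative sketch only, not from this file;
 * "p" and its ->list linkage are hypothetical):
 *
 *	list_del_rcu(&p->list);
 *	synchronize_rcu_expedited();	// All pre-existing readers have finished.
 *	kfree(p);
 *
 * As the kernel-doc above notes, batching updates behind a single
 * synchronize_rcu() is preferable when the caller is not latency-critical.
 */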
/*
 * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
 * RCU grace periods that it needs.
 */
static void sync_rcu_do_polled_gp(struct work_struct *wp)
{
	unsigned long flags;
	int i = 0;
	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
	unsigned long s;

	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
	if (s == RCU_GET_STATE_COMPLETED)
		return;
	while (!poll_state_synchronize_rcu(s)) {
		synchronize_rcu_expedited();
		if (i == 10 || i == 20)
			pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
		i++;
	}
	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	if (poll_state_synchronize_rcu(s))
		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
}
/**
 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
 *
 * Returns a cookie to pass to a call to cond_synchronize_rcu(),
 * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
 * allowing them to determine whether or not any sort of grace period has
 * elapsed in the meantime.  If the needed expedited grace period is not
 * already slated to start, initiates that grace period.
 */
unsigned long start_poll_synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long s;

	s = get_state_synchronize_rcu();
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rdp->mynode;
	if (rcu_init_invoked())
		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	if (!poll_state_synchronize_rcu(s)) {
		if (rcu_init_invoked()) {
			rnp->exp_seq_poll_rq = s;
			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
		}
	}
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);

	return s;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);
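/*
 * Example polled usage (illustrative sketch only, not from this file):
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_rcu_expedited();
 *	// ... do other work while the expedited grace period runs ...
 *	cond_synchronize_rcu_expedited(cookie);	// Waits only if still needed.
 *
 * The cookie can equally be handed to poll_state_synchronize_rcu() to test
 * for completion without blocking.
 */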
/**
 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
 * @rgosp: Place to put snapshot of grace-period state
 *
 * Places the normal and expedited grace-period states in rgosp.  This
 * state value can be passed to a later call to cond_synchronize_rcu_full()
 * or poll_state_synchronize_rcu_full() to determine whether or not a
 * grace period (whether normal or expedited) has elapsed in the meantime.
 * If the needed expedited grace period is not already slated to start,
 * initiates that grace period.
 */
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	get_state_synchronize_rcu_full(rgosp);
	(void)start_poll_synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);
/**
 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
 *
 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
 *
 * If any type of full RCU grace period has elapsed since the earlier
 * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
 * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
 * invoke synchronize_rcu_expedited() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @oldstate and that returned at the end
 * of this function.
 */
void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	if (!poll_state_synchronize_rcu(oldstate))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
/**
 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
 *
 * If a full RCU grace period has elapsed since the call to
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
 * obtained, just return.  Otherwise, invoke synchronize_rcu_expedited()
 * to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @rgosp and that returned at the end of
 * this function.
 */
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	if (!poll_state_synchronize_rcu_full(rgosp))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);