/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 */
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */

static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
	return lockdep_is_held(&rdp->nocb_lock);
}

static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	/* Race on early boot between kthread creation and assignment. */
	if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
		return true;

	if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
		return true;

	return false;
}
/*
 * Offload callback processing from the set of CPUs specified at boot time
 * by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
 * created that pull the callbacks from the corresponding CPU, wait for
 * a grace period to elapse, and invoke the callbacks.  These kthreads
 * are organized into GP kthreads, which manage incoming callbacks, wait for
 * grace periods, and awaken CB kthreads, and the CB kthreads, which only
 * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
 * do a wake_up() on their GP kthread when they insert a callback into any
 * empty list, unless the rcu_nocb_poll boot parameter has been specified,
 * in which case each kthread actively polls its CPU.  (Which isn't so great
 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callbacks can also be used as an energy-efficiency
 * measure because CPUs with no RCU callbacks queued are more aggressive
 * about entering dyntick-idle mode.
 */
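/*
 * Rough sketch of the resulting division of labor (illustrative only,
 * not part of the original description):
 *
 *	call_rcu() on a no-CBs CPU
 *	    -> enqueue on that CPU's ->cblist (or ->nocb_bypass)
 *	    -> wake the group's rcuog GP kthread (unless rcu_nocb_poll)
 *		-> rcuog waits for a grace period to elapse
 *		-> rcuog wakes the CPU's rcuo CB kthread,
 *		   which invokes the callbacks
 */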
/*
 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
 * If the list is invalid, a warning is emitted and all CPUs are offloaded.
 */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	if (*str == '=') {
		if (cpulist_parse(++str, rcu_nocb_mask)) {
			pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
			cpumask_setall(rcu_nocb_mask);
		}
	}
	rcu_state.nocb_is_setup = true;
	return 1;
}
__setup("rcu_nocbs", rcu_nocb_setup);
static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = true;
	return 1;
}
__setup("rcu_nocb_poll", parse_rcu_nocb_poll);
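/*
 * Example kernel command line (illustrative, following the documented
 * semantics of these parameters): offload callbacks from CPUs 1-7 and
 * make the rcuog kthreads poll instead of waiting for wakeups:
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 */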
/*
 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
 * After all, the main point of bypassing is to avoid lock contention
 * on ->nocb_lock, which can only happen at high call_rcu() rates.
 */
static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
module_param(nocb_nobypass_lim_per_jiffy, int, 0);
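/*
 * Worked example (illustrative): the default normalizes to roughly
 * 16,000 direct ->cblist enqueues per second independent of HZ, so
 * with HZ=1000 the limit is 16 per jiffy and with HZ=250 it is
 * 16 * 1000 / 250 = 64 per jiffy.
 */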
/*
 * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
 * lock isn't immediately available, increment ->nocb_lock_contended to
 * flag the contention.
 */
static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
	__acquires(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
		return;
	atomic_inc(&rdp->nocb_lock_contended);
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	smp_mb__after_atomic(); /* atomic_inc() before lock. */
	raw_spin_lock(&rdp->nocb_bypass_lock);
	smp_mb__before_atomic(); /* atomic_dec() after lock. */
	atomic_dec(&rdp->nocb_lock_contended);
}
/*
 * Spinwait until the specified rcu_data structure's ->nocb_lock is
 * not contended.  Please note that this is extremely special-purpose,
 * relying on the fact that at most two kthreads and one CPU contend for
 * this lock, and also that the two kthreads are guaranteed to have frequent
 * grace-period-duration time intervals between successive acquisitions
 * of the lock.  This allows us to use an extremely simple throttling
 * mechanism, and further to apply it only to the CPU doing floods of
 * call_rcu() invocations.  Don't try this at home!
 */
static void rcu_nocb_wait_contended(struct rcu_data *rdp)
{
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
		cpu_relax();
}
/*
 * Conditionally acquire the specified rcu_data structure's
 * ->nocb_bypass_lock.
 */
static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	return raw_spin_trylock(&rdp->nocb_bypass_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_bypass_lock.
 */
static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
	__releases(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	raw_spin_unlock(&rdp->nocb_bypass_lock);
}
/*
 * Acquire the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (!rcu_rdp_is_offloaded(rdp))
		return;
	raw_spin_lock(&rdp->nocb_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock(&rdp->nocb_lock);
	}
}

/*
 * Release the specified rcu_data structure's ->nocb_lock and restore
 * interrupts, but only if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
	} else {
		local_irq_restore(flags);
	}
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (rcu_rdp_is_offloaded(rdp))
		lockdep_assert_held(&rdp->nocb_lock);
}
/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
	swake_up_all(sq);
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
}
static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
			   struct rcu_data *rdp,
			   bool force, unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	bool needwake = false;

	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("AlreadyAwake"));
		return false;
	}

	if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
		del_timer(&rdp_gp->nocb_timer);
	}

	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
		needwake = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
	if (needwake) {
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
		wake_up_process(rdp_gp->nocb_gp_kthread);
	}

	return needwake;
}
/*
 * Kick the GP kthread for this NOCB group.
 */
static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return __wake_nocb_gp(rdp_gp, rdp, force, flags);
}
#ifdef CONFIG_RCU_LAZY
/*
 * LAZY_FLUSH_JIFFIES decides the maximum amount of time that
 * can elapse before lazy callbacks are flushed.  Lazy callbacks
 * could be flushed much earlier for a number of other reasons;
 * however, LAZY_FLUSH_JIFFIES ensures that no lazy callbacks are
 * left unsubmitted to RCU after that many jiffies.
 */
#define LAZY_FLUSH_JIFFIES (10 * HZ)
static unsigned long jiffies_lazy_flush = LAZY_FLUSH_JIFFIES;

// To be called only from test code.
void rcu_set_jiffies_lazy_flush(unsigned long jif)
{
	jiffies_lazy_flush = jif;
}
EXPORT_SYMBOL(rcu_set_jiffies_lazy_flush);

unsigned long rcu_get_jiffies_lazy_flush(void)
{
	return jiffies_lazy_flush;
}
EXPORT_SYMBOL(rcu_get_jiffies_lazy_flush);
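/*
 * Hypothetical test-code usage (illustrative only, not a real caller):
 *
 *	unsigned long old = rcu_get_jiffies_lazy_flush();
 *
 *	rcu_set_jiffies_lazy_flush(HZ / 10);	// Flush lazy CBs quickly.
 *	// ... exercise lazy call_rcu() paths ...
 *	rcu_set_jiffies_lazy_flush(old);	// Restore the previous value.
 */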
/*
 * Arrange to wake the GP kthread for this NOCB group at some future
 * time when it is safe to do so.
 */
static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
			       const char *reason)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);

	/*
	 * Bypass wakeup overrides previous deferments. In case of
	 * callback storms, no need to wake up too early.
	 */
	if (waketype == RCU_NOCB_WAKE_LAZY &&
	    rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
		mod_timer(&rdp_gp->nocb_timer, jiffies + rcu_get_jiffies_lazy_flush());
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	} else if (waketype == RCU_NOCB_WAKE_BYPASS) {
		mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	} else {
		if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
			mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
		if (rdp_gp->nocb_defer_wakeup < waketype)
			WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	}

	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
}
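/*
 * For reference, the deferral levels used above are assumed to be
 * defined in tree.h in increasing order of urgency:
 * RCU_NOCB_WAKE_NOT < RCU_NOCB_WAKE_BYPASS < RCU_NOCB_WAKE_LAZY <
 * RCU_NOCB_WAKE < RCU_NOCB_WAKE_FORCE.  As the comment above notes,
 * a bypass wakeup deliberately overrides an earlier, more urgent
 * deferment to avoid waking too early during callback storms.
 */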
/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Return true if there was something to be flushed and it succeeded, otherwise
 * false.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
				     unsigned long j, bool lazy)
{
	struct rcu_cblist rcl;
	struct rcu_head *rhp = rhp_in;

	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
	rcu_lockdep_assert_cblist_protected(rdp);
	lockdep_assert_held(&rdp->nocb_bypass_lock);
	if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
		raw_spin_unlock(&rdp->nocb_bypass_lock);
		return false;
	}
	/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
	if (rhp)
		rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */

	/*
	 * If the new CB requested was a lazy one, queue it onto the main
	 * ->cblist so that we can take advantage of the grace-period that will
	 * happen regardless. But queue it onto the bypass list first so that
	 * the lazy CB is ordered with the existing CBs in the bypass list.
	 */
	if (lazy && rhp) {
		rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
		rhp = NULL;
	}
	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
	WRITE_ONCE(rdp->lazy_len, 0);

	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
	WRITE_ONCE(rdp->nocb_bypass_first, j);
	rcu_nocb_bypass_unlock(rdp);
	return true;
}
/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if there is a callback to be enqueued and if ->nocb_bypass
 * proves to be initially empty, just return false because the no-CB GP
 * kthread may need to be awakened in this case.
 *
 * Note that this function always returns true if rhp is NULL.
 */
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy)
{
	if (!rcu_rdp_is_offloaded(rdp))
		return true;
	rcu_lockdep_assert_cblist_protected(rdp);
	rcu_nocb_bypass_lock(rdp);
	return rcu_nocb_do_flush_bypass(rdp, rhp, j, lazy);
}
/*
 * If the ->nocb_bypass_lock is immediately available, flush the
 * ->nocb_bypass queue into ->cblist.
 */
static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_rdp_is_offloaded(rdp) ||
	    !rcu_nocb_bypass_trylock(rdp))
		return;
	WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j, false));
}
/*
 * See whether it is appropriate to use the ->nocb_bypass list in order
 * to control contention on ->nocb_lock.  A limited number of direct
 * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
 * is non-empty, further callbacks must be placed into ->nocb_bypass,
 * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
 * back to direct use of ->cblist.  However, ->nocb_bypass should not be
 * used if ->cblist is empty, because otherwise callbacks can be stranded
 * on ->nocb_bypass because we cannot count on the current CPU ever again
 * invoking call_rcu().  The general rule is that if ->nocb_bypass is
 * non-empty, the corresponding no-CBs grace-period kthread must not be
 * in an indefinite sleep state.
 *
 * Finally, it is not permitted to use the bypass during early boot,
 * as doing so would confuse the auto-initialization code.  Besides
 * which, there is no point in worrying about lock contention while
 * there is only one CPU in operation.
 */
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags,
				bool lazy)
{
	unsigned long c;
	unsigned long cur_gp_seq;
	unsigned long j = jiffies;
	long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	bool bypass_is_lazy = (ncbs == READ_ONCE(rdp->lazy_len));

	lockdep_assert_irqs_disabled();

	// Pure softirq/rcuc based processing: no bypassing, no
	// locking.
	if (!rcu_rdp_is_offloaded(rdp)) {
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// In the process of (de-)offloading: no bypassing, but
	// locking.
	if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false; /* Not offloaded, no bypassing. */
	}

	// Don't use ->nocb_bypass during early boot.
	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
		rcu_nocb_lock(rdp);
		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// If we have advanced to a new jiffy, reset counts to allow
	// moving back from ->nocb_bypass to ->cblist.
	if (j == rdp->nocb_nobypass_last) {
		c = rdp->nocb_nobypass_count + 1;
	} else {
		WRITE_ONCE(rdp->nocb_nobypass_last, j);
		c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
		if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
				 nocb_nobypass_lim_per_jiffy))
			c = 0;
		else if (c > nocb_nobypass_lim_per_jiffy)
			c = nocb_nobypass_lim_per_jiffy;
	}
	WRITE_ONCE(rdp->nocb_nobypass_count, c);

	// If there hasn't yet been all that many ->cblist enqueues
	// this jiffy, tell the caller to enqueue onto ->cblist.  But flush
	// ->nocb_bypass first.
	// Lazy CBs throttle this back and do immediate bypass queuing.
	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy && !lazy) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		if (*was_alldone)
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstQ"));
		WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j, false));
		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
		return false; // Caller must enqueue the callback.
	}

	// If ->nocb_bypass has been used too long or is too full,
	// flush ->nocb_bypass to ->cblist.
	if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) ||
	    (ncbs && bypass_is_lazy &&
	     (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()))) ||
	    ncbs >= qhimark) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);

		if (!rcu_nocb_flush_bypass(rdp, rhp, j, lazy)) {
			if (*was_alldone)
				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
						    TPS("FirstQ"));
			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
			return false; // Caller must enqueue the callback.
		}
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}

		// The flush succeeded and we moved CBs into the regular list.
		// Don't wait for the wake up timer as it may be too far ahead.
		// Wake up the GP thread now instead, if the cblist was empty.
		__call_rcu_nocb_wake(rdp, *was_alldone, flags);

		return true; // Callback already enqueued.
	}

	// We need to use the bypass.
	rcu_nocb_wait_contended(rdp);
	rcu_nocb_bypass_lock(rdp);
	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
	rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);

	if (lazy)
		WRITE_ONCE(rdp->lazy_len, rdp->lazy_len + 1);

	if (!ncbs) {
		WRITE_ONCE(rdp->nocb_bypass_first, j);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
	}
	rcu_nocb_bypass_unlock(rdp);
	smp_mb(); /* Order enqueue before wake. */
	// A wake up of the grace period kthread or timer adjustment
	// needs to be done only if:
	// 1. Bypass list was fully empty before (this is the first
	//    bypass list entry), or:
	// 2. Both of these conditions are met:
	//    a. The bypass list previously had only lazy CBs, and:
	//    b. The new CB is non-lazy.
	if (!ncbs || (bypass_is_lazy && !lazy)) {
		// No-CBs GP kthread might be indefinitely asleep, if so, wake.
		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQwake"));
			__call_rcu_nocb_wake(rdp, true, flags);
		} else {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQnoWake"));
			rcu_nocb_unlock(rdp);
		}
	}
	return true; // Callback already enqueued.
}
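/*
 * Restating the contract of rcu_nocb_try_bypass() above (a summary,
 * not taken from elsewhere): a false return means the callback was not
 * queued and the caller must enqueue it onto ->cblist itself, with
 * ->nocb_lock held via rcu_nocb_lock() (a no-op if de-offloaded); a
 * true return means the callback is already queued, either directly
 * onto ->nocb_bypass or via a flush into ->cblist, with no locks held.
 */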
/*
 * Awaken the no-CBs grace-period kthread if needed, either due to it
 * legitimately being asleep or due to overload conditions.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
				 unsigned long flags)
				 __releases(rdp->nocb_lock)
{
	long bypass_len;
	unsigned long cur_gp_seq;
	unsigned long j;
	long lazy_len;
	long len;
	struct task_struct *t;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	// If we are being polled or there is no kthread, just leave.
	t = READ_ONCE(rdp->nocb_gp_kthread);
	if (rcu_nocb_poll || !t) {
		rcu_nocb_unlock(rdp);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	// Need to actually do a wakeup.
	len = rcu_segcblist_n_cbs(&rdp->cblist);
	bypass_len = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	lazy_len = READ_ONCE(rdp->lazy_len);
	if (was_alldone) {
		rdp->qlen_last_fqs_check = len;
		// Only lazy CBs in bypass list
		if (lazy_len && bypass_len == lazy_len) {
			rcu_nocb_unlock(rdp);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
					   TPS("WakeLazy"));
		} else if (!irqs_disabled_flags(flags)) {
			/* ... if queue was empty ... */
			rcu_nocb_unlock(rdp);
			wake_nocb_gp(rdp, false);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("WakeEmpty"));
		} else {
			rcu_nocb_unlock(rdp);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
					   TPS("WakeEmptyIsDeferred"));
		}
	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
		/* ... or if many callbacks queued. */
		rdp->qlen_last_fqs_check = len;
		j = jiffies;
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}
		smp_mb(); /* Enqueue before timer_pending(). */
		if ((rdp->nocb_cb_sleep ||
		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
		    !timer_pending(&rdp_gp->nocb_timer)) {
			rcu_nocb_unlock(rdp);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
					   TPS("WakeOvfIsDeferred"));
		} else {
			rcu_nocb_unlock(rdp);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
		}
	} else {
		rcu_nocb_unlock(rdp);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
	}
}
static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
			  rcu_callback_t func, unsigned long flags, bool lazy)
{
	bool was_alldone;

	if (!rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
		/* Not enqueued on bypass but locked, do regular enqueue. */
		rcutree_enqueue(rdp, head, func);
		__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
	}
}
static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
			      bool *wake_state)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int ret;

	rcu_nocb_lock_irqsave(rdp, flags);
	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
	    !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
		/*
		 * Offloading. Set our flag and notify the offload worker.
		 * We will handle this rdp until it ever gets de-offloaded.
		 */
		rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
		if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
			*wake_state = true;
		ret = 1;
	} else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
		   rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
		/*
		 * De-offloading. Clear our flag and notify the de-offload worker.
		 * We will ignore this rdp until it ever gets re-offloaded.
		 */
		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
			*wake_state = true;
		ret = 0;
	} else {
		WARN_ON_ONCE(1);
		ret = -1;
	}

	rcu_nocb_unlock_irqrestore(rdp, flags);

	return ret;
}
static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
{
	trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
	swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
					    !READ_ONCE(my_rdp->nocb_gp_sleep));
	trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
}
/*
 * No-CBs GP kthreads come here to wait for additional callbacks to show up
 * or for grace periods to end.
 */
static void nocb_gp_wait(struct rcu_data *my_rdp)
{
	bool bypass = false;
	int __maybe_unused cpu = my_rdp->cpu;
	unsigned long cur_gp_seq;
	unsigned long flags;
	bool gotcbs = false;
	unsigned long j = jiffies;
	bool lazy = false;
	bool needwait_gp = false; // This prevents actual uninitialized use.
	bool needwake;
	bool needwake_gp;
	struct rcu_data *rdp, *rdp_toggling = NULL;
	struct rcu_node *rnp;
	unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
	bool wasempty = false;

	/*
	 * Each pass through the following loop checks for CBs and for the
	 * nearest grace period (if any) to wait for next.  The CB kthreads
	 * and the global grace-period kthread are awakened if needed.
	 */
	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
	/*
	 * An rcu_data structure is removed from the list after its
	 * CPU is de-offloaded and added to the list before that CPU is
	 * (re-)offloaded.  If the following loop happens to be referencing
	 * that rcu_data structure during the time that the corresponding
	 * CPU is de-offloaded and then immediately re-offloaded, this
	 * loop's rdp pointer will be carried to the end of the list by
	 * the resulting pair of list operations.  This can cause the loop
	 * to skip over some of the rcu_data structures that were supposed
	 * to have been scanned.  Fortunately a new iteration through the
	 * entire loop is forced after a given CPU's rcu_data structure
	 * is added to the list, so the skipped-over rcu_data structures
	 * won't be ignored for long.
	 */
	list_for_each_entry(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp) {
		long bypass_ncbs;
		bool flush_bypass = false;
		long lazy_ncbs;

		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
		rcu_nocb_lock_irqsave(rdp, flags);
		lockdep_assert_held(&rdp->nocb_lock);
		bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
		lazy_ncbs = READ_ONCE(rdp->lazy_len);

		if (bypass_ncbs && (lazy_ncbs == bypass_ncbs) &&
		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()) ||
		     bypass_ncbs > 2 * qhimark)) {
			flush_bypass = true;
		} else if (bypass_ncbs && (lazy_ncbs != bypass_ncbs) &&
			   (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
			    bypass_ncbs > 2 * qhimark)) {
			flush_bypass = true;
		} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			continue; /* No callbacks here, try next. */
		}

		if (flush_bypass) {
			// Bypass full or old, so flush it.
			(void)rcu_nocb_try_flush_bypass(rdp, j);
			bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
			lazy_ncbs = READ_ONCE(rdp->lazy_len);
		}

		if (bypass_ncbs) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    bypass_ncbs == lazy_ncbs ? TPS("Lazy") : TPS("Bypass"));
			if (bypass_ncbs == lazy_ncbs)
				lazy = true;
			else
				bypass = true;
		}
		rnp = rdp->mynode;

		// Advance callbacks if helpful and low contention.
		needwake_gp = false;
		if (!rcu_segcblist_restempty(&rdp->cblist,
					     RCU_NEXT_READY_TAIL) ||
		    (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
			needwake_gp = rcu_advance_cbs(rnp, rdp);
			wasempty = rcu_segcblist_restempty(&rdp->cblist,
							   RCU_NEXT_READY_TAIL);
			raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
		}
		// Need to wait on some grace period?
		WARN_ON_ONCE(wasempty &&
			     !rcu_segcblist_restempty(&rdp->cblist,
						      RCU_NEXT_READY_TAIL));
		if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
			if (!needwait_gp ||
			    ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
				wait_gp_seq = cur_gp_seq;
			needwait_gp = true;
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("NeedWaitGP"));
		}
		if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
			needwake = rdp->nocb_cb_sleep;
			WRITE_ONCE(rdp->nocb_cb_sleep, false);
		} else {
			needwake = false;
		}
		rcu_nocb_unlock_irqrestore(rdp, flags);
		if (needwake) {
			swake_up_one(&rdp->nocb_cb_wq);
			gotcbs = true;
		}
		if (needwake_gp)
			rcu_gp_kthread_wake();
	}

	my_rdp->nocb_gp_bypass = bypass;
	my_rdp->nocb_gp_gp = needwait_gp;
	my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;

	// At least one child with non-empty ->nocb_bypass, so set
	// timer in order to avoid stranding its callbacks.
	if (!rcu_nocb_poll) {
		// If the bypass list has only lazy CBs, add a deferred lazy wakeup.
		if (lazy && !bypass) {
			wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_LAZY,
					   TPS("WakeLazyIsDeferred"));
		// Otherwise add a deferred bypass wakeup.
		} else if (bypass) {
			wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
					   TPS("WakeBypassIsDeferred"));
		}
	}

	if (rcu_nocb_poll) {
		/* Polling, so trace if first poll in the series. */
		if (gotcbs)
			trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
		if (list_empty(&my_rdp->nocb_head_rdp)) {
			raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
			if (!my_rdp->nocb_toggling_rdp)
				WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
			raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
			/* Wait for any offloading rdp */
			nocb_gp_sleep(my_rdp, cpu);
		} else {
			schedule_timeout_idle(1);
		}
	} else if (!needwait_gp) {
		/* Wait for callbacks to appear. */
		nocb_gp_sleep(my_rdp, cpu);
	} else {
		rnp = my_rdp->mynode;
		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
		swait_event_interruptible_exclusive(
			rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
			rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
			!READ_ONCE(my_rdp->nocb_gp_sleep));
		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
	}

	if (!rcu_nocb_poll) {
		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
		// (De-)queue an rdp to/from the group if its nocb state is changing
		rdp_toggling = my_rdp->nocb_toggling_rdp;
		if (rdp_toggling)
			my_rdp->nocb_toggling_rdp = NULL;

		if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
			WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
			del_timer(&my_rdp->nocb_timer);
		}
		WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
	} else {
		rdp_toggling = READ_ONCE(my_rdp->nocb_toggling_rdp);
		if (rdp_toggling) {
			/*
			 * Paranoid locking to make sure nocb_toggling_rdp is well
			 * reset *before* we (re)set SEGCBLIST_KTHREAD_GP or we could
			 * race with another round of nocb toggling for this rdp.
			 * Nocb locking should already prevent that, but we stick
			 * to paranoia, especially in this rare path.
			 */
			raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
			my_rdp->nocb_toggling_rdp = NULL;
			raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
		}
	}

	if (rdp_toggling) {
		bool wake_state = false;
		int ret;

		ret = nocb_gp_toggle_rdp(rdp_toggling, &wake_state);
		if (ret == 1)
			list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
		else if (ret == 0)
			list_del(&rdp_toggling->nocb_entry_rdp);
		if (wake_state)
			swake_up_one(&rdp_toggling->nocb_state_wq);
	}

	my_rdp->nocb_gp_seq = -1;
	WARN_ON(signal_pending(current));
}
/*
 * No-CBs grace-period-wait kthread.  There is one of these per group
 * of CPUs, but only once at least one CPU in that group has come online
 * at least once since boot.  This kthread checks for newly posted
 * callbacks from any of the CPUs it is responsible for, waits for a
 * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
 * that then have callback-invocation work to do.
 */
static int rcu_nocb_gp_kthread(void *arg)
{
	struct rcu_data *rdp = arg;

	for (;;) {
		WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
		nocb_gp_wait(rdp);
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}
static inline bool nocb_cb_can_run(struct rcu_data *rdp)
{
	u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;

	return rcu_segcblist_test_flags(&rdp->cblist, flags);
}

static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
{
	return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
}
/*
 * Invoke any ready callbacks from the corresponding no-CBs CPU,
 * then, if there are no more, wait for more to appear.
 */
static void nocb_cb_wait(struct rcu_data *rdp)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long cur_gp_seq;
	unsigned long flags;
	bool needwake_state = false;
	bool needwake_gp = false;
	bool can_sleep = true;
	struct rcu_node *rnp = rdp->mynode;

	do {
		swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
						    nocb_cb_wait_cond(rdp));

		if (READ_ONCE(rdp->nocb_cb_sleep)) {
			WARN_ON(signal_pending(current));
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
		}
	} while (!nocb_cb_can_run(rdp));

	local_irq_save(flags);
	rcu_momentary_dyntick_idle();
	local_irq_restore(flags);
	/*
	 * Disable BH to provide the expected environment.  Also, when
	 * transitioning to/from NOCB mode, a self-requeuing callback might
	 * be invoked from softirq.  A short grace period could cause both
	 * instances of this callback to execute concurrently.
	 */
	local_bh_disable();
	rcu_do_batch(rdp);
	local_bh_enable();
	lockdep_assert_irqs_enabled();
	rcu_nocb_lock_irqsave(rdp, flags);
	if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
		needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	}

	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
			rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
			if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
				needwake_state = true;
		}
		if (rcu_segcblist_ready_cbs(cblist))
			can_sleep = false;
	} else {
		/*
		 * De-offloading. Clear our flag and notify the de-offload worker.
		 * We won't touch the callbacks and keep sleeping until we ever
		 * get re-offloaded.
		 */
		WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
			needwake_state = true;
	}

	WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);

	if (rdp->nocb_cb_sleep)
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));

	rcu_nocb_unlock_irqrestore(rdp, flags);
	if (needwake_gp)
		rcu_gp_kthread_wake();

	if (needwake_state)
		swake_up_one(&rdp->nocb_state_wq);
}
/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
 * nocb_cb_wait() to do the dirty work.
 */
static int rcu_nocb_cb_kthread(void *arg)
{
	struct rcu_data *rdp = arg;

	// Each pass through this loop does one callback batch, and,
	// if there are no more ready callbacks, waits for them.
	for (;;) {
		nocb_cb_wait(rdp);
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}
/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
	return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
}
/* Do a deferred wakeup of rcu_nocb_kthread(). */
static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
					   struct rcu_data *rdp, int level,
					   unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	int ndw;
	int ret;

	if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		return false;
	}

	ndw = rdp_gp->nocb_defer_wakeup;
	ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));

	return ret;
}
/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
{
	unsigned long flags;
	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);

	WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));

	raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
	smp_mb__after_spinlock(); /* Timer expire before wakeup. */
	do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
}
/*
 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
 * This means we do an inexact common-case check.  Note that if
 * we miss, ->nocb_timer will eventually clean things up.
 */
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
		return false;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
}

void rcu_nocb_flush_deferred_wakeup(void)
{
	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
}
EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
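/*
 * Usage note (an assumption about callers outside this file): the idle
 * loop is expected to call rcu_nocb_flush_deferred_wakeup() on idle
 * entry, because a deferred wakeup left pending on a CPU about to idle
 * would otherwise have to wait for ->nocb_timer to fire.
 */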
static int rdp_offload_toggle(struct rcu_data *rdp,
			      bool offload, unsigned long flags)
	__releases(rdp->nocb_lock)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
	bool wake_gp = false;

	rcu_segcblist_offload(cblist, offload);

	if (rdp->nocb_cb_sleep)
		rdp->nocb_cb_sleep = false;
	rcu_nocb_unlock_irqrestore(rdp, flags);

	/*
	 * Ignore former value of nocb_cb_sleep and force wake up as it could
	 * have been spuriously set to false already.
	 */
	swake_up_one(&rdp->nocb_cb_wq);

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	// Queue this rdp for add/del to/from the list to iterate on rcuog
	WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
	if (rdp_gp->nocb_gp_sleep) {
		rdp_gp->nocb_gp_sleep = false;
		wake_gp = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	return wake_gp;
}
static long rcu_nocb_rdp_deoffload(void *arg)
{
	struct rcu_data *rdp = arg;
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int wake_gp;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	/*
	 * rcu_nocb_rdp_deoffload() may be called directly if
	 * rcuog/o[p] spawn failed, because at this time the rdp->cpu
	 * is not online yet.
	 */
	WARN_ON_ONCE((rdp->cpu != raw_smp_processor_id()) && cpu_online(rdp->cpu));

	pr_info("De-offloading %d\n", rdp->cpu);

	rcu_nocb_lock_irqsave(rdp, flags);
	/*
	 * Flush once and for all now. This suffices because we are
	 * running on the target CPU holding ->nocb_lock (thus having
	 * interrupts disabled), and because rdp_offload_toggle()
	 * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
	 * Thus future calls to rcu_segcblist_completely_offloaded() will
	 * return false, which means that future calls to rcu_nocb_try_bypass()
	 * will refuse to put anything into the bypass.
	 */
	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
	/*
	 * Start with invoking rcu_core() early. This way if the current thread
	 * happens to preempt an ongoing call to rcu_core() in the middle,
	 * leaving some work dismissed because rcu_core() still thinks the rdp is
	 * completely offloaded, we are guaranteed a nearby future instance of
	 * rcu_core() to catch up.
	 */
	rcu_segcblist_set_flags(cblist, SEGCBLIST_RCU_CORE);
	invoke_rcu_core();
	wake_gp = rdp_offload_toggle(rdp, false, flags);

	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
	if (rdp_gp->nocb_gp_kthread) {
		if (wake_gp)
			wake_up_process(rdp_gp->nocb_gp_kthread);

		/*
		 * If rcuo[p] kthread spawn failed, directly remove SEGCBLIST_KTHREAD_CB.
		 * Just wait for SEGCBLIST_KTHREAD_GP to be cleared by rcuog.
		 */
		if (!rdp->nocb_cb_kthread) {
			rcu_nocb_lock_irqsave(rdp, flags);
			rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
			rcu_nocb_unlock_irqrestore(rdp, flags);
		}

		swait_event_exclusive(rdp->nocb_state_wq,
				      !rcu_segcblist_test_flags(cblist,
						SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP));
	} else {
		/*
		 * No kthread to clear the flags for us or remove the rdp from the nocb list
		 * to iterate. Do it here instead. Locking doesn't look strictly necessary,
		 * but we stick to paranoia in this rare path.
		 */
		rcu_nocb_lock_irqsave(rdp, flags);
		rcu_segcblist_clear_flags(&rdp->cblist,
					  SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
		rcu_nocb_unlock_irqrestore(rdp, flags);

		list_del(&rdp->nocb_entry_rdp);
	}
	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);

	/*
	 * Lock one last time to acquire latest callback updates from kthreads
	 * so we can later handle callbacks locally without locking.
	 */
	rcu_nocb_lock_irqsave(rdp, flags);
	/*
	 * Theoretically we could clear SEGCBLIST_LOCKING after the nocb
	 * lock is released but how about being paranoid for once?
	 */
	rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING);
	/*
	 * Without SEGCBLIST_LOCKING, we can't use
	 * rcu_nocb_unlock_irqrestore() anymore.
	 */
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);

	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));

	return 0;
}
int rcu_nocb_cpu_deoffload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	cpus_read_lock();
	mutex_lock(&rcu_state.barrier_mutex);
	if (rcu_rdp_is_offloaded(rdp)) {
		if (cpu_online(cpu)) {
			ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
			if (!ret)
				cpumask_clear_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Cannot CB-deoffload offline CPU %d\n", rdp->cpu);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&rcu_state.barrier_mutex);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
static long rcu_nocb_rdp_offload(void *arg)
{
	struct rcu_data *rdp = arg;
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int wake_gp;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
	/*
	 * For now we only support re-offload, ie: the rdp must have been
	 * offloaded on boot first.
	 */
	if (!rdp->nocb_gp_rdp)
		return -EINVAL;

	if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread))
		return -EINVAL;

	pr_info("Offloading %d\n", rdp->cpu);

	/*
	 * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
	 * is set.
	 */
	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);

	/*
	 * We didn't take the nocb lock while working on the
	 * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode).
	 * Every modification previously done on rdp->cblist must therefore
	 * be visible remotely to the nocb kthreads
	 * upon wake up after reading the cblist flags.
	 *
	 * The layout against nocb_lock enforces that ordering:
	 *
	 *  __rcu_nocb_rdp_offload()	nocb_cb_wait()/nocb_gp_wait()
	 * -------------------------   ----------------------------
	 *      WRITE callbacks		   rcu_nocb_lock()
	 *      rcu_nocb_lock()		   READ flags
	 *      WRITE flags		   READ callbacks
	 *      rcu_nocb_unlock()	   rcu_nocb_unlock()
	 */
	wake_gp = rdp_offload_toggle(rdp, true, flags);
	if (wake_gp)
		wake_up_process(rdp_gp->nocb_gp_kthread);
	swait_event_exclusive(rdp->nocb_state_wq,
			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));

	/*
	 * All kthreads are ready to work, we can finally relieve rcu_core() and
	 * enable nocb bypass.
	 */
	rcu_nocb_lock_irqsave(rdp, flags);
	rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE);
	rcu_nocb_unlock_irqrestore(rdp, flags);

	return 0;
}
int rcu_nocb_cpu_offload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	cpus_read_lock();
	mutex_lock(&rcu_state.barrier_mutex);
	if (!rcu_rdp_is_offloaded(rdp)) {
		if (cpu_online(cpu)) {
			ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
			if (!ret)
				cpumask_set_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Cannot CB-offload offline CPU %d\n", rdp->cpu);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&rcu_state.barrier_mutex);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
#ifdef CONFIG_RCU_LAZY
static unsigned long
lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	int cpu;
	unsigned long count = 0;

	if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
		return 0;

	/* Protect rcu_nocb_mask against concurrent (de-)offloading. */
	if (!mutex_trylock(&rcu_state.barrier_mutex))
		return 0;

	/* Snapshot count of all CPUs */
	for_each_cpu(cpu, rcu_nocb_mask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

		count += READ_ONCE(rdp->lazy_len);
	}

	mutex_unlock(&rcu_state.barrier_mutex);

	return count ? count : SHRINK_EMPTY;
}
static unsigned long
lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	int cpu;
	unsigned long flags;
	unsigned long count = 0;

	if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
		return 0;
	/*
	 * Protect against concurrent (de-)offloading. Otherwise nocb locking
	 * may be ignored or imbalanced.
	 */
	if (!mutex_trylock(&rcu_state.barrier_mutex)) {
		/*
		 * But really don't insist if barrier_mutex is contended since we
		 * can't guarantee that it will never engage in a dependency
		 * chain involving memory allocation. The lock is seldom contended
		 * anyway.
		 */
		return 0;
	}

	/* Snapshot count of all CPUs */
	for_each_cpu(cpu, rcu_nocb_mask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		int _count;

		if (WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp)))
			continue;

		if (!READ_ONCE(rdp->lazy_len))
			continue;

		rcu_nocb_lock_irqsave(rdp, flags);
		/*
		 * Recheck under the nocb lock. Since we are not holding the bypass
		 * lock we may still race with increments from the enqueuer but still
		 * we know for sure if there is at least one lazy callback.
		 */
		_count = READ_ONCE(rdp->lazy_len);
		if (!_count) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			continue;
		}
		rcu_nocb_try_flush_bypass(rdp, jiffies);
		rcu_nocb_unlock_irqrestore(rdp, flags);
		wake_nocb_gp(rdp, false);
		sc->nr_to_scan -= _count;
		count += _count;
		if (sc->nr_to_scan <= 0)
			break;
	}

	mutex_unlock(&rcu_state.barrier_mutex);

	return count ? count : SHRINK_STOP;
}
#endif // #ifdef CONFIG_RCU_LAZY
void __init rcu_init_nohz(void)
{
	int cpu;
	struct rcu_data *rdp;
	const struct cpumask *cpumask = NULL;
	struct shrinker * __maybe_unused lazy_rcu_shrinker;

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask))
		cpumask = tick_nohz_full_mask;
#endif

	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL) &&
	    !rcu_state.nocb_is_setup && !cpumask)
		cpumask = cpu_possible_mask;

	if (cpumask) {
		if (!cpumask_available(rcu_nocb_mask)) {
			if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
				pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
				return;
			}
		}

		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, cpumask);
		rcu_state.nocb_is_setup = true;
	}

	if (!rcu_state.nocb_is_setup)
		return;

#ifdef CONFIG_RCU_LAZY
	lazy_rcu_shrinker = shrinker_alloc(0, "rcu-lazy");
	if (!lazy_rcu_shrinker) {
		pr_err("Failed to allocate lazy_rcu shrinker!\n");
	} else {
		lazy_rcu_shrinker->count_objects = lazy_rcu_shrink_count;
		lazy_rcu_shrinker->scan_objects = lazy_rcu_shrink_scan;

		shrinker_register(lazy_rcu_shrinker);
	}
#endif // #ifdef CONFIG_RCU_LAZY

	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
			    rcu_nocb_mask);
	}
	if (cpumask_empty(rcu_nocb_mask))
		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
	else
		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
			cpumask_pr_args(rcu_nocb_mask));
	if (rcu_nocb_poll)
		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");

	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rcu_segcblist_empty(&rdp->cblist))
			rcu_segcblist_init(&rdp->cblist);
		rcu_segcblist_offload(&rdp->cblist, true);
		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
	}
	rcu_organize_nocb_kthreads();
}
/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
	init_swait_queue_head(&rdp->nocb_cb_wq);
	init_swait_queue_head(&rdp->nocb_gp_wq);
	init_swait_queue_head(&rdp->nocb_state_wq);
	raw_spin_lock_init(&rdp->nocb_lock);
	raw_spin_lock_init(&rdp->nocb_bypass_lock);
	raw_spin_lock_init(&rdp->nocb_gp_lock);
	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
	rcu_cblist_init(&rdp->nocb_bypass);
	WRITE_ONCE(rdp->lazy_len, 0);
	mutex_init(&rdp->nocb_gp_kthread_mutex);
}
/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
 * for this CPU's group has not yet been created, spawn it as well.
 */
static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_data *rdp_gp;
	struct task_struct *t;
	struct sched_param sp;

	if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup)
		return;

	/* If there already is an rcuo kthread, then nothing to do. */
	if (rdp->nocb_cb_kthread)
		return;

	/* If we didn't spawn the GP kthread first, reorganize! */
	sp.sched_priority = kthread_prio;
	rdp_gp = rdp->nocb_gp_rdp;
	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
	if (!rdp_gp->nocb_gp_kthread) {
		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
				"rcuog/%d", rdp_gp->cpu);
		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) {
			mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
			goto err;
		}
		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
		if (kthread_prio)
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}
	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);

	/* Spawn the kthread for this CPU. */
	t = kthread_run(rcu_nocb_cb_kthread, rdp,
			"rcuo%c/%d", rcu_state.abbr, cpu);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
		goto err;

	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio)
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);

	WRITE_ONCE(rdp->nocb_cb_kthread, t);
	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
	return;

err:
	mutex_lock(&rcu_state.barrier_mutex);
	if (rcu_rdp_is_offloaded(rdp)) {
		rcu_nocb_rdp_deoffload(rdp);
		cpumask_clear_cpu(cpu, rcu_nocb_mask);
	}
	mutex_unlock(&rcu_state.barrier_mutex);
}
/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_gp_stride = -1;
module_param(rcu_nocb_gp_stride, int, 0444);
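/*
 * Worked example (illustrative): with nr_cpu_ids = 64, the default
 * stride below works out to 64 / int_sqrt(64) = 8, so CPUs 0-7 are
 * serviced by the rcuog kthread associated with CPU 0, CPUs 8-15 by
 * the one associated with CPU 8, and so on.
 */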
/*
 * Initialize GP-CB relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(void)
{
	int cpu;
	bool firsttime = true;
	bool gotnocbs = false;
	bool gotnocbscbs = true;
	int ls = rcu_nocb_gp_stride;
	int nl = 0;  /* Next GP kthread. */
	struct rcu_data *rdp;
	struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */

	if (!cpumask_available(rcu_nocb_mask))
		return;
	if (ls == -1) {
		ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
		rcu_nocb_gp_stride = ls;
	}

	/*
	 * Each pass through this loop sets up one rcu_data structure.
	 * Should the corresponding CPU come online in the future, then
	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
	 */
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rdp->cpu >= nl) {
			/* New GP kthread, set up for CBs & next GP. */
			gotnocbs = true;
			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
			rdp_gp = rdp;
			INIT_LIST_HEAD(&rdp->nocb_head_rdp);
			if (dump_tree) {
				if (!firsttime)
					pr_cont("%s\n", gotnocbscbs
						? "" : " (self only)");
				gotnocbscbs = false;
				firsttime = false;
				pr_alert("%s: No-CB GP kthread CPU %d:",
					 __func__, cpu);
			}
		} else {
			/* Another CB kthread, link to previous GP kthread. */
			gotnocbscbs = true;
			if (dump_tree)
				pr_cont(" %d", cpu);
		}
		rdp->nocb_gp_rdp = rdp_gp;
		if (cpumask_test_cpu(cpu, rcu_nocb_mask))
			list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
	}
	if (gotnocbs && dump_tree)
		pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
}
/*
 * Bind the current task to the offloaded CPUs.  If there are no offloaded
 * CPUs, leave the task unbound.  Splat if the bind attempt fails.
 */
void rcu_bind_current_to_nocb(void)
{
	if (cpumask_available(rcu_nocb_mask) && !cpumask_empty(rcu_nocb_mask))
		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
}
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
// The ->on_cpu field is available only in CONFIG_SMP=y, so...
#ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
	return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
}
#else // #ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
	return "";
}
#endif // #else #ifdef CONFIG_SMP
/*
 * Dump out nocb grace-period kthread state for the specified rcu_data
 * structure.
 */
static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
{
	struct rcu_node *rnp = rdp->mynode;

	pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
		rdp->cpu,
		"kK"[!!rdp->nocb_gp_kthread],
		"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
		"dD"[!!rdp->nocb_defer_wakeup],
		"tT"[timer_pending(&rdp->nocb_timer)],
		"sS"[!!rdp->nocb_gp_sleep],
		".W"[swait_active(&rdp->nocb_gp_wq)],
		".W"[swait_active(&rnp->nocb_gp_wq[0])],
		".W"[swait_active(&rnp->nocb_gp_wq[1])],
		".B"[!!rdp->nocb_gp_bypass],
		".G"[!!rdp->nocb_gp_gp],
		(long)rdp->nocb_gp_seq,
		rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
		rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
		rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
		show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
}
/* Dump out nocb kthread state for the specified rcu_data structure. */
static void show_rcu_nocb_state(struct rcu_data *rdp)
{
	char bufw[20];
	char bufr[20];
	struct rcu_data *nocb_next_rdp;
	struct rcu_segcblist *rsclp = &rdp->cblist;
	bool waslocked;
	bool wassleep;

	if (rdp->nocb_gp_rdp == rdp)
		show_rcu_nocb_gp_state(rdp);

	nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp,
					      &rdp->nocb_entry_rdp,
					      typeof(*rdp),
					      nocb_entry_rdp);

	sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
	sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
	pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
		rdp->cpu, rdp->nocb_gp_rdp->cpu,
		nocb_next_rdp ? nocb_next_rdp->cpu : -1,
		"kK"[!!rdp->nocb_cb_kthread],
		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
		"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
		"sS"[!!rdp->nocb_cb_sleep],
		".W"[swait_active(&rdp->nocb_cb_wq)],
		jiffies - rdp->nocb_bypass_first,
		jiffies - rdp->nocb_nobypass_last,
		rdp->nocb_nobypass_count,
		".D"[rcu_segcblist_ready_cbs(rsclp)],
		".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
		rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
		".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
		rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
		".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
		".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
		rcu_segcblist_n_cbs(&rdp->cblist),
		rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
		rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
		show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));

	/* It is OK for GP kthreads to have GP state. */
	if (rdp->nocb_gp_rdp == rdp)
		return;

	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
	wassleep = swait_active(&rdp->nocb_gp_wq);
	if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
		return; /* Nothing untoward. */

	pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
		"lL"[waslocked],
		"dD"[!!rdp->nocb_defer_wakeup],
		"sS"[!!rdp->nocb_gp_sleep],
		".W"[wassleep]);
}
#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
	return 0;
}

static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	return false;
}

/* No ->nocb_lock to acquire. */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release. */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release. */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	local_irq_restore(flags);
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
}

static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return NULL;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
	return false;
}

static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j, bool lazy)
{
	return true;
}

static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
			  rcu_callback_t func, unsigned long flags, bool lazy)
{
	WARN_ON_ONCE(1); /* Should be dead code! */
}

static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags)
{
	WARN_ON_ONCE(1); /* Should be dead code! */
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
	return 0;
}

static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	return false;
}

static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
}

static void show_rcu_nocb_state(struct rcu_data *rdp)
{
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */