1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Task-based RCU implementations.
5 * Copyright (C) 2020 Paul E. McKenney
8 #ifdef CONFIG_TASKS_RCU_GENERIC
9 #include "rcu_segcblist.h"
11 ////////////////////////////////////////////////////////////////////////
13 // Generic data structures.
16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
17 typedef void (*pregp_func_t)(void);
18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
19 typedef void (*postscan_func_t)(struct list_head *hop);
20 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
25 * @cblist: Callback list.
26 * @lock: Lock protecting per-CPU callback list.
27 * @rtp_jiffies: Jiffies counter value for statistics.
28 * @rtp_n_lock_retries: Rough lock-contention statistic.
29 * @rtp_work: Work queue for invoking callbacks.
30 * @rtp_irq_work: IRQ work queue for deferred wakeups.
31 * @barrier_q_head: RCU callback for barrier operation.
32 * @cpu: CPU number corresponding to this entry.
33 * @rtpp: Pointer to the rcu_tasks structure.
35 struct rcu_tasks_percpu {
36 struct rcu_segcblist cblist;
37 raw_spinlock_t __private lock;
38 unsigned long rtp_jiffies;
39 unsigned long rtp_n_lock_retries;
40 struct work_struct rtp_work;
41 struct irq_work rtp_irq_work;
42 struct rcu_head barrier_q_head;
44 struct rcu_tasks *rtpp;
48 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
49 * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
50 * @cbs_gbl_lock: Lock protecting callback list.
51 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
52 * @gp_func: This flavor's grace-period-wait function.
53 * @gp_state: Grace period's most recent state transition (debugging).
54 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
55 * @init_fract: Initial backoff sleep interval.
56 * @gp_jiffies: Time of last @gp_state transition.
57 * @gp_start: Most recent grace-period start in jiffies.
58 * @tasks_gp_seq: Number of grace periods completed since boot.
59 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
60 * @n_ipis_fails: Number of IPI-send failures.
61 * @pregp_func: This flavor's pre-grace-period function (optional).
62 * @pertask_func: This flavor's per-task scan function (optional).
63 * @postscan_func: This flavor's post-task scan function (optional).
64 * @holdouts_func: This flavor's holdout-list scan function (optional).
65 * @postgp_func: This flavor's post-grace-period function (optional).
66 * @call_func: This flavor's call_rcu()-equivalent function.
67 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
68 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
69 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
70 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
71 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
72 * @barrier_q_mutex: Serialize barrier operations.
73 * @barrier_q_count: Number of queues being waited on.
74 * @barrier_q_completion: Barrier wait/wakeup mechanism.
75 * @barrier_q_seq: Sequence number for barrier operations.
76 * @name: This flavor's textual name.
77 * @kname: This flavor's kthread name.
80 struct wait_queue_head cbs_wq;
81 raw_spinlock_t cbs_gbl_lock;
85 unsigned long gp_jiffies;
86 unsigned long gp_start;
87 unsigned long tasks_gp_seq;
89 unsigned long n_ipis_fails;
90 struct task_struct *kthread_ptr;
91 rcu_tasks_gp_func_t gp_func;
92 pregp_func_t pregp_func;
93 pertask_func_t pertask_func;
94 postscan_func_t postscan_func;
95 holdouts_func_t holdouts_func;
96 postgp_func_t postgp_func;
97 call_rcu_func_t call_func;
98 struct rcu_tasks_percpu __percpu *rtpcpu;
99 int percpu_enqueue_shift;
100 int percpu_enqueue_lim;
101 int percpu_dequeue_lim;
102 unsigned long percpu_dequeue_gpseq;
103 struct mutex barrier_q_mutex;
104 atomic_t barrier_q_count;
105 struct completion barrier_q_completion;
106 unsigned long barrier_q_seq;
111 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
113 #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
114 static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
115 .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
116 .rtp_irq_work = IRQ_WORK_INIT(call_rcu_tasks_iw_wakeup), \
118 static struct rcu_tasks rt_name = \
120 .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \
121 .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
124 .rtpcpu = &rt_name ## __percpu, \
126 .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
127 .percpu_enqueue_lim = 1, \
128 .percpu_dequeue_lim = 1, \
129 .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
130 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
134 /* Track exiting tasks in order to allow them to be waited for. */
135 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
137 /* Avoid IPIing CPUs early in the grace period. */
138 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
139 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
140 module_param(rcu_task_ipi_delay, int, 0644);
142 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
143 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
144 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
145 module_param(rcu_task_stall_timeout, int, 0644);
147 static int rcu_task_enqueue_lim __read_mostly = -1;
148 module_param(rcu_task_enqueue_lim, int, 0444);
150 static bool rcu_task_cb_adjust;
151 static int rcu_task_contend_lim __read_mostly = 100;
152 module_param(rcu_task_contend_lim, int, 0444);
153 static int rcu_task_collapse_lim __read_mostly = 10;
154 module_param(rcu_task_collapse_lim, int, 0444);
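/*
 * Illustrative note (not part of the original file): when this file is
 * built into kernel/rcu/update.c, these parameters normally appear under
 * the "rcupdate." prefix, so a boot-time adjustment might look like:
 *
 *	rcupdate.rcu_task_stall_timeout=3000
 *	rcupdate.rcu_task_enqueue_lim=4
 *
 * The 0444-mode parameters (rcu_task_enqueue_lim, rcu_task_contend_lim,
 * and rcu_task_collapse_lim) are read-only at run time and so can only be
 * set at boot, while the 0644-mode parameters can typically also be
 * adjusted later through /sys/module/rcupdate/parameters/.
 */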
156 /* RCU tasks grace-period state for debugging. */
158 #define RTGS_WAIT_WAIT_CBS 1
159 #define RTGS_WAIT_GP 2
160 #define RTGS_PRE_WAIT_GP 3
161 #define RTGS_SCAN_TASKLIST 4
162 #define RTGS_POST_SCAN_TASKLIST 5
163 #define RTGS_WAIT_SCAN_HOLDOUTS 6
164 #define RTGS_SCAN_HOLDOUTS 7
165 #define RTGS_POST_GP 8
166 #define RTGS_WAIT_READERS 9
167 #define RTGS_INVOKE_CBS 10
168 #define RTGS_WAIT_CBS 11
169 #ifndef CONFIG_TINY_RCU
170 static const char * const rcu_tasks_gp_state_names[] = {
172 "RTGS_WAIT_WAIT_CBS",
175 "RTGS_SCAN_TASKLIST",
176 "RTGS_POST_SCAN_TASKLIST",
177 "RTGS_WAIT_SCAN_HOLDOUTS",
178 "RTGS_SCAN_HOLDOUTS",
184 #endif /* #ifndef CONFIG_TINY_RCU */
186 ////////////////////////////////////////////////////////////////////////
190 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
192 /* Record grace-period phase and time. */
193 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
195 rtp->gp_state = newstate;
196 rtp->gp_jiffies = jiffies;
199 #ifndef CONFIG_TINY_RCU
200 /* Return state name. */
201 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
203 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
204 int j = READ_ONCE(i); // Prevent the compiler from reading twice
206 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
208 return rcu_tasks_gp_state_names[j];
210 #endif /* #ifndef CONFIG_TINY_RCU */
212 // Initialize per-CPU callback lists for the specified flavor of
213 // Tasks RCU.
214 static void cblist_init_generic(struct rcu_tasks *rtp)
221 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
222 if (rcu_task_enqueue_lim < 0) {
223 rcu_task_enqueue_lim = 1;
224 rcu_task_cb_adjust = true;
225 pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
226 } else if (rcu_task_enqueue_lim == 0) {
227 rcu_task_enqueue_lim = 1;
229 lim = rcu_task_enqueue_lim;
231 if (lim > nr_cpu_ids)
232 	lim = nr_cpu_ids;
233 shift = ilog2(nr_cpu_ids / lim);
234 if (((nr_cpu_ids - 1) >> shift) >= lim)
235 	shift++;
236 WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
237 WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
238 smp_store_release(&rtp->percpu_enqueue_lim, lim);
239 for_each_possible_cpu(cpu) {
240 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
242 WARN_ON_ONCE(!rtpcp);
244 raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
245 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
246 if (rcu_segcblist_empty(&rtpcp->cblist))
247 rcu_segcblist_init(&rtpcp->cblist);
248 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
251 raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
253 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
254 pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
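/*
 * Worked example (illustrative only): with nr_cpu_ids = 16 and
 * rcu_task_enqueue_lim = 4, the code above computes shift =
 * ilog2(16 / 4) = 2, and because (16 - 1) >> 2 = 3 is less than 4 the
 * shift is not incremented.  Callbacks from CPUs 0-3 then map to queue 0,
 * CPUs 4-7 to queue 1, CPUs 8-11 to queue 2, and CPUs 12-15 to queue 3,
 * giving the requested four queues.  With the default boot-time setting
 * (rcu_task_enqueue_lim < 0), lim is forced to 1, so all CPUs enqueue on
 * queue 0 until lock contention triggers the switch to per-CPU queuing.
 */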
257 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
258 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
260 struct rcu_tasks *rtp;
261 struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
263 rtp = rtpcp->rtpp;
264 wake_up(&rtp->cbs_wq);
267 // Enqueue a callback for the specified flavor of Tasks RCU.
268 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
269 struct rcu_tasks *rtp)
273 bool needadjust = false;
275 struct rcu_tasks_percpu *rtpcp;
279 local_irq_save(flags);
281 rtpcp = per_cpu_ptr(rtp->rtpcpu,
282 smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift));
283 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
284 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
285 j = jiffies;
286 if (rtpcp->rtp_jiffies != j) {
287 rtpcp->rtp_jiffies = j;
288 rtpcp->rtp_n_lock_retries = 0;
290 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
291 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
292 needadjust = true; // Defer adjustment to avoid deadlock.
294 if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
295 raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
296 cblist_init_generic(rtp);
297 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
299 needwake = rcu_segcblist_empty(&rtpcp->cblist);
300 rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
301 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
302 if (unlikely(needadjust)) {
303 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
304 if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
305 WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
306 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
307 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
308 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
310 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
313 /* The kthread is not created until interrupts are enabled, so it might not yet exist. */
314 if (needwake && READ_ONCE(rtp->kthread_ptr))
315 irq_work_queue(&rtpcp->rtp_irq_work);
318 // Wait for a grace period for the specified flavor of Tasks RCU.
319 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
321 /* Complain if the scheduler has not started. */
322 RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
323 "synchronize_rcu_tasks called too soon");
325 /* Wait for the grace period. */
326 wait_rcu_gp(rtp->call_func);
329 // RCU callback function for rcu_barrier_tasks_generic().
330 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
332 struct rcu_tasks *rtp;
333 struct rcu_tasks_percpu *rtpcp;
335 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
336 rtp = rtpcp->rtpp;
337 if (atomic_dec_and_test(&rtp->barrier_q_count))
338 complete(&rtp->barrier_q_completion);
341 // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
342 // Operates in a manner similar to rcu_barrier().
343 static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
347 struct rcu_tasks_percpu *rtpcp;
348 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
350 mutex_lock(&rtp->barrier_q_mutex);
351 if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
353 mutex_unlock(&rtp->barrier_q_mutex);
356 rcu_seq_start(&rtp->barrier_q_seq);
357 init_completion(&rtp->barrier_q_completion);
358 atomic_set(&rtp->barrier_q_count, 2);
359 for_each_possible_cpu(cpu) {
360 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
362 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
363 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
364 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
365 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
366 atomic_inc(&rtp->barrier_q_count);
367 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
369 if (atomic_sub_and_test(2, &rtp->barrier_q_count))
370 complete(&rtp->barrier_q_completion);
371 wait_for_completion(&rtp->barrier_q_completion);
372 rcu_seq_end(&rtp->barrier_q_seq);
373 mutex_unlock(&rtp->barrier_q_mutex);
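/*
 * Note on the counting above (added for clarity): barrier_q_count starts
 * at 2 so that it cannot reach zero while barrier callbacks are still
 * being entrained.  Each successful rcu_segcblist_entrain() adds one, and
 * each invocation of rcu_barrier_tasks_generic_cb() subtracts one.  For
 * example, if three queues accept a barrier callback, the count becomes
 * 2 + 3 = 5, the atomic_sub_and_test(2, ...) leaves 3, and the third
 * callback's decrement reaches zero and completes the barrier.  If no
 * queue accepts a callback, the subtraction of 2 reaches zero immediately.
 */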
376 // Advance callbacks and indicate whether either a grace period or
377 // callback invocation is needed.
378 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
387 for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
388 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
390 /* Advance and accelerate any new callbacks. */
391 if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
393 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
394 // Should we shrink down to a single callback queue?
395 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
401 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
402 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
403 if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
405 if (!rcu_segcblist_empty(&rtpcp->cblist))
407 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
410 // Shrink down to a single callback queue if appropriate.
411 // This is done in two stages: (1) If there are no more than
412 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
413 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
414 // if there has not been an increase in callbacks, limit dequeuing
415 // to CPU 0. Note the matching RCU read-side critical section in
416 // call_rcu_tasks_generic().
417 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
418 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
419 if (rtp->percpu_enqueue_lim > 1) {
420 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
421 smp_store_release(&rtp->percpu_enqueue_lim, 1);
422 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
423 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
425 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
427 if (rcu_task_cb_adjust && !ncbsnz &&
428 poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
429 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
430 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
431 WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
432 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
434 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
440 // Advance callbacks and invoke any that are ready.
441 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
447 struct rcu_head *rhp;
448 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
449 struct rcu_tasks_percpu *rtpcp_next;
452 cpunext = cpu * 2 + 1;
453 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
454 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
455 queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
456 cpunext++;
457 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
458 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
459 queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
463 if (rcu_segcblist_empty(&rtpcp->cblist))
465 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
466 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
467 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
468 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
470 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
476 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
477 rcu_segcblist_add_len(&rtpcp->cblist, -len);
478 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
479 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
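/*
 * Note (added for clarity): the cpunext = cpu * 2 + 1 arithmetic above
 * spreads callback invocation across a binary tree of workqueue handlers.
 * For example, with percpu_dequeue_lim = 7, the handler for queue 0 kicks
 * queues 1 and 2, queue 1 kicks queues 3 and 4, and queue 2 kicks queues
 * 5 and 6, so all queues are serviced in O(log n) kick steps rather than
 * being drained serially by the grace-period kthread.
 */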
482 // Workqueue flood to advance callbacks and invoke any that are ready.
483 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
485 struct rcu_tasks *rtp;
486 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
488 rtp = rtpcp->rtpp;
489 rcu_tasks_invoke_cbs(rtp, rtpcp);
492 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
493 static int __noreturn rcu_tasks_kthread(void *arg)
496 struct rcu_tasks *rtp = arg;
498 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
499 housekeeping_affine(current, HK_TYPE_RCU);
500 WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
503 * Each pass through the following loop makes one check for
504 * newly arrived callbacks, and, if there are some, waits for
505 * one RCU-tasks grace period and then invokes the callbacks.
506 * This loop is terminated by the system going down. ;-)
509 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
511 /* If there were none, wait a bit and start over. */
512 wait_event_idle(rtp->cbs_wq, (needgpcb = rcu_tasks_need_gpcb(rtp)));
514 if (needgpcb & 0x2) {
515 // Wait for one grace period.
516 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
517 rtp->gp_start = jiffies;
518 rcu_seq_start(&rtp->tasks_gp_seq);
520 rcu_seq_end(&rtp->tasks_gp_seq);
523 /* Invoke callbacks. */
524 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
525 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
527 /* Paranoid sleep to keep this from entering a tight loop */
528 schedule_timeout_idle(rtp->gp_sleep);
532 /* Spawn RCU-tasks grace-period kthread. */
533 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
535 struct task_struct *t;
537 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
538 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
540 smp_mb(); /* Ensure others see full kthread. */
543 #ifndef CONFIG_TINY_RCU
546 * Print any non-default Tasks RCU settings.
548 static void __init rcu_tasks_bootup_oddness(void)
550 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
551 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
552 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
553 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
554 #ifdef CONFIG_TASKS_RCU
555 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
556 #endif /* #ifdef CONFIG_TASKS_RCU */
557 #ifdef CONFIG_TASKS_RUDE_RCU
558 pr_info("\tRude variant of Tasks RCU enabled.\n");
559 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
560 #ifdef CONFIG_TASKS_TRACE_RCU
561 pr_info("\tTracing variant of Tasks RCU enabled.\n");
562 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
565 #endif /* #ifndef CONFIG_TINY_RCU */
567 #ifndef CONFIG_TINY_RCU
568 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
569 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
571 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each...
572 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
574 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
575 jiffies - data_race(rtp->gp_jiffies),
576 data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
577 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
578 ".k"[!!data_race(rtp->kthread_ptr)],
579 ".C"[!data_race(rcu_segcblist_empty(&rtpcp->cblist))],
582 #endif // #ifndef CONFIG_TINY_RCU
584 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
586 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
588 ////////////////////////////////////////////////////////////////////////
590 // Shared code between task-list-scanning variants of Tasks RCU.
592 /* Wait for one RCU-tasks grace period. */
593 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
595 struct task_struct *g, *t;
596 unsigned long lastreport;
600 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
604 * There were callbacks, so we need to wait for an RCU-tasks
605 * grace period. Start off by scanning the task list for tasks
606 * that are not already voluntarily blocked. Mark these tasks
607 * and make a list of them in holdouts.
609 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
611 for_each_process_thread(g, t)
612 rtp->pertask_func(t, &holdouts);
615 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
616 rtp->postscan_func(&holdouts);
619 * Each pass through the following loop scans the list of holdout
620 * tasks, removing any that are no longer holdouts. When the list
621 * is empty, we are done.
623 lastreport = jiffies;
625 // Start off with initial wait and slowly back off to 1 HZ wait.
626 fract = rtp->init_fract;
628 while (!list_empty(&holdouts)) {
633 /* Slowly back off waiting for holdouts */
634 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
635 schedule_timeout_idle(fract);
640 rtst = READ_ONCE(rcu_task_stall_timeout);
641 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
643 lastreport = jiffies;
645 WARN_ON(signal_pending(current));
646 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
647 rtp->holdouts_func(&holdouts, needreport, &firstreport);
650 set_tasks_gp_state(rtp, RTGS_POST_GP);
651 rtp->postgp_func(rtp);
654 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
656 #ifdef CONFIG_TASKS_RCU
658 ////////////////////////////////////////////////////////////////////////
660 // Simple variant of RCU whose quiescent states are voluntary context
661 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
662 // As such, grace periods can take one good long time. There are no
663 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
664 // because this implementation is intended to get the system into a safe
665 // state for some of the manipulations involved in tracing and the like.
666 // Finally, this implementation does not support high call_rcu_tasks()
667 // rates from multiple CPUs. If this is required, per-CPU callback lists
668 // will be needed.
670 // The implementation uses rcu_tasks_wait_gp(), which relies on function
671 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
672 // function sets these function pointers up so that rcu_tasks_wait_gp()
673 // invokes these functions in this order:
675 // rcu_tasks_pregp_step():
676 // Invokes synchronize_rcu() in order to wait for all in-flight
677 // t->on_rq and t->nvcsw transitions to complete. This works because
678 // all such transitions are carried out with interrupts disabled.
679 // rcu_tasks_pertask(), invoked on every non-idle task:
680 // For every runnable non-idle task other than the current one, use
681 // get_task_struct() to pin down that task, snapshot that task's
682 // number of voluntary context switches, and add that task to the
683 // holdout list.
684 // rcu_tasks_postscan():
685 // Invoke synchronize_srcu() to ensure that all tasks that were
686 // in the process of exiting (and which thus might not know to
687 // synchronize with this RCU Tasks grace period) have completed
688 // exiting.
689 // check_all_holdout_tasks(), repeatedly until holdout list is empty:
690 // Scans the holdout list, attempting to identify a quiescent state
691 // for each task on the list. If there is a quiescent state, the
692 // corresponding task is removed from the holdout list.
693 // rcu_tasks_postgp():
694 // Invokes synchronize_rcu() in order to ensure that all prior
695 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
696 // to have happened before the end of this RCU Tasks grace period.
697 // Again, this works because all such transitions are carried out
698 // with interrupts disabled.
700 // For each exiting task, the exit_tasks_rcu_start() and
701 // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
702 // read-side critical sections waited for by rcu_tasks_postscan().
704 // Pre-grace-period update-side code is ordered before the grace
705 // period via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
706 // is ordered before the grace period via synchronize_rcu() call in
707 // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
708 // disabling.
710 /* Pre-grace-period preparation. */
711 static void rcu_tasks_pregp_step(void)
714 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
715 * to complete. Invoking synchronize_rcu() suffices because all
716 * these transitions occur with interrupts disabled. Without this
717 * synchronize_rcu(), a read-side critical section that started
718 * before the grace period might be incorrectly seen as having
719 * started after the grace period.
721 * This synchronize_rcu() also dispenses with the need for a
722 * memory barrier on the first store to t->rcu_tasks_holdout,
723 * as it forces the store to happen after the beginning of the
724 * grace period.
729 /* Per-task initial processing. */
730 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
732 if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
734 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
735 WRITE_ONCE(t->rcu_tasks_holdout, true);
736 list_add(&t->rcu_tasks_holdout_list, hop);
740 /* Processing between scanning the task list and draining the holdout list. */
741 static void rcu_tasks_postscan(struct list_head *hop)
744 * Wait for tasks that are in the process of exiting. This
745 * does only part of the job, ensuring that all tasks that were
746 * previously exiting reach the point where they have disabled
747 * preemption, allowing the later synchronize_rcu() to finish
748 * the job.
750 synchronize_srcu(&tasks_rcu_exit_srcu);
753 /* See if tasks are still holding out, complain if so. */
754 static void check_holdout_task(struct task_struct *t,
755 bool needreport, bool *firstreport)
759 if (!READ_ONCE(t->rcu_tasks_holdout) ||
760 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
761 !READ_ONCE(t->on_rq) ||
762 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
763 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
764 WRITE_ONCE(t->rcu_tasks_holdout, false);
765 list_del_init(&t->rcu_tasks_holdout_list);
769 rcu_request_urgent_qs_task(t);
773 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
774 *firstreport = false;
777 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
778 t, ".I"[is_idle_task(t)],
779 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
780 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
781 t->rcu_tasks_idle_cpu, cpu);
785 /* Scan the holdout lists for tasks no longer holding out. */
786 static void check_all_holdout_tasks(struct list_head *hop,
787 bool needreport, bool *firstreport)
789 struct task_struct *t, *t1;
791 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
792 check_holdout_task(t, needreport, firstreport);
797 /* Finish off the Tasks-RCU grace period. */
798 static void rcu_tasks_postgp(struct rcu_tasks *rtp)
801 * Because ->on_rq and ->nvcsw are not guaranteed to have full
802 * memory barriers prior to them in the schedule() path, memory
803 * reordering on other CPUs could cause their RCU-tasks read-side
804 * critical sections to extend past the end of the grace period.
805 * However, because these ->nvcsw updates are carried out with
806 * interrupts disabled, we can use synchronize_rcu() to force the
807 * needed ordering on all such CPUs.
809 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
810 * accesses to be within the grace period, avoiding the need for
811 * memory barriers for ->rcu_tasks_holdout accesses.
813 * In addition, this synchronize_rcu() waits for exiting tasks
814 * to complete their final preempt_disable() region of execution,
815 * cleaning up after the synchronize_srcu() above.
820 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
821 DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
824 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
825 * @rhp: structure to be used for queueing the RCU updates.
826 * @func: actual callback function to be invoked after the grace period
828 * The callback function will be invoked some time after a full grace
829 * period elapses, in other words after all currently executing RCU
830 * read-side critical sections have completed. call_rcu_tasks() assumes
831 * that the read-side critical sections end at a voluntary context
832 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
833 * or transition to usermode execution. As such, there are no read-side
834 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
835 * this primitive is intended to determine that all tasks have passed
836 * through a safe state, not so much for data-structure synchronization.
838 * See the description of call_rcu() for more detailed information on
839 * memory ordering guarantees.
841 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
843 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
845 EXPORT_SYMBOL_GPL(call_rcu_tasks);
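/*
 * Illustrative sketch (hypothetical code, not part of this file): freeing
 * a dynamically allocated trampoline only after every task has passed
 * through a voluntary-context-switch quiescent state.  The my_tramp
 * structure and functions below are made up for illustration.
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *insns;
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *mt = container_of(rhp, struct my_tramp, rh);
 *
 *		vfree(mt->insns);
 *		kfree(mt);
 *	}
 *
 *	// After unhooking the trampoline from all call sites:
 *	call_rcu_tasks(&mt->rh, my_tramp_free_cb);
 */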
848 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
850 * Control will return to the caller some time after a full rcu-tasks
851 * grace period has elapsed, in other words after all currently
852 * executing rcu-tasks read-side critical sections have completed. These
853 * read-side critical sections are delimited by calls to schedule(),
854 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
855 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
857 * This is a very specialized primitive, intended only for a few uses in
858 * tracing and other situations requiring manipulation of function
859 * preambles and profiling hooks. The synchronize_rcu_tasks() function
860 * is not (yet) intended for heavy use from multiple CPUs.
862 * See the description of synchronize_rcu() for more detailed information
863 * on memory ordering guarantees.
865 void synchronize_rcu_tasks(void)
867 synchronize_rcu_tasks_generic(&rcu_tasks);
869 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
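/*
 * Illustrative sketch (hypothetical code): the synchronous counterpart of
 * the call_rcu_tasks() example above, for update-side code that may sleep:
 *
 *	unhook_my_tramp(mt);		// No task can newly enter the trampoline.
 *	synchronize_rcu_tasks();	// Wait for tasks already inside to leave.
 *	vfree(mt->insns);		// Now safe to free.
 *	kfree(mt);
 */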
872 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
874 * Although the current implementation is guaranteed to wait, it is not
875 * obligated to, for example, if there are no pending callbacks.
877 void rcu_barrier_tasks(void)
879 rcu_barrier_tasks_generic(&rcu_tasks);
881 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
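/*
 * Illustrative sketch (hypothetical module-exit path): a module that uses
 * call_rcu_tasks() must wait for its queued callbacks before its text and
 * data disappear:
 *
 *	static void __exit my_tracer_exit(void)
 *	{
 *		unregister_my_tramps();	// Stop queueing new callbacks.
 *		rcu_barrier_tasks();	// Wait for already-queued callbacks.
 *	}
 */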
883 static int __init rcu_spawn_tasks_kthread(void)
885 cblist_init_generic(&rcu_tasks);
886 rcu_tasks.gp_sleep = HZ / 10;
887 rcu_tasks.init_fract = HZ / 10;
888 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
889 rcu_tasks.pertask_func = rcu_tasks_pertask;
890 rcu_tasks.postscan_func = rcu_tasks_postscan;
891 rcu_tasks.holdouts_func = check_all_holdout_tasks;
892 rcu_tasks.postgp_func = rcu_tasks_postgp;
893 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
897 #if !defined(CONFIG_TINY_RCU)
898 void show_rcu_tasks_classic_gp_kthread(void)
900 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
902 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
903 #endif // !defined(CONFIG_TINY_RCU)
905 /* Do the srcu_read_lock() for the above synchronize_srcu(). */
906 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
909 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
913 /* Do the srcu_read_unlock() for the above synchronize_srcu(). */
914 void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
916 struct task_struct *t = current;
919 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
921 exit_tasks_rcu_finish_trace(t);
924 #else /* #ifdef CONFIG_TASKS_RCU */
925 void exit_tasks_rcu_start(void) { }
926 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
927 #endif /* #else #ifdef CONFIG_TASKS_RCU */
929 #ifdef CONFIG_TASKS_RUDE_RCU
931 ////////////////////////////////////////////////////////////////////////
933 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
934 // passing an empty function to schedule_on_each_cpu(). This approach
935 // provides an asynchronous call_rcu_tasks_rude() API and batching of
936 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
937 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
938 // and induces otherwise unnecessary context switches on all online CPUs,
939 // whether idle or not.
941 // Callback handling is provided by the rcu_tasks_kthread() function.
943 // Ordering is provided by the scheduler's context-switch code.
945 // Empty function to allow workqueues to force a context switch.
946 static void rcu_tasks_be_rude(struct work_struct *work)
950 // Wait for one rude RCU-tasks grace period.
951 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
953 rtp->n_ipis += cpumask_weight(cpu_online_mask);
954 schedule_on_each_cpu(rcu_tasks_be_rude);
957 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
958 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
962 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
963 * @rhp: structure to be used for queueing the RCU updates.
964 * @func: actual callback function to be invoked after the grace period
966 * The callback function will be invoked some time after a full grace
967 * period elapses, in other words after all currently executing RCU
968 * read-side critical sections have completed. call_rcu_tasks_rude()
969 * assumes that the read-side critical sections end at context switch,
970 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
971 * usermode execution is schedulable). As such, there are no read-side
972 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
973 * this primitive is intended to determine that all tasks have passed
974 * through a safe state, not so much for data-structure synchronization.
976 * See the description of call_rcu() for more detailed information on
977 * memory ordering guarantees.
979 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
981 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
983 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
986 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
988 * Control will return to the caller some time after a rude rcu-tasks
989 * grace period has elapsed, in other words after all currently
990 * executing rcu-tasks read-side critical sections have completed. These
991 * read-side critical sections are delimited by calls to schedule(),
992 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
993 * context), and (in theory, anyway) cond_resched().
995 * This is a very specialized primitive, intended only for a few uses in
996 * tracing and other situations requiring manipulation of function preambles
997 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
998 * (yet) intended for heavy use from multiple CPUs.
1000 * See the description of synchronize_rcu() for more detailed information
1001 * on memory ordering guarantees.
1003 void synchronize_rcu_tasks_rude(void)
1005 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1007 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1010 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1012 * Although the current implementation is guaranteed to wait, it is not
1013 * obligated to, for example, if there are no pending callbacks.
1015 void rcu_barrier_tasks_rude(void)
1017 rcu_barrier_tasks_generic(&rcu_tasks_rude);
1019 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1021 static int __init rcu_spawn_tasks_rude_kthread(void)
1023 cblist_init_generic(&rcu_tasks_rude);
1024 rcu_tasks_rude.gp_sleep = HZ / 10;
1025 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1029 #if !defined(CONFIG_TINY_RCU)
1030 void show_rcu_tasks_rude_gp_kthread(void)
1032 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1034 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1035 #endif // !defined(CONFIG_TINY_RCU)
1036 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1038 ////////////////////////////////////////////////////////////////////////
1040 // Tracing variant of Tasks RCU. This variant is designed to be used
1041 // to protect tracing hooks, including those of BPF. This variant
1042 // therefore:
1044 // 1. Has explicit read-side markers to allow finite grace periods
1045 // in the face of in-kernel loops for PREEMPT=n builds.
1047 // 2. Protects code in the idle loop, exception entry/exit, and
1048 // CPU-hotplug code paths, similar to the capabilities of SRCU.
1050 // 3. Avoids expensive read-side instructions, having overhead similar
1051 // to that of Preemptible RCU.
1053 // There are of course downsides. The grace-period code can send IPIs to
1054 // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
1055 // It is necessary to scan the full tasklist, much as for Tasks RCU. There
1056 // is a single callback queue guarded by a single lock, again, much as for
1057 // Tasks RCU. If needed, these downsides can be at least partially remedied.
1059 // Perhaps most important, this variant of RCU does not affect the vanilla
1060 // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
1061 // readers can operate from idle, offline, and exception entry/exit in no
1062 // way allows rcu_preempt and rcu_sched readers to also do so.
1064 // The implementation uses rcu_tasks_wait_gp(), which relies on function
1065 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
1066 // function sets these function pointers up so that rcu_tasks_wait_gp()
1067 // invokes these functions in this order:
1069 // rcu_tasks_trace_pregp_step():
1070 // Initialize the count of readers and block CPU-hotplug operations.
1071 // rcu_tasks_trace_pertask(), invoked on every non-idle task:
1072 // Initialize per-task state and attempt to identify an immediate
1073 // quiescent state for that task, or, failing that, attempt to
1074 // set that task's .need_qs flag so that task's next outermost
1075 // rcu_read_unlock_trace() will report the quiescent state (in which
1076 // case the count of readers is incremented). If both attempts fail,
1077 // the task is added to a "holdout" list. Note that IPIs are used
1078 // to invoke trc_read_check_handler() in the context of running tasks
1079 // in order to avoid ordering overhead on common-case shared-variable
1080 // usages.
1081 // rcu_tasks_trace_postscan():
1082 // Initialize state and attempt to identify an immediate quiescent
1083 // state as above (but only for idle tasks), unblock CPU-hotplug
1084 // operations, and wait for an RCU grace period to avoid races with
1085 // tasks that are in the process of exiting.
1086 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1087 // Scans the holdout list, attempting to identify a quiescent state
1088 // for each task on the list. If there is a quiescent state, the
1089 // corresponding task is removed from the holdout list.
1090 // rcu_tasks_trace_postgp():
1091 // Wait for the count of readers to drop to zero, reporting any stalls.
1092 // Also execute full memory barriers to maintain ordering with code
1093 // executing after the grace period.
1095 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1097 // Pre-grace-period update-side code is ordered before the grace
1098 // period via the ->cbs_lock and barriers in rcu_tasks_kthread().
1099 // Pre-grace-period read-side code is ordered before the grace period by
1100 // atomic_dec_and_test() of the count of readers (for IPIed readers) and by
1101 // scheduler context-switch ordering (for locked-down non-running readers).
1103 // The lockdep state must be outside of #ifdef to be useful.
1104 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1105 static struct lock_class_key rcu_lock_trace_key;
1106 struct lockdep_map rcu_trace_lock_map =
1107 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1108 EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1109 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1111 #ifdef CONFIG_TASKS_TRACE_RCU
1113 static atomic_t trc_n_readers_need_end; // Number of waited-for readers.
1114 static DECLARE_WAIT_QUEUE_HEAD(trc_wait); // List of holdout tasks.
1116 // Record outstanding IPIs to each CPU. No point in sending two...
1117 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1119 // The number of detections of task quiescent state relying on
1120 // heavyweight readers executing explicit memory barriers.
1121 static unsigned long n_heavy_reader_attempts;
1122 static unsigned long n_heavy_reader_updates;
1123 static unsigned long n_heavy_reader_ofl_updates;
1125 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1126 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1130 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
1131 * while the scheduler locks are held.
1133 static void rcu_read_unlock_iw(struct irq_work *iwp)
1137 static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
1139 /* If we are the last reader, wake up the grace-period kthread. */
1140 void rcu_read_unlock_trace_special(struct task_struct *t)
1142 int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
1144 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
1145 t->trc_reader_special.b.need_mb)
1146 smp_mb(); // Pairs with update-side barriers.
1147 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1149 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
1150 WRITE_ONCE(t->trc_reader_nesting, 0);
1151 if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
1152 irq_work_queue(&rcu_tasks_trace_iw);
1154 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1156 /* Add a task to the holdout list, if it is not already on the list. */
1157 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1159 if (list_empty(&t->trc_holdout_list)) {
1161 list_add(&t->trc_holdout_list, bhp);
1165 /* Remove a task from the holdout list, if it is in fact present. */
1166 static void trc_del_holdout(struct task_struct *t)
1168 if (!list_empty(&t->trc_holdout_list)) {
1169 list_del_init(&t->trc_holdout_list);
1174 /* IPI handler to check task state. */
1175 static void trc_read_check_handler(void *t_in)
1177 struct task_struct *t = current;
1178 struct task_struct *texp = t_in;
1180 // If the task is no longer running on this CPU, leave.
1181 if (unlikely(texp != t)) {
1182 goto reset_ipi; // Already on holdout list, so will check later.
1185 // If the task is not in a read-side critical section, and
1186 // if this is the last reader, awaken the grace-period kthread.
1187 if (likely(!READ_ONCE(t->trc_reader_nesting))) {
1188 WRITE_ONCE(t->trc_reader_checked, true);
1191 // If we are racing with an rcu_read_unlock_trace(), try again later.
1192 if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
1194 WRITE_ONCE(t->trc_reader_checked, true);
1196 // Get here if the task is in a read-side critical section. Set
1197 // its state so that it will awaken the grace-period kthread upon
1198 // exit from that critical section.
1199 atomic_inc(&trc_n_readers_need_end); // One more to wait on.
1200 WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
1201 WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
1204 // Allow future IPIs to be sent on CPU and for task.
1205 // Also order this IPI handler against any later manipulations of
1206 // the intended task.
1207 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1208 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1211 /* Callback function for scheduler to check locked-down task. */
1212 static int trc_inspect_reader(struct task_struct *t, void *arg)
1214 int cpu = task_cpu(t);
1216 bool ofl = cpu_is_offline(cpu);
1219 WARN_ON_ONCE(ofl && !is_idle_task(t));
1221 // If no chance of heavyweight readers, do it the hard way.
1222 if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1225 // If heavyweight readers are enabled on the remote task,
1226 // we can inspect its state even though it is currently running.
1227 // However, we cannot safely change its state.
1228 n_heavy_reader_attempts++;
1229 if (!ofl && // Check for "running" idle tasks on offline CPUs.
1230 !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1231 return -EINVAL; // No quiescent state, do it the hard way.
1232 n_heavy_reader_updates++;
1234 n_heavy_reader_ofl_updates++;
1237 // The task is not running, so C-language access is safe.
1238 nesting = t->trc_reader_nesting;
1241 // If not exiting a read-side critical section, mark as checked
1242 // so that the grace-period kthread will remove it from the
1243 // holdout list.
1244 t->trc_reader_checked = nesting >= 0;
1246 return nesting ? -EINVAL : 0; // If in QS, done, otherwise try again later.
1248 // The task is in a read-side critical section, so set up its
1249 // state so that it will awaken the grace-period kthread upon exit
1250 // from that critical section.
1251 atomic_inc(&trc_n_readers_need_end); // One more to wait on.
1252 WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
1253 WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
1257 /* Attempt to extract the state for the specified task. */
1258 static void trc_wait_for_one_reader(struct task_struct *t,
1259 struct list_head *bhp)
1263 // If a previous IPI is still in flight, let it complete.
1264 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1267 // The current task had better be in a quiescent state.
1269 t->trc_reader_checked = true;
1270 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1274 // Attempt to nail down the task for inspection.
1276 if (!task_call_func(t, trc_inspect_reader, NULL)) {
1282 // If this task is not yet on the holdout list, then we are in
1283 // an RCU read-side critical section. Otherwise, the invocation of
1284 // trc_add_holdout() that added it to the list did the necessary
1285 // get_task_struct(). Either way, the task cannot be freed out
1286 // from under this code.
1288 // If currently running, send an IPI; either way, add to list.
1289 trc_add_holdout(t, bhp);
1291 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1292 // The task is currently running, so try IPIing it.
1295 // If there is already an IPI outstanding, let it happen.
1296 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1299 per_cpu(trc_ipi_to_cpu, cpu) = true;
1300 t->trc_ipi_to_cpu = cpu;
1301 rcu_tasks_trace.n_ipis++;
1302 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1303 // Just in case the failure has some reason other than the
1304 // target CPU being offline.
1305 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
1307 rcu_tasks_trace.n_ipis_fails++;
1308 per_cpu(trc_ipi_to_cpu, cpu) = false;
1309 t->trc_ipi_to_cpu = -1;
1314 /* Initialize for a new RCU-tasks-trace grace period. */
1315 static void rcu_tasks_trace_pregp_step(void)
1319 // Allow for fast-acting IPIs.
1320 atomic_set(&trc_n_readers_need_end, 1);
1322 // There shouldn't be any old IPIs, but...
1323 for_each_possible_cpu(cpu)
1324 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1326 // Disable CPU hotplug across the tasklist scan.
1327 // This also waits for all readers in CPU-hotplug code paths.
1331 /* Do first-round processing for the specified task. */
1332 static void rcu_tasks_trace_pertask(struct task_struct *t,
1333 struct list_head *hop)
1335 // During early boot when there is only the one boot CPU, there
1336 // is no idle task for the other CPUs. Just return.
1337 if (unlikely(t == NULL))
1340 WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
1341 WRITE_ONCE(t->trc_reader_checked, false);
1342 t->trc_ipi_to_cpu = -1;
1343 trc_wait_for_one_reader(t, hop);
1347 * Do intermediate processing between task and holdout scans and
1348 * pick up the idle tasks.
1350 static void rcu_tasks_trace_postscan(struct list_head *hop)
1354 for_each_possible_cpu(cpu)
1355 rcu_tasks_trace_pertask(idle_task(cpu), hop);
1357 // Re-enable CPU hotplug now that the tasklist scan has completed.
1360 // Wait for late-stage exiting tasks to finish exiting.
1361 // These might have passed the call to exit_tasks_rcu_finish().
1363 // Any tasks that exit after this point will set ->trc_reader_checked.
1366 /* Communicate task state back to the RCU tasks trace stall warning request. */
1367 struct trc_stall_chk_rdr {
1373 static int trc_check_slow_task(struct task_struct *t, void *arg)
1375 struct trc_stall_chk_rdr *trc_rdrp = arg;
1378 return false; // It is running, so decline to inspect it.
1379 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1380 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1381 trc_rdrp->needqs = READ_ONCE(t->trc_reader_special.b.need_qs);
1385 /* Show the state of a task stalling the current RCU tasks trace GP. */
1386 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1389 struct trc_stall_chk_rdr trc_rdr;
1390 bool is_idle_tsk = is_idle_task(t);
1393 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1394 *firstreport = false;
1397 if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1398 pr_alert("P%d: %c\n",
1402 pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
1404 ".I"[trc_rdr.ipi_to_cpu >= 0],
1406 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1408 " N"[!!trc_rdr.needqs],
1413 /* List stalled IPIs for RCU tasks trace. */
1414 static void show_stalled_ipi_trace(void)
1418 for_each_possible_cpu(cpu)
1419 if (per_cpu(trc_ipi_to_cpu, cpu))
1420 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1423 /* Do one scan of the holdout list. */
1424 static void check_all_holdout_tasks_trace(struct list_head *hop,
1425 bool needreport, bool *firstreport)
1427 struct task_struct *g, *t;
1429 // Disable CPU hotplug across the holdout list scan.
1432 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1433 // If safe and needed, try to check the current task.
1434 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1435 !READ_ONCE(t->trc_reader_checked))
1436 trc_wait_for_one_reader(t, hop);
1438 // If check succeeded, remove this task from the list.
1439 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1440 READ_ONCE(t->trc_reader_checked))
1441 	trc_del_holdout(t);
1442 else if (needreport)
1443 show_stalled_task_trace(t, firstreport);
1446 // Re-enable CPU hotplug now that the holdout list scan has completed.
1451 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1452 show_stalled_ipi_trace();
1456 static void rcu_tasks_trace_empty_fn(void *unused)
1460 /* Wait for grace period to complete and provide ordering. */
1461 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1465 struct task_struct *g, *t;
1466 LIST_HEAD(holdouts);
1469 // Wait for any lingering IPI handlers to complete. Note that
1470 // if a CPU has gone offline or transitioned to userspace in the
1471 // meantime, all IPI handlers should have been drained beforehand.
1472 // Yes, this assumes that CPUs process IPIs in order. If that ever
1473 // changes, there will need to be a recheck and/or timed wait.
1474 for_each_online_cpu(cpu)
1475 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1476 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1478 // Remove the safety count.
1479 smp_mb__before_atomic(); // Order vs. earlier atomics
1480 atomic_dec(&trc_n_readers_need_end);
1481 smp_mb__after_atomic(); // Order vs. later atomics
1483 // Wait for readers.
1484 set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
1486 ret = wait_event_idle_exclusive_timeout(
1488 atomic_read(&trc_n_readers_need_end) == 0,
1489 READ_ONCE(rcu_task_stall_timeout));
1491 break; // Count reached zero.
1492 // Stall warning time, so make a list of the offenders.
1494 for_each_process_thread(g, t)
1495 if (READ_ONCE(t->trc_reader_special.b.need_qs))
1496 trc_add_holdout(t, &holdouts);
1499 list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
1500 if (READ_ONCE(t->trc_reader_special.b.need_qs))
1501 show_stalled_task_trace(t, &firstreport);
1502 trc_del_holdout(t); // Release task_struct reference.
1505 pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
1506 show_stalled_ipi_trace();
1507 pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
1509 smp_mb(); // Caller's code must be ordered after wakeup.
1510 // Pairs with pretty much every ordering primitive.
1513 /* Report any needed quiescent state for this exiting task. */
1514 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1516 WRITE_ONCE(t->trc_reader_checked, true);
1517 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1518 WRITE_ONCE(t->trc_reader_nesting, 0);
1519 if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
1520 rcu_read_unlock_trace_special(t);
1524 * call_rcu_tasks_trace() - Queue an RCU callback for invocation after a trace task-based grace period
1525 * @rhp: structure to be used for queueing the RCU updates.
1526 * @func: actual callback function to be invoked after the grace period
1528 * The callback function will be invoked some time after a trace rcu-tasks
1529 * grace period elapses, in other words after all currently executing
1530 * trace rcu-tasks read-side critical sections have completed. These
1531 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1532 * and rcu_read_unlock_trace().
1534 * See the description of call_rcu() for more detailed information on
1535 * memory ordering guarantees.
1537 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1539 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1541 EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
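/*
 * Illustrative sketch (hypothetical code, not part of this file): a
 * reader/updater pairing for RCU Tasks Trace.  The hook dispatch path is
 * protected by rcu_read_lock_trace(), and the updater frees the displaced
 * hook via call_rcu_tasks_trace().  The my_hook names are made up.
 *
 *	// Reader (e.g., a tracing or BPF dispatch path):
 *	rcu_read_lock_trace();
 *	hook = rcu_dereference_check(my_hook, rcu_read_lock_trace_held());
 *	if (hook)
 *		hook->func(hook);
 *	rcu_read_unlock_trace();
 *
 *	// Updater, with my_hook_lock held:
 *	old = rcu_replace_pointer(my_hook, new, lockdep_is_held(&my_hook_lock));
 *	if (old)
 *		call_rcu_tasks_trace(&old->rh, my_hook_free_cb);
 */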
1544 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1546 * Control will return to the caller some time after a trace rcu-tasks
1547 * grace period has elapsed, in other words after all currently executing
1548 * trace rcu-tasks read-side critical sections have completed. These read-side
1549 * critical sections are delimited by calls to rcu_read_lock_trace()
1550 * and rcu_read_unlock_trace().
1552 * This is a very specialized primitive, intended only for a few uses in
1553 * tracing and other situations requiring manipulation of function preambles
1554 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1555 * (yet) intended for heavy use from multiple CPUs.
1557 * See the description of synchronize_rcu() for more detailed information
1558 * on memory ordering guarantees.
1560 void synchronize_rcu_tasks_trace(void)
1562 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1563 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1565 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
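/*
 * Illustrative sketch (hypothetical code): the synchronous form of the
 * example above, for sleepable update-side code such as a detach path:
 *
 *	rcu_assign_pointer(my_hook, NULL);
 *	synchronize_rcu_tasks_trace();	// All rcu_read_lock_trace() readers done.
 *	kfree(old_hook);
 */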
1568 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1570 * Although the current implementation is guaranteed to wait, it is not
1571 * obligated to, for example, if there are no pending callbacks.
1573 void rcu_barrier_tasks_trace(void)
1575 rcu_barrier_tasks_generic(&rcu_tasks_trace);
1577 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
1579 static int __init rcu_spawn_tasks_trace_kthread(void)
1581 cblist_init_generic(&rcu_tasks_trace);
1582 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1583 rcu_tasks_trace.gp_sleep = HZ / 10;
1584 rcu_tasks_trace.init_fract = HZ / 10;
1586 rcu_tasks_trace.gp_sleep = HZ / 200;
1587 if (rcu_tasks_trace.gp_sleep <= 0)
1588 rcu_tasks_trace.gp_sleep = 1;
1589 rcu_tasks_trace.init_fract = HZ / 200;
1590 if (rcu_tasks_trace.init_fract <= 0)
1591 rcu_tasks_trace.init_fract = 1;
1593 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1594 rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
1595 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1596 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1597 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1598 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1602 #if !defined(CONFIG_TINY_RCU)
1603 void show_rcu_tasks_trace_gp_kthread(void)
1607 sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
1608 data_race(n_heavy_reader_ofl_updates),
1609 data_race(n_heavy_reader_updates),
1610 data_race(n_heavy_reader_attempts));
1611 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1613 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1614 #endif // !defined(CONFIG_TINY_RCU)
1616 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1617 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1618 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1620 #ifndef CONFIG_TINY_RCU
1621 void show_rcu_tasks_gp_kthreads(void)
1623 show_rcu_tasks_classic_gp_kthread();
1624 show_rcu_tasks_rude_gp_kthread();
1625 show_rcu_tasks_trace_gp_kthread();
1627 #endif /* #ifndef CONFIG_TINY_RCU */
1629 #ifdef CONFIG_PROVE_RCU
1630 struct rcu_tasks_test_desc {
1636 static struct rcu_tasks_test_desc tests[] = {
1638 .name = "call_rcu_tasks()",
1639 /* If not defined, the test is skipped. */
1640 .notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
1643 .name = "call_rcu_tasks_rude()",
1644 /* If not defined, the test is skipped. */
1645 .notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1648 .name = "call_rcu_tasks_trace()",
1649 /* If not defined, the test is skipped. */
1650 .notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1654 static void test_rcu_tasks_callback(struct rcu_head *rhp)
1656 struct rcu_tasks_test_desc *rttd =
1657 container_of(rhp, struct rcu_tasks_test_desc, rh);
1659 pr_info("Callback from %s invoked.\n", rttd->name);
1661 rttd->notrun = true;
1664 static void rcu_tasks_initiate_self_tests(void)
1666 pr_info("Running RCU-tasks wait API self tests\n");
1667 #ifdef CONFIG_TASKS_RCU
1668 synchronize_rcu_tasks();
1669 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
1672 #ifdef CONFIG_TASKS_RUDE_RCU
1673 synchronize_rcu_tasks_rude();
1674 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
1677 #ifdef CONFIG_TASKS_TRACE_RCU
1678 synchronize_rcu_tasks_trace();
1679 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
1683 static int rcu_tasks_verify_self_tests(void)
1688 for (i = 0; i < ARRAY_SIZE(tests); i++) {
1689 if (!tests[i].notrun) { // still hanging.
1690 pr_err("%s has been failed.\n", tests[i].name);
1700 late_initcall(rcu_tasks_verify_self_tests);
1701 #else /* #ifdef CONFIG_PROVE_RCU */
1702 static void rcu_tasks_initiate_self_tests(void) { }
1703 #endif /* #else #ifdef CONFIG_PROVE_RCU */
1705 void __init rcu_init_tasks_generic(void)
1707 #ifdef CONFIG_TASKS_RCU
1708 rcu_spawn_tasks_kthread();
1711 #ifdef CONFIG_TASKS_RUDE_RCU
1712 rcu_spawn_tasks_rude_kthread();
1715 #ifdef CONFIG_TASKS_TRACE_RCU
1716 rcu_spawn_tasks_trace_kthread();
1719 // Run the self-tests.
1720 rcu_tasks_initiate_self_tests();
1723 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1724 static inline void rcu_tasks_bootup_oddness(void) {}
1725 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */