kernel/rcu/rcutorture.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update module-based torture test facility
4  *
5  * Copyright (C) IBM Corporation, 2005, 2006
6  *
7  * Authors: Paul E. McKenney <[email protected]>
8  *        Josh Triplett <[email protected]>
9  *
10  * See also:  Documentation/RCU/torture.rst
11  */
12
13 #define pr_fmt(fmt) fmt
14
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/kthread.h>
20 #include <linux/err.h>
21 #include <linux/spinlock.h>
22 #include <linux/smp.h>
23 #include <linux/rcupdate_wait.h>
24 #include <linux/interrupt.h>
25 #include <linux/sched/signal.h>
26 #include <uapi/linux/sched/types.h>
27 #include <linux/atomic.h>
28 #include <linux/bitops.h>
29 #include <linux/completion.h>
30 #include <linux/moduleparam.h>
31 #include <linux/percpu.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <linux/freezer.h>
35 #include <linux/cpu.h>
36 #include <linux/delay.h>
37 #include <linux/stat.h>
38 #include <linux/srcu.h>
39 #include <linux/slab.h>
40 #include <linux/trace_clock.h>
41 #include <asm/byteorder.h>
42 #include <linux/torture.h>
43 #include <linux/vmalloc.h>
44 #include <linux/sched/debug.h>
45 #include <linux/sched/sysctl.h>
46 #include <linux/oom.h>
47 #include <linux/tick.h>
48 #include <linux/rcupdate_trace.h>
49
50 #include "rcu.h"
51
52 MODULE_LICENSE("GPL");
53 MODULE_AUTHOR("Paul E. McKenney <[email protected]> and Josh Triplett <[email protected]>");
54
55 /* Bits for ->extendables field, extendables param, and related definitions. */
56 #define RCUTORTURE_RDR_SHIFT     8      /* Put SRCU index in upper bits. */
57 #define RCUTORTURE_RDR_MASK      ((1 << RCUTORTURE_RDR_SHIFT) - 1)
58 #define RCUTORTURE_RDR_BH        0x01   /* Extend readers by disabling bh. */
59 #define RCUTORTURE_RDR_IRQ       0x02   /*  ... disabling interrupts. */
60 #define RCUTORTURE_RDR_PREEMPT   0x04   /*  ... disabling preemption. */
61 #define RCUTORTURE_RDR_RBH       0x08   /*  ... rcu_read_lock_bh(). */
62 #define RCUTORTURE_RDR_SCHED     0x10   /*  ... rcu_read_lock_sched(). */
63 #define RCUTORTURE_RDR_RCU       0x20   /*  ... entering another RCU reader. */
64 #define RCUTORTURE_RDR_NBITS     6      /* Number of bits defined above. */
65 #define RCUTORTURE_MAX_EXTEND    \
66         (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
67          RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
68 #define RCUTORTURE_RDR_MAX_LOOPS 0x7    /* Maximum reader extensions. */
69                                         /* Must be power of two minus one. */
70 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
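/*
 * A rough sketch of the resulting reader-state encoding: the index returned
 * by the flavor's ->readlock() lives above RCUTORTURE_RDR_SHIFT and the
 * extension flags live below it, so a reader holding (hypothetically) SRCU
 * index 1 with bh and irqs also disabled would carry the value
 * (1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ,
 * and RCUTORTURE_RDR_MASK covers just the flag bits.
 */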
71
72 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
73               "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
74 torture_param(int, fqs_duration, 0,
75               "Duration of fqs bursts (us), 0 to disable");
76 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
77 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
78 torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
79 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
80 torture_param(int, fwd_progress_holdoff, 60,
81               "Time between forward-progress tests (s)");
82 torture_param(bool, fwd_progress_need_resched, 1,
83               "Hide cond_resched() behind need_resched()");
84 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
85 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
86 torture_param(bool, gp_normal, false,
87              "Use normal (non-expedited) GP wait primitives");
88 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
89 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
90 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
91 torture_param(int, n_barrier_cbs, 0,
92              "# of callbacks/kthreads for barrier testing");
93 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
94 torture_param(int, nreaders, -1, "Number of RCU reader threads");
95 torture_param(int, object_debug, 0,
96              "Enable debug-object double call_rcu() testing");
97 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
98 torture_param(int, onoff_interval, 0,
99              "Time between CPU hotplugs (jiffies), 0=disable");
100 torture_param(int, read_exit_delay, 13,
101               "Delay between read-then-exit episodes (s)");
102 torture_param(int, read_exit_burst, 16,
103               "# of read-then-exit bursts per episode, zero to disable");
104 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
105 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
106 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
107 torture_param(int, stall_cpu_holdoff, 10,
108              "Time to wait before starting stall (s).");
109 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
110 torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
111 torture_param(int, stall_gp_kthread, 0,
112               "Grace-period kthread stall duration (s).");
113 torture_param(int, stat_interval, 60,
114              "Number of seconds between stats printk()s");
115 torture_param(int, stutter, 5, "Number of seconds to run/halt test");
116 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
117 torture_param(int, test_boost_duration, 4,
118              "Duration of each boost test, seconds.");
119 torture_param(int, test_boost_interval, 7,
120              "Interval between boost tests, seconds.");
121 torture_param(bool, test_no_idle_hz, true,
122              "Test support for tickless idle CPUs");
123 torture_param(int, verbose, 1,
124              "Enable verbose debugging printk()s");
125
126 static char *torture_type = "rcu";
127 module_param(torture_type, charp, 0444);
128 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
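/*
 * Illustrative usage only: these are ordinary module parameters, so a modular
 * build might be loaded with "modprobe rcutorture torture_type=srcu nreaders=8",
 * while a built-in rcutorture takes the same settings on the kernel command
 * line as "rcutorture.torture_type=srcu rcutorture.nreaders=8".
 */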
129
130 static int nrealreaders;
131 static struct task_struct *writer_task;
132 static struct task_struct **fakewriter_tasks;
133 static struct task_struct **reader_tasks;
134 static struct task_struct *stats_task;
135 static struct task_struct *fqs_task;
136 static struct task_struct *boost_tasks[NR_CPUS];
137 static struct task_struct *stall_task;
138 static struct task_struct *fwd_prog_task;
139 static struct task_struct **barrier_cbs_tasks;
140 static struct task_struct *barrier_task;
141 static struct task_struct *read_exit_task;
142
143 #define RCU_TORTURE_PIPE_LEN 10
144
145 struct rcu_torture {
146         struct rcu_head rtort_rcu;
147         int rtort_pipe_count;
148         struct list_head rtort_free;
149         int rtort_mbtest;
150 };
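/*
 * Rough life cycle, for orientation: the writer pulls an element from the
 * freelist below, sets rtort_mbtest to 1, publishes it via rcu_torture_current,
 * and retires the previous element through one or more grace periods, bumping
 * its rtort_pipe_count at each step.  A reader that sees a pipe count greater
 * than 1, or an rtort_mbtest of 0, has reached an element that should already
 * have been invisible to it, which rcutorture counts as an error.
 */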
151
152 static LIST_HEAD(rcu_torture_freelist);
153 static struct rcu_torture __rcu *rcu_torture_current;
154 static unsigned long rcu_torture_current_version;
155 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
156 static DEFINE_SPINLOCK(rcu_torture_lock);
157 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
158 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
159 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
160 static atomic_t n_rcu_torture_alloc;
161 static atomic_t n_rcu_torture_alloc_fail;
162 static atomic_t n_rcu_torture_free;
163 static atomic_t n_rcu_torture_mberror;
164 static atomic_t n_rcu_torture_error;
165 static long n_rcu_torture_barrier_error;
166 static long n_rcu_torture_boost_ktrerror;
167 static long n_rcu_torture_boost_rterror;
168 static long n_rcu_torture_boost_failure;
169 static long n_rcu_torture_boosts;
170 static atomic_long_t n_rcu_torture_timers;
171 static long n_barrier_attempts;
172 static long n_barrier_successes; /* did rcu_barrier test succeed? */
173 static unsigned long n_read_exits;
174 static struct list_head rcu_torture_removed;
175 static unsigned long shutdown_jiffies;
176 static unsigned long start_gp_seq;
177
178 static int rcu_torture_writer_state;
179 #define RTWS_FIXED_DELAY        0
180 #define RTWS_DELAY              1
181 #define RTWS_REPLACE            2
182 #define RTWS_DEF_FREE           3
183 #define RTWS_EXP_SYNC           4
184 #define RTWS_COND_GET           5
185 #define RTWS_COND_SYNC          6
186 #define RTWS_SYNC               7
187 #define RTWS_STUTTER            8
188 #define RTWS_STOPPING           9
189 static const char * const rcu_torture_writer_state_names[] = {
190         "RTWS_FIXED_DELAY",
191         "RTWS_DELAY",
192         "RTWS_REPLACE",
193         "RTWS_DEF_FREE",
194         "RTWS_EXP_SYNC",
195         "RTWS_COND_GET",
196         "RTWS_COND_SYNC",
197         "RTWS_SYNC",
198         "RTWS_STUTTER",
199         "RTWS_STOPPING",
200 };
201
202 /* Record reader segment types and duration for first failing read. */
203 struct rt_read_seg {
204         int rt_readstate;
205         unsigned long rt_delay_jiffies;
206         unsigned long rt_delay_ms;
207         unsigned long rt_delay_us;
208         bool rt_preempted;
209 };
210 static int err_segs_recorded;
211 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
212 static int rt_read_nsegs;
213
214 static const char *rcu_torture_writer_state_getname(void)
215 {
216         unsigned int i = READ_ONCE(rcu_torture_writer_state);
217
218         if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
219                 return "???";
220         return rcu_torture_writer_state_names[i];
221 }
222
223 #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
224 #define rcu_can_boost() 1
225 #else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
226 #define rcu_can_boost() 0
227 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
228
229 #ifdef CONFIG_RCU_TRACE
230 static u64 notrace rcu_trace_clock_local(void)
231 {
232         u64 ts = trace_clock_local();
233
234         (void)do_div(ts, NSEC_PER_USEC);
235         return ts;
236 }
237 #else /* #ifdef CONFIG_RCU_TRACE */
238 static u64 notrace rcu_trace_clock_local(void)
239 {
240         return 0ULL;
241 }
242 #endif /* #else #ifdef CONFIG_RCU_TRACE */
243
244 /*
245  * Stop aggressive CPU-hog tests a bit before the end of the test in order
246  * to avoid interfering with test shutdown.
247  */
248 static bool shutdown_time_arrived(void)
249 {
250         return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
251 }
252
253 static unsigned long boost_starttime;   /* jiffies of next boost test start. */
254 static DEFINE_MUTEX(boost_mutex);       /* protect setting boost_starttime */
255                                         /*  and boost task create/destroy. */
256 static atomic_t barrier_cbs_count;      /* Barrier callbacks registered. */
257 static bool barrier_phase;              /* Test phase. */
258 static atomic_t barrier_cbs_invoked;    /* Barrier callbacks invoked. */
259 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
260 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
261
262 static bool rcu_fwd_cb_nodelay;         /* Short rcu_torture_delay() delays. */
263
264 /*
265  * Allocate an element from the rcu_tortures pool.
266  */
267 static struct rcu_torture *
268 rcu_torture_alloc(void)
269 {
270         struct list_head *p;
271
272         spin_lock_bh(&rcu_torture_lock);
273         if (list_empty(&rcu_torture_freelist)) {
274                 atomic_inc(&n_rcu_torture_alloc_fail);
275                 spin_unlock_bh(&rcu_torture_lock);
276                 return NULL;
277         }
278         atomic_inc(&n_rcu_torture_alloc);
279         p = rcu_torture_freelist.next;
280         list_del_init(p);
281         spin_unlock_bh(&rcu_torture_lock);
282         return container_of(p, struct rcu_torture, rtort_free);
283 }
284
285 /*
286  * Free an element to the rcu_tortures pool.
287  */
288 static void
289 rcu_torture_free(struct rcu_torture *p)
290 {
291         atomic_inc(&n_rcu_torture_free);
292         spin_lock_bh(&rcu_torture_lock);
293         list_add_tail(&p->rtort_free, &rcu_torture_freelist);
294         spin_unlock_bh(&rcu_torture_lock);
295 }
296
297 /*
298  * Operations vector for selecting different types of tests.
299  */
300
301 struct rcu_torture_ops {
302         int ttype;
303         void (*init)(void);
304         void (*cleanup)(void);
305         int (*readlock)(void);
306         void (*read_delay)(struct torture_random_state *rrsp,
307                            struct rt_read_seg *rtrsp);
308         void (*readunlock)(int idx);
309         unsigned long (*get_gp_seq)(void);
310         unsigned long (*gp_diff)(unsigned long new, unsigned long old);
311         void (*deferred_free)(struct rcu_torture *p);
312         void (*sync)(void);
313         void (*exp_sync)(void);
314         unsigned long (*get_state)(void);
315         void (*cond_sync)(unsigned long oldstate);
316         call_rcu_func_t call;
317         void (*cb_barrier)(void);
318         void (*fqs)(void);
319         void (*stats)(void);
320         void (*gp_kthread_dbg)(void);
321         int (*stall_dur)(void);
322         int irq_capable;
323         int can_boost;
324         int extendables;
325         int slow_gps;
326         const char *name;
327 };
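/*
 * Selection note (the matching code appears later in this file): the
 * torture_type module parameter is compared against the ->name fields of the
 * flavors defined below to decide which operations vector cur_ops points at.
 */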
328
329 static struct rcu_torture_ops *cur_ops;
330
331 /*
332  * Definitions for rcu torture testing.
333  */
334
335 static int rcu_torture_read_lock(void) __acquires(RCU)
336 {
337         rcu_read_lock();
338         return 0;
339 }
340
341 static void
342 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
343 {
344         unsigned long started;
345         unsigned long completed;
346         const unsigned long shortdelay_us = 200;
347         unsigned long longdelay_ms = 300;
348         unsigned long long ts;
349
350         /* We want a short delay sometimes to make a reader delay the grace
351          * period, and we want a long delay occasionally to trigger
352          * force_quiescent_state. */
353
354         if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
355             !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
356                 started = cur_ops->get_gp_seq();
357                 ts = rcu_trace_clock_local();
358                 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
359                         longdelay_ms = 5; /* Avoid triggering BH limits. */
360                 mdelay(longdelay_ms);
361                 rtrsp->rt_delay_ms = longdelay_ms;
362                 completed = cur_ops->get_gp_seq();
363                 do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
364                                           started, completed);
365         }
366         if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
367                 udelay(shortdelay_us);
368                 rtrsp->rt_delay_us = shortdelay_us;
369         }
370         if (!preempt_count() &&
371             !(torture_random(rrsp) % (nrealreaders * 500))) {
372                 torture_preempt_schedule();  /* QS only if preemptible. */
373                 rtrsp->rt_preempted = true;
374         }
375 }
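/*
 * Back-of-the-envelope odds for the checks above, assuming torture_random()
 * is roughly uniform: with nrealreaders readers, a given call has about a
 * 1 in (nrealreaders * 600000) chance of the 300-millisecond delay, a
 * 1 in (nrealreaders * 400) chance of the 200-microsecond delay, and a
 * 1 in (nrealreaders * 500) chance of the preemption attempt.
 */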
376
377 static void rcu_torture_read_unlock(int idx) __releases(RCU)
378 {
379         rcu_read_unlock();
380 }
381
382 /*
383  * Update callback in the pipe.  This should be invoked after a grace period.
384  */
385 static bool
386 rcu_torture_pipe_update_one(struct rcu_torture *rp)
387 {
388         int i;
389
390         i = READ_ONCE(rp->rtort_pipe_count);
391         if (i > RCU_TORTURE_PIPE_LEN)
392                 i = RCU_TORTURE_PIPE_LEN;
393         atomic_inc(&rcu_torture_wcount[i]);
394         WRITE_ONCE(rp->rtort_pipe_count, i + 1);
395         if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
396                 rp->rtort_mbtest = 0;
397                 return true;
398         }
399         return false;
400 }
401
402 /*
403  * Update all callbacks in the pipe.  Suitable for synchronous grace-period
404  * primitives.
405  */
406 static void
407 rcu_torture_pipe_update(struct rcu_torture *old_rp)
408 {
409         struct rcu_torture *rp;
410         struct rcu_torture *rp1;
411
412         if (old_rp)
413                 list_add(&old_rp->rtort_free, &rcu_torture_removed);
414         list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
415                 if (rcu_torture_pipe_update_one(rp)) {
416                         list_del(&rp->rtort_free);
417                         rcu_torture_free(rp);
418                 }
419         }
420 }
421
422 static void
423 rcu_torture_cb(struct rcu_head *p)
424 {
425         struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
426
427         if (torture_must_stop_irq()) {
428                 /* Test is ending, just drop callbacks on the floor. */
429                 /* The next initialization will pick up the pieces. */
430                 return;
431         }
432         if (rcu_torture_pipe_update_one(rp))
433                 rcu_torture_free(rp);
434         else
435                 cur_ops->deferred_free(rp);
436 }
437
438 static unsigned long rcu_no_completed(void)
439 {
440         return 0;
441 }
442
443 static void rcu_torture_deferred_free(struct rcu_torture *p)
444 {
445         call_rcu(&p->rtort_rcu, rcu_torture_cb);
446 }
447
448 static void rcu_sync_torture_init(void)
449 {
450         INIT_LIST_HEAD(&rcu_torture_removed);
451 }
452
453 static struct rcu_torture_ops rcu_ops = {
454         .ttype          = RCU_FLAVOR,
455         .init           = rcu_sync_torture_init,
456         .readlock       = rcu_torture_read_lock,
457         .read_delay     = rcu_read_delay,
458         .readunlock     = rcu_torture_read_unlock,
459         .get_gp_seq     = rcu_get_gp_seq,
460         .gp_diff        = rcu_seq_diff,
461         .deferred_free  = rcu_torture_deferred_free,
462         .sync           = synchronize_rcu,
463         .exp_sync       = synchronize_rcu_expedited,
464         .get_state      = get_state_synchronize_rcu,
465         .cond_sync      = cond_synchronize_rcu,
466         .call           = call_rcu,
467         .cb_barrier     = rcu_barrier,
468         .fqs            = rcu_force_quiescent_state,
469         .stats          = NULL,
470         .gp_kthread_dbg = show_rcu_gp_kthreads,
471         .stall_dur      = rcu_jiffies_till_stall_check,
472         .irq_capable    = 1,
473         .can_boost      = rcu_can_boost(),
474         .extendables    = RCUTORTURE_MAX_EXTEND,
475         .name           = "rcu"
476 };
477
478 /*
479  * Don't even think about trying any of these in real life!!!
480  * The names includes "busted", and they really means it!
481  * The only purpose of these functions is to provide a buggy RCU
482  * implementation to make sure that rcutorture correctly emits
483  * buggy-RCU error messages.
484  */
485 static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
486 {
487         /* This is a deliberate bug for testing purposes only! */
488         rcu_torture_cb(&p->rtort_rcu);
489 }
490
491 static void synchronize_rcu_busted(void)
492 {
493         /* This is a deliberate bug for testing purposes only! */
494 }
495
496 static void
497 call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
498 {
499         /* This is a deliberate bug for testing purposes only! */
500         func(head);
501 }
502
503 static struct rcu_torture_ops rcu_busted_ops = {
504         .ttype          = INVALID_RCU_FLAVOR,
505         .init           = rcu_sync_torture_init,
506         .readlock       = rcu_torture_read_lock,
507         .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
508         .readunlock     = rcu_torture_read_unlock,
509         .get_gp_seq     = rcu_no_completed,
510         .deferred_free  = rcu_busted_torture_deferred_free,
511         .sync           = synchronize_rcu_busted,
512         .exp_sync       = synchronize_rcu_busted,
513         .call           = call_rcu_busted,
514         .cb_barrier     = NULL,
515         .fqs            = NULL,
516         .stats          = NULL,
517         .irq_capable    = 1,
518         .name           = "busted"
519 };
520
521 /*
522  * Definitions for srcu torture testing.
523  */
524
525 DEFINE_STATIC_SRCU(srcu_ctl);
526 static struct srcu_struct srcu_ctld;
527 static struct srcu_struct *srcu_ctlp = &srcu_ctl;
528
529 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
530 {
531         return srcu_read_lock(srcu_ctlp);
532 }
533
534 static void
535 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
536 {
537         long delay;
538         const long uspertick = 1000000 / HZ;
539         const long longdelay = 10;
540
541         /* We want there to be long-running readers, but not all the time. */
542
543         delay = torture_random(rrsp) %
544                 (nrealreaders * 2 * longdelay * uspertick);
545         if (!delay && in_task()) {
546                 schedule_timeout_interruptible(longdelay);
547                 rtrsp->rt_delay_jiffies = longdelay;
548         } else {
549                 rcu_read_delay(rrsp, rtrsp);
550         }
551 }
552
553 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
554 {
555         srcu_read_unlock(srcu_ctlp, idx);
556 }
557
558 static unsigned long srcu_torture_completed(void)
559 {
560         return srcu_batches_completed(srcu_ctlp);
561 }
562
563 static void srcu_torture_deferred_free(struct rcu_torture *rp)
564 {
565         call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
566 }
567
568 static void srcu_torture_synchronize(void)
569 {
570         synchronize_srcu(srcu_ctlp);
571 }
572
573 static void srcu_torture_call(struct rcu_head *head,
574                               rcu_callback_t func)
575 {
576         call_srcu(srcu_ctlp, head, func);
577 }
578
579 static void srcu_torture_barrier(void)
580 {
581         srcu_barrier(srcu_ctlp);
582 }
583
584 static void srcu_torture_stats(void)
585 {
586         srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
587 }
588
589 static void srcu_torture_synchronize_expedited(void)
590 {
591         synchronize_srcu_expedited(srcu_ctlp);
592 }
593
594 static struct rcu_torture_ops srcu_ops = {
595         .ttype          = SRCU_FLAVOR,
596         .init           = rcu_sync_torture_init,
597         .readlock       = srcu_torture_read_lock,
598         .read_delay     = srcu_read_delay,
599         .readunlock     = srcu_torture_read_unlock,
600         .get_gp_seq     = srcu_torture_completed,
601         .deferred_free  = srcu_torture_deferred_free,
602         .sync           = srcu_torture_synchronize,
603         .exp_sync       = srcu_torture_synchronize_expedited,
604         .call           = srcu_torture_call,
605         .cb_barrier     = srcu_torture_barrier,
606         .stats          = srcu_torture_stats,
607         .irq_capable    = 1,
608         .name           = "srcu"
609 };
610
611 static void srcu_torture_init(void)
612 {
613         rcu_sync_torture_init();
614         WARN_ON(init_srcu_struct(&srcu_ctld));
615         srcu_ctlp = &srcu_ctld;
616 }
617
618 static void srcu_torture_cleanup(void)
619 {
620         cleanup_srcu_struct(&srcu_ctld);
621         srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
622 }
623
624 /* As above, but dynamically allocated. */
625 static struct rcu_torture_ops srcud_ops = {
626         .ttype          = SRCU_FLAVOR,
627         .init           = srcu_torture_init,
628         .cleanup        = srcu_torture_cleanup,
629         .readlock       = srcu_torture_read_lock,
630         .read_delay     = srcu_read_delay,
631         .readunlock     = srcu_torture_read_unlock,
632         .get_gp_seq     = srcu_torture_completed,
633         .deferred_free  = srcu_torture_deferred_free,
634         .sync           = srcu_torture_synchronize,
635         .exp_sync       = srcu_torture_synchronize_expedited,
636         .call           = srcu_torture_call,
637         .cb_barrier     = srcu_torture_barrier,
638         .stats          = srcu_torture_stats,
639         .irq_capable    = 1,
640         .name           = "srcud"
641 };
642
643 /* As above, but broken due to inappropriate reader extension. */
644 static struct rcu_torture_ops busted_srcud_ops = {
645         .ttype          = SRCU_FLAVOR,
646         .init           = srcu_torture_init,
647         .cleanup        = srcu_torture_cleanup,
648         .readlock       = srcu_torture_read_lock,
649         .read_delay     = rcu_read_delay,
650         .readunlock     = srcu_torture_read_unlock,
651         .get_gp_seq     = srcu_torture_completed,
652         .deferred_free  = srcu_torture_deferred_free,
653         .sync           = srcu_torture_synchronize,
654         .exp_sync       = srcu_torture_synchronize_expedited,
655         .call           = srcu_torture_call,
656         .cb_barrier     = srcu_torture_barrier,
657         .stats          = srcu_torture_stats,
658         .irq_capable    = 1,
659         .extendables    = RCUTORTURE_MAX_EXTEND,
660         .name           = "busted_srcud"
661 };
662
663 /*
664  * Definitions for RCU-tasks torture testing.
665  */
666
667 static int tasks_torture_read_lock(void)
668 {
669         return 0;
670 }
671
672 static void tasks_torture_read_unlock(int idx)
673 {
674 }
675
676 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
677 {
678         call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
679 }
680
681 static void synchronize_rcu_mult_test(void)
682 {
683         synchronize_rcu_mult(call_rcu_tasks, call_rcu);
684 }
685
686 static struct rcu_torture_ops tasks_ops = {
687         .ttype          = RCU_TASKS_FLAVOR,
688         .init           = rcu_sync_torture_init,
689         .readlock       = tasks_torture_read_lock,
690         .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
691         .readunlock     = tasks_torture_read_unlock,
692         .get_gp_seq     = rcu_no_completed,
693         .deferred_free  = rcu_tasks_torture_deferred_free,
694         .sync           = synchronize_rcu_tasks,
695         .exp_sync       = synchronize_rcu_mult_test,
696         .call           = call_rcu_tasks,
697         .cb_barrier     = rcu_barrier_tasks,
698         .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
699         .fqs            = NULL,
700         .stats          = NULL,
701         .irq_capable    = 1,
702         .slow_gps       = 1,
703         .name           = "tasks"
704 };
705
706 /*
707  * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
708  * This implementation does not necessarily work well with CPU hotplug.
709  */
710
711 static void synchronize_rcu_trivial(void)
712 {
713         int cpu;
714
715         for_each_online_cpu(cpu) {
716                 rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
717                 WARN_ON_ONCE(raw_smp_processor_id() != cpu);
718         }
719 }
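/*
 * Informal reasoning for the loop above: the trivial flavor's readers only
 * disable preemption, so on a CONFIG_PREEMPT=n kernel a reader cannot be
 * preempted mid-critical-section.  Migrating this thread onto every online
 * CPU in turn therefore forces each CPU through a context switch, which can
 * only happen outside any such reader, acting as a grace period.
 */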
720
721 static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
722 {
723         preempt_disable();
724         return 0;
725 }
726
727 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
728 {
729         preempt_enable();
730 }
731
732 static struct rcu_torture_ops trivial_ops = {
733         .ttype          = RCU_TRIVIAL_FLAVOR,
734         .init           = rcu_sync_torture_init,
735         .readlock       = rcu_torture_read_lock_trivial,
736         .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
737         .readunlock     = rcu_torture_read_unlock_trivial,
738         .get_gp_seq     = rcu_no_completed,
739         .sync           = synchronize_rcu_trivial,
740         .exp_sync       = synchronize_rcu_trivial,
741         .fqs            = NULL,
742         .stats          = NULL,
743         .irq_capable    = 1,
744         .name           = "trivial"
745 };
746
747 /*
748  * Definitions for rude RCU-tasks torture testing.
749  */
750
751 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
752 {
753         call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
754 }
755
756 static struct rcu_torture_ops tasks_rude_ops = {
757         .ttype          = RCU_TASKS_RUDE_FLAVOR,
758         .init           = rcu_sync_torture_init,
759         .readlock       = rcu_torture_read_lock_trivial,
760         .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
761         .readunlock     = rcu_torture_read_unlock_trivial,
762         .get_gp_seq     = rcu_no_completed,
763         .deferred_free  = rcu_tasks_rude_torture_deferred_free,
764         .sync           = synchronize_rcu_tasks_rude,
765         .exp_sync       = synchronize_rcu_tasks_rude,
766         .call           = call_rcu_tasks_rude,
767         .cb_barrier     = rcu_barrier_tasks_rude,
768         .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
769         .fqs            = NULL,
770         .stats          = NULL,
771         .irq_capable    = 1,
772         .name           = "tasks-rude"
773 };
774
775 /*
776  * Definitions for tracing RCU-tasks torture testing.
777  */
778
779 static int tasks_tracing_torture_read_lock(void)
780 {
781         rcu_read_lock_trace();
782         return 0;
783 }
784
785 static void tasks_tracing_torture_read_unlock(int idx)
786 {
787         rcu_read_unlock_trace();
788 }
789
790 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
791 {
792         call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
793 }
794
795 static struct rcu_torture_ops tasks_tracing_ops = {
796         .ttype          = RCU_TASKS_TRACING_FLAVOR,
797         .init           = rcu_sync_torture_init,
798         .readlock       = tasks_tracing_torture_read_lock,
799         .read_delay     = srcu_read_delay,  /* just reuse srcu's version. */
800         .readunlock     = tasks_tracing_torture_read_unlock,
801         .get_gp_seq     = rcu_no_completed,
802         .deferred_free  = rcu_tasks_tracing_torture_deferred_free,
803         .sync           = synchronize_rcu_tasks_trace,
804         .exp_sync       = synchronize_rcu_tasks_trace,
805         .call           = call_rcu_tasks_trace,
806         .cb_barrier     = rcu_barrier_tasks_trace,
807         .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
808         .fqs            = NULL,
809         .stats          = NULL,
810         .irq_capable    = 1,
811         .slow_gps       = 1,
812         .name           = "tasks-tracing"
813 };
814
815 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
816 {
817         if (!cur_ops->gp_diff)
818                 return new - old;
819         return cur_ops->gp_diff(new, old);
820 }
821
822 static bool __maybe_unused torturing_tasks(void)
823 {
824         return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
825 }
826
827 /*
828  * RCU torture priority-boost testing.  Runs one real-time thread per
829  * CPU for moderate bursts, repeatedly registering RCU callbacks and
830  * spinning waiting for them to be invoked.  If a given callback takes
831  * too long to be invoked, we assume that priority inversion has occurred.
832  */
833
834 struct rcu_boost_inflight {
835         struct rcu_head rcu;
836         int inflight;
837 };
838
839 static void rcu_torture_boost_cb(struct rcu_head *head)
840 {
841         struct rcu_boost_inflight *rbip =
842                 container_of(head, struct rcu_boost_inflight, rcu);
843
844         /* Ensure RCU-core accesses precede clearing ->inflight */
845         smp_store_release(&rbip->inflight, 0);
846 }
847
848 static int old_rt_runtime = -1;
849
850 static void rcu_torture_disable_rt_throttle(void)
851 {
852         /*
853          * Disable RT throttling so that rcutorture's boost threads don't get
854          * throttled. This is only possible if rcutorture is built-in; otherwise, the
855          * user should manually do this by setting the sched_rt_period_us and
856          * sched_rt_runtime sysctls.
857          */
858         if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
859                 return;
860
861         old_rt_runtime = sysctl_sched_rt_runtime;
862         sysctl_sched_rt_runtime = -1;
863 }
864
865 static void rcu_torture_enable_rt_throttle(void)
866 {
867         if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
868                 return;
869
870         sysctl_sched_rt_runtime = old_rt_runtime;
871         old_rt_runtime = -1;
872 }
873
874 static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
875 {
876         if (end - start > test_boost_duration * HZ - HZ / 2) {
877                 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
878                 n_rcu_torture_boost_failure++;
879
880                 return true; /* failed */
881         }
882
883         return false; /* passed */
884 }
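/*
 * Threshold arithmetic for the check above: with the default
 * test_boost_duration of 4 seconds, a callback still pending after more than
 * test_boost_duration * HZ - HZ / 2 jiffies (3.5 seconds) counts as a boost
 * failure, on the assumption that priority boosting should have let it be
 * invoked well within the test interval.
 */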
885
886 static int rcu_torture_boost(void *arg)
887 {
888         unsigned long call_rcu_time;
889         unsigned long endtime;
890         unsigned long oldstarttime;
891         struct rcu_boost_inflight rbi = { .inflight = 0 };
892
893         VERBOSE_TOROUT_STRING("rcu_torture_boost started");
894
895         /* Set real-time priority. */
896         sched_set_fifo_low(current);
897
898         init_rcu_head_on_stack(&rbi.rcu);
899         /* Each pass through the following loop does one boost-test cycle. */
900         do {
901                 /* Track whether the test has already failed in this test interval. */
902                 bool failed = false;
903
904                 /* Increment n_rcu_torture_boosts once per boost-test */
905                 while (!kthread_should_stop()) {
906                         if (mutex_trylock(&boost_mutex)) {
907                                 n_rcu_torture_boosts++;
908                                 mutex_unlock(&boost_mutex);
909                                 break;
910                         }
911                         schedule_timeout_uninterruptible(1);
912                 }
913                 if (kthread_should_stop())
914                         goto checkwait;
915
916                 /* Wait for the next test interval. */
917                 oldstarttime = boost_starttime;
918                 while (time_before(jiffies, oldstarttime)) {
919                         schedule_timeout_interruptible(oldstarttime - jiffies);
920                         if (stutter_wait("rcu_torture_boost"))
921                                 sched_set_fifo_low(current);
922                         if (torture_must_stop())
923                                 goto checkwait;
924                 }
925
926                 /* Do one boost-test interval. */
927                 endtime = oldstarttime + test_boost_duration * HZ;
928                 call_rcu_time = jiffies;
929                 while (time_before(jiffies, endtime)) {
930                         /* If we don't have a callback in flight, post one. */
931                         if (!smp_load_acquire(&rbi.inflight)) {
932                                 /* RCU core before ->inflight = 1. */
933                                 smp_store_release(&rbi.inflight, 1);
934                                 call_rcu(&rbi.rcu, rcu_torture_boost_cb);
935                                 /* Check if the boost test failed */
936                                 failed = failed ||
937                                          rcu_torture_boost_failed(call_rcu_time,
938                                                                  jiffies);
939                                 call_rcu_time = jiffies;
940                         }
941                         if (stutter_wait("rcu_torture_boost"))
942                                 sched_set_fifo_low(current);
943                         if (torture_must_stop())
944                                 goto checkwait;
945                 }
946
947                 /*
948                  * If boosting never happened, then ->inflight will still be 1; in
949                  * that case the boost check in the above loop never ran, so do
950                  * another one here.
951                  */
952                 if (!failed && smp_load_acquire(&rbi.inflight))
953                         rcu_torture_boost_failed(call_rcu_time, jiffies);
954
955                 /*
956                  * Set the start time of the next test interval.
957                  * Yes, this is vulnerable to long delays, but such
958                  * delays simply cause a false negative for the next
959                  * interval.  Besides, we are running at RT priority,
960                  * so delays should be relatively rare.
961                  */
962                 while (oldstarttime == boost_starttime &&
963                        !kthread_should_stop()) {
964                         if (mutex_trylock(&boost_mutex)) {
965                                 boost_starttime = jiffies +
966                                                   test_boost_interval * HZ;
967                                 mutex_unlock(&boost_mutex);
968                                 break;
969                         }
970                         schedule_timeout_uninterruptible(1);
971                 }
972
973                 /* Go do the stutter. */
974 checkwait:      if (stutter_wait("rcu_torture_boost"))
975                         sched_set_fifo_low(current);
976         } while (!torture_must_stop());
977
978         /* Clean up and exit. */
979         while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
980                 torture_shutdown_absorb("rcu_torture_boost");
981                 schedule_timeout_uninterruptible(1);
982         }
983         destroy_rcu_head_on_stack(&rbi.rcu);
984         torture_kthread_stopping("rcu_torture_boost");
985         return 0;
986 }
987
988 /*
989  * RCU torture force-quiescent-state kthread.  Repeatedly induces
990  * bursts of calls to force_quiescent_state(), increasing the probability
991  * of occurrence of some important types of race conditions.
992  */
993 static int
994 rcu_torture_fqs(void *arg)
995 {
996         unsigned long fqs_resume_time;
997         int fqs_burst_remaining;
998         int oldnice = task_nice(current);
999
1000         VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
1001         do {
1002                 fqs_resume_time = jiffies + fqs_stutter * HZ;
1003                 while (time_before(jiffies, fqs_resume_time) &&
1004                        !kthread_should_stop()) {
1005                         schedule_timeout_interruptible(1);
1006                 }
1007                 fqs_burst_remaining = fqs_duration;
1008                 while (fqs_burst_remaining > 0 &&
1009                        !kthread_should_stop()) {
1010                         cur_ops->fqs();
1011                         udelay(fqs_holdoff);
1012                         fqs_burst_remaining -= fqs_holdoff;
1013                 }
1014                 if (stutter_wait("rcu_torture_fqs"))
1015                         sched_set_normal(current, oldnice);
1016         } while (!torture_must_stop());
1017         torture_kthread_stopping("rcu_torture_fqs");
1018         return 0;
1019 }
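/*
 * A unit note on the burst loop above, assuming a nonzero fqs_holdoff: both
 * fqs_duration and fqs_holdoff are in microseconds, so each burst issues
 * roughly fqs_duration / fqs_holdoff force-quiescent-state calls spaced
 * fqs_holdoff microseconds apart, and a new burst starts roughly every
 * fqs_stutter seconds.
 */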
1020
1021 /*
1022  * RCU torture writer kthread.  Repeatedly substitutes a new structure
1023  * for that pointed to by rcu_torture_current, freeing the old structure
1024  * after a series of grace periods (the "pipeline").
1025  */
1026 static int
1027 rcu_torture_writer(void *arg)
1028 {
1029         bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
1030         int expediting = 0;
1031         unsigned long gp_snap;
1032         bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
1033         bool gp_sync1 = gp_sync;
1034         int i;
1035         int oldnice = task_nice(current);
1036         struct rcu_torture *rp;
1037         struct rcu_torture *old_rp;
1038         static DEFINE_TORTURE_RANDOM(rand);
1039         bool stutter_waited;
1040         int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
1041                            RTWS_COND_GET, RTWS_SYNC };
1042         int nsynctypes = 0;
1043
1044         VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1045         if (!can_expedite)
1046                 pr_alert("%s" TORTURE_FLAG
1047                          " GP expediting controlled from boot/sysfs for %s.\n",
1048                          torture_type, cur_ops->name);
1049
1050         /* Initialize synctype[] array.  If none set, take default. */
1051         if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
1052                 gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
1053         if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
1054                 synctype[nsynctypes++] = RTWS_COND_GET;
1055                 pr_info("%s: Testing conditional GPs.\n", __func__);
1056         } else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
1057                 pr_alert("%s: gp_cond without primitives.\n", __func__);
1058         }
1059         if (gp_exp1 && cur_ops->exp_sync) {
1060                 synctype[nsynctypes++] = RTWS_EXP_SYNC;
1061                 pr_info("%s: Testing expedited GPs.\n", __func__);
1062         } else if (gp_exp && !cur_ops->exp_sync) {
1063                 pr_alert("%s: gp_exp without primitives.\n", __func__);
1064         }
1065         if (gp_normal1 && cur_ops->deferred_free) {
1066                 synctype[nsynctypes++] = RTWS_DEF_FREE;
1067                 pr_info("%s: Testing asynchronous GPs.\n", __func__);
1068         } else if (gp_normal && !cur_ops->deferred_free) {
1069                 pr_alert("%s: gp_normal without primitives.\n", __func__);
1070         }
1071         if (gp_sync1 && cur_ops->sync) {
1072                 synctype[nsynctypes++] = RTWS_SYNC;
1073                 pr_info("%s: Testing normal GPs.\n", __func__);
1074         } else if (gp_sync && !cur_ops->sync) {
1075                 pr_alert("%s: gp_sync without primitives.\n", __func__);
1076         }
1077         if (WARN_ONCE(nsynctypes == 0,
1078                       "rcu_torture_writer: No update-side primitives.\n")) {
1079                 /*
1080                  * No update-side primitives, so don't try updating.
1081                  * The resulting test won't be testing much, hence the
1082                  * above WARN_ONCE().
1083                  */
1084                 rcu_torture_writer_state = RTWS_STOPPING;
1085                 torture_kthread_stopping("rcu_torture_writer");
1086         }
1087
1088         do {
1089                 rcu_torture_writer_state = RTWS_FIXED_DELAY;
1090                 schedule_timeout_uninterruptible(1);
1091                 rp = rcu_torture_alloc();
1092                 if (rp == NULL)
1093                         continue;
1094                 rp->rtort_pipe_count = 0;
1095                 rcu_torture_writer_state = RTWS_DELAY;
1096                 udelay(torture_random(&rand) & 0x3ff);
1097                 rcu_torture_writer_state = RTWS_REPLACE;
1098                 old_rp = rcu_dereference_check(rcu_torture_current,
1099                                                current == writer_task);
1100                 rp->rtort_mbtest = 1;
1101                 rcu_assign_pointer(rcu_torture_current, rp);
1102                 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1103                 if (old_rp) {
1104                         i = old_rp->rtort_pipe_count;
1105                         if (i > RCU_TORTURE_PIPE_LEN)
1106                                 i = RCU_TORTURE_PIPE_LEN;
1107                         atomic_inc(&rcu_torture_wcount[i]);
1108                         WRITE_ONCE(old_rp->rtort_pipe_count,
1109                                    old_rp->rtort_pipe_count + 1);
1110                         switch (synctype[torture_random(&rand) % nsynctypes]) {
1111                         case RTWS_DEF_FREE:
1112                                 rcu_torture_writer_state = RTWS_DEF_FREE;
1113                                 cur_ops->deferred_free(old_rp);
1114                                 break;
1115                         case RTWS_EXP_SYNC:
1116                                 rcu_torture_writer_state = RTWS_EXP_SYNC;
1117                                 cur_ops->exp_sync();
1118                                 rcu_torture_pipe_update(old_rp);
1119                                 break;
1120                         case RTWS_COND_GET:
1121                                 rcu_torture_writer_state = RTWS_COND_GET;
1122                                 gp_snap = cur_ops->get_state();
1123                                 i = torture_random(&rand) % 16;
1124                                 if (i != 0)
1125                                         schedule_timeout_interruptible(i);
1126                                 udelay(torture_random(&rand) % 1000);
1127                                 rcu_torture_writer_state = RTWS_COND_SYNC;
1128                                 cur_ops->cond_sync(gp_snap);
1129                                 rcu_torture_pipe_update(old_rp);
1130                                 break;
1131                         case RTWS_SYNC:
1132                                 rcu_torture_writer_state = RTWS_SYNC;
1133                                 cur_ops->sync();
1134                                 rcu_torture_pipe_update(old_rp);
1135                                 break;
1136                         default:
1137                                 WARN_ON_ONCE(1);
1138                                 break;
1139                         }
1140                 }
1141                 WRITE_ONCE(rcu_torture_current_version,
1142                            rcu_torture_current_version + 1);
1143                 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1144                 if (can_expedite &&
1145                     !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1146                         WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1147                         if (expediting >= 0)
1148                                 rcu_expedite_gp();
1149                         else
1150                                 rcu_unexpedite_gp();
1151                         if (++expediting > 3)
1152                                 expediting = -expediting;
1153                 } else if (!can_expedite) { /* Disabled during boot, recheck. */
1154                         can_expedite = !rcu_gp_is_expedited() &&
1155                                        !rcu_gp_is_normal();
1156                 }
1157                 rcu_torture_writer_state = RTWS_STUTTER;
1158                 stutter_waited = stutter_wait("rcu_torture_writer");
1159                 if (stutter_waited &&
1160                     !READ_ONCE(rcu_fwd_cb_nodelay) &&
1161                     !cur_ops->slow_gps &&
1162                     !torture_must_stop() &&
1163                     rcu_inkernel_boot_has_ended())
1164                         for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1165                                 if (list_empty(&rcu_tortures[i].rtort_free) &&
1166                                     rcu_access_pointer(rcu_torture_current) !=
1167                                     &rcu_tortures[i]) {
1168                                         rcu_ftrace_dump(DUMP_ALL);
1169                                         WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1170                                 }
1171                 if (stutter_waited)
1172                         sched_set_normal(current, oldnice);
1173         } while (!torture_must_stop());
1174         rcu_torture_current = NULL;  // Let stats task know that we are done.
1175         /* Reset expediting back to unexpedited. */
1176         if (expediting > 0)
1177                 expediting = -expediting;
1178         while (can_expedite && expediting++ < 0)
1179                 rcu_unexpedite_gp();
1180         WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1181         if (!can_expedite)
1182                 pr_alert("%s" TORTURE_FLAG
1183                          " Dynamic grace-period expediting was disabled.\n",
1184                          torture_type);
1185         rcu_torture_writer_state = RTWS_STOPPING;
1186         torture_kthread_stopping("rcu_torture_writer");
1187         return 0;
1188 }
1189
1190 /*
1191  * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
1192  * delay between calls.
1193  */
1194 static int
1195 rcu_torture_fakewriter(void *arg)
1196 {
1197         DEFINE_TORTURE_RANDOM(rand);
1198
1199         VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1200         set_user_nice(current, MAX_NICE);
1201
1202         do {
1203                 schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
1204                 udelay(torture_random(&rand) & 0x3ff);
1205                 if (cur_ops->cb_barrier != NULL &&
1206                     torture_random(&rand) % (nfakewriters * 8) == 0) {
1207                         cur_ops->cb_barrier();
1208                 } else if (gp_normal == gp_exp) {
1209                         if (cur_ops->sync && torture_random(&rand) & 0x80)
1210                                 cur_ops->sync();
1211                         else if (cur_ops->exp_sync)
1212                                 cur_ops->exp_sync();
1213                 } else if (gp_normal && cur_ops->sync) {
1214                         cur_ops->sync();
1215                 } else if (cur_ops->exp_sync) {
1216                         cur_ops->exp_sync();
1217                 }
1218                 stutter_wait("rcu_torture_fakewriter");
1219         } while (!torture_must_stop());
1220
1221         torture_kthread_stopping("rcu_torture_fakewriter");
1222         return 0;
1223 }
1224
1225 static void rcu_torture_timer_cb(struct rcu_head *rhp)
1226 {
1227         kfree(rhp);
1228 }
1229
1230 /*
1231  * Do one extension of an RCU read-side critical section using the
1232  * current reader state in readstate (set to zero for initial entry
1233  * to extended critical section), set the new state as specified by
1234  * newstate (set to zero for final exit from extended critical section),
1235  * and random-number-generator state in trsp.  If this is neither the
1236  * beginning or end of the critical section and if there was actually a
1237  * beginning nor the end of the critical section and if there was actually a
1238  */
1239 static void rcutorture_one_extend(int *readstate, int newstate,
1240                                   struct torture_random_state *trsp,
1241                                   struct rt_read_seg *rtrsp)
1242 {
1243         unsigned long flags;
1244         int idxnew = -1;
1245         int idxold = *readstate;
1246         int statesnew = ~*readstate & newstate;
1247         int statesold = *readstate & ~newstate;
1248
1249         WARN_ON_ONCE(idxold < 0);
1250         WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
1251         rtrsp->rt_readstate = newstate;
1252
1253         /* First, put new protection in place to avoid critical-section gap. */
1254         if (statesnew & RCUTORTURE_RDR_BH)
1255                 local_bh_disable();
1256         if (statesnew & RCUTORTURE_RDR_IRQ)
1257                 local_irq_disable();
1258         if (statesnew & RCUTORTURE_RDR_PREEMPT)
1259                 preempt_disable();
1260         if (statesnew & RCUTORTURE_RDR_RBH)
1261                 rcu_read_lock_bh();
1262         if (statesnew & RCUTORTURE_RDR_SCHED)
1263                 rcu_read_lock_sched();
1264         if (statesnew & RCUTORTURE_RDR_RCU)
1265                 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1266
1267         /* Next, remove old protection, irq first due to bh conflict. */
1268         if (statesold & RCUTORTURE_RDR_IRQ)
1269                 local_irq_enable();
1270         if (statesold & RCUTORTURE_RDR_BH)
1271                 local_bh_enable();
1272         if (statesold & RCUTORTURE_RDR_PREEMPT)
1273                 preempt_enable();
1274         if (statesold & RCUTORTURE_RDR_RBH)
1275                 rcu_read_unlock_bh();
1276         if (statesold & RCUTORTURE_RDR_SCHED)
1277                 rcu_read_unlock_sched();
1278         if (statesold & RCUTORTURE_RDR_RCU) {
1279                 bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
1280
1281                 if (lockit)
1282                         raw_spin_lock_irqsave(&current->pi_lock, flags);
1283                 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
1284                 if (lockit)
1285                         raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1286         }
1287
1288         /* Delay if neither beginning nor end and there was a change. */
1289         if ((statesnew || statesold) && *readstate && newstate)
1290                 cur_ops->read_delay(trsp, rtrsp);
1291
1292         /* Update the reader state. */
1293         if (idxnew == -1)
1294                 idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1295         WARN_ON_ONCE(idxnew < 0);
1296         WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1297         *readstate = idxnew | newstate;
1298         WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1299         WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1300 }
1301
1302 /* Return the biggest extendables mask given current RCU and boot parameters. */
1303 static int rcutorture_extend_mask_max(void)
1304 {
1305         int mask;
1306
1307         WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1308         mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1309         mask = mask | RCUTORTURE_RDR_RCU;
1310         return mask;
1311 }
1312
1313 /* Return a random protection state mask, but with at least one bit set. */
1314 static int
1315 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1316 {
1317         int mask = rcutorture_extend_mask_max();
1318         unsigned long randmask1 = torture_random(trsp) >> 8;
1319         unsigned long randmask2 = randmask1 >> 3;
1320
1321         WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
1322         /* Mostly only one bit (need preemption!), sometimes lots of bits. */
1323         if (!(randmask1 & 0x7))
1324                 mask = mask & randmask2;
1325         else
1326                 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1327         /* Can't enable bh w/irq disabled. */
1328         if ((mask & RCUTORTURE_RDR_IRQ) &&
1329             ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
1330              (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
1331                 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1332         return mask ?: RCUTORTURE_RDR_RCU;
1333 }
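/*
 * Rough odds for the choice above, assuming uniform random bits: about one
 * call in eight keeps a many-bit mask (randmask1 & 0x7 == 0), while the other
 * seven of eight reduce it to at most a single extension bit chosen from the
 * RCUTORTURE_RDR_NBITS flag positions; the irq/bh fixup then keeps
 * local_bh_enable() from ever being called with interrupts disabled.
 */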
1334
1335 /*
1336  * Do a randomly selected number of extensions of an existing RCU read-side
1337  * critical section.
1338  */
1339 static struct rt_read_seg *
1340 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1341                        struct rt_read_seg *rtrsp)
1342 {
1343         int i;
1344         int j;
1345         int mask = rcutorture_extend_mask_max();
1346
1347         WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1348         if (!((mask - 1) & mask))
1349                 return rtrsp;  /* Current RCU reader not extendable. */
1350         /* Bias towards larger numbers of loops. */
1351         i = (torture_random(trsp) >> 3);
1352         i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1353         for (j = 0; j < i; j++) {
1354                 mask = rcutorture_extend_mask(*readstate, trsp);
1355                 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1356         }
1357         return &rtrsp[j];
1358 }
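/*
 * A note on the bias above: folding the random value with (i | (i >> 3))
 * before masking with RCUTORTURE_RDR_MAX_LOOPS tends to set more of the
 * low-order bits, so larger loop counts are more likely than smaller ones
 * and most readers get several extension segments per critical section.
 */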
1359
1360 /*
1361  * Do one read-side critical section, returning false if there was
1362  * no data to read.  Can be invoked both from process context and
1363  * from a timer handler.
1364  */
1365 static bool rcu_torture_one_read(struct torture_random_state *trsp)
1366 {
1367         int i;
1368         unsigned long started;
1369         unsigned long completed;
1370         int newstate;
1371         struct rcu_torture *p;
1372         int pipe_count;
1373         int readstate = 0;
1374         struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1375         struct rt_read_seg *rtrsp = &rtseg[0];
1376         struct rt_read_seg *rtrsp1;
1377         unsigned long long ts;
1378
1379         WARN_ON_ONCE(!rcu_is_watching());
1380         newstate = rcutorture_extend_mask(readstate, trsp);
1381         rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1382         started = cur_ops->get_gp_seq();
1383         ts = rcu_trace_clock_local();
1384         p = rcu_dereference_check(rcu_torture_current,
1385                                   rcu_read_lock_bh_held() ||
1386                                   rcu_read_lock_sched_held() ||
1387                                   srcu_read_lock_held(srcu_ctlp) ||
1388                                   rcu_read_lock_trace_held() ||
1389                                   torturing_tasks());
1390         if (p == NULL) {
1391                 /* Wait for rcu_torture_writer to get underway */
1392                 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1393                 return false;
1394         }
1395         if (p->rtort_mbtest == 0)
1396                 atomic_inc(&n_rcu_torture_mberror);
1397         rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1398         preempt_disable();
1399         pipe_count = READ_ONCE(p->rtort_pipe_count);
1400         if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1401                 /* Should not happen, but... */
1402                 pipe_count = RCU_TORTURE_PIPE_LEN;
1403         }
1404         completed = cur_ops->get_gp_seq();
1405         if (pipe_count > 1) {
1406                 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1407                                           ts, started, completed);
1408                 rcu_ftrace_dump(DUMP_ALL);
1409         }
1410         __this_cpu_inc(rcu_torture_count[pipe_count]);
1411         completed = rcutorture_seq_diff(completed, started);
1412         if (completed > RCU_TORTURE_PIPE_LEN) {
1413                 /* Should not happen, but... */
1414                 completed = RCU_TORTURE_PIPE_LEN;
1415         }
1416         __this_cpu_inc(rcu_torture_batch[completed]);
1417         preempt_enable();
1418         rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1419         WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
1420         // This next splat is expected behavior if leakpointer, especially
1421         // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
1422         WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
1423
1424         /* If error or close call, record the sequence of reader protections. */
1425         if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1426                 i = 0;
1427                 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1428                         err_segs[i++] = *rtrsp1;
1429                 rt_read_nsegs = i;
1430         }
1431
1432         return true;
1433 }
1434
1435 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1436
1437 /*
1438  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
1439  * incrementing the corresponding element of the pipeline array.  The
1440  * counter in the element should never be greater than 1, otherwise, the
1441  * RCU implementation is broken.
1442  */
1443 static void rcu_torture_timer(struct timer_list *unused)
1444 {
1445         atomic_long_inc(&n_rcu_torture_timers);
1446         (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
1447
1448         /* Test call_rcu() invocation from interrupt handler. */
1449         if (cur_ops->call) {
1450                 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1451
1452                 if (rhp)
1453                         cur_ops->call(rhp, rcu_torture_timer_cb);
1454         }
1455 }
1456
1457 /*
1458  * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
1459  * incrementing the corresponding element of the pipeline array.  The
1460  * counter in the element should never be greater than 1, otherwise, the
1461  * RCU implementation is broken.
1462  */
1463 static int
1464 rcu_torture_reader(void *arg)
1465 {
1466         unsigned long lastsleep = jiffies;
1467         long myid = (long)arg;
1468         int mynumonline = myid;
1469         DEFINE_TORTURE_RANDOM(rand);
1470         struct timer_list t;
1471
1472         VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1473         set_user_nice(current, MAX_NICE);
1474         if (irqreader && cur_ops->irq_capable)
1475                 timer_setup_on_stack(&t, rcu_torture_timer, 0);
1476         tick_dep_set_task(current, TICK_DEP_BIT_RCU);
1477         do {
1478                 if (irqreader && cur_ops->irq_capable) {
1479                         if (!timer_pending(&t))
1480                                 mod_timer(&t, jiffies + 1);
1481                 }
1482                 if (!rcu_torture_one_read(&rand) && !torture_must_stop())
1483                         schedule_timeout_interruptible(HZ);
1484                 if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
1485                         schedule_timeout_interruptible(1);
1486                         lastsleep = jiffies + 10;
1487                 }
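                     /* Readers whose index exceeds the number of online CPUs wait for CPUs to come back online. */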
1488                 while (num_online_cpus() < mynumonline && !torture_must_stop())
1489                         schedule_timeout_interruptible(HZ / 5);
1490                 stutter_wait("rcu_torture_reader");
1491         } while (!torture_must_stop());
1492         if (irqreader && cur_ops->irq_capable) {
1493                 del_timer_sync(&t);
1494                 destroy_timer_on_stack(&t);
1495         }
1496         tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
1497         torture_kthread_stopping("rcu_torture_reader");
1498         return 0;
1499 }
1500
1501 /*
1502  * Print torture statistics.  Caller must ensure that there is only
1503  * one call to this function at a given time!!!  This is normally
1504  * accomplished by relying on the module system to only have one copy
1505  * of the module loaded, and then by giving the rcu_torture_stats
1506  * kthread full control (or the init/cleanup functions when rcu_torture_stats
1507  * thread is not running).
1508  */
1509 static void
1510 rcu_torture_stats_print(void)
1511 {
1512         int cpu;
1513         int i;
1514         long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1515         long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1516         struct rcu_torture *rtcp;
1517         static unsigned long rtcv_snap = ULONG_MAX;
1518         static bool splatted;
1519         struct task_struct *wtp;
1520
1521         for_each_possible_cpu(cpu) {
1522                 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1523                         pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
1524                         batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
1525                 }
1526         }
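             /* Find the highest-numbered pipeline bucket with a nonzero count; anything past bucket 1 indicates a too-short grace period. */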
1527         for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1528                 if (pipesummary[i] != 0)
1529                         break;
1530         }
1531
1532         pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1533         rtcp = rcu_access_pointer(rcu_torture_current);
1534         pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1535                 rtcp,
1536                 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
1537                 rcu_torture_current_version,
1538                 list_empty(&rcu_torture_freelist),
1539                 atomic_read(&n_rcu_torture_alloc),
1540                 atomic_read(&n_rcu_torture_alloc_fail),
1541                 atomic_read(&n_rcu_torture_free));
1542         pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
1543                 atomic_read(&n_rcu_torture_mberror),
1544                 n_rcu_torture_barrier_error,
1545                 n_rcu_torture_boost_ktrerror,
1546                 n_rcu_torture_boost_rterror);
1547         pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1548                 n_rcu_torture_boost_failure,
1549                 n_rcu_torture_boosts,
1550                 atomic_long_read(&n_rcu_torture_timers));
1551         torture_onoff_stats();
1552         pr_cont("barrier: %ld/%ld:%ld ",
1553                 data_race(n_barrier_successes),
1554                 data_race(n_barrier_attempts),
1555                 data_race(n_rcu_torture_barrier_error));
1556         pr_cont("read-exits: %ld\n", data_race(n_read_exits));
1557
1558         pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1559         if (atomic_read(&n_rcu_torture_mberror) ||
1560             n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
1561             n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
1562             i > 1) {
1563                 pr_cont("%s", "!!! ");
1564                 atomic_inc(&n_rcu_torture_error);
1565                 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
1566                 WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
1567                 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
1568                 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
1569                 WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
1570                 WARN_ON_ONCE(i > 1); // Too-short grace period
1571         }
1572         pr_cont("Reader Pipe: ");
1573         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1574                 pr_cont(" %ld", pipesummary[i]);
1575         pr_cont("\n");
1576
1577         pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1578         pr_cont("Reader Batch: ");
1579         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1580                 pr_cont(" %ld", batchsummary[i]);
1581         pr_cont("\n");
1582
1583         pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1584         pr_cont("Free-Block Circulation: ");
1585         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1586                 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
1587         }
1588         pr_cont("\n");
1589
1590         if (cur_ops->stats)
1591                 cur_ops->stats();
1592         if (rtcv_snap == rcu_torture_current_version &&
1593             rcu_access_pointer(rcu_torture_current) &&
1594             !rcu_stall_is_suppressed()) {
1595                 int __maybe_unused flags = 0;
1596                 unsigned long __maybe_unused gp_seq = 0;
1597
1598                 rcutorture_get_gp_data(cur_ops->ttype,
1599                                        &flags, &gp_seq);
1600                 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
1601                                         &flags, &gp_seq);
1602                 wtp = READ_ONCE(writer_task);
1603                 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
1604                          rcu_torture_writer_state_getname(),
1605                          rcu_torture_writer_state, gp_seq, flags,
1606                          wtp == NULL ? ~0UL : wtp->state,
1607                          wtp == NULL ? -1 : (int)task_cpu(wtp));
1608                 if (!splatted && wtp) {
1609                         sched_show_task(wtp);
1610                         splatted = true;
1611                 }
1612                 if (cur_ops->gp_kthread_dbg)
1613                         cur_ops->gp_kthread_dbg();
1614                 rcu_ftrace_dump(DUMP_ALL);
1615         }
1616         rtcv_snap = rcu_torture_current_version;
1617 }
1618
1619 /*
1620  * Periodically prints torture statistics, if periodic statistics printing
1621  * was specified via the stat_interval module parameter.
1622  */
1623 static int
1624 rcu_torture_stats(void *arg)
1625 {
1626         VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
1627         do {
1628                 schedule_timeout_interruptible(stat_interval * HZ);
1629                 rcu_torture_stats_print();
1630                 torture_shutdown_absorb("rcu_torture_stats");
1631         } while (!torture_must_stop());
1632         torture_kthread_stopping("rcu_torture_stats");
1633         return 0;
1634 }
1635
1636 static void
1637 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
1638 {
1639         pr_alert("%s" TORTURE_FLAG
1640                  "--- %s: nreaders=%d nfakewriters=%d "
1641                  "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1642                  "shuffle_interval=%d stutter=%d irqreader=%d "
1643                  "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1644                  "test_boost=%d/%d test_boost_interval=%d "
1645                  "test_boost_duration=%d shutdown_secs=%d "
1646                  "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
1647                  "stall_cpu_block=%d "
1648                  "n_barrier_cbs=%d "
1649                  "onoff_interval=%d onoff_holdoff=%d "
1650                  "read_exit_delay=%d read_exit_burst=%d\n",
1651                  torture_type, tag, nrealreaders, nfakewriters,
1652                  stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1653                  stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1654                  test_boost, cur_ops->can_boost,
1655                  test_boost_interval, test_boost_duration, shutdown_secs,
1656                  stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
1657                  stall_cpu_block,
1658                  n_barrier_cbs,
1659                  onoff_interval, onoff_holdoff,
1660                  read_exit_delay, read_exit_burst);
1661 }
1662
1663 static int rcutorture_booster_cleanup(unsigned int cpu)
1664 {
1665         struct task_struct *t;
1666
1667         if (boost_tasks[cpu] == NULL)
1668                 return 0;
1669         mutex_lock(&boost_mutex);
1670         t = boost_tasks[cpu];
1671         boost_tasks[cpu] = NULL;
1672         rcu_torture_enable_rt_throttle();
1673         mutex_unlock(&boost_mutex);
1674
1675         /* This must be outside of the mutex, otherwise deadlock! */
1676         torture_stop_kthread(rcu_torture_boost, t);
1677         return 0;
1678 }
1679
1680 static int rcutorture_booster_init(unsigned int cpu)
1681 {
1682         int retval;
1683
1684         if (boost_tasks[cpu] != NULL)
1685                 return 0;  /* Already created, nothing more to do. */
1686
1687         /* Don't allow time recalculation while creating a new task. */
1688         mutex_lock(&boost_mutex);
1689         rcu_torture_disable_rt_throttle();
1690         VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1691         boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1692                                                   cpu_to_node(cpu),
1693                                                   "rcu_torture_boost");
1694         if (IS_ERR(boost_tasks[cpu])) {
1695                 retval = PTR_ERR(boost_tasks[cpu]);
1696                 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
1697                 n_rcu_torture_boost_ktrerror++;
1698                 boost_tasks[cpu] = NULL;
1699                 mutex_unlock(&boost_mutex);
1700                 return retval;
1701         }
1702         kthread_bind(boost_tasks[cpu], cpu);
1703         wake_up_process(boost_tasks[cpu]);
1704         mutex_unlock(&boost_mutex);
1705         return 0;
1706 }
1707
1708 /*
1709  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
1710  * induces a CPU stall for the time specified by stall_cpu.
1711  */
1712 static int rcu_torture_stall(void *args)
1713 {
1714         int idx;
1715         unsigned long stop_at;
1716
1717         VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
1718         if (stall_cpu_holdoff > 0) {
1719                 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
1720                 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
1721                 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
1722         }
1723         if (!kthread_should_stop() && stall_gp_kthread > 0) {
1724                 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
1725                 rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
1726                 for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
1727                         if (kthread_should_stop())
1728                                 break;
1729                         schedule_timeout_uninterruptible(HZ);
1730                 }
1731         }
1732         if (!kthread_should_stop() && stall_cpu > 0) {
1733                 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
1734                 stop_at = ktime_get_seconds() + stall_cpu;
1735                 /* RCU CPU stall is expected behavior in following code. */
1736                 idx = cur_ops->readlock();
1737                 if (stall_cpu_irqsoff)
1738                         local_irq_disable();
1739                 else if (!stall_cpu_block)
1740                         preempt_disable();
1741                 pr_alert("rcu_torture_stall start on CPU %d.\n",
1742                          raw_smp_processor_id());
1743                 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1744                                     stop_at))
1745                         if (stall_cpu_block)
1746                                 schedule_timeout_uninterruptible(HZ);
1747                 if (stall_cpu_irqsoff)
1748                         local_irq_enable();
1749                 else if (!stall_cpu_block)
1750                         preempt_enable();
1751                 cur_ops->readunlock(idx);
1752         }
1753         pr_alert("rcu_torture_stall end.\n");
1754         torture_shutdown_absorb("rcu_torture_stall");
1755         while (!kthread_should_stop())
1756                 schedule_timeout_interruptible(10 * HZ);
1757         return 0;
1758 }
1759
1760 /* Spawn CPU-stall kthread, if stall_cpu specified. */
1761 static int __init rcu_torture_stall_init(void)
1762 {
1763         if (stall_cpu <= 0 && stall_gp_kthread <= 0)
1764                 return 0;
1765         return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
1766 }
1767
1768 /* State structure for forward-progress self-propagating RCU callback. */
1769 struct fwd_cb_state {
1770         struct rcu_head rh;
1771         int stop;
1772 };
1773
1774 /*
1775  * Forward-progress self-propagating RCU callback function.  Because
1776  * callbacks run from softirq, this function is an implicit RCU read-side
1777  * critical section.
1778  */
1779 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
1780 {
1781         struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
1782
1783         if (READ_ONCE(fcsp->stop)) {
1784                 WRITE_ONCE(fcsp->stop, 2);
1785                 return;
1786         }
1787         cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
1788 }
1789
1790 /* State for continuous-flood RCU callbacks. */
1791 struct rcu_fwd_cb {
1792         struct rcu_head rh;
1793         struct rcu_fwd_cb *rfc_next;
1794         struct rcu_fwd *rfc_rfp;
1795         int rfc_gps;
1796 };
1797
1798 #define MAX_FWD_CB_JIFFIES      (8 * HZ) /* Maximum CB test duration. */
1799 #define MIN_FWD_CB_LAUNDERS     3       /* This many CB invocations to count. */
1800 #define MIN_FWD_CBS_LAUNDERED   100     /* Number of counted CBs. */
1801 #define FWD_CBS_HIST_DIV        10      /* Histogram buckets/second. */
1802 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
1803
1804 struct rcu_launder_hist {
1805         long n_launders;
1806         unsigned long launder_gp_seq;
1807 };
1808
1809 struct rcu_fwd {
1810         spinlock_t rcu_fwd_lock;
1811         struct rcu_fwd_cb *rcu_fwd_cb_head;
1812         struct rcu_fwd_cb **rcu_fwd_cb_tail;
1813         long n_launders_cb;
1814         unsigned long rcu_fwd_startat;
1815         struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
1816         unsigned long rcu_launder_gp_seq_start;
1817 };
1818
1819 static DEFINE_MUTEX(rcu_fwd_mutex);
1820 static struct rcu_fwd *rcu_fwds;
1821 static bool rcu_fwd_emergency_stop;
1822
1823 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
1824 {
1825         unsigned long gps;
1826         unsigned long gps_old;
1827         int i;
1828         int j;
1829
1830         for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
1831                 if (rfp->n_launders_hist[i].n_launders > 0)
1832                         break;
1833         pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
1834                  __func__, jiffies - rfp->rcu_fwd_startat);
1835         gps_old = rfp->rcu_launder_gp_seq_start;
1836         for (j = 0; j <= i; j++) {
1837                 gps = rfp->n_launders_hist[j].launder_gp_seq;
1838                 pr_cont(" %ds/%d: %ld:%ld",
1839                         j + 1, FWD_CBS_HIST_DIV,
1840                         rfp->n_launders_hist[j].n_launders,
1841                         rcutorture_seq_diff(gps, gps_old));
1842                 gps_old = gps;
1843         }
1844         pr_cont("\n");
1845 }
1846
1847 /* Callback function for continuous-flood RCU callbacks. */
1848 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
1849 {
1850         unsigned long flags;
1851         int i;
1852         struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
1853         struct rcu_fwd_cb **rfcpp;
1854         struct rcu_fwd *rfp = rfcp->rfc_rfp;
1855
1856         rfcp->rfc_next = NULL;
1857         rfcp->rfc_gps++;
1858         spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
1859         rfcpp = rfp->rcu_fwd_cb_tail;
1860         rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
1861         WRITE_ONCE(*rfcpp, rfcp);
1862         WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
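             /* Bucket this invocation by elapsed time since the forward-progress test started. */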
1863         i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
1864         if (i >= ARRAY_SIZE(rfp->n_launders_hist))
1865                 i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
1866         rfp->n_launders_hist[i].n_launders++;
1867         rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
1868         spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
1869 }
1870
1871 // Give the scheduler a chance, even on nohz_full CPUs.
1872 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
1873 {
1874         if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
1875                 // Real call_rcu() floods hit userspace, so emulate that.
1876                 if (need_resched() || (iter & 0xfff))
1877                         schedule();
1878                 return;
1879         }
1880         // No userspace emulation: CB invocation throttles call_rcu()
1881         cond_resched();
1882 }
1883
1884 /*
1885  * Free all callbacks on the rcu_fwd_cb_head list, either because the
1886  * test is over or because we hit an OOM event.
1887  */
1888 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
1889 {
1890         unsigned long flags;
1891         unsigned long freed = 0;
1892         struct rcu_fwd_cb *rfcp;
1893
1894         for (;;) {
1895                 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
1896                 rfcp = rfp->rcu_fwd_cb_head;
1897                 if (!rfcp) {
1898                         spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
1899                         break;
1900                 }
1901                 rfp->rcu_fwd_cb_head = rfcp->rfc_next;
1902                 if (!rfp->rcu_fwd_cb_head)
1903                         rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
1904                 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
1905                 kfree(rfcp);
1906                 freed++;
1907                 rcu_torture_fwd_prog_cond_resched(freed);
1908                 if (tick_nohz_full_enabled()) {
1909                         local_irq_save(flags);
1910                         rcu_momentary_dyntick_idle();
1911                         local_irq_restore(flags);
1912                 }
1913         }
1914         return freed;
1915 }
1916
1917 /* Carry out need_resched()/cond_resched() forward-progress testing. */
1918 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
1919                                     int *tested, int *tested_tries)
1920 {
1921         unsigned long cver;
1922         unsigned long dur;
1923         struct fwd_cb_state fcs;
1924         unsigned long gps;
1925         int idx;
1926         int sd;
1927         int sd4;
1928         bool selfpropcb = false;
1929         unsigned long stopat;
1930         static DEFINE_TORTURE_RANDOM(trs);
1931
1932         if (!cur_ops->sync)
1933                 return; // Cannot do need_resched() forward progress testing without ->sync.
1934         if (cur_ops->call && cur_ops->cb_barrier) {
1935                 init_rcu_head_on_stack(&fcs.rh);
1936                 selfpropcb = true;
1937         }
1938
1939         /* Tight loop containing cond_resched(). */
1940         WRITE_ONCE(rcu_fwd_cb_nodelay, true);
1941         cur_ops->sync(); /* Later readers see above write. */
1942         if  (selfpropcb) {
1943                 WRITE_ONCE(fcs.stop, 0);
1944                 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
1945         }
1946         cver = READ_ONCE(rcu_torture_current_version);
1947         gps = cur_ops->get_gp_seq();
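             /* Choose a test duration between roughly 1/fwd_progress_div of the CPU-stall timeout and the full timeout. */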
1948         sd = cur_ops->stall_dur() + 1;
1949         sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
1950         dur = sd4 + torture_random(&trs) % (sd - sd4);
1951         WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
1952         stopat = rfp->rcu_fwd_startat + dur;
1953         while (time_before(jiffies, stopat) &&
1954                !shutdown_time_arrived() &&
1955                !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
1956                 idx = cur_ops->readlock();
1957                 udelay(10);
1958                 cur_ops->readunlock(idx);
1959                 if (!fwd_progress_need_resched || need_resched())
1960                         cond_resched();
1961         }
1962         (*tested_tries)++;
1963         if (!time_before(jiffies, stopat) &&
1964             !shutdown_time_arrived() &&
1965             !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
1966                 (*tested)++;
1967                 cver = READ_ONCE(rcu_torture_current_version) - cver;
1968                 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
1969                 WARN_ON(!cver && gps < 2);
1970                 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
1971         }
1972         if (selfpropcb) {
1973                 WRITE_ONCE(fcs.stop, 1);
1974                 cur_ops->sync(); /* Wait for running CB to complete. */
1975                 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
1976         }
1977
1978         if (selfpropcb) {
1979                 WARN_ON(READ_ONCE(fcs.stop) != 2);
1980                 destroy_rcu_head_on_stack(&fcs.rh);
1981         }
1982         schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
1983         WRITE_ONCE(rcu_fwd_cb_nodelay, false);
1984 }
1985
1986 /* Carry out call_rcu() forward-progress testing. */
1987 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
1988 {
1989         unsigned long cver;
1990         unsigned long flags;
1991         unsigned long gps;
1992         int i;
1993         long n_launders;
1994         long n_launders_cb_snap;
1995         long n_launders_sa;
1996         long n_max_cbs;
1997         long n_max_gps;
1998         struct rcu_fwd_cb *rfcp;
1999         struct rcu_fwd_cb *rfcpn;
2000         unsigned long stopat;
2001         unsigned long stoppedat;
2002
2003         if (READ_ONCE(rcu_fwd_emergency_stop))
2004                 return; /* Get out of the way quickly, no GP wait! */
2005         if (!cur_ops->call)
2006                 return; /* Can't do call_rcu() fwd prog without ->call. */
2007
2008         /* Loop continuously posting RCU callbacks. */
2009         WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2010         cur_ops->sync(); /* Later readers see above write. */
2011         WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2012         stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2013         n_launders = 0;
2014         rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2015         n_launders_sa = 0;
2016         n_max_cbs = 0;
2017         n_max_gps = 0;
2018         for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2019                 rfp->n_launders_hist[i].n_launders = 0;
2020         cver = READ_ONCE(rcu_torture_current_version);
2021         gps = cur_ops->get_gp_seq();
2022         rfp->rcu_launder_gp_seq_start = gps;
2023         tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2024         while (time_before(jiffies, stopat) &&
2025                !shutdown_time_arrived() &&
2026                !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2027                 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2028                 rfcpn = NULL;
2029                 if (rfcp)
2030                         rfcpn = READ_ONCE(rfcp->rfc_next);
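                     /* If another callback is queued behind the head, remove and re-post ("launder") the head; otherwise allocate and post a new one. */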
2031                 if (rfcpn) {
2032                         if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2033                             ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2034                                 break;
2035                         rfp->rcu_fwd_cb_head = rfcpn;
2036                         n_launders++;
2037                         n_launders_sa++;
2038                 } else {
2039                         rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2040                         if (WARN_ON_ONCE(!rfcp)) {
2041                                 schedule_timeout_interruptible(1);
2042                                 continue;
2043                         }
2044                         n_max_cbs++;
2045                         n_launders_sa = 0;
2046                         rfcp->rfc_gps = 0;
2047                         rfcp->rfc_rfp = rfp;
2048                 }
2049                 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2050                 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2051                 if (tick_nohz_full_enabled()) {
2052                         local_irq_save(flags);
2053                         rcu_momentary_dyntick_idle();
2054                         local_irq_restore(flags);
2055                 }
2056         }
2057         stoppedat = jiffies;
2058         n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2059         cver = READ_ONCE(rcu_torture_current_version) - cver;
2060         gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2061         cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2062         (void)rcu_torture_fwd_prog_cbfree(rfp);
2063
2064         if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2065             !shutdown_time_arrived()) {
2066                 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2067                 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2068                          __func__,
2069                          stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2070                          n_launders + n_max_cbs - n_launders_cb_snap,
2071                          n_launders, n_launders_sa,
2072                          n_max_gps, n_max_cbs, cver, gps);
2073                 rcu_torture_fwd_cb_hist(rfp);
2074         }
2075         schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2076         tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2077         WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2078 }
2079
2080
2081 /*
2082  * OOM notifier, but this only prints diagnostic information for the
2083  * current forward-progress test.
2084  */
2085 static int rcutorture_oom_notify(struct notifier_block *self,
2086                                  unsigned long notused, void *nfreed)
2087 {
2088         struct rcu_fwd *rfp;
2089
2090         mutex_lock(&rcu_fwd_mutex);
2091         rfp = rcu_fwds;
2092         if (!rfp) {
2093                 mutex_unlock(&rcu_fwd_mutex);
2094                 return NOTIFY_OK;
2095         }
2096         WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2097              __func__);
2098         rcu_torture_fwd_cb_hist(rfp);
2099         rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
2100         WRITE_ONCE(rcu_fwd_emergency_stop, true);
2101         smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2102         pr_info("%s: Freed %lu RCU callbacks.\n",
2103                 __func__, rcu_torture_fwd_prog_cbfree(rfp));
2104         rcu_barrier();
2105         pr_info("%s: Freed %lu RCU callbacks.\n",
2106                 __func__, rcu_torture_fwd_prog_cbfree(rfp));
2107         rcu_barrier();
2108         pr_info("%s: Freed %lu RCU callbacks.\n",
2109                 __func__, rcu_torture_fwd_prog_cbfree(rfp));
2110         smp_mb(); /* Frees before return to avoid redoing OOM. */
2111         (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2112         pr_info("%s returning after OOM processing.\n", __func__);
2113         mutex_unlock(&rcu_fwd_mutex);
2114         return NOTIFY_OK;
2115 }
2116
2117 static struct notifier_block rcutorture_oom_nb = {
2118         .notifier_call = rcutorture_oom_notify
2119 };
2120
2121 /* Carry out grace-period forward-progress testing. */
2122 static int rcu_torture_fwd_prog(void *args)
2123 {
2124         int oldnice = task_nice(current);
2125         struct rcu_fwd *rfp = args;
2126         int tested = 0;
2127         int tested_tries = 0;
2128
2129         VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2130         rcu_bind_current_to_nocb();
2131         if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2132                 set_user_nice(current, MAX_NICE);
2133         do {
2134                 schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2135                 WRITE_ONCE(rcu_fwd_emergency_stop, false);
2136                 if (!IS_ENABLED(CONFIG_TINY_RCU) ||
2137                     rcu_inkernel_boot_has_ended())
2138                         rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2139                 if (rcu_inkernel_boot_has_ended())
2140                         rcu_torture_fwd_prog_cr(rfp);
2141
2142                 /* Avoid slow periods, better to test when busy. */
2143                 if (stutter_wait("rcu_torture_fwd_prog"))
2144                         sched_set_normal(current, oldnice);
2145         } while (!torture_must_stop());
2146         /* Short runs might not contain a valid forward-progress attempt. */
2147         WARN_ON(!tested && tested_tries >= 5);
2148         pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2149         torture_kthread_stopping("rcu_torture_fwd_prog");
2150         return 0;
2151 }
2152
2153 /* If forward-progress checking is requested and feasible, spawn the thread. */
2154 static int __init rcu_torture_fwd_prog_init(void)
2155 {
2156         struct rcu_fwd *rfp;
2157
2158         if (!fwd_progress)
2159                 return 0; /* Not requested, so don't do it. */
2160         if ((!cur_ops->sync && !cur_ops->call) ||
2161             !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) {
2162                 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2163                 return 0;
2164         }
2165         if (stall_cpu > 0) {
2166                 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2167                 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2168                         return -EINVAL; /* In module, can fail back to user. */
2169                 WARN_ON(1); /* Make sure rcutorture notices conflict. */
2170                 return 0;
2171         }
2172         if (fwd_progress_holdoff <= 0)
2173                 fwd_progress_holdoff = 1;
2174         if (fwd_progress_div <= 0)
2175                 fwd_progress_div = 4;
2176         rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
2177         if (!rfp)
2178                 return -ENOMEM;
2179         spin_lock_init(&rfp->rcu_fwd_lock);
2180         rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2181         mutex_lock(&rcu_fwd_mutex);
2182         rcu_fwds = rfp;
2183         mutex_unlock(&rcu_fwd_mutex);
2184         register_oom_notifier(&rcutorture_oom_nb);
2185         return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
2186 }
2187
2188 static void rcu_torture_fwd_prog_cleanup(void)
2189 {
2190         struct rcu_fwd *rfp;
2191
2192         torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
2193         rfp = rcu_fwds;
2194         mutex_lock(&rcu_fwd_mutex);
2195         rcu_fwds = NULL;
2196         mutex_unlock(&rcu_fwd_mutex);
2197         unregister_oom_notifier(&rcutorture_oom_nb);
2198         kfree(rfp);
2199 }
2200
2201 /* Callback function for RCU barrier testing. */
2202 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
2203 {
2204         atomic_inc(&barrier_cbs_invoked);
2205 }
2206
2207 /* IPI handler to get callback posted on desired CPU, if online. */
2208 static void rcu_torture_barrier1cb(void *rcu_void)
2209 {
2210         struct rcu_head *rhp = rcu_void;
2211
2212         cur_ops->call(rhp, rcu_torture_barrier_cbf);
2213 }
2214
2215 /* kthread function to register callbacks used to test RCU barriers. */
2216 static int rcu_torture_barrier_cbs(void *arg)
2217 {
2218         long myid = (long)arg;
2219         bool lastphase = false;
2220         bool newphase;
2221         struct rcu_head rcu;
2222
2223         init_rcu_head_on_stack(&rcu);
2224         VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
2225         set_user_nice(current, MAX_NICE);
2226         do {
2227                 wait_event(barrier_cbs_wq[myid],
2228                            (newphase =
2229                             smp_load_acquire(&barrier_phase)) != lastphase ||
2230                            torture_must_stop());
2231                 lastphase = newphase;
2232                 if (torture_must_stop())
2233                         break;
2234                 /*
2235                  * The above smp_load_acquire() ensures barrier_phase load
2236                  * is ordered before the following ->call().
2237                  */
2238                 if (smp_call_function_single(myid, rcu_torture_barrier1cb,
2239                                              &rcu, 1)) {
2240                         // IPI failed, so use direct call from current CPU.
2241                         cur_ops->call(&rcu, rcu_torture_barrier_cbf);
2242                 }
2243                 if (atomic_dec_and_test(&barrier_cbs_count))
2244                         wake_up(&barrier_wq);
2245         } while (!torture_must_stop());
2246         if (cur_ops->cb_barrier != NULL)
2247                 cur_ops->cb_barrier();
2248         destroy_rcu_head_on_stack(&rcu);
2249         torture_kthread_stopping("rcu_torture_barrier_cbs");
2250         return 0;
2251 }
2252
2253 /* kthread function to drive and coordinate RCU barrier testing. */
2254 static int rcu_torture_barrier(void *arg)
2255 {
2256         int i;
2257
2258         VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
2259         do {
2260                 atomic_set(&barrier_cbs_invoked, 0);
2261                 atomic_set(&barrier_cbs_count, n_barrier_cbs);
2262                 /* Ensure barrier_phase ordered after prior assignments. */
2263                 smp_store_release(&barrier_phase, !barrier_phase);
2264                 for (i = 0; i < n_barrier_cbs; i++)
2265                         wake_up(&barrier_cbs_wq[i]);
2266                 wait_event(barrier_wq,
2267                            atomic_read(&barrier_cbs_count) == 0 ||
2268                            torture_must_stop());
2269                 if (torture_must_stop())
2270                         break;
2271                 n_barrier_attempts++;
2272                 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
2273                 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2274                         n_rcu_torture_barrier_error++;
2275                         pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2276                                atomic_read(&barrier_cbs_invoked),
2277                                n_barrier_cbs);
2278                         WARN_ON(1);
2279                         // Wait manually for the remaining callbacks
2280                         i = 0;
2281                         do {
2282                                 if (WARN_ON(i++ > HZ))
2283                                         i = INT_MIN;
2284                                 schedule_timeout_interruptible(1);
2285                                 cur_ops->cb_barrier();
2286                         } while (atomic_read(&barrier_cbs_invoked) !=
2287                                  n_barrier_cbs &&
2288                                  !torture_must_stop());
2289                         smp_mb(); // Can't trust ordering if broken.
2290                         if (!torture_must_stop())
2291                                 pr_err("Recovered: barrier_cbs_invoked = %d\n",
2292                                        atomic_read(&barrier_cbs_invoked));
2293                 } else {
2294                         n_barrier_successes++;
2295                 }
2296                 schedule_timeout_interruptible(HZ / 10);
2297         } while (!torture_must_stop());
2298         torture_kthread_stopping("rcu_torture_barrier");
2299         return 0;
2300 }
2301
2302 /* Initialize RCU barrier testing. */
2303 static int rcu_torture_barrier_init(void)
2304 {
2305         int i;
2306         int ret;
2307
2308         if (n_barrier_cbs <= 0)
2309                 return 0;
2310         if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
2311                 pr_alert("%s" TORTURE_FLAG
2312                          " Call or barrier ops missing for %s,\n",
2313                          torture_type, cur_ops->name);
2314                 pr_alert("%s" TORTURE_FLAG
2315                          " RCU barrier testing omitted from run.\n",
2316                          torture_type);
2317                 return 0;
2318         }
2319         atomic_set(&barrier_cbs_count, 0);
2320         atomic_set(&barrier_cbs_invoked, 0);
2321         barrier_cbs_tasks =
2322                 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
2323                         GFP_KERNEL);
2324         barrier_cbs_wq =
2325                 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
2326         if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
2327                 return -ENOMEM;
2328         for (i = 0; i < n_barrier_cbs; i++) {
2329                 init_waitqueue_head(&barrier_cbs_wq[i]);
2330                 ret = torture_create_kthread(rcu_torture_barrier_cbs,
2331                                              (void *)(long)i,
2332                                              barrier_cbs_tasks[i]);
2333                 if (ret)
2334                         return ret;
2335         }
2336         return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
2337 }
2338
2339 /* Clean up after RCU barrier testing. */
2340 static void rcu_torture_barrier_cleanup(void)
2341 {
2342         int i;
2343
2344         torture_stop_kthread(rcu_torture_barrier, barrier_task);
2345         if (barrier_cbs_tasks != NULL) {
2346                 for (i = 0; i < n_barrier_cbs; i++)
2347                         torture_stop_kthread(rcu_torture_barrier_cbs,
2348                                              barrier_cbs_tasks[i]);
2349                 kfree(barrier_cbs_tasks);
2350                 barrier_cbs_tasks = NULL;
2351         }
2352         if (barrier_cbs_wq != NULL) {
2353                 kfree(barrier_cbs_wq);
2354                 barrier_cbs_wq = NULL;
2355         }
2356 }
2357
2358 static bool rcu_torture_can_boost(void)
2359 {
2360         static int boost_warn_once;
2361         int prio;
2362
2363         if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2364                 return false;
2365
2366         prio = rcu_get_gp_kthreads_prio();
2367         if (!prio)
2368                 return false;
2369
2370         if (prio < 2) {
2371                 if (boost_warn_once  == 1)
2372                         return false;
2373
2374                 pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
2375                 boost_warn_once = 1;
2376                 return false;
2377         }
2378
2379         return true;
2380 }
2381
2382 static bool read_exit_child_stop;
2383 static bool read_exit_child_stopped;
2384 static wait_queue_head_t read_exit_wq;
2385
2386 // Child kthread which just does an rcutorture reader and exits.
2387 static int rcu_torture_read_exit_child(void *trsp_in)
2388 {
2389         struct torture_random_state *trsp = trsp_in;
2390
2391         set_user_nice(current, MAX_NICE);
2392         // Minimize time between reading and exiting.
2393         while (!kthread_should_stop())
2394                 schedule_timeout_uninterruptible(1);
2395         (void)rcu_torture_one_read(trsp);
2396         return 0;
2397 }
2398
2399 // Parent kthread which creates and destroys read-exit child kthreads.
2400 static int rcu_torture_read_exit(void *unused)
2401 {
2402         int count = 0;
2403         bool errexit = false;
2404         int i;
2405         struct task_struct *tsp;
2406         DEFINE_TORTURE_RANDOM(trs);
2407
2408         // Allocate and initialize.
2409         set_user_nice(current, MAX_NICE);
2410         VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
2411
2412         // Each pass through this loop does one read-exit episode.
2413         do {
2414                 if (++count > read_exit_burst) {
2415                         VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
2416                         rcu_barrier(); // Wait for task_struct free, avoid OOM.
2417                         for (i = 0; i < read_exit_delay; i++) {
2418                                 schedule_timeout_uninterruptible(HZ);
2419                                 if (READ_ONCE(read_exit_child_stop))
2420                                         break;
2421                         }
2422                         if (!READ_ONCE(read_exit_child_stop))
2423                                 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
2424                         count = 0;
2425                 }
2426                 if (READ_ONCE(read_exit_child_stop))
2427                         break;
2428                 // Spawn child.
2429                 tsp = kthread_run(rcu_torture_read_exit_child,
2430                                      &trs, "%s",
2431                                      "rcu_torture_read_exit_child");
2432                 if (IS_ERR(tsp)) {
2433                         VERBOSE_TOROUT_ERRSTRING("out of memory");
2434                         errexit = true;
2435                         tsp = NULL;
2436                         break;
2437                 }
2438                 cond_resched();
2439                 kthread_stop(tsp);
2440                 n_read_exits ++;
2441                 stutter_wait("rcu_torture_read_exit");
2442         } while (!errexit && !READ_ONCE(read_exit_child_stop));
2443
2444         // Clean up and exit.
2445         smp_store_release(&read_exit_child_stopped, true); // After reaping.
2446         smp_mb(); // Store before wakeup.
2447         wake_up(&read_exit_wq);
2448         while (!torture_must_stop())
2449                 schedule_timeout_uninterruptible(1);
2450         torture_kthread_stopping("rcu_torture_read_exit");
2451         return 0;
2452 }
2453
2454 static int rcu_torture_read_exit_init(void)
2455 {
2456         if (read_exit_burst <= 0)
2457                 return -EINVAL;
2458         init_waitqueue_head(&read_exit_wq);
2459         read_exit_child_stop = false;
2460         read_exit_child_stopped = false;
2461         return torture_create_kthread(rcu_torture_read_exit, NULL,
2462                                       read_exit_task);
2463 }
2464
2465 static void rcu_torture_read_exit_cleanup(void)
2466 {
2467         if (!read_exit_task)
2468                 return;
2469         WRITE_ONCE(read_exit_child_stop, true);
2470         smp_mb(); // Above write before wait.
2471         wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
2472         torture_stop_kthread(rcutorture_read_exit, read_exit_task);
2473 }
2474
2475 static enum cpuhp_state rcutor_hp;
2476
2477 static void
2478 rcu_torture_cleanup(void)
2479 {
2480         int firsttime;
2481         int flags = 0;
2482         unsigned long gp_seq = 0;
2483         int i;
2484
2485         if (torture_cleanup_begin()) {
2486                 if (cur_ops->cb_barrier != NULL)
2487                         cur_ops->cb_barrier();
2488                 return;
2489         }
2490         if (!cur_ops) {
2491                 torture_cleanup_end();
2492                 return;
2493         }
2494
2495         if (cur_ops->gp_kthread_dbg)
2496                 cur_ops->gp_kthread_dbg();
2497         rcu_torture_read_exit_cleanup();
2498         rcu_torture_barrier_cleanup();
2499         rcu_torture_fwd_prog_cleanup();
2500         torture_stop_kthread(rcu_torture_stall, stall_task);
2501         torture_stop_kthread(rcu_torture_writer, writer_task);
2502
2503         if (reader_tasks) {
2504                 for (i = 0; i < nrealreaders; i++)
2505                         torture_stop_kthread(rcu_torture_reader,
2506                                              reader_tasks[i]);
2507                 kfree(reader_tasks);
2508                 reader_tasks = NULL;
2509         }
2510
2511         if (fakewriter_tasks) {
2512                 for (i = 0; i < nfakewriters; i++)
2513                         torture_stop_kthread(rcu_torture_fakewriter,
2514                                              fakewriter_tasks[i]);
2515                 kfree(fakewriter_tasks);
2516                 fakewriter_tasks = NULL;
2517         }
2518
2519         rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2520         srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2521         pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
2522                  cur_ops->name, (long)gp_seq, flags,
2523                  rcutorture_seq_diff(gp_seq, start_gp_seq));
2524         torture_stop_kthread(rcu_torture_stats, stats_task);
2525         torture_stop_kthread(rcu_torture_fqs, fqs_task);
2526         if (rcu_torture_can_boost())
2527                 cpuhp_remove_state(rcutor_hp);
2528
2529         /*
2530          * Wait for all RCU callbacks to fire, then do torture-type-specific
2531          * cleanup operations.
2532          */
2533         if (cur_ops->cb_barrier != NULL)
2534                 cur_ops->cb_barrier();
2535         if (cur_ops->cleanup != NULL)
2536                 cur_ops->cleanup();
2537
2538         rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
2539
2540         if (err_segs_recorded) {
2541                 pr_alert("Failure/close-call rcutorture reader segments:\n");
2542                 if (rt_read_nsegs == 0)
2543                         pr_alert("\t: No segments recorded!!!\n");
2544                 firsttime = 1;
2545                 for (i = 0; i < rt_read_nsegs; i++) {
2546                         pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2547                         if (err_segs[i].rt_delay_jiffies != 0) {
2548                                 pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2549                                         err_segs[i].rt_delay_jiffies);
2550                                 firsttime = 0;
2551                         }
2552                         if (err_segs[i].rt_delay_ms != 0) {
2553                                 pr_cont("%s%ldms", firsttime ? "" : "+",
2554                                         err_segs[i].rt_delay_ms);
2555                                 firsttime = 0;
2556                         }
2557                         if (err_segs[i].rt_delay_us != 0) {
2558                                 pr_cont("%s%ldus", firsttime ? "" : "+",
2559                                         err_segs[i].rt_delay_us);
2560                                 firsttime = 0;
2561                         }
2562                         pr_cont("%s\n",
2563                                 err_segs[i].rt_preempted ? "preempted" : "");
2564
2565                 }
2566         }
2567         if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
2568                 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
2569         else if (torture_onoff_failures())
2570                 rcu_torture_print_module_parms(cur_ops,
2571                                                "End of test: RCU_HOTPLUG");
2572         else
2573                 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
2574         torture_cleanup_end();
2575 }
2576
2577 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2578 static void rcu_torture_leak_cb(struct rcu_head *rhp)
2579 {
2580 }
2581
2582 static void rcu_torture_err_cb(struct rcu_head *rhp)
2583 {
2584         /*
2585          * This -might- happen due to race conditions, but is unlikely.
2586          * The scenario that leads to this happening is that the
2587          * first of the pair of duplicate callbacks is queued,
2588          * someone else starts a grace period that includes that
2589          * callback, then the second of the pair must wait for the
2590          * next grace period.  Unlikely, but can happen.  If it
2591          * does happen, the debug-objects subsystem won't have splatted.
2592          */
2593         pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
2594 }
2595 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2596
2597 /*
2598  * Verify that double-free causes debug-objects to complain, but only
2599  * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
2600  * cannot be carried out.
2601  */
2602 static void rcu_test_debug_objects(void)
2603 {
2604 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2605         struct rcu_head rh1;
2606         struct rcu_head rh2;
2607
2608         init_rcu_head_on_stack(&rh1);
2609         init_rcu_head_on_stack(&rh2);
2610         pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
2611
2612         /* Try to queue the rh2 pair of callbacks for the same grace period. */
2613         preempt_disable(); /* Prevent preemption from interrupting test. */
2614         rcu_read_lock(); /* Make it impossible to finish a grace period. */
2615         call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
2616         local_irq_disable(); /* Make it harder to start a new grace period. */
2617         call_rcu(&rh2, rcu_torture_leak_cb);
2618         call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2619         local_irq_enable();
2620         rcu_read_unlock();
2621         preempt_enable();
2622
2623         /* Wait for them all to get done so we can safely return. */
2624         rcu_barrier();
2625         pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
2626         destroy_rcu_head_on_stack(&rh1);
2627         destroy_rcu_head_on_stack(&rh2);
2628 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2629         pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
2630 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2631 }
2632
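     /* Invoke the flavor's synchronous grace period, but at most once per 4096 calls. */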
2633 static void rcutorture_sync(void)
2634 {
2635         static unsigned long n;
2636
2637         if (cur_ops->sync && !(++n & 0xfff))
2638                 cur_ops->sync();
2639 }
2640
2641 static int __init
2642 rcu_torture_init(void)
2643 {
2644         long i;
2645         int cpu;
2646         int firsterr = 0;
2647         int flags = 0;
2648         unsigned long gp_seq = 0;
2649         static struct rcu_torture_ops *torture_ops[] = {
2650                 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
2651                 &busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
2652                 &tasks_tracing_ops, &trivial_ops,
2653         };
2654
2655         if (!torture_init_begin(torture_type, verbose))
2656                 return -EBUSY;
2657
2658         /* Process args and tell the world that the torturer is on the job. */
2659         for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
2660                 cur_ops = torture_ops[i];
2661                 if (strcmp(torture_type, cur_ops->name) == 0)
2662                         break;
2663         }
2664         if (i == ARRAY_SIZE(torture_ops)) {
2665                 pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2666                          torture_type);
2667                 pr_alert("rcu-torture types:");
2668                 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
2669                         pr_cont(" %s", torture_ops[i]->name);
2670                 pr_cont("\n");
2671                 firsterr = -EINVAL;
2672                 cur_ops = NULL;
2673                 goto unwind;
2674         }
2675         if (cur_ops->fqs == NULL && fqs_duration != 0) {
2676                 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
2677                 fqs_duration = 0;
2678         }
2679         if (cur_ops->init)
2680                 cur_ops->init();
2681
2682         if (nreaders >= 0) {
2683                 nrealreaders = nreaders;
2684         } else {
2685                 nrealreaders = num_online_cpus() - 2 - nreaders;
2686                 if (nrealreaders <= 0)
2687                         nrealreaders = 1;
2688         }
2689         rcu_torture_print_module_parms(cur_ops, "Start of test");
2690         rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2691         srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2692         start_gp_seq = gp_seq;
2693         pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
2694                  cur_ops->name, (long)gp_seq, flags);
2695
2696         /* Set up the freelist. */
2697
2698         INIT_LIST_HEAD(&rcu_torture_freelist);
2699         for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
2700                 rcu_tortures[i].rtort_mbtest = 0;
2701                 list_add_tail(&rcu_tortures[i].rtort_free,
2702                               &rcu_torture_freelist);
2703         }
2704
2705         /* Initialize the statistics so that each run gets its own numbers. */
2706
2707         rcu_torture_current = NULL;
2708         rcu_torture_current_version = 0;
2709         atomic_set(&n_rcu_torture_alloc, 0);
2710         atomic_set(&n_rcu_torture_alloc_fail, 0);
2711         atomic_set(&n_rcu_torture_free, 0);
2712         atomic_set(&n_rcu_torture_mberror, 0);
2713         atomic_set(&n_rcu_torture_error, 0);
2714         n_rcu_torture_barrier_error = 0;
2715         n_rcu_torture_boost_ktrerror = 0;
2716         n_rcu_torture_boost_rterror = 0;
2717         n_rcu_torture_boost_failure = 0;
2718         n_rcu_torture_boosts = 0;
2719         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2720                 atomic_set(&rcu_torture_wcount[i], 0);
2721         for_each_possible_cpu(cpu) {
2722                 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2723                         per_cpu(rcu_torture_count, cpu)[i] = 0;
2724                         per_cpu(rcu_torture_batch, cpu)[i] = 0;
2725                 }
2726         }
2727         err_segs_recorded = 0;
2728         rt_read_nsegs = 0;
2729
2730         /* Start up the kthreads. */
2731
2732         firsterr = torture_create_kthread(rcu_torture_writer, NULL,
2733                                           writer_task);
2734         if (firsterr)
2735                 goto unwind;
2736         if (nfakewriters > 0) {
2737                 fakewriter_tasks = kcalloc(nfakewriters,
2738                                            sizeof(fakewriter_tasks[0]),
2739                                            GFP_KERNEL);
2740                 if (fakewriter_tasks == NULL) {
2741                         VERBOSE_TOROUT_ERRSTRING("out of memory");
2742                         firsterr = -ENOMEM;
2743                         goto unwind;
2744                 }
2745         }
2746         for (i = 0; i < nfakewriters; i++) {
2747                 firsterr = torture_create_kthread(rcu_torture_fakewriter,
2748                                                   NULL, fakewriter_tasks[i]);
2749                 if (firsterr)
2750                         goto unwind;
2751         }
2752         reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
2753                                GFP_KERNEL);
2754         if (reader_tasks == NULL) {
2755                 VERBOSE_TOROUT_ERRSTRING("out of memory");
2756                 firsterr = -ENOMEM;
2757                 goto unwind;
2758         }
2759         for (i = 0; i < nrealreaders; i++) {
2760                 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
2761                                                   reader_tasks[i]);
2762                 if (firsterr)
2763                         goto unwind;
2764         }
2765         if (stat_interval > 0) {
2766                 firsterr = torture_create_kthread(rcu_torture_stats, NULL,
2767                                                   stats_task);
2768                 if (firsterr)
2769                         goto unwind;
2770         }
2771         if (test_no_idle_hz && shuffle_interval > 0) {
2772                 firsterr = torture_shuffle_init(shuffle_interval * HZ);
2773                 if (firsterr)
2774                         goto unwind;
2775         }
2776         if (stutter < 0)
2777                 stutter = 0;
2778         if (stutter) {
2779                 int t;
2780
2781                 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
2782                 firsterr = torture_stutter_init(stutter * HZ, t);
2783                 if (firsterr)
2784                         goto unwind;
2785         }
2786         if (fqs_duration < 0)
2787                 fqs_duration = 0;
2788         if (fqs_duration) {
2789                 /* Create the fqs thread */
2790                 firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
2791                                                   fqs_task);
2792                 if (firsterr)
2793                         goto unwind;
2794         }
2795         if (test_boost_interval < 1)
2796                 test_boost_interval = 1;
2797         if (test_boost_duration < 2)
2798                 test_boost_duration = 2;
2799         if (rcu_torture_can_boost()) {
2800
2801                 boost_starttime = jiffies + test_boost_interval * HZ;
2802
2803                 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
2804                                              rcutorture_booster_init,
2805                                              rcutorture_booster_cleanup);
2806                 if (firsterr < 0)
2807                         goto unwind;
2808                 rcutor_hp = firsterr;
2809         }
2810         shutdown_jiffies = jiffies + shutdown_secs * HZ;
2811         firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
2812         if (firsterr)
2813                 goto unwind;
2814         firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
2815                                       rcutorture_sync);
2816         if (firsterr)
2817                 goto unwind;
2818         firsterr = rcu_torture_stall_init();
2819         if (firsterr)
2820                 goto unwind;
2821         firsterr = rcu_torture_fwd_prog_init();
2822         if (firsterr)
2823                 goto unwind;
2824         firsterr = rcu_torture_barrier_init();
2825         if (firsterr)
2826                 goto unwind;
2827         firsterr = rcu_torture_read_exit_init();
2828         if (firsterr)
2829                 goto unwind;
2830         if (object_debug)
2831                 rcu_test_debug_objects();
2832         torture_init_end();
2833         return 0;
2834
2835 unwind:
2836         torture_init_end();
2837         rcu_torture_cleanup();
2838         if (shutdown_secs) {
2839                 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
2840                 kernel_power_off();
2841         }
2842         return firsterr;
2843 }
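/*
 * Editorial sketch (not part of rcutorture): the error-handling shape
 * used by rcu_torture_init() above.  Each setup step records its
 * result in firsterr and jumps to a single unwind label on failure, so
 * partially constructed state is torn down exactly once and the first
 * error is the one reported.  All names below are hypothetical stubs.
 */
static int example_setup_step_one(void) { return 0; }
static int example_setup_step_two(void) { return 0; }
static void example_cleanup(void) { }	/* Must tolerate partially built state. */

static int __init example_init(void)
{
	int firsterr;

	firsterr = example_setup_step_one();
	if (firsterr)
		goto unwind;
	firsterr = example_setup_step_two();
	if (firsterr)
		goto unwind;
	return 0;

unwind:
	example_cleanup();
	return firsterr;
}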
2844
2845 module_init(rcu_torture_init);
2846 module_exit(rcu_torture_cleanup);
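/*
 * Editorial note (not part of the source): when built as a module, the
 * test is typically started with something like
 *	modprobe rcutorture torture_type=srcu stat_interval=60
 * where torture_type must match one of the names printed by the
 * "rcu-torture types:" diagnostic in rcu_torture_init() above.
 */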