// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <[email protected]>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <[email protected]>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
        pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
        do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define VERBOSE_SCALEOUT_ERRSTRING(s) \
        do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s); } while (0)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1. Specify only the nr_cpus kernel boot parameter.  This will
 *    set both nreaders and nwriters to the value specified by
 *    nr_cpus for a mixed reader/writer test.
 *
 * 2. Specify the nr_cpus kernel boot parameter, but set
 *    rcuscale.nreaders to zero.  This will set nwriters to the
 *    value specified by nr_cpus for an update-only test.
 *
 * 3. Specify the nr_cpus kernel boot parameter, but set
 *    rcuscale.nwriters to zero.  This will set nreaders to the
 *    value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */

#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
              "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");
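
/*
 * Example invocations (the parameter values below are illustrative
 * only, not recommendations).  As a module:
 *
 *	modprobe rcuscale scale_type=srcu nreaders=0 nwriters=4
 *
 * Built in, on the kernel command line:
 *
 *	rcuscale.scale_type=rcu rcuscale.gp_async=1 rcuscale.gp_async_max=500
 */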

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_scale_ops {
        int ptype;
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readunlock)(int idx);
        unsigned long (*get_gp_seq)(void);
        unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        unsigned long (*exp_completed)(void);
        void (*async)(struct rcu_head *head, rcu_callback_t func);
        void (*gp_barrier)(void);
        void (*sync)(void);
        void (*exp_sync)(void);
        const char *name;
};

static struct rcu_scale_ops *cur_ops;

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
        return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
        .ptype = RCU_FLAVOR,
        .init = rcu_sync_scale_init,
        .readlock = rcu_scale_read_lock,
        .readunlock = rcu_scale_read_unlock,
        .get_gp_seq = rcu_get_gp_seq,
        .gp_diff = rcu_seq_diff,
        .exp_completed = rcu_exp_batches_completed,
        .async = call_rcu,
        .gp_barrier = rcu_barrier,
        .sync = synchronize_rcu,
        .exp_sync = synchronize_rcu_expedited,
        .name = "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
        return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
        srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
        return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
        call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
        srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
        synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_synchronize_expedited(void)
{
        synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
        .ptype = SRCU_FLAVOR,
        .init = rcu_sync_scale_init,
        .readlock = srcu_scale_read_lock,
        .readunlock = srcu_scale_read_unlock,
        .get_gp_seq = srcu_scale_completed,
        .gp_diff = rcu_seq_diff,
        .exp_completed = srcu_scale_completed,
        .async = srcu_call_rcu,
        .gp_barrier = srcu_rcu_barrier,
        .sync = srcu_scale_synchronize,
        .exp_sync = srcu_scale_synchronize_expedited,
        .name = "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
        srcu_ctlp = &srcud;
        init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
        cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
        .ptype = SRCU_FLAVOR,
        .init = srcu_sync_scale_init,
        .cleanup = srcu_sync_scale_cleanup,
        .readlock = srcu_scale_read_lock,
        .readunlock = srcu_scale_read_unlock,
        .get_gp_seq = srcu_scale_completed,
        .gp_diff = rcu_seq_diff,
        .exp_completed = srcu_scale_completed,
        .async = srcu_call_rcu,
        .gp_barrier = srcu_rcu_barrier,
        .sync = srcu_scale_synchronize,
        .exp_sync = srcu_scale_synchronize_expedited,
        .name = "srcud"
};

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
        return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}

static struct rcu_scale_ops tasks_ops = {
        .ptype = RCU_TASKS_FLAVOR,
        .init = rcu_sync_scale_init,
        .readlock = tasks_scale_read_lock,
        .readunlock = tasks_scale_read_unlock,
        .get_gp_seq = rcu_no_completed,
        .gp_diff = rcu_seq_diff,
        .async = call_rcu_tasks,
        .gp_barrier = rcu_barrier_tasks,
        .sync = synchronize_rcu_tasks,
        .exp_sync = synchronize_rcu_tasks,
        .name = "tasks"
};

/*
 * Definitions for RCU-tasks-trace scalability testing.
 */

static int tasks_trace_scale_read_lock(void)
{
        rcu_read_lock_trace();
        return 0;
}

static void tasks_trace_scale_read_unlock(int idx)
{
        rcu_read_unlock_trace();
}

static struct rcu_scale_ops tasks_tracing_ops = {
        .ptype = RCU_TASKS_FLAVOR,
        .init = rcu_sync_scale_init,
        .readlock = tasks_trace_scale_read_lock,
        .readunlock = tasks_trace_scale_read_unlock,
        .get_gp_seq = rcu_no_completed,
        .gp_diff = rcu_seq_diff,
        .async = call_rcu_tasks_trace,
        .gp_barrier = rcu_barrier_tasks_trace,
        .sync = synchronize_rcu_tasks_trace,
        .exp_sync = synchronize_rcu_tasks_trace,
        .name = "tasks-tracing"
};

static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
        if (!cur_ops->gp_diff)
                return new - old;
        return cur_ops->gp_diff(new, old);
}
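
/*
 * Note: flavors whose ->get_gp_seq() returns an rcu_seq-style sequence
 * number supply rcu_seq_diff() (from rcu.h) as ->gp_diff so that the
 * low-order grace-period state bits are accounted for; a NULL ->gp_diff
 * falls back to plain subtraction.
 */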

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
        cond_resched_tasks_rcu_qs();
        if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
                return;
        while (!torture_must_stop())
                schedule_timeout_uninterruptible(1);
}

/*
 * RCU scalability reader kthread.  Repeatedly does an empty RCU read-side
 * critical section, minimizing update-side interference.  However, the
 * point of this test is not to evaluate reader scalability, but instead
 * to serve as a test load for update-side scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
        unsigned long flags;
        int idx;
        long me = (long)arg;

        VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        set_user_nice(current, MAX_NICE);
        atomic_inc(&n_rcu_scale_reader_started);

        do {
                local_irq_save(flags);
                idx = cur_ops->readlock();
                cur_ops->readunlock(idx);
                local_irq_restore(flags);
                rcu_scale_wait_shutdown();
        } while (!torture_must_stop());
        torture_kthread_stopping("rcu_scale_reader");
        return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
        atomic_dec(this_cpu_ptr(&n_async_inflight));
        kfree(rhp);
}
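
/*
 * In gp_async mode, rcu_scale_writer() posts this callback via
 * cur_ops->async() and tracks it in the per-CPU n_async_inflight
 * counter.  Once gp_async_max callbacks are outstanding on a CPU,
 * the writer calls cur_ops->gp_barrier() to drain them before
 * posting more, bounding the memory consumed by pending callbacks.
 */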

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_scale_writer(void *arg)
{
        int i = 0;
        int i_max;
        long me = (long)arg;
        struct rcu_head *rhp = NULL;
        bool started = false, done = false, alldone = false;
        u64 t;
        u64 *wdp;
        u64 *wdpp = writer_durations[me];

        VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
        WARN_ON(!wdpp);
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        sched_set_fifo_low(current);

        if (holdoff)
                schedule_timeout_uninterruptible(holdoff * HZ);

        /*
         * Wait until rcu_end_inkernel_boot() is called for normal GP tests
         * so that RCU is not always expedited for normal GP tests.
         * The system_state test is approximate, but works well in practice.
         */
        while (!gp_exp && system_state != SYSTEM_RUNNING)
                schedule_timeout_uninterruptible(1);

        t = ktime_get_mono_fast_ns();
        if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
                t_rcu_scale_writer_started = t;
                if (gp_exp) {
                        b_rcu_gp_test_started =
                                cur_ops->exp_completed() / 2;
                } else {
                        b_rcu_gp_test_started = cur_ops->get_gp_seq();
                }
        }

        do {
                if (writer_holdoff)
                        udelay(writer_holdoff);
                wdp = &wdpp[i];
                *wdp = ktime_get_mono_fast_ns();
                if (gp_async) {
retry:
                        if (!rhp)
                                rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
                        if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
                                atomic_inc(this_cpu_ptr(&n_async_inflight));
                                cur_ops->async(rhp, rcu_scale_async_cb);
                                rhp = NULL;
                        } else if (!kthread_should_stop()) {
                                cur_ops->gp_barrier();
                                goto retry;
                        } else {
                                kfree(rhp); /* Because we are stopping. */
                        }
                } else if (gp_exp) {
                        cur_ops->exp_sync();
                } else {
                        cur_ops->sync();
                }
                t = ktime_get_mono_fast_ns();
                *wdp = t - *wdp;
                i_max = i;
                if (!started &&
                    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
                        started = true;
                if (!done && i >= MIN_MEAS) {
                        done = true;
                        sched_set_normal(current, 0);
                        pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
                                 scale_type, SCALE_FLAG, me, MIN_MEAS);
                        if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
                            nrealwriters) {
                                schedule_timeout_interruptible(10);
                                rcu_ftrace_dump(DUMP_ALL);
                                SCALEOUT_STRING("Test complete");
                                t_rcu_scale_writer_finished = t;
                                if (gp_exp) {
                                        b_rcu_gp_test_finished =
                                                cur_ops->exp_completed() / 2;
                                } else {
                                        b_rcu_gp_test_finished =
                                                cur_ops->get_gp_seq();
                                }
                                if (shutdown) {
                                        smp_mb(); /* Assign before wake. */
                                        wake_up(&shutdown_wq);
                                }
                        }
                }
                if (done && !alldone &&
                    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
                        alldone = true;
                if (started && !alldone && i < MAX_MEAS - 1)
                        i++;
                rcu_scale_wait_shutdown();
        } while (!torture_must_stop());
        if (gp_async) {
                cur_ops->gp_barrier();
        }
        writer_n_durations[me] = i_max;
        torture_kthread_stopping("rcu_scale_writer");
        return 0;
}
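
/*
 * Measurement protocol: each writer timestamps its grace-period wait
 * with ktime_get_mono_fast_ns(), stores the resulting duration in
 * nanoseconds in its writer_durations[] row, and continues until every
 * writer has logged at least MIN_MEAS grace periods, up to MAX_MEAS
 * per writer.  The measurement index advances only after all writers
 * have started, so the retained samples are taken under full writer load.
 */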

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
        pr_alert("%s" SCALE_FLAG
                 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
                 scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_scale_cleanup(void)
{
        int i;
        int j;
        int ngps = 0;
        u64 *wdp;
        u64 *wdpp;

        /*
         * Would like warning at start, but everything is expedited
         * during the mid-boot phase, so have to wait till the end.
         */
        if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
                VERBOSE_SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
        if (rcu_gp_is_normal() && gp_exp)
                VERBOSE_SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
        if (gp_exp && gp_async)
                VERBOSE_SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

        if (torture_cleanup_begin())
                return;
        if (!cur_ops) {
                torture_cleanup_end();
                return;
        }

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        torture_stop_kthread(rcu_scale_reader,
                                             reader_tasks[i]);
                kfree(reader_tasks);
        }

        if (writer_tasks) {
                for (i = 0; i < nrealwriters; i++) {
                        torture_stop_kthread(rcu_scale_writer,
                                             writer_tasks[i]);
                        if (!writer_n_durations)
                                continue;
                        j = writer_n_durations[i];
                        pr_alert("%s%s writer %d gps: %d\n",
                                 scale_type, SCALE_FLAG, i, j);
                        ngps += j;
                }
                pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
                         scale_type, SCALE_FLAG,
                         t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
                         t_rcu_scale_writer_finished -
                         t_rcu_scale_writer_started,
                         ngps,
                         rcuscale_seq_diff(b_rcu_gp_test_finished,
                                           b_rcu_gp_test_started));
                for (i = 0; i < nrealwriters; i++) {
                        if (!writer_durations)
                                break;
                        if (!writer_n_durations)
                                continue;
                        wdpp = writer_durations[i];
                        if (!wdpp)
                                continue;
                        for (j = 0; j <= writer_n_durations[i]; j++) {
                                wdp = &wdpp[j];
                                pr_alert("%s%s %4d writer-duration: %5d %llu\n",
                                         scale_type, SCALE_FLAG,
                                         i, j, *wdp);
                                if (j % 100 == 0)
                                        schedule_timeout_uninterruptible(1);
                        }
                        kfree(writer_durations[i]);
                }
                kfree(writer_tasks);
                kfree(writer_durations);
                kfree(writer_n_durations);
        }

        /* Do torture-type-specific cleanup operations. */
        if (cur_ops->cleanup != NULL)
                cur_ops->cleanup();

        torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
        int nr;

        if (n >= 0) {
                nr = n;
        } else {
                nr = num_online_cpus() + 1 + n;
                if (nr <= 0)
                        nr = 1;
        }
        return nr;
}
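
/*
 * For example, on a 16-CPU system: compute_real(4) == 4,
 * compute_real(-1) == 16, compute_real(-2) == 15, and
 * compute_real(-100) is clamped up to 1.
 */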

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then
 * shuts down the system.
 */
static int
rcu_scale_shutdown(void *arg)
{
        wait_event(shutdown_wq,
                   atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
        smp_mb(); /* Wake before output. */
        rcu_scale_cleanup();
        kernel_power_off();
        return -EINVAL;
}

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs, run
 * the specified number of iterations, and measure the total time and the
 * number of grace periods needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;

struct kfree_obj {
        char kfree_obj[8];
        struct rcu_head rh;
};
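
/*
 * Each allocation below is kfree_mult * sizeof(struct kfree_obj) bytes.
 * With the default kfree_mult of 1, that is typically 24 bytes on a
 * 64-bit kernel (8 bytes of payload plus a two-pointer rcu_head),
 * before kmalloc() slab-size rounding.
 */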

static int
kfree_scale_thread(void *arg)
{
        int i, loop = 0;
        long me = (long)arg;
        struct kfree_obj *alloc_ptr;
        u64 start_time, end_time;
        long long mem_begin, mem_during = 0;

        VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        set_user_nice(current, MAX_NICE);

        start_time = ktime_get_mono_fast_ns();

        if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
                if (gp_exp)
                        b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
                else
                        b_rcu_gp_test_started = cur_ops->get_gp_seq();
        }

        do {
                if (!mem_during) {
                        mem_during = mem_begin = si_mem_available();
                } else if (loop % (kfree_loops / 4) == 0) {
                        mem_during = (mem_during + si_mem_available()) / 2;
                }

                for (i = 0; i < kfree_alloc_num; i++) {
                        alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
                        if (!alloc_ptr)
                                return -ENOMEM;

                        kfree_rcu(alloc_ptr, rh);
                }

                cond_resched();
        } while (!torture_must_stop() && ++loop < kfree_loops);

        if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
                end_time = ktime_get_mono_fast_ns();

                if (gp_exp)
                        b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
                else
                        b_rcu_gp_test_finished = cur_ops->get_gp_seq();

                pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
                         (unsigned long long)(end_time - start_time), kfree_loops,
                         rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
                         (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

                if (shutdown) {
                        smp_mb(); /* Assign before wake. */
                        wake_up(&shutdown_wq);
                }
        }

        torture_kthread_stopping("kfree_scale_thread");
        return 0;
}
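
/*
 * The memory footprint reported above is the difference between the
 * pages available at the start (mem_begin) and a running average
 * resampled every kfree_loops/4 iterations (mem_during), converted
 * from pages to megabytes by the right shift of (20 - PAGE_SHIFT).
 */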

static void
kfree_scale_cleanup(void)
{
        int i;

        if (torture_cleanup_begin())
                return;

        if (kfree_reader_tasks) {
                for (i = 0; i < kfree_nrealthreads; i++)
                        torture_stop_kthread(kfree_scale_thread,
                                             kfree_reader_tasks[i]);
                kfree(kfree_reader_tasks);
        }

        torture_cleanup_end();
}

/*
 * Shutdown kthread.  Just waits to be awakened, then shuts down the system.
 */
static int
kfree_scale_shutdown(void *arg)
{
        wait_event(shutdown_wq,
                   atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

        smp_mb(); /* Wake before output. */

        kfree_scale_cleanup();
        kernel_power_off();
        return -EINVAL;
}

static int __init
kfree_scale_init(void)
{
        long i;
        int firsterr = 0;

        kfree_nrealthreads = compute_real(kfree_nthreads);
        /* Start up the kthreads. */
        if (shutdown) {
                init_waitqueue_head(&shutdown_wq);
                firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
                                                  shutdown_task);
                if (firsterr)
                        goto unwind;
                schedule_timeout_uninterruptible(1);
        }

        pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));

        kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
                                     GFP_KERNEL);
        if (kfree_reader_tasks == NULL) {
                firsterr = -ENOMEM;
                goto unwind;
        }

        for (i = 0; i < kfree_nrealthreads; i++) {
                firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
                                                  kfree_reader_tasks[i]);
                if (firsterr)
                        goto unwind;
        }

        while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
                schedule_timeout_uninterruptible(1);

        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        kfree_scale_cleanup();
        return firsterr;
}

static int __init
rcu_scale_init(void)
{
        long i;
        int firsterr = 0;
        static struct rcu_scale_ops *scale_ops[] = {
                &rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops, &tasks_tracing_ops
        };

        if (!torture_init_begin(scale_type, verbose))
                return -EBUSY;

        /* Process args and announce that the scalability'er is on the job. */
        for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
                cur_ops = scale_ops[i];
                if (strcmp(scale_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(scale_ops)) {
                pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
                pr_alert("rcu-scale types:");
                for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
                        pr_cont(" %s", scale_ops[i]->name);
                pr_cont("\n");
                firsterr = -EINVAL;
                cur_ops = NULL;
                goto unwind;
        }
        if (cur_ops->init)
                cur_ops->init();

        if (kfree_rcu_test)
                return kfree_scale_init();

        nrealwriters = compute_real(nwriters);
        nrealreaders = compute_real(nreaders);
        atomic_set(&n_rcu_scale_reader_started, 0);
        atomic_set(&n_rcu_scale_writer_started, 0);
        atomic_set(&n_rcu_scale_writer_finished, 0);
        rcu_scale_print_module_parms(cur_ops, "Start of test");

        /* Start up the kthreads. */

        if (shutdown) {
                init_waitqueue_head(&shutdown_wq);
                firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
                                                  shutdown_task);
                if (firsterr)
                        goto unwind;
                schedule_timeout_uninterruptible(1);
        }
        reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_SCALEOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
                                                  reader_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
        while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
                schedule_timeout_uninterruptible(1);
        writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
                               GFP_KERNEL);
        writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
                                   GFP_KERNEL);
        writer_n_durations =
                kcalloc(nrealwriters, sizeof(*writer_n_durations),
                        GFP_KERNEL);
        if (!writer_tasks || !writer_durations || !writer_n_durations) {
                VERBOSE_SCALEOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealwriters; i++) {
                writer_durations[i] =
                        kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
                                GFP_KERNEL);
                if (!writer_durations[i]) {
                        firsterr = -ENOMEM;
                        goto unwind;
                }
                firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
                                                  writer_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        rcu_scale_cleanup();
        if (shutdown) {
                WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
                kernel_power_off();
        }
        return firsterr;
}

module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);