// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <[email protected]>
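//
// Illustrative usage, assuming the usual modular build of this file as
// refscale.ko (the parameters are defined by the torture_param() calls
// below):
//
//	modprobe refscale scale_type=srcu nreaders=8 nruns=10 loops=100000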

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"

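// Output macros.  Every line of test output is prefixed with the scale
// type and SCALE_FLAG so that it can be picked out of the console log.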
#define SCALE_FLAG "-ref-scale: "

#define SCALEOUT(s, x...) \
	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

#define VERBOSE_SCALEOUT(s, x...) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG s, scale_type, ## x); } while (0)

static atomic_t verbose_batch_ctr;

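// When verbose_batched is set, print only every verbose_batched-th
// verbose message.  The one-jiffy sleep before each printed message
// helps keep the console from being overrun.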
#define VERBOSE_SCALEOUT_BATCH(s, x...)						\
do {										\
	if (verbose &&								\
	    (verbose_batched <= 0 ||						\
	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) {	\
		schedule_timeout_uninterruptible(1);				\
		pr_alert("%s" SCALE_FLAG s, scale_type, ## x);			\
	}									\
} while (0)

#define VERBOSE_SCALEOUT_ERRSTRING(s, x...) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! " s, scale_type, ## x); } while (0)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <[email protected]>");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, rcu-trace, rcu-tasks, refcnt, rwlock, rwsem).");

torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of loops per experiment, all readers execute operations concurrently.
torture_param(long, loops, 10000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of runs.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");

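// When built into the kernel (for example, for automated torture-test
// runs in a fresh virtual machine), default to shutting the system down
// at the end of the test; when built as a module, leave the system up.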
#ifdef MODULE
# define REFSCALE_SHUTDOWN 0
#else
# define REFSCALE_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");

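// Per-reader state: the reader kthread itself, a start flag and wait
// queue that the orchestrator uses to release the reader into the next
// experiment, and the duration of the reader's most recent measurement.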
struct reader_task {
	struct task_struct *task;
	int start_reader;
	wait_queue_head_t wq;
	u64 last_duration_ns;
};

static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Used to wait for all threads to start.
static atomic_t n_init;
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;

// Track which experiment is currently running.
static int exp_idx;

// Operations vector for selecting different types of tests.
struct ref_scale_ops {
	void (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	void (*delaysection)(const int nloops, const int udl, const int ndl);
	const char *name;
};

static struct ref_scale_ops *cur_ops;

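// Apply the requested read-side delay: udl microseconds followed by ndl
// nanoseconds.  See rcu_scale_one_reader() for how readdelay is split up.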
static void un_delay(const int udl, const int ndl)
{
	if (udl)
		udelay(udl);
	if (ndl)
		ndelay(ndl);
}

static void ref_rcu_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		un_delay(udl, ndl);
		rcu_read_unlock();
	}
}

static void rcu_sync_scale_init(void)
{
}

static struct ref_scale_ops rcu_ops = {
	.init = rcu_sync_scale_init,
	.readsection = ref_rcu_read_section,
	.delaysection = ref_rcu_delay_section,
	.name = "rcu"
};

// Definitions for SRCU ref scale testing.
DEFINE_STATIC_SRCU(srcu_refctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;

static void srcu_ref_scale_read_section(const int nloops)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		un_delay(udl, ndl);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static struct ref_scale_ops srcu_ops = {
	.init = rcu_sync_scale_init,
	.readsection = srcu_ref_scale_read_section,
	.delaysection = srcu_ref_scale_delay_section,
	.name = "srcu"
};

// Definitions for RCU Tasks ref scale testing: Empty read markers.
// These definitions also work for RCU Rude readers.
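// (Tasks RCU readers are delimited by voluntary context switches rather
// than by explicit read-side markers, so the reader loop body is empty.)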
static void rcu_tasks_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--)
		continue;
}

static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--)
		un_delay(udl, ndl);
}

static struct ref_scale_ops rcu_tasks_ops = {
	.init = rcu_sync_scale_init,
	.readsection = rcu_tasks_ref_scale_read_section,
	.delaysection = rcu_tasks_ref_scale_delay_section,
	.name = "rcu-tasks"
};

// Definitions for RCU Tasks Trace ref scale testing.
static void rcu_trace_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		rcu_read_unlock_trace();
	}
}

static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		un_delay(udl, ndl);
		rcu_read_unlock_trace();
	}
}

static struct ref_scale_ops rcu_trace_ops = {
	.init = rcu_sync_scale_init,
	.readsection = rcu_trace_ref_scale_read_section,
	.delaysection = rcu_trace_ref_scale_delay_section,
	.name = "rcu-trace"
};

// Definitions for reference count.
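// All readers atomically increment and decrement a single shared
// counter, so this variant measures the cost of bouncing that counter's
// cache line among the reader CPUs.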
static atomic_t refcnt;

static void ref_refcnt_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		atomic_dec(&refcnt);
	}
}

static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		un_delay(udl, ndl);
		atomic_dec(&refcnt);
	}
}

static struct ref_scale_ops refcnt_ops = {
	.init = rcu_sync_scale_init,
	.readsection = ref_refcnt_section,
	.delaysection = ref_refcnt_delay_section,
	.name = "refcnt"
};

// Definitions for rwlock.
static rwlock_t test_rwlock;

static void ref_rwlock_init(void)
{
	rwlock_init(&test_rwlock);
}

static void ref_rwlock_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		read_unlock(&test_rwlock);
	}
}

static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		un_delay(udl, ndl);
		read_unlock(&test_rwlock);
	}
}

static struct ref_scale_ops rwlock_ops = {
	.init = ref_rwlock_init,
	.readsection = ref_rwlock_section,
	.delaysection = ref_rwlock_delay_section,
	.name = "rwlock"
};

// Definitions for rwsem.
static struct rw_semaphore test_rwsem;

static void ref_rwsem_init(void)
{
	init_rwsem(&test_rwsem);
}

static void ref_rwsem_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		up_read(&test_rwsem);
	}
}

static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		un_delay(udl, ndl);
		up_read(&test_rwsem);
	}
}

static struct ref_scale_ops rwsem_ops = {
	.init = ref_rwsem_init,
	.readsection = ref_rwsem_section,
	.delaysection = ref_rwsem_delay_section,
	.name = "rwsem"
};

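// Do one set of loops read-side operations using the current scale type.
// The readdelay module parameter is in nanoseconds, so it is split into
// a microsecond part for udelay() and a nanosecond remainder for ndelay().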
static void rcu_scale_one_reader(void)
{
	if (readdelay <= 0)
		cur_ops->readsection(loops);
	else
		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}

// Reader kthread.  Repeatedly does empty read-side critical sections,
// minimizing update-side interference.
static int
ref_scale_reader(void *arg)
{
	unsigned long flags;
	long me = (long)arg;
	struct reader_task *rt = &(reader_tasks[me]);
	u64 start;
	s64 duration;

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_init);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);
repeat:
	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id());

	// Wait for signal that this reader can start.
	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
		   torture_must_stop());

	if (torture_must_stop())
		goto end;

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(smp_processor_id() != me);

	WRITE_ONCE(rt->start_reader, 0);
	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started))
			cpu_relax();

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);

	// To reduce noise, do an initial cache-warming invocation, check
	// in, and then keep warming until everyone has checked in.
	rcu_scale_one_reader();
	if (!atomic_dec_return(&n_warmedup))
		while (atomic_read_acquire(&n_warmedup))
			rcu_scale_one_reader();
	// Also keep interrupts disabled.  This has the effect of
	// preventing entries into the slow path for rcu_read_unlock().
	local_irq_save(flags);
	start = ktime_get_mono_fast_ns();

	rcu_scale_one_reader();

	duration = ktime_get_mono_fast_ns() - start;
	local_irq_restore(flags);

	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
	// To reduce runtime-skew noise, do maintain-load invocations until
	// everyone is done.
	if (!atomic_dec_return(&n_cooleddown))
		while (atomic_read_acquire(&n_cooleddown))
			rcu_scale_one_reader();

	if (atomic_dec_and_test(&nreaders_exp))
		wake_up(&main_wq);

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
			       me, exp_idx, atomic_read(&nreaders_exp));

	if (!torture_must_stop())
		goto repeat;
end:
	torture_kthread_stopping("ref_scale_reader");
	return 0;
}

static void reset_readers(void)
{
	int i;
	struct reader_task *rt;

	for (i = 0; i < nreaders; i++) {
		rt = &(reader_tasks[i]);

		rt->last_duration_ns = 0;
	}
}

// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
	int i;
	struct reader_task *rt;
	char buf1[64];
	char *buf;
	u64 sum = 0;

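	// Room for a header line plus roughly 32 bytes of formatted
	// output per reader.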
	buf = kmalloc(128 + nreaders * 32, GFP_KERNEL);
	if (!buf)
		return 0;
	buf[0] = 0;
	sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
		exp_idx);

	for (i = 0; i < n && !torture_must_stop(); i++) {
		rt = &(reader_tasks[i]);
		sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);

		if (i % 5 == 0)
			strcat(buf, "\n");
		strcat(buf, buf1);

		sum += rt->last_duration_ns;
	}
	strcat(buf, "\n");

	SCALEOUT("%s\n", buf);

	kfree(buf);
	return sum;
}

// The main_func is the main orchestrator: it performs a sequence of
// nruns experiments.  For each experiment, it signals all the readers
// to start, waits for them to finish, and then collects their measured
// durations before starting the next experiment.  After the last
// experiment, the per-experiment averages are printed.
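// Results are kept in integer fixed point: result_avg[exp] holds
// 1000 * total-ns / (nreaders * loops), that is, the average number of
// nanoseconds per loop times 1000, which is later printed via %llu.%03u.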
static int main_func(void *arg)
{
	bool errexit = false;
	int exp, r;
	char buf1[64];
	char *buf;
	u64 *result_avg;

	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	VERBOSE_SCALEOUT("main_func task started");
	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
	buf = kzalloc(64 + nruns * 32, GFP_KERNEL);
	if (!result_avg || !buf) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		errexit = true;
	}
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	// Wait for all threads to start.
	atomic_inc(&n_init);
	while (atomic_read(&n_init) < nreaders + 1)
		schedule_timeout_uninterruptible(1);

	// Run the experiments, starting up all the readers for each one.
	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
		if (errexit)
			break;
		if (torture_must_stop())
			goto end;

		reset_readers();
		atomic_set(&nreaders_exp, nreaders);
		atomic_set(&n_started, nreaders);
		atomic_set(&n_warmedup, nreaders);
		atomic_set(&n_cooleddown, nreaders);

		exp_idx = exp;

		for (r = 0; r < nreaders; r++) {
			smp_store_release(&reader_tasks[r].start_reader, 1);
			wake_up(&reader_tasks[r].wq);
		}

		VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
				 nreaders);

		wait_event(main_wq,
			   !atomic_read(&nreaders_exp) || torture_must_stop());

		VERBOSE_SCALEOUT("main_func: experiment ended");

		if (torture_must_stop())
			goto end;

		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
	}

	// Print the average of all experiments.
	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

	if (!errexit) {
		buf[0] = 0;
		strcat(buf, "\n");
		strcat(buf, "Runs\tTime(ns)\n");
	}

	for (exp = 0; exp < nruns; exp++) {
		u64 avg;
		u32 rem;

		if (errexit)
			break;
		avg = div_u64_rem(result_avg[exp], 1000, &rem);
		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
		strcat(buf, buf1);
	}

	if (!errexit)
		SCALEOUT("%s", buf);

	// This will shut down everything, including us.
	if (shutdown) {
		shutdown_start = 1;
		wake_up(&shutdown_wq);
	}

	// Wait for torture to stop us.
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);

end:
	torture_kthread_stopping("main_func");
	kfree(result_avg);
	kfree(buf);
	return 0;
}

static void
ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: verbose=%d shutdown=%d holdoff=%d loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
		 verbose, shutdown, holdoff, loops, nreaders, nruns, readdelay);
}

static void
ref_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nreaders; i++)
			torture_stop_kthread("ref_scale_reader",
					     reader_tasks[i].task);
	}
	kfree(reader_tasks);

	torture_stop_kthread("main_task", main_task);
	kfree(main_task);

	// Do scale-type-specific cleanup operations.
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

// Shutdown kthread.  Just waits to be awakened, then shuts down the system.
static int
ref_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq, shutdown_start);

	smp_mb(); // Wake before output.
	ref_scale_cleanup();
	kernel_power_off();

	return -EINVAL;
}

static int __init
ref_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct ref_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops,
		&refcnt_ops, &rwlock_ops, &rwsem_ops,
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	ref_scale_print_module_parms(cur_ops, "Start of test");

	// Shutdown task
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	// Reader tasks (default to ~75% of online CPUs).
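	// ((n >> 1) + (n >> 2) gives 50% + 25% of n, rounded down.)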
	if (nreaders < 0)
		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
	if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
		loops = 1;
	if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
		nreaders = 1;
	if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
		nruns = 1;
	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (!reader_tasks) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCALEOUT("Starting %d reader threads\n", nreaders);

	for (i = 0; i < nreaders; i++) {
		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
						  reader_tasks[i].task);
		if (firsterr)
			goto unwind;

		init_waitqueue_head(&(reader_tasks[i].wq));
	}

	// Main Task
	init_waitqueue_head(&main_wq);
	firsterr = torture_create_kthread(main_func, NULL, main_task);
	if (firsterr)
		goto unwind;

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	ref_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(ref_scale_init);
module_exit(ref_scale_cleanup);