// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
 do {                                           \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                pr_cont(x);                     \
 } while (0)
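
/*
 * Note that a NULL seq_file routes the output to the console via
 * pr_cont(); sysrq_sched_debug_show() below relies on this to emit the
 * same report from the sysrq handler.
 */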

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);

        return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;

        return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
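
/*
 * Worked example: SPLIT_NS(1234567) expands to
 * "nsec_high(1234567), nsec_low(1234567)", i.e. 1, 234567, which the
 * "%Ld.%06ld" formats below print as "1.234567" (nanosecond values
 * rendered as milliseconds).
 */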

#define SCHED_FEAT(name, enabled)       \
        #name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)       \
        jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
        static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
        static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
        int i;
        int neg = 0;

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
        if (i < 0)
                return i;

        if (neg) {
                sysctl_sched_features &= ~(1UL << i);
                sched_feat_disable(i);
        } else {
                sysctl_sched_features |= (1UL << i);
                sched_feat_enable(i);
        }

        return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int ret;
        struct inode *inode;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
        cpus_read_lock();
        inode_lock(inode);
        ret = sched_feat_set(cmp);
        inode_unlock(inode);
        cpus_read_unlock();
        if (ret < 0)
                return ret;

        *ppos += cnt;

        return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
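
/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug and the
 * feature name exists in features.h (GENTLE_FAIR_SLEEPERS here is only
 * an example):
 *
 *   cat /sys/kernel/debug/sched/features
 *   echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 *
 * A "NO_" prefix clears the feature bit; a bare name sets it. Unknown
 * names fail the write with the error from match_string().
 */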

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
                                   size_t cnt, loff_t *ppos)
{
        char buf[16];
        unsigned int scaling;

        if (cnt > 15)
                cnt = 15;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
        buf[cnt] = '\0';

        if (kstrtouint(buf, 10, &scaling))
                return -EINVAL;

        if (scaling >= SCHED_TUNABLESCALING_END)
                return -EINVAL;

        sysctl_sched_tunable_scaling = scaling;
        if (sched_update_scaling())
                return -EINVAL;

        *ppos += cnt;
        return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
        return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
        .open           = sched_scaling_open,
        .write          = sched_scaling_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
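
/*
 * Valid writes are the integers 0..2, which index
 * sched_tunable_scaling_names[] further down in this file:
 * 0 = none, 1 = logarithmic, 2 = linear. Example (the path assumes the
 * conventional debugfs mount point):
 *
 *   echo 0 > /sys/kernel/debug/sched/tunable_scaling
 */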

#endif /* CONFIG_SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
                                   size_t cnt, loff_t *ppos)
{
        char buf[16];
        int mode;

        if (cnt > 15)
                cnt = 15;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        mode = sched_dynamic_mode(strstrip(buf));
        if (mode < 0)
                return mode;

        sched_dynamic_update(mode);

        *ppos += cnt;

        return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
        static const char * preempt_modes[] = {
                "none", "voluntary", "full"
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
                if (preempt_dynamic_mode == i)
                        seq_puts(m, "(");
                seq_puts(m, preempt_modes[i]);
                if (preempt_dynamic_mode == i)
                        seq_puts(m, ")");

                seq_puts(m, " ");
        }

        seq_puts(m, "\n");
        return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
        .open           = sched_dynamic_open,
        .write          = sched_dynamic_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
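
/*
 * Reading "preempt" marks the active model with parentheses, e.g.
 * "none voluntary (full)"; writing one of the names switches the
 * preemption model at runtime via sched_dynamic_update().
 */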

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

#ifdef CONFIG_SMP
static struct dentry           *sd_dentry;

static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
                                  size_t cnt, loff_t *ppos)
{
        ssize_t result;
        bool orig;

        cpus_read_lock();
        mutex_lock(&sched_domains_mutex);

        orig = sched_debug_verbose;
        result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);

        if (sched_debug_verbose && !orig)
                update_sched_domain_debugfs();
        else if (!sched_debug_verbose && orig) {
                debugfs_remove(sd_dentry);
                sd_dentry = NULL;
        }

        mutex_unlock(&sched_domains_mutex);
        cpus_read_unlock();

        return result;
}
#else
#define sched_verbose_write debugfs_write_file_bool
#endif

static const struct file_operations sched_verbose_fops = {
        .read =         debugfs_read_file_bool,
        .write =        sched_verbose_write,
        .open =         simple_open,
        .llseek =       default_llseek,
};

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
        .open           = sched_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
        struct dentry __maybe_unused *numa;

        debugfs_sched = debugfs_create_dir("sched", NULL);

        debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
        debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
#ifdef CONFIG_PREEMPT_DYNAMIC
        debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

        debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
        debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
        debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
        debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

        debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
        debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
        debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
        debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
        debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

        mutex_lock(&sched_domains_mutex);
        update_sched_domain_debugfs();
        mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
        numa = debugfs_create_dir("numa_balancing", debugfs_sched);

        debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
        debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
        debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
        debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
        debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif

        debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

        return 0;
}
late_initcall(sched_init_debug);
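
/*
 * The calls above populate (assuming debugfs is mounted at the
 * conventional /sys/kernel/debug):
 *
 *   /sys/kernel/debug/sched/{features,verbose,debug,latency_ns,...}
 *   /sys/kernel/debug/sched/preempt           (CONFIG_PREEMPT_DYNAMIC)
 *   /sys/kernel/debug/sched/tunable_scaling   (CONFIG_SMP)
 *   /sys/kernel/debug/sched/numa_balancing/   (CONFIG_NUMA_BALANCING)
 *   /sys/kernel/debug/sched/domains/          (CONFIG_SMP, verbose only)
 */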

#ifdef CONFIG_SMP

static cpumask_var_t            sd_sysctl_cpus;

static int sd_flags_show(struct seq_file *m, void *v)
{
        unsigned long flags = *(unsigned int *)m->private;
        int idx;

        for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
                seq_puts(m, sd_flag_debug[idx].name);
                seq_puts(m, " ");
        }
        seq_puts(m, "\n");

        return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
        return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
        .open           = sd_flags_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member) \
        debugfs_create_##type(#member, mode, parent, &sd->member)

        SDM(ulong, 0644, min_interval);
        SDM(ulong, 0644, max_interval);
        SDM(u64,   0644, max_newidle_lb_cost);
        SDM(u32,   0644, busy_factor);
        SDM(u32,   0644, imbalance_pct);
        SDM(u32,   0644, cache_nice_tries);
        SDM(str,   0444, name);

#undef SDM

        debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}

void update_sched_domain_debugfs(void)
{
        int cpu, i;

        /*
         * This can unfortunately be invoked before sched_init_debug() creates
         * the debug directory. Don't touch sd_sysctl_cpus until then.
         */
        if (!debugfs_sched)
                return;

        if (!sched_debug_verbose)
                return;

        if (!cpumask_available(sd_sysctl_cpus)) {
                if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
                        return;
                cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
        }

        if (!sd_dentry) {
                sd_dentry = debugfs_create_dir("domains", debugfs_sched);

                /* rebuild sd_sysctl_cpus if empty since it gets cleared below */
                if (cpumask_empty(sd_sysctl_cpus))
                        cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
        }

        for_each_cpu(cpu, sd_sysctl_cpus) {
                struct sched_domain *sd;
                struct dentry *d_cpu;
                char buf[32];

                snprintf(buf, sizeof(buf), "cpu%d", cpu);
                debugfs_lookup_and_remove(buf, sd_dentry);
                d_cpu = debugfs_create_dir(buf, sd_dentry);

                i = 0;
                for_each_domain(cpu, sd) {
                        struct dentry *d_sd;

                        snprintf(buf, sizeof(buf), "domain%d", i);
                        d_sd = debugfs_create_dir(buf, d_cpu);

                        register_sd(sd, d_sd);
                        i++;
                }

                __cpumask_clear_cpu(cpu, sd_sysctl_cpus);
        }
}
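
/*
 * With verbose mode on, each rebuild leaves a tree of the form
 * domains/cpuN/domainM/{min_interval,max_interval,max_newidle_lb_cost,
 * busy_factor,imbalance_pct,cache_nice_tries,name,flags}, mirroring
 * register_sd() above; dirty_sched_domain_sysctl() below marks CPUs
 * whose entries must be rebuilt on the next call.
 */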

void dirty_sched_domain_sysctl(int cpu)
{
        if (cpumask_available(sd_sysctl_cpus))
                __cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];

#define P(F)            SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)F)
#define P_SCHEDSTAT(F)  SEQ_printf(m, "  .%-30s: %lld\n",       \
                #F, (long long)schedstat_val(stats->F))
#define PN(F)           SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
                #F, SPLIT_NS((long long)schedstat_val(stats->F)))

        if (!se)
                return;

        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);

        if (schedstat_enabled()) {
                struct sched_statistics *stats;
                stats = __schedstats_from_se(se);

                PN_SCHEDSTAT(wait_start);
                PN_SCHEDSTAT(sleep_start);
                PN_SCHEDSTAT(block_start);
                PN_SCHEDSTAT(sleep_max);
                PN_SCHEDSTAT(block_max);
                PN_SCHEDSTAT(exec_max);
                PN_SCHEDSTAT(slice_max);
                PN_SCHEDSTAT(wait_max);
                PN_SCHEDSTAT(wait_sum);
                P_SCHEDSTAT(wait_count);
        }

        P(se->load.weight);
#ifdef CONFIG_SMP
        P(se->avg.load_avg);
        P(se->avg.util_avg);
        P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
        if (autogroup_path(tg, path, plen))
                return;

        cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)                       \
{                                                                       \
        if (spin_trylock(&sched_debug_lock)) {                          \
                task_group_path(tg, group_path, sizeof(group_path));    \
                SEQ_printf(m, fmt, group_path);                         \
                spin_unlock(&sched_debug_lock);                         \
        } else {                                                        \
                char buf[128];                                          \
                char *bufend = buf + sizeof(buf) - 3;                   \
                task_group_path(tg, buf, bufend - buf);                 \
                strcpy(bufend - 1, "...");                              \
                SEQ_printf(m, fmt, buf);                                \
        }                                                               \
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (task_current(rq, p))
                SEQ_printf(m, ">R");
        else
                SEQ_printf(m, " %c", task_state_to_char(p));

        SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);

        SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
                SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
                SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
        SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

        SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;

        SEQ_printf(m, "\n");
        SEQ_printf(m, "runnable tasks:\n");
        SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
                   "     wait-time             sum-exec        sum-sleep\n");
        SEQ_printf(m, "-------------------------------------------------------"
                   "------------------------------------------------------\n");

        rcu_read_lock();
        for_each_process_thread(g, p) {
                if (task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        }
        rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "\n");
        SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
        SEQ_printf(m, "\n");
        SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_rq_lock_irqsave(rq, flags);
        if (rb_first_cached(&cfs_rq->tasks_timeline))
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_rq_unlock_irqrestore(rq, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
        SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
                        cfs_rq->idle_nr_running);
        SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
                        cfs_rq->idle_h_nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
                        cfs_rq->avg.load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
                        cfs_rq->avg.runnable_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
                        cfs_rq->avg.util_avg);
        SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
                        cfs_rq->avg.util_est.enqueued);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
                        cfs_rq->removed.load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
                        cfs_rq->removed.util_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
                        cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
                        cfs_rq->tg_load_avg_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
                        cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
        SEQ_printf(m, "\n");
        SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
        SEQ_printf(m, "\n");
        SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

        PU(rt_nr_running);
#ifdef CONFIG_SMP
        PU(rt_nr_migratory);
#endif
        P(rt_throttled);
        PN(rt_time);
        PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
        struct dl_bw *dl_bw;

        SEQ_printf(m, "\n");
        SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

        PU(dl_nr_running);
#ifdef CONFIG_SMP
        PU(dl_nr_migratory);
        dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
        dl_bw = &dl_rq->dl_bw;
#endif
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)                                                            \
do {                                                                    \
        if (sizeof(rq->x) == 4)                                         \
                SEQ_printf(m, "  .%-30s: %d\n", #x, (int)(rq->x));      \
        else                                                            \
                SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
        P(nr_switches);
        P(nr_uninterruptible);
        PN(next_balance);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
        PN(clock);
        PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
        P64(avg_idle);
        P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
        if (schedstat_enabled()) {
                P(yld_count);
                P(sched_count);
                P(sched_goidle);
                P(ttwu_count);
                P(ttwu_local);
        }
#undef P

        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
        print_dl_stats(m, cpu);

        print_rq(m, rq, cpu);
        SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};

static void sched_debug_header(struct seq_file *m)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
        P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable());
#endif
#undef PN
#undef P

        SEQ_printf(m, "\n");
        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_idle_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n",
                "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
        SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
        int cpu = (unsigned long)(v - 2);

        if (cpu != -1)
                print_cpu(m, cpu);
        else
                sched_debug_header(m);

        return 0;
}

void sysrq_sched_debug_show(void)
{
        int cpu;

        sched_debug_header(NULL);
        for_each_online_cpu(cpu) {
                /*
                 * Need to reset softlockup watchdogs on all CPUs, because
                 * another CPU might be blocked waiting for us to process
                 * an IPI or stop_machine.
                 */
                touch_nmi_watchdog();
                touch_all_softlockup_watchdogs();
                print_cpu(NULL, cpu);
        }
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);

        return NULL;
}
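
/*
 * Worked example, assuming CPUs {0, 2, 3} are online: offset 0 yields the
 * header token (1); offset 1 yields CPU 0 (token 2); afterwards the offset
 * is left at cpu+1, so the next increment finds CPU 2 (token 4), then
 * CPU 3 (token 5), and iteration stops once cpumask_next() runs past
 * nr_cpu_ids. sched_debug_show() recovers the CPU number as token - 2.
 */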

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
        .start          = sched_debug_start,
        .next           = sched_debug_next,
        .stop           = sched_debug_stop,
        .show           = sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
                unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
        SEQ_printf(m, "numa_faults node=%d ", node);
        SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
        SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
        if (p->mm)
                P(mm->numa_scan_seq);

        P(numa_pages_migrated);
        P(numa_preferred_nid);
        P(total_numa_faults);
        SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
                        task_node(p), task_numa_group_id(p));
        show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
                                                  struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------"
                "----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);

        nr_switches = p->nvcsw + p->nivcsw;

        P(se.nr_migrations);

        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;

                PN_SCHEDSTAT(sum_sleep_runtime);
                PN_SCHEDSTAT(sum_block_runtime);
                PN_SCHEDSTAT(wait_start);
                PN_SCHEDSTAT(sleep_start);
                PN_SCHEDSTAT(block_start);
                PN_SCHEDSTAT(sleep_max);
                PN_SCHEDSTAT(block_max);
                PN_SCHEDSTAT(exec_max);
                PN_SCHEDSTAT(slice_max);
                PN_SCHEDSTAT(wait_max);
                PN_SCHEDSTAT(wait_sum);
                P_SCHEDSTAT(wait_count);
                PN_SCHEDSTAT(iowait_sum);
                P_SCHEDSTAT(iowait_count);
                P_SCHEDSTAT(nr_migrations_cold);
                P_SCHEDSTAT(nr_failed_migrations_affine);
                P_SCHEDSTAT(nr_failed_migrations_running);
                P_SCHEDSTAT(nr_failed_migrations_hot);
                P_SCHEDSTAT(nr_forced_migrations);
                P_SCHEDSTAT(nr_wakeups);
                P_SCHEDSTAT(nr_wakeups_sync);
                P_SCHEDSTAT(nr_wakeups_migrate);
                P_SCHEDSTAT(nr_wakeups_local);
                P_SCHEDSTAT(nr_wakeups_remote);
                P_SCHEDSTAT(nr_wakeups_affine);
                P_SCHEDSTAT(nr_wakeups_affine_attempts);
                P_SCHEDSTAT(nr_wakeups_passive);
                P_SCHEDSTAT(nr_wakeups_idle);

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
                PN_SCHEDSTAT(core_forceidle_sum);
#endif
        }

        __P(nr_switches);
        __PS("nr_voluntary_switches", p->nvcsw);
        __PS("nr_involuntary_switches", p->nivcsw);

        P(se.load.weight);
#ifdef CONFIG_SMP
        P(se.avg.load_sum);
        P(se.avg.runnable_sum);
        P(se.avg.util_sum);
        P(se.avg.load_avg);
        P(se.avg.runnable_avg);
        P(se.avg.util_avg);
        P(se.avg.last_update_time);
        P(se.avg.util_est.ewma);
        PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
        __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
        __PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
        __PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
        __PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
        P(policy);
        P(prio);
        if (task_has_dl_policy(p)) {
                P(dl.runtime);
                P(dl.deadline);
        }
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                __PS("clock-delta", t1-t0);
        }

        sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->stats, 0, sizeof(p->stats));
#endif
}
void resched_latency_warn(int cpu, u64 latency)
{
        static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

        WARN(__ratelimit(&latency_check_ratelimit),
             "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
             "without schedule\n",
             cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}