kernel/sched/stats.c
// SPDX-License-Identifier: GPL-2.0
/*
 * /proc/schedstat implementation
 */

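/*
 * Note: this file has no #include lines of its own; in current kernels it
 * is built by being #included from kernel/sched/build_utility.c, which
 * pulls in sched.h and the other headers these helpers rely on.
 *
 * The helpers below maintain struct sched_statistics for scheduling
 * entities: time spent runnable-but-waiting, sleeping (voluntary) and
 * blocked (involuntary), all measured in nanoseconds of rq_clock().
 */
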
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
                               struct sched_statistics *stats)
{
        u64 wait_start, prev_wait_start;

        wait_start = rq_clock(rq);
        prev_wait_start = schedstat_val(stats->wait_start);

        if (p && likely(wait_start > prev_wait_start))
                wait_start -= prev_wait_start;

        __schedstat_set(stats->wait_start, wait_start);
}
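
/*
 * If the task was dequeued mid-wait for a migration,
 * __update_stats_wait_end() left the wait accumulated so far in
 * ->wait_start instead of a time stamp.  Subtracting it from the new
 * rq_clock() above back-dates the start, so the next wait_end delta
 * includes the pre-migration wait.  (The p check skips this adjustment
 * for group entities, which have no task_struct.)
 */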

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
                             struct sched_statistics *stats)
{
        u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);

        if (p) {
                if (task_on_rq_migrating(p)) {
                        /*
                         * Preserve migrating task's wait time so wait_start
                         * time stamp can be adjusted to accumulate wait time
                         * prior to migration.
                         */
                        __schedstat_set(stats->wait_start, delta);

                        return;
                }

                trace_sched_stat_wait(p, delta);
        }

        __schedstat_set(stats->wait_max,
                        max(schedstat_val(stats->wait_max), delta));
        __schedstat_inc(stats->wait_count);
        __schedstat_add(stats->wait_sum, delta);
        __schedstat_set(stats->wait_start, 0);
}
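
/*
 * Worked example of the migration hand-off (illustrative numbers, shown
 * in ms for readability; the fields actually hold nanoseconds): a task
 * waits 3 ms on CPU0, is migrated, then waits 2 ms on CPU1.
 *
 *   wait_end on CPU0 (migrating): ->wait_start = 3 ms  (a delta, not a stamp)
 *   wait_start on CPU1:           ->wait_start = now - 3 ms
 *   wait_end on CPU1 (picked):    delta = (now + 2 ms) - (now - 3 ms) = 5 ms,
 *                                 added to ->wait_sum exactly once.
 */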

void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
                                    struct sched_statistics *stats)
{
        u64 sleep_start, block_start;

        sleep_start = schedstat_val(stats->sleep_start);
        block_start = schedstat_val(stats->block_start);

        if (sleep_start) {
                u64 delta = rq_clock(rq) - sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > schedstat_val(stats->sleep_max)))
                        __schedstat_set(stats->sleep_max, delta);

                __schedstat_set(stats->sleep_start, 0);
                __schedstat_add(stats->sum_sleep_runtime, delta);

                if (p) {
                        account_scheduler_latency(p, delta >> 10, 1);
                        trace_sched_stat_sleep(p, delta);
                }
        }

        if (block_start) {
                u64 delta = rq_clock(rq) - block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > schedstat_val(stats->block_max)))
                        __schedstat_set(stats->block_max, delta);

                __schedstat_set(stats->block_start, 0);
                __schedstat_add(stats->sum_sleep_runtime, delta);
                __schedstat_add(stats->sum_block_runtime, delta);

                if (p) {
                        if (p->in_iowait) {
                                __schedstat_add(stats->iowait_sum, delta);
                                __schedstat_inc(stats->iowait_count);
                                trace_sched_stat_iowait(p, delta);
                        }

                        trace_sched_stat_blocked(p, delta);

                        /*
                         * Blocking time is in units of nanosecs, so shift by
                         * 20 to get a milliseconds-range estimation of the
                         * amount of time that the task spent sleeping:
                         */
                        if (unlikely(prof_on == SLEEP_PROFILING)) {
                                profile_hits(SLEEP_PROFILING,
                                             (void *)get_wchan(p),
                                             delta >> 20);
                        }
                        account_scheduler_latency(p, delta >> 10, 0);
                }
        }
}
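
/*
 * The shifts above are cheap power-of-two approximations of unit
 * conversions from nanoseconds:
 *
 *   delta >> 10  divides by 1024      ~= 1e3, so roughly microseconds
 *   delta >> 20  divides by 1048576   ~= 1e6, so roughly milliseconds
 *
 * e.g. delta = 5,000,000 ns (5 ms): 5000000 >> 20 = 4, close enough for
 * profiling buckets while avoiding a 64-bit division on this path.
 */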

/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 16

static int show_schedstat(struct seq_file *seq, void *v)
{
        int cpu;

        if (v == (void *)1) {
                seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
                seq_printf(seq, "timestamp %lu\n", jiffies);
        } else {
                struct rq *rq;
#ifdef CONFIG_SMP
                struct sched_domain *sd;
                int dcount = 0;
#endif
                cpu = (unsigned long)(v - 2);
                rq = cpu_rq(cpu);

                /* runqueue-specific stats */
                seq_printf(seq,
                    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
                    cpu, rq->yld_count,
                    rq->sched_count, rq->sched_goidle,
                    rq->ttwu_count, rq->ttwu_local,
                    rq->rq_cpu_time,
                    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

                seq_printf(seq, "\n");

#ifdef CONFIG_SMP
                /* domain-specific stats */
                rcu_read_lock();
                for_each_domain(cpu, sd) {
                        enum cpu_idle_type itype;

                        seq_printf(seq, "domain%d %*pb", dcount++,
                                   cpumask_pr_args(sched_domain_span(sd)));
                        for (itype = 0; itype < CPU_MAX_IDLE_TYPES; itype++) {
                                seq_printf(seq, " %u %u %u %u %u %u %u %u",
                                    sd->lb_count[itype],
                                    sd->lb_balanced[itype],
                                    sd->lb_failed[itype],
                                    sd->lb_imbalance[itype],
                                    sd->lb_gained[itype],
                                    sd->lb_hot_gained[itype],
                                    sd->lb_nobusyq[itype],
                                    sd->lb_nobusyg[itype]);
                        }
                        seq_printf(seq,
                                   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
                            sd->alb_count, sd->alb_failed, sd->alb_pushed,
                            sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
                            sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
                            sd->ttwu_wake_remote, sd->ttwu_move_affine,
                            sd->ttwu_move_balance);
                }
                rcu_read_unlock();
#endif
        }
        return 0;
}
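
/*
 * Resulting /proc/schedstat layout (field names follow the printf
 * arguments above; values are placeholders):
 *
 *   version <SCHEDSTAT_VERSION>
 *   timestamp <jiffies>
 *   cpu<N> yld_count 0 sched_count sched_goidle ttwu_count ttwu_local
 *          rq_cpu_time run_delay pcount
 *   domain<N> <span mask> then, per idle type (CPU_MAX_IDLE_TYPES blocks):
 *          lb_count lb_balanced lb_failed lb_imbalance lb_gained
 *          lb_hot_gained lb_nobusyq lb_nobusyg
 *          followed by the alb_*, sbe_*, sbf_* and ttwu_* fields in order.
 *
 * The literal "0" after yld_count preserves column positions from older
 * format versions whose yield counters were removed; see
 * Documentation/scheduler/sched-stats.rst for field descriptions.
 */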

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);

        return NULL;
}
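
/*
 * Worked example (illustrative), with CPUs 0 and 2 online, CPU 1 offline:
 *
 *   start(*offset = 0) -> (void *)1                      header record
 *   start(*offset = 1) -> (void *)(0 + 2), *offset = 1   cpu0
 *   start(*offset = 2) -> (void *)(2 + 2), *offset = 3   cpu2 (cpu1 skipped)
 *   start(*offset = 4) -> NULL                           iteration complete
 *
 * schedstat_next() below just bumps *offset and re-enters
 * schedstat_start(), so the n + 1 store keeps the offset in step with the
 * cpu ids even when the online mask has holes.
 */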

static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;

        return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
        .start = schedstat_start,
        .next  = schedstat_next,
        .stop  = schedstat_stop,
        .show  = show_schedstat,
};

static int __init proc_schedstat_init(void)
{
        proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
        return 0;
}
subsys_initcall(proc_schedstat_init);
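
/*
 * A mode argument of 0 is promoted to the proc default (0444) by the proc
 * core, so /proc/schedstat ends up world-readable.  Typical usage is to
 * sample it periodically and diff the counters, e.g.:
 *
 *   $ cat /proc/schedstat
 *   version 16
 *   timestamp <jiffies>
 *   cpu0 <9 runqueue counters>
 *   domain0 <span mask> <per-idle-type and domain-wide counters>
 *   ...
 */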