[linux.git] / kernel / sched / stats.c
// SPDX-License-Identifier: GPL-2.0
/*
 * /proc/schedstat implementation
 */
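
/*
 * Mark the time @p starts waiting on @rq.  If an earlier wait was
 * preserved across a migration (__update_stats_wait_end() stores the
 * accumulated delta in wait_start), back-date the new stamp so that the
 * pre-migration wait time keeps counting.
 */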
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
                               struct sched_statistics *stats)
{
        u64 wait_start, prev_wait_start;

        wait_start = rq_clock(rq);
        prev_wait_start = schedstat_val(stats->wait_start);

        if (p && likely(wait_start > prev_wait_start))
                wait_start -= prev_wait_start;

        __schedstat_set(stats->wait_start, wait_start);
}

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
                             struct sched_statistics *stats)
{
        u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);

        if (p) {
                if (task_on_rq_migrating(p)) {
                        /*
                         * Preserve a migrating task's wait time so that the
                         * wait_start timestamp can be adjusted on the
                         * destination runqueue to accumulate the wait time
                         * accrued prior to migration.
                         */
                        __schedstat_set(stats->wait_start, delta);

                        return;
                }

                trace_sched_stat_wait(p, delta);
        }

        __schedstat_set(stats->wait_max,
                        max(schedstat_val(stats->wait_max), delta));
        __schedstat_inc(stats->wait_count);
        __schedstat_add(stats->wait_sum, delta);
        __schedstat_set(stats->wait_start, 0);
}
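
/*
 * Worked example of the migration case above (clock values are
 * illustrative): a task starts waiting at rq_clock() == 100 and is
 * migrated at 130, so __update_stats_wait_end() stores delta == 30 in
 * wait_start.  On the destination runqueue, __update_stats_wait_start()
 * at rq_clock() == 500 back-dates the stamp to 500 - 30 = 470; when the
 * task finally runs at 520, the recorded wait is 520 - 470 = 50,
 * covering the time spent waiting on both runqueues.
 */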
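
/*
 * Account the time @p spent off the runqueue, either voluntarily
 * sleeping (sleep_start) or blocked, e.g. on I/O (block_start).  The
 * deltas are in nanoseconds; delta >> 10 below is a cheap ns -> us
 * approximation (divide by 1024) for latencytop's
 * account_scheduler_latency().
 */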
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
                                    struct sched_statistics *stats)
{
        u64 sleep_start, block_start;

        sleep_start = schedstat_val(stats->sleep_start);
        block_start = schedstat_val(stats->block_start);

        if (sleep_start) {
                u64 delta = rq_clock(rq) - sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > schedstat_val(stats->sleep_max)))
                        __schedstat_set(stats->sleep_max, delta);

                __schedstat_set(stats->sleep_start, 0);
                __schedstat_add(stats->sum_sleep_runtime, delta);

                if (p) {
                        account_scheduler_latency(p, delta >> 10, 1);
                        trace_sched_stat_sleep(p, delta);
                }
        }

        if (block_start) {
                u64 delta = rq_clock(rq) - block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > schedstat_val(stats->block_max)))
                        __schedstat_set(stats->block_max, delta);

                __schedstat_set(stats->block_start, 0);
                __schedstat_add(stats->sum_sleep_runtime, delta);
                __schedstat_add(stats->sum_block_runtime, delta);

                if (p) {
                        if (p->in_iowait) {
                                __schedstat_add(stats->iowait_sum, delta);
                                __schedstat_inc(stats->iowait_count);
                                trace_sched_stat_iowait(p, delta);
                        }

                        trace_sched_stat_blocked(p, delta);

                        account_scheduler_latency(p, delta >> 10, 0);
                }
        }
}

/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an
 * existing format, so that tools can adapt (or abort).
 */
#define SCHEDSTAT_VERSION 16
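
/*
 * Example output (values are illustrative):
 *
 *   version 16
 *   timestamp 4294892426
 *   cpu0 0 0 91996 35432 78766 21817 51227994 13390885 57654
 *   domain0 00000003 ...
 *
 * The per-cpu fields are, in order: yld_count, a legacy always-zero
 * field, sched_count, sched_goidle, ttwu_count, ttwu_local, rq_cpu_time
 * (time spent running, in ns), rq_sched_info.run_delay (time spent
 * waiting, in ns) and rq_sched_info.pcount (timeslices run).
 */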

static int show_schedstat(struct seq_file *seq, void *v)
{
        int cpu;

        if (v == (void *)1) {
                seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
                seq_printf(seq, "timestamp %lu\n", jiffies);
        } else {
                struct rq *rq;
#ifdef CONFIG_SMP
                struct sched_domain *sd;
                int dcount = 0;
#endif
                cpu = (unsigned long)(v - 2);
                rq = cpu_rq(cpu);

                /* runqueue-specific stats */
                seq_printf(seq,
                    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
                    cpu, rq->yld_count,
                    rq->sched_count, rq->sched_goidle,
                    rq->ttwu_count, rq->ttwu_local,
                    rq->rq_cpu_time,
                    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

                seq_printf(seq, "\n");

#ifdef CONFIG_SMP
                /* domain-specific stats */
                rcu_read_lock();
                for_each_domain(cpu, sd) {
                        enum cpu_idle_type itype;

                        seq_printf(seq, "domain%d %*pb", dcount++,
                                   cpumask_pr_args(sched_domain_span(sd)));
                        for (itype = 0; itype < CPU_MAX_IDLE_TYPES; itype++) {
                                seq_printf(seq, " %u %u %u %u %u %u %u %u",
                                    sd->lb_count[itype],
                                    sd->lb_balanced[itype],
                                    sd->lb_failed[itype],
                                    sd->lb_imbalance[itype],
                                    sd->lb_gained[itype],
                                    sd->lb_hot_gained[itype],
                                    sd->lb_nobusyq[itype],
                                    sd->lb_nobusyg[itype]);
                        }
                        seq_printf(seq,
                                   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
                            sd->alb_count, sd->alb_failed, sd->alb_pushed,
                            sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
                            sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
                            sd->ttwu_wake_remote, sd->ttwu_move_affine,
                            sd->ttwu_move_balance);
                }
                rcu_read_unlock();
#endif
        }
        return 0;
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing, so
 * we have to use cpumask_* to iterate over the CPUs.
 */
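
/*
 * Worked example, assuming CPUs 0 and 2 are online and cpu 1 is not:
 * *offset == 0 returns the header token (void *)1; *offset == 1 maps to
 * the first online CPU and returns cpu 0 as (void *)2; the next
 * position skips the hole and returns cpu 2 as (void *)4.
 * show_schedstat() recovers the cpu number as v - 2.
 */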
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);

        return NULL;
}

static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;

        return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
        /* Nothing to tear down; schedstat_start() takes no locks. */
}

static const struct seq_operations schedstat_sops = {
        .start = schedstat_start,
        .next  = schedstat_next,
        .stop  = schedstat_stop,
        .show  = show_schedstat,
};
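
/*
 * Register /proc/schedstat using the iterator above.  With
 * CONFIG_SCHEDSTATS enabled, the file can be read like any other proc
 * file, e.g. `cat /proc/schedstat`; Documentation/scheduler/sched-stats.rst
 * has a field-by-field description of the output.
 */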

static int __init proc_schedstat_init(void)
{
        proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
        return 0;
}
subsys_initcall(proc_schedstat_init);