/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <[email protected]>
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 *			Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 * (Naturally, the FULL state doesn't exist for the CPU resource.)
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 *			Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME then becomes the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             = 0.4%
 *	   FULL = (256 - min(257, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)       = 25%
 *	   FULL = (4 - min(3, 4)) / 4 = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 *			Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
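 *
 * [ A worked example of the aggregation above, added for illustration:
 *   over a 2s period, CPU0 is non-idle for the full 2s with 0.2s of
 *   SOME stall, while CPU1 is non-idle for only 1s with 0.5s of SOME
 *   stall. Weighting by non-idle time gives
 *
 *	tSOME = (0.2s * 2s + 0.5s * 1s) / (2s + 1s) = 0.3s
 *	%SOME = 0.3s / 2s = 15%
 *
 *   so a mostly-idle CPU cannot dominate the global numbers. ]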
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */

#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

bool psi_disabled __read_mostly;
core_param(psi_disabled, psi_disabled, bool, 0644);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
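
/*
 * For reference: the EXP_* constants above are exp(-period/window) in
 * the calc_load() fixed-point format (FIXED_1 == 2048 from
 * <linux/sched/loadavg.h>):
 *
 *	EXP_10s  ~= exp(-2s/10s)  * 2048 = 0.8187 * 2048 ~= 1677
 *	EXP_60s  ~= exp(-2s/60s)  * 2048 = 0.9672 * 2048 ~= 1981
 *	EXP_300s ~= exp(-2s/300s) * 2048 = 0.9934 * 2048 ~= 2034
 *
 * calc_avgs() below feeds them to calc_load()/calc_load_n().
 */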

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
static struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static void psi_update_work(struct work_struct *work);

static void group_init(struct psi_group *group)
{
	int cpu;

	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->next_update = sched_clock() + psi_period;
	INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
	mutex_init(&group->stat_lock);
}

void __init psi_init(void)
{
	if (psi_disabled)
		return;

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}

static bool test_state(unsigned int *tasks, enum psi_states state)
{
	switch (state) {
	case PSI_IO_SOME:
		return tasks[NR_IOWAIT];
	case PSI_IO_FULL:
		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
	case PSI_MEM_SOME:
		return tasks[NR_MEMSTALL];
	case PSI_MEM_FULL:
		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
	case PSI_CPU_SOME:
		return tasks[NR_RUNNING] > 1;
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}
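
/*
 * For illustration: these tests are the per-CPU instantiation of the
 * SOME/FULL definitions in the header comment. With, say,
 * tasks[NR_IOWAIT] == 1, tasks[NR_MEMSTALL] == 0 and
 * tasks[NR_RUNNING] == 0, the CPU is in IO SOME and IO FULL (a task is
 * blocked on IO and nothing else can run), but not in CPU SOME, which
 * requires more runnable tasks than the one occupying the CPU.
 */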

static void get_recent_times(struct psi_group *group, int cpu, u32 *times)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	unsigned int tasks[NR_PSI_TASK_COUNTS];
	u64 now, state_start;
	unsigned int seq;
	int s;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
		state_start = groupc->state_start;
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (test_state(tasks, s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[s];
		groupc->times_prev[s] = times[s];

		times[s] = delta;
	}
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}
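
/*
 * Roughly (see <linux/sched/loadavg.h> for the exact fixed-point
 * rounding), each calc_load() step above computes
 *
 *	avg = (avg * EXP + pct * (FIXED_1 - EXP)) / FIXED_1
 *
 * so, for example, a 2s period with 1s of stall contributes pct = 50%
 * with a weight of (2048 - 1677) / 2048 ~= 18% to the 10s average.
 */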

static bool update_stats(struct psi_group *group)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long missed_periods = 0;
	unsigned long nonidle_total = 0;
	u64 now, expires, period;
	int cpu;
	int s;

	mutex_lock(&group->stat_lock);

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;

		get_recent_times(group, cpu, times);

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));

	/* avgX= */
	now = sched_clock();
	expires = group->next_update;
	if (now < expires)
		goto out;
	if (now - expires > psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	group->next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->last_update + (missed_periods * psi_period));
	group->last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[s] - group->total_prev[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->total_prev[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}
out:
	mutex_unlock(&group->stat_lock);
	return nonidle_total;
}

static void psi_update_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	bool nonidle;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, clock_work);

	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */

	nonidle = update_stats(group);

	if (nonidle) {
		unsigned long delay = 0;
		u64 now;

		now = sched_clock();
		if (group->next_update > now)
			delay = nsecs_to_jiffies(group->next_update - now) + 1;
		schedule_delayed_work(dwork, delay);
	}
}

static void record_times(struct psi_group_cpu *groupc, int cpu,
			 bool memstall_tick)
{
	u32 delta;
	u64 now;

	now = cpu_clock(cpu);
	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (test_state(groupc->tasks, PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (test_state(groupc->tasks, PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (test_state(groupc->tasks, PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (test_state(groupc->tasks, PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
		else if (memstall_tick) {
			u32 sample;
			/*
			 * Since we care about lost potential, a
			 * memstall is FULL when there are no other
			 * working tasks, but also when the CPU is
			 * actively reclaiming and nothing productive
			 * could run even if it were runnable.
			 *
			 * When the timer tick sees a reclaiming CPU,
			 * regardless of runnable tasks, sample a FULL
			 * tick (or less if it hasn't been a full tick
			 * since the last state change).
			 */
			sample = min(delta, (u32)jiffies_to_nsecs(1));
			groupc->times[PSI_MEM_FULL] += sample;
		}
	}

	if (test_state(groupc->tasks, PSI_CPU_SOME))
		groupc->times[PSI_CPU_SOME] += delta;

	if (test_state(groupc->tasks, PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

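/*
 * The @clear and @set arguments below are bitmasks of TSK_* task
 * states, one bit per tasks[] counter. As an example (inferred from
 * the scheduler hooks rather than spelled out in this file), a task
 * blocking on IO would be accounted with clear=TSK_RUNNING,
 * set=TSK_IOWAIT.
 */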
static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set)
{
	struct psi_group_cpu *groupc;
	unsigned int t, m;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we assess the aggregate resource states this CPU's
	 * tasks have been in since the last change, and account any
	 * SOME and FULL time these may have resulted in.
	 *
	 * Then we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 */
	write_seqcount_begin(&groupc->seq);

	record_times(groupc, cpu, false);

	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t] == 0 && !psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					clear, set);
			psi_bug = 1;
		}
		groupc->tasks[t]--;
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	write_seqcount_end(&groupc->seq);

	if (!delayed_work_pending(&group->clock_work))
		schedule_delayed_work(&group->clock_work, PSI_FREQ);
}

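/*
 * Walk the psi groups a task belongs to: its cgroup and every ancestor
 * up to, but not including, the root cgroup, followed by the
 * system-wide psi_system group. For illustration, a task in cgroup
 * /a/b is accounted in b, then a, then psi_system.
 */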
static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
#ifdef CONFIG_CGROUPS
	struct cgroup *cgroup = NULL;

	if (!*iter)
		cgroup = task->cgroups->dfl_cgrp;
	else if (*iter == &psi_system)
		return NULL;
	else
		cgroup = cgroup_parent(*iter);

	if (cgroup && cgroup_parent(cgroup)) {
		*iter = cgroup;
		return cgroup_psi(cgroup);
	}
#else
	if (*iter)
		return NULL;
#endif
	*iter = &psi_system;
	return &psi_system;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	void *iter = NULL;

	if (!task->pid)
		return;

	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, cpu,
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;

	while ((group = iterate_groups(task, &iter)))
		psi_group_change(group, cpu, clear, set);
}

void psi_memstall_tick(struct task_struct *task, int cpu)
{
	struct psi_group *group;
	void *iter = NULL;

	while ((group = iterate_groups(task, &iter))) {
		struct psi_group_cpu *groupc;

		groupc = per_cpu_ptr(group->pcpu, cpu);
		write_seqcount_begin(&groupc->seq);
		record_times(groupc, cpu, true);
		write_seqcount_end(&groupc->seq);
	}
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (psi_disabled)
		return;

	*flags = current->flags & PF_MEMSTALL;
	if (*flags)
		return;
	/*
	 * PF_MEMSTALL setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->flags |= PF_MEMSTALL;
	psi_task_change(current, 0, TSK_MEMSTALL);

	rq_unlock_irq(rq, &rf);
}

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (psi_disabled)
		return;

	if (*flags)
		return;
	/*
	 * PF_MEMSTALL clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->flags &= ~PF_MEMSTALL;
	psi_task_change(current, TSK_MEMSTALL, 0);

	rq_unlock_irq(rq, &rf);
}

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (psi_disabled)
		return 0;

	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi.pcpu)
		return -ENOMEM;
	group_init(&cgroup->psi);
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (psi_disabled)
		return;

	cancel_delayed_work_sync(&cgroup->psi.clock_work);
	free_percpu(cgroup->psi.pcpu);
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags = 0;
	struct rq_flags rf;
	struct rq *rq;

	if (psi_disabled) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	if (task_on_rq_queued(task))
		task_flags = TSK_RUNNING;
	else if (task->in_iowait)
		task_flags = TSK_IOWAIT;

	if (task->flags & PF_MEMSTALL)
		task_flags |= TSK_MEMSTALL;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */

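/*
 * Emit the pressure stats for one resource. Judging by the format
 * string below, each file contains a "some" line and - except for CPU
 * - a "full" line, e.g. (values purely illustrative):
 *
 *	some avg10=2.04 avg60=0.75 avg300=0.40 total=157622151
 *	full avg10=1.11 avg60=0.43 avg300=0.23 total=102084832
 *
 * with the averages in percent and total in microseconds of stall time.
 */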
int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	int full;

	if (psi_disabled)
		return -EOPNOTSUPP;

	update_stats(group);

	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
		unsigned long avg[3];
		u64 total;
		int w;

		for (w = 0; w < 3; w++)
			avg[w] = group->avg[res * 2 + full][w];
		total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}

static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_cpu_show, NULL);
}

static const struct file_operations psi_io_fops = {
	.open		= psi_io_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations psi_memory_fops = {
	.open		= psi_memory_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations psi_cpu_fops = {
	.open		= psi_cpu_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init psi_proc_init(void)
{
	proc_mkdir("pressure", NULL);
	proc_create("pressure/io", 0, NULL, &psi_io_fops);
	proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
	proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
	return 0;
}
module_init(psi_proc_init);