kernel/sched/deadline.c
b2441318 1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Deadline Scheduling Class (SCHED_DEADLINE)
4 *
5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6 *
 7 * Tasks that periodically execute their instances for less than their
 8 * runtime won't miss any of their deadlines.
 9 * Tasks that are not periodic or sporadic or that try to execute more
10 * than their reserved bandwidth will be slowed down (and may potentially
11 * miss some of their deadlines), and won't affect any other task.
12 *
13 * Copyright (C) 2012 Dario Faggioli <[email protected]>,
1baca4ce 14 * Juri Lelli <[email protected]>,
15 * Michael Trimarchi <[email protected]>,
16 * Fabio Checconi <[email protected]>
17 */
aab03e05 18
19#include <linux/cpuset.h>
20
21/*
22 * Default limits for DL period; on the top end we guard against small util
23 * tasks still getting ridiculously long effective runtimes, on the bottom end we
24 * guard against timer DoS.
25 */
26static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
27static unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */
28#ifdef CONFIG_SYSCTL
29static struct ctl_table sched_dl_sysctls[] = {
30 {
31 .procname = "sched_deadline_period_max_us",
32 .data = &sysctl_sched_dl_period_max,
33 .maxlen = sizeof(unsigned int),
34 .mode = 0644,
35 .proc_handler = proc_douintvec_minmax,
36 .extra1 = (void *)&sysctl_sched_dl_period_min,
37 },
38 {
39 .procname = "sched_deadline_period_min_us",
40 .data = &sysctl_sched_dl_period_min,
41 .maxlen = sizeof(unsigned int),
42 .mode = 0644,
43 .proc_handler = proc_douintvec_minmax,
44 .extra2 = (void *)&sysctl_sched_dl_period_max,
84227c12 45 },
46};
47
48static int __init sched_dl_sysctl_init(void)
49{
50 register_sysctl_init("kernel", sched_dl_sysctls);
51 return 0;
52}
53late_initcall(sched_dl_sysctl_init);
54#endif
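/*
 * Illustrative usage note: these bounds are checked when validating
 * SCHED_DEADLINE parameters, so a sched_setattr() call whose period
 * falls outside [period_min, period_max] fails with -EINVAL. For
 * example (values are only an example), the lower bound could be
 * raised to 200us with:
 *
 *   # echo 200 > /proc/sys/kernel/sched_deadline_period_min_us
 */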
55
56static bool dl_server(struct sched_dl_entity *dl_se)
57{
58 return dl_se->dl_server;
59}
60
61static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
62{
63ba8422 63 BUG_ON(dl_server(dl_se));
64 return container_of(dl_se, struct task_struct, dl);
65}
66
67static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
68{
69 return container_of(dl_rq, struct rq, dl);
70}
71
63ba8422 72static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
aab03e05 73{
74 struct rq *rq = dl_se->rq;
75
76 if (!dl_server(dl_se))
77 rq = task_rq(dl_task_of(dl_se));
aab03e05 78
79 return rq;
80}
81
82static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
83{
84 return &rq_of_dl_se(dl_se)->dl;
85}
86
87static inline int on_dl_rq(struct sched_dl_entity *dl_se)
88{
89 return !RB_EMPTY_NODE(&dl_se->rb_node);
90}
91
92#ifdef CONFIG_RT_MUTEXES
93static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
94{
95 return dl_se->pi_se;
96}
97
98static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
99{
100 return pi_of(dl_se) != dl_se;
101}
102#else
103static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
104{
105 return dl_se;
106}
107
108static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
109{
110 return false;
111}
112#endif
113
114#ifdef CONFIG_SMP
115static inline struct dl_bw *dl_bw_of(int i)
116{
117 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
118 "sched RCU must be held");
119 return &cpu_rq(i)->rd->dl_bw;
120}
121
122static inline int dl_bw_cpus(int i)
123{
124 struct root_domain *rd = cpu_rq(i)->rd;
c81b8932 125 int cpus;
126
127 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
128 "sched RCU must be held");
129
130 if (cpumask_subset(rd->span, cpu_active_mask))
131 return cpumask_weight(rd->span);
132
133 cpus = 0;
134
135 for_each_cpu_and(i, rd->span, cpu_active_mask)
136 cpus++;
137
138 return cpus;
139}
fc9dc698 140
6092478b 141static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
fc9dc698 142{
fc9dc698 143 unsigned long cap = 0;
6092478b 144 int i;
fc9dc698 145
6092478b 146 for_each_cpu_and(i, mask, cpu_active_mask)
7bc26384 147 cap += arch_scale_cpu_capacity(i);
148
149 return cap;
150}
151
152/*
153 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
154 * of the CPU the task is running on rather rd's \Sum CPU capacity.
155 */
156static inline unsigned long dl_bw_capacity(int i)
157{
740cf8a7 158 if (!sched_asym_cpucap_active() &&
7bc26384 159 arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
160 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
161 } else {
162 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
163 "sched RCU must be held");
164
165 return __dl_bw_capacity(cpu_rq(i)->rd->span);
166 }
167}
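/*
 * Example (illustrative numbers): on an asymmetric system with four
 * CPUs of capacity 1024 and four of capacity 512, __dl_bw_capacity()
 * over the whole root domain yields 4*1024 + 4*512 = 6144. On a
 * symmetric eight-CPU machine the shortcut above simply returns
 * dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT = 8 * 1024 = 8192.
 */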
168
169static inline bool dl_bw_visited(int cpu, u64 gen)
170{
171 struct root_domain *rd = cpu_rq(cpu)->rd;
172
173 if (rd->visit_gen == gen)
174 return true;
175
176 rd->visit_gen = gen;
177 return false;
178}
179
180static inline
181void __dl_update(struct dl_bw *dl_b, s64 bw)
182{
183 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
184 int i;
185
186 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
187 "sched RCU must be held");
188 for_each_cpu_and(i, rd->span, cpu_active_mask) {
189 struct rq *rq = cpu_rq(i);
190
191 rq->dl.extra_bw += bw;
192 }
193}
194#else
195static inline struct dl_bw *dl_bw_of(int i)
196{
197 return &cpu_rq(i)->dl.dl_bw;
198}
199
200static inline int dl_bw_cpus(int i)
201{
202 return 1;
203}
204
205static inline unsigned long dl_bw_capacity(int i)
206{
207 return SCHED_CAPACITY_SCALE;
208}
209
210static inline bool dl_bw_visited(int cpu, u64 gen)
211{
212 return false;
213}
214
215static inline
216void __dl_update(struct dl_bw *dl_b, s64 bw)
217{
218 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
219
220 dl->extra_bw += bw;
221}
222#endif
223
224static inline
225void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
226{
227 dl_b->total_bw -= tsk_bw;
228 __dl_update(dl_b, (s32)tsk_bw / cpus);
229}
230
231static inline
232void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
233{
234 dl_b->total_bw += tsk_bw;
235 __dl_update(dl_b, -((s32)tsk_bw / cpus));
236}
237
238static inline bool
239__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
240{
241 return dl_b->bw != -1 &&
242 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
243}
244
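/*
 * Worked example for __dl_overflow() (illustrative numbers): with the
 * default 95% limit, dl_b->bw = to_ratio(1s, 950ms) ~= 996147 in
 * BW_SHIFT units, and on a single full-capacity CPU (cap = 1024)
 * cap_scale(dl_b->bw, cap) == dl_b->bw. A task with runtime 10ms and
 * period 100ms contributes new_bw = to_ratio(100ms, 10ms) ~= 104857,
 * so nine such tasks are admitted while a tenth would push total_bw
 * past ~996147 and be rejected.
 */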
e36d8677 245static inline
794a56eb 246void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
247{
248 u64 old = dl_rq->running_bw;
249
5cb9eaa3 250 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
251 dl_rq->running_bw += dl_bw;
252 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
8fd27231 253 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
e0367b12 254 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
4042d003 255 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
256}
257
258static inline
794a56eb 259void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
260{
261 u64 old = dl_rq->running_bw;
262
5cb9eaa3 263 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
264 dl_rq->running_bw -= dl_bw;
265 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
266 if (dl_rq->running_bw > old)
267 dl_rq->running_bw = 0;
e0367b12 268 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
4042d003 269 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
270}
271
8fd27231 272static inline
794a56eb 273void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
274{
275 u64 old = dl_rq->this_bw;
276
5cb9eaa3 277 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
278 dl_rq->this_bw += dl_bw;
279 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
280}
281
282static inline
794a56eb 283void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
284{
285 u64 old = dl_rq->this_bw;
286
5cb9eaa3 287 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
288 dl_rq->this_bw -= dl_bw;
289 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
290 if (dl_rq->this_bw > old)
291 dl_rq->this_bw = 0;
292 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
293}
294
295static inline
296void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
297{
298 if (!dl_entity_is_special(dl_se))
299 __add_rq_bw(dl_se->dl_bw, dl_rq);
300}
301
302static inline
303void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
304{
305 if (!dl_entity_is_special(dl_se))
306 __sub_rq_bw(dl_se->dl_bw, dl_rq);
307}
308
309static inline
310void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
311{
312 if (!dl_entity_is_special(dl_se))
313 __add_running_bw(dl_se->dl_bw, dl_rq);
314}
315
316static inline
317void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
318{
319 if (!dl_entity_is_special(dl_se))
320 __sub_running_bw(dl_se->dl_bw, dl_rq);
321}
322
d741f297 323static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw)
209a0cbd 324{
325 if (dl_se->dl_non_contending) {
326 sub_running_bw(dl_se, &rq->dl);
327 dl_se->dl_non_contending = 0;
209a0cbd 328
329 /*
330 * If the timer handler is currently running and the
3b03706f 331 * timer cannot be canceled, inactive_task_timer()
332 * will see that dl_not_contending is not set, and
333 * will not touch the rq's active utilization,
334 * so we are still safe.
335 */
336 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
337 if (!dl_server(dl_se))
338 put_task_struct(dl_task_of(dl_se));
339 }
8fd27231 340 }
d741f297 341 __sub_rq_bw(dl_se->dl_bw, &rq->dl);
794a56eb 342 __add_rq_bw(new_bw, &rq->dl);
343}
344
345static void dl_change_utilization(struct task_struct *p, u64 new_bw)
346{
347 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
348
349 if (task_on_rq_queued(p))
350 return;
351
352 dl_rq_change_utilization(task_rq(p), &p->dl, new_bw);
353}
354
355static void __dl_clear_params(struct sched_dl_entity *dl_se);
356
357/*
358 * The utilization of a task cannot be immediately removed from
359 * the rq active utilization (running_bw) when the task blocks.
360 * Instead, we have to wait for the so called "0-lag time".
361 *
362 * If a task blocks before the "0-lag time", a timer (the inactive
363 * timer) is armed, and running_bw is decreased when the timer
364 * fires.
365 *
366 * If the task wakes up again before the inactive timer fires,
3b03706f 367 * the timer is canceled, whereas if the task wakes up after the
368 * inactive timer fired (and running_bw has been decreased) the
369 * task's utilization has to be added to running_bw again.
370 * A flag in the deadline scheduling entity (dl_non_contending)
371 * is used to avoid race conditions between the inactive timer handler
372 * and task wakeups.
373 *
374 * The following diagram shows how running_bw is updated. A task is
375 * "ACTIVE" when its utilization contributes to running_bw; an
376 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
377 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
378 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
379 * time already passed, which does not contribute to running_bw anymore.
380 * +------------------+
381 * wakeup | ACTIVE |
382 * +------------------>+ contending |
383 * | add_running_bw | |
384 * | +----+------+------+
385 * | | ^
386 * | dequeue | |
387 * +--------+-------+ | |
388 * | | t >= 0-lag | | wakeup
389 * | INACTIVE |<---------------+ |
390 * | | sub_running_bw | |
391 * +--------+-------+ | |
392 * ^ | |
393 * | t < 0-lag | |
394 * | | |
395 * | V |
396 * | +----+------+------+
397 * | sub_running_bw | ACTIVE |
398 * +-------------------+ |
399 * inactive timer | non contending |
400 * fired +------------------+
401 *
402 * The task_non_contending() function is invoked when a task
403 * blocks, and checks if the 0-lag time already passed or
404 * not (in the first case, it directly updates running_bw;
405 * in the second case, it arms the inactive timer).
406 *
407 * The task_contending() function is invoked when a task wakes
408 * up, and checks if the task is still in the "ACTIVE non contending"
409 * state or not (in the second case, it updates running_bw).
410 */
2f7a0f58 411static void task_non_contending(struct sched_dl_entity *dl_se)
209a0cbd 412{
209a0cbd 413 struct hrtimer *timer = &dl_se->inactive_timer;
414 struct rq *rq = rq_of_dl_se(dl_se);
415 struct dl_rq *dl_rq = &rq->dl;
416 s64 zerolag_time;
417
418 /*
419 * If this is a non-deadline task that has been boosted,
420 * do nothing
421 */
422 if (dl_se->dl_runtime == 0)
423 return;
424
425 if (dl_entity_is_special(dl_se))
426 return;
427
428 WARN_ON(dl_se->dl_non_contending);
429
430 zerolag_time = dl_se->deadline -
431 div64_long((dl_se->runtime * dl_se->dl_period),
432 dl_se->dl_runtime);
433
434 /*
435 * Using relative times instead of the absolute "0-lag time"
436 * allows to simplify the code
437 */
438 zerolag_time -= rq_clock(rq);
439
440 /*
441 * If the "0-lag time" already passed, decrease the active
442 * utilization now, instead of starting a timer
443 */
1b02cd6a 444 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
63ba8422 445 if (dl_server(dl_se)) {
794a56eb 446 sub_running_bw(dl_se, dl_rq);
447 } else {
448 struct task_struct *p = dl_task_of(dl_se);
2f7a0f58 449
450 if (dl_task(p))
451 sub_running_bw(dl_se, dl_rq);
387e3130 452
453 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
454 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
455
456 if (READ_ONCE(p->__state) == TASK_DEAD)
457 sub_rq_bw(dl_se, &rq->dl);
458 raw_spin_lock(&dl_b->lock);
459 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
460 raw_spin_unlock(&dl_b->lock);
461 __dl_clear_params(dl_se);
462 }
387e3130 463 }
464
465 return;
466 }
467
468 dl_se->dl_non_contending = 1;
469 if (!dl_server(dl_se))
470 get_task_struct(dl_task_of(dl_se));
471
850377a8 472 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
473}
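/*
 * Worked example for the 0-lag time above (illustrative numbers): a
 * task with dl_runtime = 10ms and dl_period = 100ms blocks with 4ms of
 * runtime left and its absolute deadline 60ms away. The 0-lag time is
 * deadline - runtime * dl_period / dl_runtime = deadline - 40ms, i.e.
 * 20ms in the future, so the inactive timer is armed for that instant
 * and running_bw keeps this task's bandwidth until the timer fires.
 */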
474
8fd27231 475static void task_contending(struct sched_dl_entity *dl_se, int flags)
476{
477 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
478
479 /*
480 * If this is a non-deadline task that has been boosted,
481 * do nothing
482 */
483 if (dl_se->dl_runtime == 0)
484 return;
485
8fd27231 486 if (flags & ENQUEUE_MIGRATED)
794a56eb 487 add_rq_bw(dl_se, dl_rq);
8fd27231 488
489 if (dl_se->dl_non_contending) {
490 dl_se->dl_non_contending = 0;
491 /*
492 * If the timer handler is currently running and the
3b03706f 493 * timer cannot be canceled, inactive_task_timer()
494 * will see that dl_not_contending is not set, and
495 * will not touch the rq's active utilization,
496 * so we are still safe.
497 */
498 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
499 if (!dl_server(dl_se))
500 put_task_struct(dl_task_of(dl_se));
501 }
502 } else {
503 /*
504 * Since "dl_non_contending" is not set, the
505 * task's utilization has already been removed from
506 * active utilization (either when the task blocked,
507 * when the "inactive timer" fired).
508 * So, add it back.
509 */
794a56eb 510 add_running_bw(dl_se, dl_rq);
511 }
512}
513
63ba8422 514static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
aab03e05 515{
f4478e7c 516 return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
517}
518
519static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
520
521void init_dl_bw(struct dl_bw *dl_b)
522{
523 raw_spin_lock_init(&dl_b->lock);
1724813d 524 if (global_rt_runtime() == RUNTIME_INF)
525 dl_b->bw = -1;
526 else
1724813d 527 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
528 dl_b->total_bw = 0;
529}
530
07c54f7a 531void init_dl_rq(struct dl_rq *dl_rq)
aab03e05 532{
2161573e 533 dl_rq->root = RB_ROOT_CACHED;
534
535#ifdef CONFIG_SMP
536 /* zero means no -deadline tasks */
537 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
538
1baca4ce 539 dl_rq->overloaded = 0;
2161573e 540 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
541#else
542 init_dl_bw(&dl_rq->dl_bw);
1baca4ce 543#endif
544
545 dl_rq->running_bw = 0;
8fd27231 546 dl_rq->this_bw = 0;
4da3abce 547 init_dl_rq_bw_ratio(dl_rq);
548}
549
550#ifdef CONFIG_SMP
551
552static inline int dl_overloaded(struct rq *rq)
553{
554 return atomic_read(&rq->rd->dlo_count);
555}
556
557static inline void dl_set_overload(struct rq *rq)
558{
559 if (!rq->online)
560 return;
561
562 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
563 /*
564 * Must be visible before the overload count is
565 * set (as in sched_rt.c).
566 *
567 * Matched by the barrier in pull_dl_task().
568 */
569 smp_wmb();
570 atomic_inc(&rq->rd->dlo_count);
571}
572
573static inline void dl_clear_overload(struct rq *rq)
574{
575 if (!rq->online)
576 return;
577
578 atomic_dec(&rq->rd->dlo_count);
579 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
580}
581
582#define __node_2_pdl(node) \
583 rb_entry((node), struct task_struct, pushable_dl_tasks)
584
585static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
586{
587 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
588}
589
590static inline int has_pushable_dl_tasks(struct rq *rq)
591{
592 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
593}
594
595/*
596 * The list of pushable -deadline task is not a plist, like in
597 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
598 */
599static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
600{
8ecca394 601 struct rb_node *leftmost;
1baca4ce 602
09348d75 603 WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
1baca4ce 604
605 leftmost = rb_add_cached(&p->pushable_dl_tasks,
606 &rq->dl.pushable_dl_tasks_root,
607 __pushable_less);
2161573e 608 if (leftmost)
8ecca394 609 rq->dl.earliest_dl.next = p->dl.deadline;
610
611 if (!rq->dl.overloaded) {
612 dl_set_overload(rq);
613 rq->dl.overloaded = 1;
614 }
615}
616
617static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
618{
619 struct dl_rq *dl_rq = &rq->dl;
620 struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
621 struct rb_node *leftmost;
622
623 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
624 return;
625
626 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
627 if (leftmost)
628 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
1baca4ce 629
1baca4ce 630 RB_CLEAR_NODE(&p->pushable_dl_tasks);
1baca4ce 631
632 if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
633 dl_clear_overload(rq);
634 rq->dl.overloaded = 0;
635 }
636}
637
638static int push_dl_task(struct rq *rq);
639
640static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
641{
120455c5 642 return rq->online && dl_task(prev);
643}
644
645static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
646static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
647
648static void push_dl_tasks(struct rq *);
9916e214 649static void pull_dl_task(struct rq *);
e3fca9e7 650
02d8ec94 651static inline void deadline_queue_push_tasks(struct rq *rq)
dc877341 652{
653 if (!has_pushable_dl_tasks(rq))
654 return;
655
656 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
657}
658
02d8ec94 659static inline void deadline_queue_pull_task(struct rq *rq)
660{
661 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
662}
663
664static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
665
a649f237 666static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
667{
668 struct rq *later_rq = NULL;
59d06cea 669 struct dl_bw *dl_b;
670
671 later_rq = find_lock_later_rq(p, rq);
672 if (!later_rq) {
673 int cpu;
674
675 /*
676 * If we cannot preempt any rq, fall back to pick any
97fb7a0a 677 * online CPU:
fa9c9d10 678 */
3bd37062 679 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
680 if (cpu >= nr_cpu_ids) {
681 /*
97fb7a0a 682 * Failed to find any suitable CPU.
683 * The task will never come back!
684 */
09348d75 685 WARN_ON_ONCE(dl_bandwidth_enabled());
686
687 /*
688 * If admission control is disabled we
689 * try a little harder to let the task
690 * run.
691 */
692 cpu = cpumask_any(cpu_active_mask);
693 }
694 later_rq = cpu_rq(cpu);
695 double_lock_balance(rq, later_rq);
696 }
697
698 if (p->dl.dl_non_contending || p->dl.dl_throttled) {
699 /*
700 * Inactive timer is armed (or callback is running, but
 701 * waiting for us to release rq locks). In any case, when it
 702 * fires (or continues), it will see running_bw of this
703 * task migrated to later_rq (and correctly handle it).
704 */
705 sub_running_bw(&p->dl, &rq->dl);
706 sub_rq_bw(&p->dl, &rq->dl);
707
708 add_rq_bw(&p->dl, &later_rq->dl);
709 add_running_bw(&p->dl, &later_rq->dl);
710 } else {
711 sub_rq_bw(&p->dl, &rq->dl);
712 add_rq_bw(&p->dl, &later_rq->dl);
713 }
714
715 /*
402de7fc 716 * And we finally need to fix up root_domain(s) bandwidth accounting,
717 * since p is still hanging out in the old (now moved to default) root
718 * domain.
719 */
720 dl_b = &rq->rd->dl_bw;
721 raw_spin_lock(&dl_b->lock);
722 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
723 raw_spin_unlock(&dl_b->lock);
724
725 dl_b = &later_rq->rd->dl_bw;
726 raw_spin_lock(&dl_b->lock);
727 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
728 raw_spin_unlock(&dl_b->lock);
729
fa9c9d10 730 set_task_cpu(p, later_rq->cpu);
731 double_unlock_balance(later_rq, rq);
732
733 return later_rq;
734}
735
736#else
737
738static inline
739void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
740{
741}
742
743static inline
744void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
745{
746}
747
748static inline
749void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
750{
751}
752
753static inline
754void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
755{
756}
757
02d8ec94 758static inline void deadline_queue_push_tasks(struct rq *rq)
dc877341 759{
760}
761
02d8ec94 762static inline void deadline_queue_pull_task(struct rq *rq)
763{
764}
765#endif /* CONFIG_SMP */
766
767static void
768enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
aab03e05 769static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
63ba8422 770static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags);
e23edc86 771static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
aab03e05 772
773static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
774 struct rq *rq)
775{
776 /* for non-boosted task, pi_of(dl_se) == dl_se */
777 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
778 dl_se->runtime = pi_of(dl_se)->dl_runtime;
779
780 /*
781 * If it is a deferred reservation, and the server
 782 * is not handling a starvation case, defer it.
783 */
 784 if (dl_se->dl_defer && !dl_se->dl_defer_running) {
785 dl_se->dl_throttled = 1;
786 dl_se->dl_defer_armed = 1;
787 }
788}
789
790/*
791 * We are being explicitly informed that a new instance is starting,
792 * and this means that:
793 * - the absolute deadline of the entity has to be placed at
794 * current time + relative deadline;
795 * - the runtime of the entity has to be set to the maximum value.
796 *
797 * The capability of specifying such event is useful whenever a -deadline
798 * entity wants to (try to!) synchronize its behaviour with the scheduler's
799 * one, and to (try to!) reconcile itself with its own scheduling
800 * parameters.
801 */
98b0a857 802static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
803{
804 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
805 struct rq *rq = rq_of_dl_rq(dl_rq);
806
2279f540 807 WARN_ON(is_dl_boosted(dl_se));
808 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
809
810 /*
811 * We are racing with the deadline timer. So, do nothing because
812 * the deadline timer handler will take care of properly recharging
813 * the runtime and postponing the deadline
814 */
815 if (dl_se->dl_throttled)
816 return;
817
818 /*
819 * We use the regular wall clock time to set deadlines in the
820 * future; in fact, we must consider execution overheads (time
821 * spent on hardirq context, etc.).
822 */
96458e7f 823 replenish_dl_new_period(dl_se, rq);
824}
825
826static int start_dl_timer(struct sched_dl_entity *dl_se);
827static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t);
828
829/*
830 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 831 * possibility of an entity lasting more than what it declared, and thus
832 * exhausting its runtime.
833 *
834 * Here we are interested in making runtime overrun possible, but we do
 835 * not want an entity which is misbehaving to affect the scheduling of all
836 * other entities.
837 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
838 * is used, in order to confine each entity within its own bandwidth.
839 *
840 * This function deals exactly with that, and ensures that when the runtime
 841 * of an entity is replenished, its deadline is also postponed. That ensures
 842 * the overrunning entity can't interfere with other entities in the system and
 843 * can't make them miss their deadlines. Reasons why this kind of overrun
 844 * could happen are, typically, an entity voluntarily trying to exceed its
1b09d29b 845 * runtime, or having underestimated it during sched_setattr().
aab03e05 846 */
2279f540 847static void replenish_dl_entity(struct sched_dl_entity *dl_se)
848{
849 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
850 struct rq *rq = rq_of_dl_rq(dl_rq);
851
09348d75 852 WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);
853
854 /*
855 * This could be the case for a !-dl task that is boosted.
856 * Just go with full inherited parameters.
857 *
858 * Or, it could be the case of a deferred reservation that
859 * was not able to consume its runtime in background and
860 * reached this point with current u > U.
861 *
862 * In both cases, set a new period.
2d3d891d 863 */
864 if (dl_se->dl_deadline == 0 ||
865 (dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) {
866 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
867 dl_se->runtime = pi_of(dl_se)->dl_runtime;
868 }
2d3d891d 869
870 if (dl_se->dl_yielded && dl_se->runtime > 0)
871 dl_se->runtime = 0;
872
873 /*
874 * We keep moving the deadline away until we get some
875 * available runtime for the entity. This ensures correct
876 * handling of situations where the runtime overrun is
 877 * arbitrarily large.
878 */
879 while (dl_se->runtime <= 0) {
880 dl_se->deadline += pi_of(dl_se)->dl_period;
881 dl_se->runtime += pi_of(dl_se)->dl_runtime;
882 }
883
884 /*
885 * At this point, the deadline really should be "in
886 * the future" with respect to rq->clock. If it's
887 * not, we are, for some reason, lagging too much!
888 * Anyway, after having warn userspace abut that,
889 * we still try to keep the things running by
890 * resetting the deadline and the budget of the
891 * entity.
892 */
893 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
c219b7dd 894 printk_deferred_once("sched: DL replenish lagged too much\n");
96458e7f 895 replenish_dl_new_period(dl_se, rq);
aab03e05 896 }
897
898 if (dl_se->dl_yielded)
899 dl_se->dl_yielded = 0;
900 if (dl_se->dl_throttled)
901 dl_se->dl_throttled = 0;
902
903 /*
904 * If this is the replenishment of a deferred reservation,
905 * clear the flag and return.
906 */
907 if (dl_se->dl_defer_armed) {
908 dl_se->dl_defer_armed = 0;
909 return;
910 }
911
912 /*
 913 * At this point, if the deferred server is not armed and not already
 914 * running, and the deadline is in the future, throttle the server
 915 * and arm the defer timer.
916 */
917 if (dl_se->dl_defer && !dl_se->dl_defer_running &&
918 dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {
919 if (!is_dl_boosted(dl_se) && dl_se->server_has_tasks(dl_se)) {
920
921 /*
922 * Set dl_se->dl_defer_armed and dl_throttled variables to
923 * inform the start_dl_timer() that this is a deferred
924 * activation.
925 */
926 dl_se->dl_defer_armed = 1;
927 dl_se->dl_throttled = 1;
928 if (!start_dl_timer(dl_se)) {
929 /*
930 * If for whatever reason (delays), a previous timer was
931 * queued but not serviced, cancel it and clean the
932 * deferrable server variables intended for start_dl_timer().
933 */
934 hrtimer_try_to_cancel(&dl_se->dl_timer);
935 dl_se->dl_defer_armed = 0;
936 dl_se->dl_throttled = 0;
937 }
938 }
939 }
940}
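/*
 * Worked example for the replenishment loop above (illustrative
 * numbers): with dl_runtime = 10ms and dl_period = 100ms, an entity
 * that gets here with runtime = -12ms needs two iterations; its
 * deadline is pushed 200ms into the future and its runtime becomes
 * -12ms + 2 * 10ms = 8ms, which keeps the overrunning entity within
 * its reserved bandwidth.
 */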
941
942/*
943 * Here we check if --at time t-- an entity (which is probably being
944 * [re]activated or, in general, enqueued) can use its remaining runtime
945 * and its current deadline _without_ exceeding the bandwidth it is
946 * assigned (function returns true if it can't). We are in fact applying
947 * one of the CBS rules: when a task wakes up, if the residual runtime
948 * over residual deadline fits within the allocated bandwidth, then we
949 * can keep the current (absolute) deadline and residual budget without
950 * disrupting the schedulability of the system. Otherwise, we should
951 * refill the runtime and set the deadline a period in the future,
952 * because keeping the current (absolute) deadline of the task would
712e5e34 953 * result in breaking guarantees promised to other tasks (refer to
d6a3b247 954 * Documentation/scheduler/sched-deadline.rst for more information).
955 *
956 * This function returns true if:
957 *
2317d5f1 958 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
aab03e05
DF
959 *
960 * IOW we can't recycle current parameters.
755378a4 961 *
2317d5f1 962 * Notice that the bandwidth check is done against the deadline. For
755378a4 963 * task with deadline equal to period this is the same of using
2317d5f1 964 * dl_period instead of dl_deadline in the equation above.
aab03e05 965 */
2279f540 966static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
967{
968 u64 left, right;
969
970 /*
971 * left and right are the two sides of the equation above,
972 * after a bit of shuffling to use multiplications instead
973 * of divisions.
974 *
975 * Note that none of the time values involved in the two
976 * multiplications are absolute: dl_deadline and dl_runtime
977 * are the relative deadline and the maximum runtime of each
978 * instance, runtime is the runtime left for the last instance
979 * and (deadline - t), since t is rq->clock, is the time left
980 * to the (absolute) deadline. Even if overflowing the u64 type
981 * is very unlikely to occur in both cases, here we scale down
982 * as we want to avoid that risk at all. Scaling down by 10
983 * means that we reduce granularity to 1us. We are fine with it,
984 * since this is only a true/false check and, anyway, thinking
985 * of anything below microseconds resolution is actually fiction
986 * (but still we want to give the user that illusion >;).
987 */
2279f540 988 left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
332ac17e 989 right = ((dl_se->deadline - t) >> DL_SCALE) *
2279f540 990 (pi_of(dl_se)->dl_runtime >> DL_SCALE);
991
992 return dl_time_before(right, left);
993}
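/*
 * Worked example (illustrative numbers): a task with dl_runtime = 10ms
 * and dl_deadline = 100ms wakes up with 8ms of runtime left and 30ms to
 * its old absolute deadline. Its residual density 8/30 ~= 0.27 exceeds
 * the reserved 10/100 = 0.10, so this function returns true and, for an
 * implicit-deadline task, update_dl_entity() hands out a fresh deadline
 * and runtime instead of recycling the old ones.
 */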
994
995/*
 996 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 997 * re-initializing the task's runtime and deadline, the revised wakeup
 998 * rule adjusts the task's runtime so that the task does not overrun its
999 * density.
aab03e05 1000 *
1001 * Reasoning: a task may overrun the density if:
1002 * runtime / (deadline - t) > dl_runtime / dl_deadline
1003 *
1004 * Therefore, runtime can be adjusted to:
1005 * runtime = (dl_runtime / dl_deadline) * (deadline - t)
1006 *
 1007 * In this way, the runtime will be equal to the maximum density
1008 * the task can use without breaking any rule.
1009 *
1010 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
1011 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
1012 */
1013static void
1014update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
1015{
1016 u64 laxity = dl_se->deadline - rq_clock(rq);
1017
1018 /*
1019 * If the task has deadline < period, and the deadline is in the past,
1020 * it should already be throttled before this check.
1021 *
1022 * See update_dl_entity() comments for further details.
1023 */
1024 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
1025
1026 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
1027}
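/*
 * Worked example (illustrative numbers): a constrained task with
 * dl_runtime = 5ms and dl_deadline = 20ms has dl_density = 0.25 (scaled
 * by 2^BW_SHIFT). If it wakes up overflowing with 8ms of laxity left to
 * its deadline, its runtime is trimmed to 0.25 * 8ms = 2ms, the most it
 * can consume without exceeding its reserved density.
 */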
1028
1029/*
1030 * Regarding the deadline, a task with implicit deadline has a relative
1031 * deadline == relative period. A task with constrained deadline has a
1032 * relative deadline <= relative period.
1033 *
1034 * We support constrained deadline tasks. However, there are some restrictions
1035 * applied only for tasks which do not have an implicit deadline. See
1036 * update_dl_entity() to know more about such restrictions.
1037 *
1038 * The dl_is_implicit() returns true if the task has an implicit deadline.
1039 */
1040static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
1041{
1042 return dl_se->dl_deadline == dl_se->dl_period;
1043}
1044
1045/*
1046 * When a deadline entity is placed in the runqueue, its runtime and deadline
1047 * might need to be updated. This is done by a CBS wake up rule. There are two
1048 * different rules: 1) the original CBS; and 2) the Revisited CBS.
1049 *
1050 * When the task is starting a new period, the Original CBS is used. In this
1051 * case, the runtime is replenished and a new absolute deadline is set.
1052 *
 1053 * When a task is queued before the beginning of the next period, using the
 1054 * remaining runtime and deadline could make the entity overflow, see
1055 * dl_entity_overflow() to find more about runtime overflow. When such case
1056 * is detected, the runtime and deadline need to be updated.
1057 *
1058 * If the task has an implicit deadline, i.e., deadline == period, the Original
402de7fc 1059 * CBS is applied. The runtime is replenished and a new absolute deadline is
1060 * set, as in the previous cases.
1061 *
1062 * However, the Original CBS does not work properly for tasks with
1063 * deadline < period, which are said to have a constrained deadline. By
1064 * applying the Original CBS, a constrained deadline task would be able to run
1065 * runtime/deadline in a period. With deadline < period, the task would
1066 * overrun the runtime/period allowed bandwidth, breaking the admission test.
1067 *
 1068 * In order to prevent this misbehavior, the Revisited CBS is used for
1069 * constrained deadline tasks when a runtime overflow is detected. In the
1070 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
1071 * the remaining runtime of the task is reduced to avoid runtime overflow.
1072 * Please refer to the comments update_dl_revised_wakeup() function to find
1073 * more about the Revised CBS rule.
aab03e05 1074 */
2279f540 1075static void update_dl_entity(struct sched_dl_entity *dl_se)
aab03e05 1076{
63ba8422 1077 struct rq *rq = rq_of_dl_se(dl_se);
aab03e05 1078
aab03e05 1079 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
2279f540 1080 dl_entity_overflow(dl_se, rq_clock(rq))) {
1081
1082 if (unlikely(!dl_is_implicit(dl_se) &&
1083 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
2279f540 1084 !is_dl_boosted(dl_se))) {
1085 update_dl_revised_wakeup(dl_se, rq);
1086 return;
1087 }
1088
96458e7f 1089 replenish_dl_new_period(dl_se, rq);
1090 } else if (dl_server(dl_se) && dl_se->dl_defer) {
1091 /*
1092 * The server can still use its previous deadline, so check if
1093 * it left the dl_defer_running state.
1094 */
1095 if (!dl_se->dl_defer_running) {
1096 dl_se->dl_defer_armed = 1;
1097 dl_se->dl_throttled = 1;
1098 }
1099 }
1100}
1101
1102static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
1103{
1104 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1105}
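/*
 * Example (illustrative numbers): for a constrained task with
 * dl_deadline = 20ms and dl_period = 100ms whose current absolute
 * deadline is D, the next period starts at D - 20ms + 100ms = D + 80ms;
 * that is the instant targeted by the replenishment timer in the
 * non-deferred case below.
 */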
1106
1107/*
1108 * If the entity depleted all its runtime, and if we want it to sleep
1109 * while waiting for some new execution time to become available, we
5ac69d37 1110 * set the bandwidth replenishment timer to the replenishment instant
1111 * and try to activate it.
1112 *
1113 * Notice that it is important for the caller to know if the timer
1114 * actually started or not (i.e., the replenishment instant is in
1115 * the future or in the past).
1116 */
63ba8422 1117static int start_dl_timer(struct sched_dl_entity *dl_se)
aab03e05 1118{
a649f237 1119 struct hrtimer *timer = &dl_se->dl_timer;
1120 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1121 struct rq *rq = rq_of_dl_rq(dl_rq);
aab03e05 1122 ktime_t now, act;
1123 s64 delta;
1124
5cb9eaa3 1125 lockdep_assert_rq_held(rq);
a649f237 1126
1127 /*
1128 * We want the timer to fire at the deadline, but considering
1129 * that it is actually coming from rq->clock and not from
1130 * hrtimer's time base reading.
1131 *
1132 * The deferred reservation will have its timer set to
1133 * (deadline - runtime). At that point, the CBS rule will decide
1134 * if the current deadline can be used, or if a replenishment is
1135 * required to avoid add too much pressure on the system
1136 * (current u > U).
aab03e05 1137 */
1138 if (dl_se->dl_defer_armed) {
1139 WARN_ON_ONCE(!dl_se->dl_throttled);
1140 act = ns_to_ktime(dl_se->deadline - dl_se->runtime);
1141 } else {
1142 /* act = deadline - rel-deadline + period */
1143 act = ns_to_ktime(dl_next_period(dl_se));
1144 }
1145
a649f237 1146 now = hrtimer_cb_get_time(timer);
1147 delta = ktime_to_ns(now) - rq_clock(rq);
1148 act = ktime_add_ns(act, delta);
1149
1150 /*
1151 * If the expiry time already passed, e.g., because the value
1152 * chosen as the deadline is too small, don't even try to
1153 * start the timer in the past!
1154 */
1155 if (ktime_us_delta(act, now) < 0)
1156 return 0;
1157
1158 /*
1159 * !enqueued will guarantee another callback; even if one is already in
1160 * progress. This ensures a balanced {get,put}_task_struct().
1161 *
1162 * The race against __run_timer() clearing the enqueued state is
1163 * harmless because we're holding task_rq()->lock, therefore the timer
1164 * expiring after we've done the check will wait on its task_rq_lock()
1165 * and observe our state.
1166 */
1167 if (!hrtimer_is_queued(timer)) {
1168 if (!dl_server(dl_se))
1169 get_task_struct(dl_task_of(dl_se));
d5096aa6 1170 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
a649f237 1171 }
aab03e05 1172
cc9684d3 1173 return 1;
1174}
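/*
 * Example (illustrative numbers): a deferred server with 50ms of
 * runtime left and its absolute deadline 1000ms away gets its timer set
 * to deadline - runtime, i.e. 950ms from now; only then does the CBS
 * decide whether the old deadline is still usable. The (now - rq_clock)
 * delta added above merely translates the rq clock value into the
 * hrtimer time base.
 */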
1175
1176static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
1177{
1178#ifdef CONFIG_SMP
1179 /*
1180 * Queueing this task back might have overloaded rq, check if we need
1181 * to kick someone away.
1182 */
1183 if (has_pushable_dl_tasks(rq)) {
1184 /*
 1185 * Nothing relies on rq->lock after this, so it's safe to drop
1186 * rq->lock.
1187 */
1188 rq_unpin_lock(rq, rf);
1189 push_dl_task(rq);
1190 rq_repin_lock(rq, rf);
1191 }
1192#endif
1193}
1194
1195/* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */
1196static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC;
1197
1198static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se)
1199{
1200 struct rq *rq = rq_of_dl_se(dl_se);
1201 u64 fw;
1202
1203 scoped_guard (rq_lock, rq) {
1204 struct rq_flags *rf = &scope.rf;
1205
1206 if (!dl_se->dl_throttled || !dl_se->dl_runtime)
1207 return HRTIMER_NORESTART;
1208
1209 sched_clock_tick();
1210 update_rq_clock(rq);
1211
1212 if (!dl_se->dl_runtime)
1213 return HRTIMER_NORESTART;
1214
1215 if (!dl_se->server_has_tasks(dl_se)) {
1216 replenish_dl_entity(dl_se);
1217 return HRTIMER_NORESTART;
1218 }
1219
1220 if (dl_se->dl_defer_armed) {
1221 /*
 1222 * First check if the server could consume runtime in the background.
 1223 * If so, it is possible to push the defer timer forward by this amount
 1224 * of time. The dl_server_min_res serves as a limit to avoid
 1225 * forwarding the timer by too small an amount of time.
1226 */
1227 if (dl_time_before(rq_clock(dl_se->rq),
1228 (dl_se->deadline - dl_se->runtime - dl_server_min_res))) {
1229
1230 /* reset the defer timer */
1231 fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime;
1232
1233 hrtimer_forward_now(timer, ns_to_ktime(fw));
1234 return HRTIMER_RESTART;
1235 }
1236
1237 dl_se->dl_defer_running = 1;
1238 }
1239
1240 enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
1241
1242 if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl))
1243 resched_curr(rq);
1244
1245 __push_dl_task(rq, rf);
1246 }
1247
1248 return HRTIMER_NORESTART;
1249}
1250
1251/*
1252 * This is the bandwidth enforcement timer callback. If here, we know
1253 * a task is not on its dl_rq, since the fact that the timer was running
1254 * means the task is throttled and needs a runtime replenishment.
1255 *
 1256 * However, what we actually do depends on whether the task is active
1257 * (it is on its rq) or has been removed from there by a call to
1258 * dequeue_task_dl(). In the former case we must issue the runtime
1259 * replenishment and add the task back to the dl_rq; in the latter, we just
1260 * do nothing but clearing dl_throttled, so that runtime and deadline
1261 * updating (and the queueing back to dl_rq) will be done by the
1262 * next call to enqueue_task_dl().
1263 */
1264static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1265{
1266 struct sched_dl_entity *dl_se = container_of(timer,
1267 struct sched_dl_entity,
1268 dl_timer);
63ba8422 1269 struct task_struct *p;
eb580751 1270 struct rq_flags rf;
0f397f2c 1271 struct rq *rq;
3960c8c0 1272
1273 if (dl_server(dl_se))
1274 return dl_server_timer(timer, dl_se);
1275
1276 p = dl_task_of(dl_se);
eb580751 1277 rq = task_rq_lock(p, &rf);
0f397f2c 1278
aab03e05 1279 /*
a649f237 1280 * The task might have changed its scheduling policy to something
9846d50d 1281 * different than SCHED_DEADLINE (through switched_from_dl()).
a649f237 1282 */
209a0cbd 1283 if (!dl_task(p))
a649f237 1284 goto unlock;
a649f237 1285
1286 /*
1287 * The task might have been boosted by someone else and might be in the
 1288 * boosting/deboosting path, it's not throttled.
1289 */
2279f540 1290 if (is_dl_boosted(dl_se))
a649f237 1291 goto unlock;
a79ec89f 1292
fa9c9d10 1293 /*
1294 * Spurious timer due to start_dl_timer() race; or we already received
1295 * a replenishment from rt_mutex_setprio().
fa9c9d10 1296 */
a649f237 1297 if (!dl_se->dl_throttled)
fa9c9d10 1298 goto unlock;
1299
1300 sched_clock_tick();
1301 update_rq_clock(rq);
fa9c9d10 1302
1303 /*
1304 * If the throttle happened during sched-out; like:
1305 *
1306 * schedule()
1307 * deactivate_task()
1308 * dequeue_task_dl()
1309 * update_curr_dl()
1310 * start_dl_timer()
1311 * __dequeue_task_dl()
1312 * prev->on_rq = 0;
1313 *
1314 * We can be both throttled and !queued. Replenish the counter
1315 * but do not enqueue -- wait for our wakeup to do that.
1316 */
1317 if (!task_on_rq_queued(p)) {
2279f540 1318 replenish_dl_entity(dl_se);
1319 goto unlock;
1320 }
1321
1baca4ce 1322#ifdef CONFIG_SMP
c0c8c9fa 1323 if (unlikely(!rq->online)) {
1324 /*
1325 * If the runqueue is no longer available, migrate the
1326 * task elsewhere. This necessarily changes rq.
1327 */
9ef7e7e3 1328 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
a649f237 1329 rq = dl_task_offline_migration(rq, p);
9ef7e7e3 1330 rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
dcc3b5ff 1331 update_rq_clock(rq);
1332
1333 /*
1334 * Now that the task has been migrated to the new RQ and we
1335 * have that locked, proceed as normal and enqueue the task
1336 * there.
1337 */
c0c8c9fa 1338 }
61c7aca6 1339#endif
a649f237 1340
1341 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1342 if (dl_task(rq->curr))
e23edc86 1343 wakeup_preempt_dl(rq, p, 0);
1344 else
1345 resched_curr(rq);
a649f237 1346
63ba8422 1347 __push_dl_task(rq, &rf);
a649f237 1348
aab03e05 1349unlock:
eb580751 1350 task_rq_unlock(rq, p, &rf);
aab03e05 1351
1352 /*
1353 * This can free the task_struct, including this hrtimer, do not touch
1354 * anything related to that after this.
1355 */
1356 put_task_struct(p);
1357
1358 return HRTIMER_NORESTART;
1359}
1360
9e07d45c 1361static void init_dl_task_timer(struct sched_dl_entity *dl_se)
1362{
1363 struct hrtimer *timer = &dl_se->dl_timer;
1364
d5096aa6 1365 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1366 timer->function = dl_task_timer;
1367}
1368
1369/*
1370 * During the activation, CBS checks if it can reuse the current task's
1371 * runtime and period. If the deadline of the task is in the past, CBS
1372 * cannot use the runtime, and so it replenishes the task. This rule
1373 * works fine for implicit deadline tasks (deadline == period), and the
1374 * CBS was designed for implicit deadline tasks. However, a task with
c4969417 1375 * constrained deadline (deadline < period) might be awakened after the
1376 * deadline, but before the next period. In this case, replenishing the
1377 * task would allow it to run for runtime / deadline. As in this case
1378 * deadline < period, CBS enables a task to run for more than the
1379 * runtime / period. In a very loaded system, this can cause a domino
1380 * effect, making other tasks miss their deadlines.
1381 *
1382 * To avoid this problem, in the activation of a constrained deadline
1383 * task after the deadline but before the next period, throttle the
 1384 * task and set the replenishing timer to the beginning of the next period,
1385 * unless it is boosted.
1386 */
1387static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1388{
63ba8422 1389 struct rq *rq = rq_of_dl_se(dl_se);
1390
1391 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1392 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
63ba8422 1393 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
1394 return;
1395 dl_se->dl_throttled = 1;
1396 if (dl_se->runtime > 0)
1397 dl_se->runtime = 0;
1398 }
1399}
1400
aab03e05 1401static
6fab5410 1402int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
aab03e05 1403{
269ad801 1404 return (dl_se->runtime <= 0);
1405}
1406
c52f14d3 1407/*
1408 * This function implements the GRUB accounting rule. According to the
1409 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
1410 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
1411 * where u is the utilization of the task, Umax is the maximum reclaimable
1412 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1413 * as the difference between the "total runqueue utilization" and the
6a9d623a 1414 * "runqueue active utilization", and Uextra is the (per runqueue) extra
daec5798 1415 * reclaimable utilization.
1416 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
1417 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
1418 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
402de7fc 1419 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1420 * Since delta is a 64 bit variable, to have an overflow its value should be
1421 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
1422 * not an issue here.
c52f14d3 1423 */
3febfc8a 1424static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
c52f14d3 1425{
9f0d1a50 1426 u64 u_act;
6a9d623a 1427 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
c52f14d3 1428
9f0d1a50 1429 /*
1430 * Instead of computing max{u, (u_max - u_inact - u_extra)}, we
1431 * compare u_inact + u_extra with u_max - u, because u_inact + u_extra
1432 * can be larger than u_max. So, u_max - u_inact - u_extra would be
1433 * negative leading to wrong results.
9f0d1a50 1434 */
1435 if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
1436 u_act = dl_se->dl_bw;
9f0d1a50 1437 else
6a9d623a 1438 u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;
9f0d1a50 1439
6a9d623a 1440 u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
9f0d1a50 1441 return (delta * u_act) >> BW_SHIFT;
1442}
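/*
 * Worked example for grub_reclaim() (illustrative numbers, written as
 * fractions of 2^BW_SHIFT): on a runqueue with max_bw = 0.95,
 * this_bw = 0.6, running_bw = 0.4 and extra_bw = 0.1, a task with
 * dl_bw = 0.4 sees u_inact = 0.2, hence u_act = 0.95 - 0.2 - 0.1 = 0.65,
 * which the 1/Umax correction (bw_ratio) turns into roughly 0.68. The
 * task is then charged only ~0.68 of the elapsed time, so its reserved
 * runtime stretches further, reclaiming bandwidth left unused by the
 * other reservations.
 */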
1443
a110a81c 1444s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
aab03e05 1445{
63ba8422 1446 s64 scaled_delta_exec;
aab03e05 1447
1448 /*
1449 * For tasks that participate in GRUB, we implement GRUB-PA: the
1450 * spare reclaimed bandwidth is used to clock down frequency.
1451 *
1452 * For the others, we still need to scale reservation parameters
1453 * according to current frequency and CPU maximum capacity.
1454 */
1455 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
63ba8422 1456 scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
07881166 1457 } else {
63ba8422 1458 int cpu = cpu_of(rq);
07881166 1459 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
8ec59c0f 1460 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1461
1462 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1463 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1464 }
1465
1466 return scaled_delta_exec;
1467}
1468
1469static inline void
1470update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1471 int flags);
1472static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
1473{
1474 s64 scaled_delta_exec;
1475
1476 if (unlikely(delta_exec <= 0)) {
1477 if (unlikely(dl_se->dl_yielded))
1478 goto throttle;
1479 return;
1480 }
1481
1482 if (dl_server(dl_se) && dl_se->dl_throttled && !dl_se->dl_defer)
1483 return;
1484
1485 if (dl_entity_is_special(dl_se))
1486 return;
1487
1488 scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
1489
07881166 1490 dl_se->runtime -= scaled_delta_exec;
48be3a67 1491
1492 /*
1493 * The fair server can consume its runtime while throttled (not queued/
1494 * running as regular CFS).
1495 *
 1496 * If the server consumes its entire runtime in this state, the server
 1497 * is not required for the current period. Thus, reset the server by
 1498 * starting a new period, pushing the activation.
1499 */
1500 if (dl_se->dl_defer && dl_se->dl_throttled && dl_runtime_exceeded(dl_se)) {
1501 /*
1502 * If the server was previously activated - the starving condition
1503 * took place, it this point it went away because the fair scheduler
1504 * was able to get runtime in background. So return to the initial
1505 * state.
1506 */
1507 dl_se->dl_defer_running = 0;
1508
1509 hrtimer_try_to_cancel(&dl_se->dl_timer);
1510
1511 replenish_dl_new_period(dl_se, dl_se->rq);
1512
1513 /*
1514 * Not being able to start the timer seems problematic. If it could not
1515 * be started for whatever reason, we need to "unthrottle" the DL server
1516 * and queue right away. Otherwise nothing might queue it. That's similar
1517 * to what enqueue_dl_entity() does on start_dl_timer==0. For now, just warn.
1518 */
1519 WARN_ON_ONCE(!start_dl_timer(dl_se));
1520
1521 return;
1522 }
1523
1524throttle:
1525 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1019a359 1526 dl_se->dl_throttled = 1;
1527
1528 /* If requested, inform the user about runtime overruns. */
1529 if (dl_runtime_exceeded(dl_se) &&
1530 (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1531 dl_se->dl_overrun = 1;
1532
1533 dequeue_dl_entity(dl_se, 0);
1534 if (!dl_server(dl_se)) {
1535 update_stats_dequeue_dl(&rq->dl, dl_se, 0);
1536 dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
1537 }
aab03e05 1538
1539 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
1540 if (dl_server(dl_se))
1541 enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
1542 else
1543 enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
1544 }
1545
1546 if (!is_leftmost(dl_se, &rq->dl))
8875125e 1547 resched_curr(rq);
aab03e05 1548 }
1724813d 1549
1550 /*
1551 * The fair server (sole dl_server) does not account for real-time
1552 * workload because it is running fair work.
1553 */
1554 if (dl_se == &rq->fair_server)
1555 return;
1556
5f6bd380 1557#ifdef CONFIG_RT_GROUP_SCHED
1558 /*
1559 * Because -- for now -- we share the rt bandwidth, we need to
1560 * account our runtime there too, otherwise actual rt tasks
1561 * would be able to exceed the shared quota.
1562 *
1563 * Account to the root rt group for now.
1564 *
1565 * The solution we're working towards is having the RT groups scheduled
1566 * using deadline servers -- however there's a few nasties to figure
1567 * out before that can happen.
1568 */
1569 if (rt_bandwidth_enabled()) {
1570 struct rt_rq *rt_rq = &rq->rt;
1571
1572 raw_spin_lock(&rt_rq->rt_runtime_lock);
1573 /*
1574 * We'll let actual RT tasks worry about the overflow here, we
1575 * have our own CBS to keep us inline; only account when RT
1576 * bandwidth is relevant.
1724813d 1577 */
1578 if (sched_rt_bandwidth_account(rt_rq))
1579 rt_rq->rt_time += delta_exec;
1580 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1581 }
5f6bd380 1582#endif
1583}
1584
1585/*
1586 * In the non-defer mode, the idle time is not accounted, as the
1587 * server provides a guarantee.
1588 *
1589 * If the dl_server is in defer mode, the idle time is also considered
1590 * as time available for the fair server, avoiding a penalty for the
 1591 * rt scheduler that did not consume that time.
1592 */
1593void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
1594{
1595 s64 delta_exec, scaled_delta_exec;
1596
1597 if (!rq->fair_server.dl_defer)
1598 return;
1599
1600 /* no need to discount more */
1601 if (rq->fair_server.runtime < 0)
1602 return;
1603
1604 delta_exec = rq_clock_task(rq) - p->se.exec_start;
1605 if (delta_exec < 0)
1606 return;
1607
1608 scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec);
1609
1610 rq->fair_server.runtime -= scaled_delta_exec;
1611
1612 if (rq->fair_server.runtime < 0) {
1613 rq->fair_server.dl_defer_running = 0;
1614 rq->fair_server.runtime = 0;
1615 }
1616
1617 p->se.exec_start = rq_clock_task(rq);
1618}
1619
1620void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
1621{
1622 /* 0 runtime = fair server disabled */
1623 if (dl_se->dl_runtime)
1624 update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
1625}
1626
1627void dl_server_start(struct sched_dl_entity *dl_se)
1628{
1629 struct rq *rq = dl_se->rq;
1630
1631 /*
 1632 * XXX: applying the parameters does not work properly at the init
 1633 * phase for the fair server because things are not yet set. We need
 1634 * to improve this before making it generic.
1635 */
63ba8422 1636 if (!dl_server(dl_se)) {
5f6bd380 1637 u64 runtime = 50 * NSEC_PER_MSEC;
1638 u64 period = 1000 * NSEC_PER_MSEC;
1639
1640 dl_server_apply_params(dl_se, runtime, period, 1);
557a6bfc 1641
63ba8422 1642 dl_se->dl_server = 1;
a110a81c 1643 dl_se->dl_defer = 1;
63ba8422
PZ
1644 setup_new_dl_entity(dl_se);
1645 }
557a6bfc
PZ
1646
1647 if (!dl_se->dl_runtime)
1648 return;
1649
63ba8422 1650 enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
557a6bfc
PZ
1651 if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl))
1652 resched_curr(dl_se->rq);
63ba8422
PZ
1653}
1654
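A minimal standalone sketch (editor's addition, not kernel code) of what the 50ms/1000ms defaults above amount to, assuming to_ratio() uses the BW_SHIFT == 20 fixed point:

/* Editor's sketch: bandwidth implied by the default fair-server parameters. */
#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT_SKETCH	20	/* assumption: matches the kernel's BW_SHIFT */

static uint64_t to_ratio_sketch(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT_SKETCH) / period;
}

int main(void)
{
	uint64_t runtime = 50ULL * 1000 * 1000;		/*   50 ms in ns */
	uint64_t period  = 1000ULL * 1000 * 1000;	/* 1000 ms in ns */
	uint64_t bw = to_ratio_sketch(period, runtime);

	/* 50ms every 1000ms is 5% of one CPU: ~0.05 * 2^20 == 52428 */
	printf("fair server dl_bw = %llu (%.2f%% of a CPU)\n",
	       (unsigned long long)bw,
	       100.0 * (double)bw / (double)(1 << BW_SHIFT_SKETCH));
	return 0;
}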
1655void dl_server_stop(struct sched_dl_entity *dl_se)
1656{
557a6bfc
PZ
1657 if (!dl_se->dl_runtime)
1658 return;
1659
63ba8422 1660 dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
a110a81c
DBO
1661 hrtimer_try_to_cancel(&dl_se->dl_timer);
1662 dl_se->dl_defer_armed = 0;
1663 dl_se->dl_throttled = 0;
63ba8422
PZ
1664}
1665
1666void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
1667 dl_server_has_tasks_f has_tasks,
c8a85394
JFG
1668 dl_server_pick_f pick_next,
1669 dl_server_pick_f pick_task)
63ba8422
PZ
1670{
1671 dl_se->rq = rq;
1672 dl_se->server_has_tasks = has_tasks;
c8a85394
JFG
1673 dl_se->server_pick_next = pick_next;
1674 dl_se->server_pick_task = pick_task;
63ba8422
PZ
1675}
1676
d741f297
DBO
1677void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq)
1678{
1679 u64 new_bw = dl_se->dl_bw;
1680 int cpu = cpu_of(rq);
1681 struct dl_bw *dl_b;
1682
1683 dl_b = dl_bw_of(cpu_of(rq));
1684 guard(raw_spinlock)(&dl_b->lock);
1685
1686 if (!dl_bw_cpus(cpu))
1687 return;
1688
1689 __dl_add(dl_b, new_bw, dl_bw_cpus(cpu));
1690}
1691
1692int dl_server_apply_params(struct sched_dl_entity *dl_se, u64 runtime, u64 period, bool init)
1693{
1694 u64 old_bw = init ? 0 : to_ratio(dl_se->dl_period, dl_se->dl_runtime);
1695 u64 new_bw = to_ratio(period, runtime);
1696 struct rq *rq = dl_se->rq;
1697 int cpu = cpu_of(rq);
1698 struct dl_bw *dl_b;
1699 unsigned long cap;
1700 int retval = 0;
1701 int cpus;
1702
1703 dl_b = dl_bw_of(cpu);
1704 guard(raw_spinlock)(&dl_b->lock);
1705
1706 cpus = dl_bw_cpus(cpu);
1707 cap = dl_bw_capacity(cpu);
1708
1709 if (__dl_overflow(dl_b, cap, old_bw, new_bw))
1710 return -EBUSY;
1711
1712 if (init) {
1713 __add_rq_bw(new_bw, &rq->dl);
1714 __dl_add(dl_b, new_bw, cpus);
1715 } else {
1716 __dl_sub(dl_b, dl_se->dl_bw, cpus);
1717 __dl_add(dl_b, new_bw, cpus);
1718
1719 dl_rq_change_utilization(rq, dl_se, new_bw);
1720 }
1721
1722 dl_se->dl_runtime = runtime;
1723 dl_se->dl_deadline = period;
1724 dl_se->dl_period = period;
1725
1726 dl_se->runtime = 0;
1727 dl_se->deadline = 0;
1728
1729 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
1730 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
1731
1732 return retval;
1733}
1734
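The admission test used above, restated as a standalone sketch (editor's addition). The struct and function names, the -1 "no limit" sentinel, and the >> 10 capacity scaling are assumptions modeled on the surrounding code, not a verbatim copy of __dl_overflow():

/* Editor's sketch of the per-root-domain admission check used above. */
#include <stdbool.h>
#include <stdint.h>

struct dl_bw_sketch {
	int64_t  bw;		/* per-unit-capacity limit, -1 means "no limit" */
	uint64_t total_bw;	/* bandwidth already admitted in this root domain */
};

/*
 * Would replacing old_bw with new_bw exceed the limit scaled by the
 * available capacity? (cap is roughly 1024 per full-performance CPU.)
 */
static bool dl_overflow_sketch(const struct dl_bw_sketch *b, uint64_t cap,
			       uint64_t old_bw, uint64_t new_bw)
{
	if (b->bw == -1)
		return false;	/* admission control disabled */

	return (((uint64_t)b->bw * cap) >> 10) < b->total_bw - old_bw + new_bw;
}

int main(void)
{
	struct dl_bw_sketch b = { .bw = 996147 /* ~95% */, .total_bw = 0 };

	/* admitting a ~5% server on a 4-CPU (cap == 4096) root domain: fits */
	return dl_overflow_sketch(&b, 4096, 0, 52428) ? 1 : 0;
}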
63ba8422
PZ
1735/*
1736 * Update the current task's runtime statistics (provided it is still
1737 * a -deadline task and has not been removed from the dl_rq).
1738 */
1739static void update_curr_dl(struct rq *rq)
1740{
1741 struct task_struct *curr = rq->curr;
1742 struct sched_dl_entity *dl_se = &curr->dl;
1743 s64 delta_exec;
1744
1745 if (!dl_task(curr) || !on_dl_rq(dl_se))
1746 return;
1747
1748 /*
1749 * Consumed budget is computed considering the time as
1750 * observed by schedulable tasks (excluding time spent
1751 * in hardirq context, etc.). Deadlines are instead
1752 * computed using hard walltime. This seems to be the more
1753 * natural solution, but the full ramifications of this
1754 * approach need further study.
1755 */
1756 delta_exec = update_curr_common(rq);
1757 update_curr_dl_se(rq, dl_se, delta_exec);
1758}
1759
209a0cbd
LA
1760static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1761{
1762 struct sched_dl_entity *dl_se = container_of(timer,
1763 struct sched_dl_entity,
1764 inactive_timer);
63ba8422 1765 struct task_struct *p = NULL;
209a0cbd
LA
1766 struct rq_flags rf;
1767 struct rq *rq;
1768
63ba8422
PZ
1769 if (!dl_server(dl_se)) {
1770 p = dl_task_of(dl_se);
1771 rq = task_rq_lock(p, &rf);
1772 } else {
1773 rq = dl_se->rq;
1774 rq_lock(rq, &rf);
1775 }
209a0cbd 1776
ecda2b66
JL
1777 sched_clock_tick();
1778 update_rq_clock(rq);
1779
63ba8422
PZ
1780 if (dl_server(dl_se))
1781 goto no_task;
1782
2f064a59 1783 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
387e3130
LA
1784 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1785
2f064a59 1786 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
794a56eb
JL
1787 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1788 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
209a0cbd
LA
1789 dl_se->dl_non_contending = 0;
1790 }
387e3130
LA
1791
1792 raw_spin_lock(&dl_b->lock);
8c0944ce 1793 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
387e3130 1794 raw_spin_unlock(&dl_b->lock);
9e07d45c 1795 __dl_clear_params(dl_se);
209a0cbd
LA
1796
1797 goto unlock;
1798 }
63ba8422
PZ
1799
1800no_task:
209a0cbd
LA
1801 if (dl_se->dl_non_contending == 0)
1802 goto unlock;
1803
794a56eb 1804 sub_running_bw(dl_se, &rq->dl);
209a0cbd
LA
1805 dl_se->dl_non_contending = 0;
1806unlock:
63ba8422
PZ
1807
1808 if (!dl_server(dl_se)) {
1809 task_rq_unlock(rq, p, &rf);
1810 put_task_struct(p);
1811 } else {
1812 rq_unlock(rq, &rf);
1813 }
209a0cbd
LA
1814
1815 return HRTIMER_NORESTART;
1816}
1817
9e07d45c 1818static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
209a0cbd
LA
1819{
1820 struct hrtimer *timer = &dl_se->inactive_timer;
1821
850377a8 1822 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
209a0cbd
LA
1823 timer->function = inactive_task_timer;
1824}
1825
f4478e7c
DE
1826#define __node_2_dle(node) \
1827 rb_entry((node), struct sched_dl_entity, rb_node)
1828
1baca4ce
JL
1829#ifdef CONFIG_SMP
1830
1baca4ce
JL
1831static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1832{
1833 struct rq *rq = rq_of_dl_rq(dl_rq);
1834
1835 if (dl_rq->earliest_dl.curr == 0 ||
1836 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
b13772f8
PZ
1837 if (dl_rq->earliest_dl.curr == 0)
1838 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1baca4ce 1839 dl_rq->earliest_dl.curr = deadline;
d8206bb3 1840 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1baca4ce
JL
1841 }
1842}
1843
1844static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1845{
1846 struct rq *rq = rq_of_dl_rq(dl_rq);
1847
1848 /*
1849 * Since we may have removed our earliest (and/or next earliest)
1850 * task we must recompute them.
1851 */
1852 if (!dl_rq->dl_nr_running) {
1853 dl_rq->earliest_dl.curr = 0;
1854 dl_rq->earliest_dl.next = 0;
d8206bb3 1855 cpudl_clear(&rq->rd->cpudl, rq->cpu);
b13772f8 1856 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1baca4ce 1857 } else {
f4478e7c
DE
1858 struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
1859 struct sched_dl_entity *entry = __node_2_dle(leftmost);
1baca4ce 1860
1baca4ce 1861 dl_rq->earliest_dl.curr = entry->deadline;
d8206bb3 1862 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1baca4ce
JL
1863 }
1864}
1865
1866#else
1867
1868static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1869static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1870
1871#endif /* CONFIG_SMP */
1872
1873static inline
1874void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1875{
1baca4ce
JL
1876 u64 deadline = dl_se->deadline;
1877
1baca4ce 1878 dl_rq->dl_nr_running++;
72465447 1879 add_nr_running(rq_of_dl_rq(dl_rq), 1);
1baca4ce
JL
1880
1881 inc_dl_deadline(dl_rq, deadline);
1baca4ce
JL
1882}
1883
1884static inline
1885void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1886{
1baca4ce
JL
1887 WARN_ON(!dl_rq->dl_nr_running);
1888 dl_rq->dl_nr_running--;
72465447 1889 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1baca4ce
JL
1890
1891 dec_dl_deadline(dl_rq, dl_se->deadline);
1baca4ce
JL
1892}
1893
8ecca394
PZ
1894static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1895{
1896 return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1897}
1898
b5eb4a5f
YS
1899static inline struct sched_statistics *
1900__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
1901{
1902 return &dl_task_of(dl_se)->stats;
1903}
1904
1905static inline void
1906update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1907{
1908 struct sched_statistics *stats;
1909
1910 if (!schedstat_enabled())
1911 return;
1912
1913 stats = __schedstats_from_dl_se(dl_se);
1914 __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1915}
1916
1917static inline void
1918update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1919{
1920 struct sched_statistics *stats;
1921
1922 if (!schedstat_enabled())
1923 return;
1924
1925 stats = __schedstats_from_dl_se(dl_se);
1926 __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1927}
1928
1929static inline void
1930update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1931{
1932 struct sched_statistics *stats;
1933
1934 if (!schedstat_enabled())
1935 return;
1936
1937 stats = __schedstats_from_dl_se(dl_se);
1938 __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1939}
1940
1941static inline void
1942update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1943 int flags)
1944{
1945 if (!schedstat_enabled())
1946 return;
1947
1948 if (flags & ENQUEUE_WAKEUP)
1949 update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
1950}
1951
1952static inline void
1953update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1954 int flags)
1955{
1956 struct task_struct *p = dl_task_of(dl_se);
1957
1958 if (!schedstat_enabled())
1959 return;
1960
1961 if ((flags & DEQUEUE_SLEEP)) {
1962 unsigned int state;
1963
1964 state = READ_ONCE(p->__state);
1965 if (state & TASK_INTERRUPTIBLE)
1966 __schedstat_set(p->stats.sleep_start,
1967 rq_clock(rq_of_dl_rq(dl_rq)));
1968
1969 if (state & TASK_UNINTERRUPTIBLE)
1970 __schedstat_set(p->stats.block_start,
1971 rq_clock(rq_of_dl_rq(dl_rq)));
1972 }
1973}
1974
aab03e05
DF
1975static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1976{
1977 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
aab03e05 1978
09348d75 1979 WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
aab03e05 1980
8ecca394 1981 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
aab03e05 1982
1baca4ce 1983 inc_dl_tasks(dl_se, dl_rq);
aab03e05
DF
1984}
1985
1986static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1987{
1988 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1989
1990 if (RB_EMPTY_NODE(&dl_se->rb_node))
1991 return;
1992
2161573e 1993 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
8ecca394 1994
aab03e05
DF
1995 RB_CLEAR_NODE(&dl_se->rb_node);
1996
1baca4ce 1997 dec_dl_tasks(dl_se, dl_rq);
aab03e05
DF
1998}
1999
2000static void
2279f540 2001enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
aab03e05 2002{
09348d75 2003 WARN_ON_ONCE(on_dl_rq(dl_se));
aab03e05 2004
b5eb4a5f
YS
2005 update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
2006
2f7a0f58
PZ
2007 /*
2008 * Check if a constrained deadline task was activated
2009 * after the deadline but before the next period.
2010 * If that is the case, the task will be throttled and
2011 * the replenishment timer will be set to the next period.
2012 */
2013 if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
2014 dl_check_constrained_dl(dl_se);
2015
2016 if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
2017 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2018
2019 add_rq_bw(dl_se, dl_rq);
2020 add_running_bw(dl_se, dl_rq);
2021 }
2022
2023 /*
2024 * If p is throttled, we do not enqueue it. In fact, if it exhausted
2025 * its budget it needs a replenishment and, since it now is on
2026 * its rq, the bandwidth timer callback (which clearly has not
2027 * run yet) will take care of this.
2028 * However, the active utilization does not depend on the fact
2029 * that the task is on the runqueue or not (but depends on the
2030 * task's state - in GRUB parlance, "inactive" vs "active contending").
2031 * In other words, even if a task is throttled its utilization must
2032 * be counted in the active utilization; hence, we need to call
2033 * add_running_bw().
2034 */
a110a81c 2035 if (!dl_se->dl_defer && dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
2f7a0f58
PZ
2036 if (flags & ENQUEUE_WAKEUP)
2037 task_contending(dl_se, flags);
2038
2039 return;
2040 }
2041
aab03e05
DF
2042 /*
2043 * If this is a wakeup or a new instance, the scheduling
2044 * parameters of the task might need updating. Otherwise,
2045 * we want a replenishment of its runtime.
2046 */
e36d8677 2047 if (flags & ENQUEUE_WAKEUP) {
8fd27231 2048 task_contending(dl_se, flags);
2279f540 2049 update_dl_entity(dl_se);
e36d8677 2050 } else if (flags & ENQUEUE_REPLENISH) {
2279f540 2051 replenish_dl_entity(dl_se);
295d6d5e 2052 } else if ((flags & ENQUEUE_RESTORE) &&
63ba8422 2053 dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
295d6d5e 2054 setup_new_dl_entity(dl_se);
e36d8677 2055 }
aab03e05 2056
a110a81c
DBO
2057 /*
2058 * If the reservation is still throttled, e.g., it got replenished but is a
2059 * deferred task and still got to wait, don't enqueue.
2060 */
2061 if (dl_se->dl_throttled && start_dl_timer(dl_se))
2062 return;
2063
2064 /*
2065 * We're about to enqueue, make sure we're not ->dl_throttled!
2066 * In case the timer was not started, say because the defer time
2067 * has passed, mark as not throttled and mark unarmed.
2068 * Also cancel earlier timers, since letting those run is pointless.
2069 */
2070 if (dl_se->dl_throttled) {
2071 hrtimer_try_to_cancel(&dl_se->dl_timer);
2072 dl_se->dl_defer_armed = 0;
2073 dl_se->dl_throttled = 0;
2074 }
2075
aab03e05
DF
2076 __enqueue_dl_entity(dl_se);
2077}
2078
2f7a0f58 2079static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
aab03e05
DF
2080{
2081 __dequeue_dl_entity(dl_se);
2f7a0f58
PZ
2082
2083 if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
2084 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2085
2086 sub_running_bw(dl_se, dl_rq);
2087 sub_rq_bw(dl_se, dl_rq);
2088 }
2089
2090 /*
2091	 * This check allows us to start the inactive timer (or to immediately
2092 * decrease the active utilization, if needed) in two cases:
2093 * when the task blocks and when it is terminating
2094 * (p->state == TASK_DEAD). We can handle the two cases in the same
2095 * way, because from GRUB's point of view the same thing is happening
2096 * (the task moves from "active contending" to "active non contending"
2097 * or "inactive")
2098 */
2099 if (flags & DEQUEUE_SLEEP)
2100 task_non_contending(dl_se);
aab03e05
DF
2101}
2102
2103static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2104{
2279f540 2105 if (is_dl_boosted(&p->dl)) {
feff2e65
DBO
2106 /*
2107 * Because of delays in the detection of the overrun of a
2108 * thread's runtime, it might be the case that a thread
2109	 * goes to sleep in an rt mutex with negative runtime. As
2110 * a consequence, the thread will be throttled.
2111 *
2112 * While waiting for the mutex, this thread can also be
2113 * boosted via PI, resulting in a thread that is throttled
2114 * and boosted at the same time.
2115 *
2116 * In this case, the boost overrides the throttle.
2117 */
2118 if (p->dl.dl_throttled) {
2119 /*
2120 * The replenish timer needs to be canceled. No
2121 * problem if it fires concurrently: boosted threads
2122 * are ignored in dl_task_timer().
b58652db
WLC
2123 *
2124 * If the timer callback was running (hrtimer_try_to_cancel == -1),
2125 * it will eventually call put_task_struct().
feff2e65 2126 */
b58652db
WLC
2127 if (hrtimer_try_to_cancel(&p->dl.dl_timer) == 1 &&
2128 !dl_server(&p->dl))
2129 put_task_struct(p);
feff2e65
DBO
2130 p->dl.dl_throttled = 0;
2131 }
64be6f1f
JL
2132 } else if (!dl_prio(p->normal_prio)) {
2133 /*
46fcc4b0
LS
2134 * Special case in which we have a !SCHED_DEADLINE task that is going
2135 * to be deboosted, but exceeds its runtime while doing so. No point in
2136 * replenishing it, as it's going to return back to its original
2137 * scheduling class after this. If it has been throttled, we need to
2138 * clear the flag, otherwise the task may wake up as throttled after
2139 * being boosted again with no means to replenish the runtime and clear
2140 * the throttle.
64be6f1f 2141 */
46fcc4b0 2142 p->dl.dl_throttled = 0;
ddfc7103
JL
2143 if (!(flags & ENQUEUE_REPLENISH))
2144 printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
2145 task_pid_nr(p));
2146
64be6f1f
JL
2147 return;
2148 }
2d3d891d 2149
b5eb4a5f
YS
2150 check_schedstat_required();
2151 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
2152
2f7a0f58
PZ
2153 if (p->on_rq == TASK_ON_RQ_MIGRATING)
2154 flags |= ENQUEUE_MIGRATING;
2155
2279f540 2156 enqueue_dl_entity(&p->dl, flags);
1baca4ce 2157
63ba8422
PZ
2158 if (dl_server(&p->dl))
2159 return;
2160
2f7a0f58 2161 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
1baca4ce 2162 enqueue_pushable_dl_task(rq, p);
aab03e05
DF
2163}
2164
863ccdbb 2165static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
aab03e05
DF
2166{
2167 update_curr_dl(rq);
e36d8677 2168
2f7a0f58
PZ
2169 if (p->on_rq == TASK_ON_RQ_MIGRATING)
2170 flags |= DEQUEUE_MIGRATING;
e36d8677 2171
63ba8422
PZ
2172 dequeue_dl_entity(&p->dl, flags);
2173 if (!p->dl.dl_throttled && !dl_server(&p->dl))
2174 dequeue_pushable_dl_task(rq, p);
863ccdbb
PZ
2175
2176 return true;
aab03e05
DF
2177}
2178
2179/*
2180 * The yield semantics for -deadline tasks are:
2181 *
2182 * get off the CPU until our next instance, with
2183 * a new runtime. This is of little use now, since we
2184 * don't have a bandwidth reclaiming mechanism. Anyway,
2185 * bandwidth reclaiming is planned for the future, and
2186 * yield_task_dl will indicate that some spare budget
2187 * is available for other task instances to use.
2188 */
2189static void yield_task_dl(struct rq *rq)
2190{
aab03e05
DF
2191 /*
2192 * We make the task go to sleep until its current deadline by
2193 * forcing its runtime to zero. This way, update_curr_dl() stops
2194 * it and the bandwidth timer will wake it up and will give it
5bfd126e 2195 * new scheduling parameters (thanks to dl_yielded=1).
aab03e05 2196 */
48be3a67
PZ
2197 rq->curr->dl.dl_yielded = 1;
2198
6f1607f1 2199 update_rq_clock(rq);
aab03e05 2200 update_curr_dl(rq);
44fb085b
WL
2201 /*
2202 * Tell update_rq_clock() that we've just updated,
2203 * so we don't do microscopic update in schedule()
2204 * and double the fastpath cost.
2205 */
adcc8da8 2206 rq_clock_skip_update(rq);
aab03e05
DF
2207}
2208
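A standalone userspace sketch (editor's addition, not part of this file) of the pattern the comment above serves: a periodic SCHED_DEADLINE task handing back its leftover budget with sched_yield(). The raw syscall and the struct layout follow the sched_setattr(2) man page, and do_instance_work() is a hypothetical placeholder:

/* Editor's sketch: periodic SCHED_DEADLINE task that yields each instance. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

struct sched_attr_sketch {		/* layout per sched_setattr(2) */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* ns */
	uint64_t sched_deadline;	/* ns */
	uint64_t sched_period;		/* ns */
};

static void do_instance_work(void) { /* hypothetical per-instance work */ }

int main(void)
{
	struct sched_attr_sketch attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  =  2 * 1000 * 1000;		/*  2 ms budget   */
	attr.sched_deadline = 10 * 1000 * 1000;		/* 10 ms deadline */
	attr.sched_period   = 10 * 1000 * 1000;		/* 10 ms period   */

	if (syscall(SYS_sched_setattr, 0, &attr, 0))
		return 1;

	for (;;) {
		do_instance_work();
		sched_yield();	/* sleep until the next period, then get a new runtime */
	}
}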
1baca4ce
JL
2209#ifdef CONFIG_SMP
2210
973bee49
SX
2211static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
2212 struct rq *rq)
2213{
2214 return (!rq->dl.dl_nr_running ||
2215 dl_time_before(p->dl.deadline,
2216 rq->dl.earliest_dl.curr));
2217}
2218
1baca4ce 2219static int find_later_rq(struct task_struct *task);
1baca4ce
JL
2220
2221static int
3aef1551 2222select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1baca4ce
JL
2223{
2224 struct task_struct *curr;
b4118988 2225 bool select_rq;
1baca4ce
JL
2226 struct rq *rq;
2227
3aef1551 2228 if (!(flags & WF_TTWU))
1baca4ce
JL
2229 goto out;
2230
2231 rq = cpu_rq(cpu);
2232
2233 rcu_read_lock();
316c1608 2234 curr = READ_ONCE(rq->curr); /* unlocked access */
1baca4ce
JL
2235
2236 /*
2237 * If we are dealing with a -deadline task, we must
2238 * decide where to wake it up.
2239 * If it has a later deadline and the current task
2240 * on this rq can't move (provided the waking task
2241 * can!) we prefer to send it somewhere else. On the
2242 * other hand, if it has a shorter deadline, we
2243	 * try to make it stay here, since it might be important.
2244 */
b4118988
LA
2245 select_rq = unlikely(dl_task(curr)) &&
2246 (curr->nr_cpus_allowed < 2 ||
2247 !dl_entity_preempt(&p->dl, &curr->dl)) &&
2248 p->nr_cpus_allowed > 1;
2249
2250 /*
2251 * Take the capacity of the CPU into account to
2252 * ensure it fits the requirement of the task.
2253 */
740cf8a7 2254 if (sched_asym_cpucap_active())
b4118988
LA
2255 select_rq |= !dl_task_fits_capacity(p, cpu);
2256
2257 if (select_rq) {
1baca4ce
JL
2258 int target = find_later_rq(p);
2259
9d514262 2260 if (target != -1 &&
973bee49 2261 dl_task_is_earliest_deadline(p, cpu_rq(target)))
1baca4ce
JL
2262 cpu = target;
2263 }
2264 rcu_read_unlock();
2265
2266out:
2267 return cpu;
2268}
2269
1327237a 2270static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
209a0cbd 2271{
2679a837 2272 struct rq_flags rf;
209a0cbd
LA
2273 struct rq *rq;
2274
2f064a59 2275 if (READ_ONCE(p->__state) != TASK_WAKING)
209a0cbd
LA
2276 return;
2277
2278 rq = task_rq(p);
2279 /*
2280 * Since p->state == TASK_WAKING, set_task_cpu() has been called
2281 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
2282 * rq->lock is not... So, lock it
2283 */
2679a837 2284 rq_lock(rq, &rf);
8fd27231 2285 if (p->dl.dl_non_contending) {
b4da13aa 2286 update_rq_clock(rq);
794a56eb 2287 sub_running_bw(&p->dl, &rq->dl);
8fd27231
LA
2288 p->dl.dl_non_contending = 0;
2289 /*
2290 * If the timer handler is currently running and the
3b03706f 2291 * timer cannot be canceled, inactive_task_timer()
8fd27231
LA
2292		 * will see that dl_non_contending is not set, and
2293 * will not touch the rq's active utilization,
2294 * so we are still safe.
2295 */
2296 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2297 put_task_struct(p);
2298 }
794a56eb 2299 sub_rq_bw(&p->dl, &rq->dl);
2679a837 2300 rq_unlock(rq, &rf);
209a0cbd
LA
2301}
2302
1baca4ce
JL
2303static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
2304{
2305 /*
2306 * Current can't be migrated, useless to reschedule,
2307 * let's hope p can move out.
2308 */
4b53a341 2309 if (rq->curr->nr_cpus_allowed == 1 ||
3261ed0b 2310 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1baca4ce
JL
2311 return;
2312
2313 /*
2314 * p is migratable, so let's not schedule it and
2315 * see if it is pushed or pulled somewhere else.
2316 */
4b53a341 2317 if (p->nr_cpus_allowed != 1 &&
3261ed0b 2318 cpudl_find(&rq->rd->cpudl, p, NULL))
1baca4ce
JL
2319 return;
2320
8875125e 2321 resched_curr(rq);
1baca4ce
JL
2322}
2323
6e2df058
PZ
2324static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
2325{
2326 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
2327 /*
2328		 * This is OK because current is on_cpu, which avoids it being
2329		 * picked for load balancing; preemption/IRQs are still
2330		 * disabled, avoiding further scheduler activity on it, and we've
2331		 * not yet started the picking loop.
2332 */
2333 rq_unpin_lock(rq, rf);
2334 pull_dl_task(rq);
2335 rq_repin_lock(rq, rf);
2336 }
2337
2338 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
2339}
1baca4ce
JL
2340#endif /* CONFIG_SMP */
2341
aab03e05
DF
2342/*
2343 * Only called when both the current and waking task are -deadline
2344 * tasks.
2345 */
e23edc86 2346static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
aab03e05
DF
2347 int flags)
2348{
1baca4ce 2349 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
8875125e 2350 resched_curr(rq);
1baca4ce
JL
2351 return;
2352 }
2353
2354#ifdef CONFIG_SMP
2355 /*
2356 * In the unlikely case current and p have the same deadline
2357 * let us try to decide what's the best thing to do...
2358 */
332ac17e
DF
2359 if ((p->dl.deadline == rq->curr->dl.deadline) &&
2360 !test_tsk_need_resched(rq->curr))
1baca4ce
JL
2361 check_preempt_equal_dl(rq, p);
2362#endif /* CONFIG_SMP */
aab03e05
DF
2363}
2364
2365#ifdef CONFIG_SCHED_HRTICK
63ba8422 2366static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
aab03e05 2367{
63ba8422 2368 hrtick_start(rq, dl_se->runtime);
aab03e05 2369}
36ce9881 2370#else /* !CONFIG_SCHED_HRTICK */
63ba8422 2371static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
36ce9881
WL
2372{
2373}
aab03e05
DF
2374#endif
2375
a0e813f2 2376static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
ff1cdc94 2377{
b5eb4a5f
YS
2378 struct sched_dl_entity *dl_se = &p->dl;
2379 struct dl_rq *dl_rq = &rq->dl;
2380
ff1cdc94 2381 p->se.exec_start = rq_clock_task(rq);
b5eb4a5f
YS
2382 if (on_dl_rq(&p->dl))
2383 update_stats_wait_end_dl(dl_rq, dl_se);
ff1cdc94
MS
2384
2385 /* You can't push away the running task */
2386 dequeue_pushable_dl_task(rq, p);
f95d4eae 2387
a0e813f2
PZ
2388 if (!first)
2389 return;
2390
f95d4eae
PZ
2391 if (rq->curr->sched_class != &dl_sched_class)
2392 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2393
2394 deadline_queue_push_tasks(rq);
ff1cdc94
MS
2395}
2396
821aecd0 2397static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
aab03e05 2398{
2161573e 2399 struct rb_node *left = rb_first_cached(&dl_rq->root);
aab03e05
DF
2400
2401 if (!left)
2402 return NULL;
2403
f4478e7c 2404 return __node_2_dle(left);
aab03e05
DF
2405}
2406
c8a85394
JFG
2407/*
2408 * __pick_next_task_dl - Helper to pick the next -deadline task to run.
2409 * @rq: The runqueue to pick the next task from.
2410 * @peek: If true, just peek at the next task. Only relevant for the dl_server.
2411 */
2412static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek)
aab03e05
DF
2413{
2414 struct sched_dl_entity *dl_se;
6e2df058 2415 struct dl_rq *dl_rq = &rq->dl;
aab03e05 2416 struct task_struct *p;
aab03e05 2417
63ba8422 2418again:
6e2df058 2419 if (!sched_dl_runnable(rq))
aab03e05
DF
2420 return NULL;
2421
821aecd0 2422 dl_se = pick_next_dl_entity(dl_rq);
09348d75 2423 WARN_ON_ONCE(!dl_se);
63ba8422
PZ
2424
2425 if (dl_server(dl_se)) {
c8a85394
JFG
2426 if (IS_ENABLED(CONFIG_SMP) && peek)
2427 p = dl_se->server_pick_task(dl_se);
2428 else
2429 p = dl_se->server_pick_next(dl_se);
63ba8422 2430 if (!p) {
63ba8422
PZ
2431 dl_se->dl_yielded = 1;
2432 update_curr_dl_se(rq, dl_se, 0);
2433 goto again;
2434 }
2435 p->dl_server = dl_se;
2436 } else {
2437 p = dl_task_of(dl_se);
2438 }
21f56ffe
PZ
2439
2440 return p;
2441}
2442
c8a85394
JFG
2443#ifdef CONFIG_SMP
2444static struct task_struct *pick_task_dl(struct rq *rq)
2445{
2446 return __pick_next_task_dl(rq, true);
2447}
2448#endif
2449
21f56ffe
PZ
2450static struct task_struct *pick_next_task_dl(struct rq *rq)
2451{
2452 struct task_struct *p;
2453
c8a85394 2454 p = __pick_next_task_dl(rq, false);
63ba8422
PZ
2455 if (!p)
2456 return p;
2457
2458 if (!p->dl_server)
21f56ffe
PZ
2459 set_next_task_dl(rq, p, true);
2460
63ba8422
PZ
2461 if (hrtick_enabled(rq))
2462 start_hrtick_dl(rq, &p->dl);
2463
aab03e05
DF
2464 return p;
2465}
2466
6e2df058 2467static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
aab03e05 2468{
b5eb4a5f
YS
2469 struct sched_dl_entity *dl_se = &p->dl;
2470 struct dl_rq *dl_rq = &rq->dl;
2471
2472 if (on_dl_rq(&p->dl))
2473 update_stats_wait_start_dl(dl_rq, dl_se);
2474
aab03e05 2475 update_curr_dl(rq);
1baca4ce 2476
23127296 2477 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
4b53a341 2478 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1baca4ce 2479 enqueue_pushable_dl_task(rq, p);
aab03e05
DF
2480}
2481
d84b3131
FW
2482/*
2483 * scheduler tick hitting a task of our scheduling class.
2484 *
2485 * NOTE: This function can be called remotely by the tick offload that
2486 * goes along full dynticks. Therefore no local assumption can be made
2487 * and everything must be accessed through the @rq and @curr passed in
2488 * parameters.
2489 */
aab03e05
DF
2490static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2491{
2492 update_curr_dl(rq);
2493
23127296 2494 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
a7bebf48
WL
2495 /*
2496 * Even when we have runtime, update_curr_dl() might have resulted in us
2497 * not being the leftmost task anymore. In that case NEED_RESCHED will
2498 * be set and schedule() will start a new hrtick for the next task.
2499 */
e0ee463c 2500 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
63ba8422
PZ
2501 is_leftmost(&p->dl, &rq->dl))
2502 start_hrtick_dl(rq, &p->dl);
aab03e05
DF
2503}
2504
2505static void task_fork_dl(struct task_struct *p)
2506{
2507 /*
2508 * SCHED_DEADLINE tasks cannot fork and this is achieved through
2509 * sched_fork()
2510 */
2511}
2512
1baca4ce
JL
2513#ifdef CONFIG_SMP
2514
2515/* Only try algorithms three times */
2516#define DL_MAX_TRIES 3
2517
2518static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2519{
0b9d46fc 2520 if (!task_on_cpu(rq, p) &&
95158a89 2521 cpumask_test_cpu(cpu, &p->cpus_mask))
1baca4ce 2522 return 1;
1baca4ce
JL
2523 return 0;
2524}
2525
8b5e770e
WL
2526/*
2527 * Return the earliest task on the rq's pushable list that can run
2528 * on the given CPU, or NULL if there is none:
2529 */
2530static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2531{
8b5e770e 2532 struct task_struct *p = NULL;
f4478e7c 2533 struct rb_node *next_node;
8b5e770e
WL
2534
2535 if (!has_pushable_dl_tasks(rq))
2536 return NULL;
2537
f4478e7c
DE
2538 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2539
8b5e770e
WL
2540next_node:
2541 if (next_node) {
f4478e7c 2542 p = __node_2_pdl(next_node);
8b5e770e
WL
2543
2544 if (pick_dl_task(rq, p, cpu))
2545 return p;
2546
2547 next_node = rb_next(next_node);
2548 goto next_node;
2549 }
2550
2551 return NULL;
2552}
2553
1baca4ce
JL
2554static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2555
2556static int find_later_rq(struct task_struct *task)
2557{
2558 struct sched_domain *sd;
4ba29684 2559 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1baca4ce 2560 int this_cpu = smp_processor_id();
b18c3ca1 2561 int cpu = task_cpu(task);
1baca4ce
JL
2562
2563 /* Make sure the mask is initialized first */
2564 if (unlikely(!later_mask))
2565 return -1;
2566
4b53a341 2567 if (task->nr_cpus_allowed == 1)
1baca4ce
JL
2568 return -1;
2569
91ec6778
JL
2570 /*
2571 * We have to consider system topology and task affinity
97fb7a0a 2572 * first, then we can look for a suitable CPU.
91ec6778 2573 */
3261ed0b 2574 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1baca4ce
JL
2575 return -1;
2576
2577 /*
b18c3ca1
BP
2578	 * If we are here, some targets have been found. The most
2579	 * suitable of them is, among the runqueues whose current
2580	 * tasks have later deadlines than the task's one, the
2581	 * rq with the latest possible one.
1baca4ce
JL
2582 *
2583 * Now we check how well this matches with task's
2584 * affinity and system topology.
2585 *
97fb7a0a	 2586	 * The last CPU where the task ran is our first
1baca4ce
JL
2587 * guess, since it is most likely cache-hot there.
2588 */
2589 if (cpumask_test_cpu(cpu, later_mask))
2590 return cpu;
2591 /*
2592 * Check if this_cpu is to be skipped (i.e., it is
2593 * not in the mask) or not.
2594 */
2595 if (!cpumask_test_cpu(this_cpu, later_mask))
2596 this_cpu = -1;
2597
2598 rcu_read_lock();
2599 for_each_domain(cpu, sd) {
2600 if (sd->flags & SD_WAKE_AFFINE) {
b18c3ca1 2601 int best_cpu;
1baca4ce
JL
2602
2603 /*
2604 * If possible, preempting this_cpu is
2605 * cheaper than migrating.
2606 */
2607 if (this_cpu != -1 &&
2608 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2609 rcu_read_unlock();
2610 return this_cpu;
2611 }
2612
14e292f8
PZ
2613 best_cpu = cpumask_any_and_distribute(later_mask,
2614 sched_domain_span(sd));
1baca4ce 2615 /*
97fb7a0a	 2616			 * Last chance: if a CPU that is in both later_mask
b18c3ca1	 2617			 * and the current sd span is valid, that becomes our
97fb7a0a 2618 * choice. Of course, the latest possible CPU is
b18c3ca1 2619 * already under consideration through later_mask.
1baca4ce 2620 */
b18c3ca1 2621 if (best_cpu < nr_cpu_ids) {
1baca4ce
JL
2622 rcu_read_unlock();
2623 return best_cpu;
2624 }
2625 }
2626 }
2627 rcu_read_unlock();
2628
2629 /*
2630 * At this point, all our guesses failed, we just return
2631 * 'something', and let the caller sort the things out.
2632 */
2633 if (this_cpu != -1)
2634 return this_cpu;
2635
14e292f8 2636 cpu = cpumask_any_distribute(later_mask);
1baca4ce
JL
2637 if (cpu < nr_cpu_ids)
2638 return cpu;
2639
2640 return -1;
2641}
2642
2643/* Locks the rq it finds */
2644static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2645{
2646 struct rq *later_rq = NULL;
2647 int tries;
2648 int cpu;
2649
2650 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2651 cpu = find_later_rq(task);
2652
2653 if ((cpu == -1) || (cpu == rq->cpu))
2654 break;
2655
2656 later_rq = cpu_rq(cpu);
2657
973bee49 2658 if (!dl_task_is_earliest_deadline(task, later_rq)) {
9d514262
WL
2659 /*
2660			 * Target rq has tasks of equal or earlier deadline;
2661 * retrying does not release any lock and is unlikely
2662 * to yield a different result.
2663 */
2664 later_rq = NULL;
2665 break;
2666 }
2667
1baca4ce
JL
2668 /* Retry if something changed. */
2669 if (double_lock_balance(rq, later_rq)) {
2670 if (unlikely(task_rq(task) != rq ||
95158a89 2671 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
0b9d46fc 2672 task_on_cpu(rq, task) ||
13b5ab02 2673 !dl_task(task) ||
feffe5bb 2674 is_migration_disabled(task) ||
da0c1e65 2675 !task_on_rq_queued(task))) {
1baca4ce
JL
2676 double_unlock_balance(rq, later_rq);
2677 later_rq = NULL;
2678 break;
2679 }
2680 }
2681
2682 /*
2683 * If the rq we found has no -deadline task, or
2684 * its earliest one has a later deadline than our
2685 * task, the rq is a good one.
2686 */
973bee49 2687 if (dl_task_is_earliest_deadline(task, later_rq))
1baca4ce
JL
2688 break;
2689
2690 /* Otherwise we try again. */
2691 double_unlock_balance(rq, later_rq);
2692 later_rq = NULL;
2693 }
2694
2695 return later_rq;
2696}
2697
2698static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2699{
2700 struct task_struct *p;
2701
2702 if (!has_pushable_dl_tasks(rq))
2703 return NULL;
2704
f4478e7c 2705 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
1baca4ce 2706
09348d75
IM
2707 WARN_ON_ONCE(rq->cpu != task_cpu(p));
2708 WARN_ON_ONCE(task_current(rq, p));
2709 WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
1baca4ce 2710
09348d75
IM
2711 WARN_ON_ONCE(!task_on_rq_queued(p));
2712 WARN_ON_ONCE(!dl_task(p));
1baca4ce
JL
2713
2714 return p;
2715}
2716
2717/*
2718 * See if the non-running -deadline tasks on this rq
2719 * can be sent to some other CPU where they can preempt
2720 * and start executing.
2721 */
2722static int push_dl_task(struct rq *rq)
2723{
2724 struct task_struct *next_task;
2725 struct rq *later_rq;
c51b8ab5 2726 int ret = 0;
1baca4ce 2727
1baca4ce
JL
2728 next_task = pick_next_pushable_dl_task(rq);
2729 if (!next_task)
2730 return 0;
2731
2732retry:
1baca4ce
JL
2733 /*
2734 * If next_task preempts rq->curr, and rq->curr
2735 * can move away, it makes sense to just reschedule
2736 * without going further in pushing next_task.
2737 */
2738 if (dl_task(rq->curr) &&
2739 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
4b53a341 2740 rq->curr->nr_cpus_allowed > 1) {
8875125e 2741 resched_curr(rq);
1baca4ce
JL
2742 return 0;
2743 }
2744
49bef33e
VS
2745 if (is_migration_disabled(next_task))
2746 return 0;
2747
2748 if (WARN_ON(next_task == rq->curr))
2749 return 0;
2750
1baca4ce
JL
2751 /* We might release rq lock */
2752 get_task_struct(next_task);
2753
2754 /* Will lock the rq it'll find */
2755 later_rq = find_lock_later_rq(next_task, rq);
2756 if (!later_rq) {
2757 struct task_struct *task;
2758
2759 /*
2760 * We must check all this again, since
2761 * find_lock_later_rq releases rq->lock and it is
2762 * then possible that next_task has migrated.
2763 */
2764 task = pick_next_pushable_dl_task(rq);
a776b968 2765 if (task == next_task) {
1baca4ce
JL
2766 /*
2767 * The task is still there. We don't try
97fb7a0a 2768 * again, some other CPU will pull it when ready.
1baca4ce 2769 */
1baca4ce
JL
2770 goto out;
2771 }
2772
2773 if (!task)
2774 /* No more tasks */
2775 goto out;
2776
2777 put_task_struct(next_task);
2778 next_task = task;
2779 goto retry;
2780 }
2781
2782 deactivate_task(rq, next_task, 0);
2783 set_task_cpu(next_task, later_rq->cpu);
734387ec 2784 activate_task(later_rq, next_task, 0);
c51b8ab5 2785 ret = 1;
1baca4ce 2786
8875125e 2787 resched_curr(later_rq);
1baca4ce
JL
2788
2789 double_unlock_balance(rq, later_rq);
2790
2791out:
2792 put_task_struct(next_task);
2793
c51b8ab5 2794 return ret;
1baca4ce
JL
2795}
2796
2797static void push_dl_tasks(struct rq *rq)
2798{
4ffa08ed 2799 /* push_dl_task() will return true if it moved a -deadline task */
1baca4ce
JL
2800 while (push_dl_task(rq))
2801 ;
aab03e05
DF
2802}
2803
0ea60c20 2804static void pull_dl_task(struct rq *this_rq)
1baca4ce 2805{
0ea60c20 2806 int this_cpu = this_rq->cpu, cpu;
a7c81556 2807 struct task_struct *p, *push_task;
0ea60c20 2808 bool resched = false;
1baca4ce
JL
2809 struct rq *src_rq;
2810 u64 dmin = LONG_MAX;
2811
2812 if (likely(!dl_overloaded(this_rq)))
0ea60c20 2813 return;
1baca4ce
JL
2814
2815 /*
2816 * Match the barrier from dl_set_overloaded; this guarantees that if we
2817 * see overloaded we must also see the dlo_mask bit.
2818 */
2819 smp_rmb();
2820
2821 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2822 if (this_cpu == cpu)
2823 continue;
2824
2825 src_rq = cpu_rq(cpu);
2826
2827 /*
402de7fc 2828 * It looks racy, and it is! However, as in sched_rt.c,
1baca4ce
JL
2829 * we are fine with this.
2830 */
2831 if (this_rq->dl.dl_nr_running &&
2832 dl_time_before(this_rq->dl.earliest_dl.curr,
2833 src_rq->dl.earliest_dl.next))
2834 continue;
2835
2836 /* Might drop this_rq->lock */
a7c81556 2837 push_task = NULL;
1baca4ce
JL
2838 double_lock_balance(this_rq, src_rq);
2839
2840 /*
2841 * If there are no more pullable tasks on the
2842 * rq, we're done with it.
2843 */
2844 if (src_rq->dl.dl_nr_running <= 1)
2845 goto skip;
2846
8b5e770e 2847 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1baca4ce
JL
2848
2849 /*
2850 * We found a task to be pulled if:
2851 * - it preempts our current (if there's one),
2852 * - it will preempt the last one we pulled (if any).
2853 */
2854 if (p && dl_time_before(p->dl.deadline, dmin) &&
973bee49 2855 dl_task_is_earliest_deadline(p, this_rq)) {
1baca4ce 2856 WARN_ON(p == src_rq->curr);
da0c1e65 2857 WARN_ON(!task_on_rq_queued(p));
1baca4ce
JL
2858
2859 /*
2860 * Then we pull iff p has actually an earlier
2861 * deadline than the current task of its runqueue.
2862 */
2863 if (dl_time_before(p->dl.deadline,
2864 src_rq->curr->dl.deadline))
2865 goto skip;
2866
a7c81556
PZ
2867 if (is_migration_disabled(p)) {
2868 push_task = get_push_task(src_rq);
2869 } else {
2870 deactivate_task(src_rq, p, 0);
2871 set_task_cpu(p, this_cpu);
2872 activate_task(this_rq, p, 0);
2873 dmin = p->dl.deadline;
2874 resched = true;
2875 }
1baca4ce
JL
2876
2877 /* Is there any other task even earlier? */
2878 }
2879skip:
2880 double_unlock_balance(this_rq, src_rq);
a7c81556
PZ
2881
2882 if (push_task) {
f0498d2a 2883 preempt_disable();
5cb9eaa3 2884 raw_spin_rq_unlock(this_rq);
a7c81556
PZ
2885 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2886 push_task, &src_rq->push_work);
f0498d2a 2887 preempt_enable();
5cb9eaa3 2888 raw_spin_rq_lock(this_rq);
a7c81556 2889 }
1baca4ce
JL
2890 }
2891
0ea60c20
PZ
2892 if (resched)
2893 resched_curr(this_rq);
1baca4ce
JL
2894}
2895
2896/*
2897 * Since the task is not running and a reschedule is not going to happen
2898 * anytime soon on its runqueue, we try pushing it away now.
2899 */
2900static void task_woken_dl(struct rq *rq, struct task_struct *p)
2901{
0b9d46fc 2902 if (!task_on_cpu(rq, p) &&
1baca4ce 2903 !test_tsk_need_resched(rq->curr) &&
4b53a341 2904 p->nr_cpus_allowed > 1 &&
1baca4ce 2905 dl_task(rq->curr) &&
4b53a341 2906 (rq->curr->nr_cpus_allowed < 2 ||
6b0a563f 2907 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1baca4ce
JL
2908 push_dl_tasks(rq);
2909 }
2910}
2911
2912static void set_cpus_allowed_dl(struct task_struct *p,
713a2e21 2913 struct affinity_context *ctx)
1baca4ce 2914{
7f51412a 2915 struct root_domain *src_rd;
6c37067e 2916 struct rq *rq;
1baca4ce 2917
09348d75 2918 WARN_ON_ONCE(!dl_task(p));
1baca4ce 2919
7f51412a
JL
2920 rq = task_rq(p);
2921 src_rd = rq->rd;
2922 /*
2923 * Migrating a SCHED_DEADLINE task between exclusive
2924 * cpusets (different root_domains) entails a bandwidth
2925 * update. We already made space for us in the destination
2926 * domain (see cpuset_can_attach()).
2927 */
713a2e21 2928 if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
7f51412a
JL
2929 struct dl_bw *src_dl_b;
2930
2931 src_dl_b = dl_bw_of(cpu_of(rq));
2932 /*
2933		 * off. In the worst case, sched_setattr() may temporarily fail
2934 * off. In the worst case, sched_setattr() may temporary fail
2935 * until we complete the update.
2936 */
2937 raw_spin_lock(&src_dl_b->lock);
8c0944ce 2938 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
7f51412a
JL
2939 raw_spin_unlock(&src_dl_b->lock);
2940 }
2941
713a2e21 2942 set_cpus_allowed_common(p, ctx);
1baca4ce
JL
2943}
2944
2945/* Assumes rq->lock is held */
2946static void rq_online_dl(struct rq *rq)
2947{
2948 if (rq->dl.overloaded)
2949 dl_set_overload(rq);
6bfd6d72 2950
16b26943 2951 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
6bfd6d72 2952 if (rq->dl.dl_nr_running > 0)
d8206bb3 2953 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
1baca4ce
JL
2954}
2955
2956/* Assumes rq->lock is held */
2957static void rq_offline_dl(struct rq *rq)
2958{
2959 if (rq->dl.overloaded)
2960 dl_clear_overload(rq);
6bfd6d72 2961
d8206bb3 2962 cpudl_clear(&rq->rd->cpudl, rq->cpu);
16b26943 2963 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1baca4ce
JL
2964}
2965
a6c0e746 2966void __init init_sched_dl_class(void)
1baca4ce
JL
2967{
2968 unsigned int i;
2969
2970 for_each_possible_cpu(i)
2971 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2972 GFP_KERNEL, cpu_to_node(i));
2973}
2974
f9a25f77
MP
2975void dl_add_task_root_domain(struct task_struct *p)
2976{
2977 struct rq_flags rf;
2978 struct rq *rq;
2979 struct dl_bw *dl_b;
2980
de40f33e
DE
2981 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2982 if (!dl_task(p)) {
2983 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2984 return;
2985 }
2986
2987 rq = __task_rq_lock(p, &rf);
f9a25f77
MP
2988
2989 dl_b = &rq->rd->dl_bw;
2990 raw_spin_lock(&dl_b->lock);
2991
2992 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2993
2994 raw_spin_unlock(&dl_b->lock);
2995
f9a25f77
MP
2996 task_rq_unlock(rq, p, &rf);
2997}
2998
2999void dl_clear_root_domain(struct root_domain *rd)
3000{
3001 unsigned long flags;
3002
3003 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
3004 rd->dl_bw.total_bw = 0;
3005 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
3006}
3007
1baca4ce
JL
3008#endif /* CONFIG_SMP */
3009
aab03e05
DF
3010static void switched_from_dl(struct rq *rq, struct task_struct *p)
3011{
a649f237 3012 /*
209a0cbd
LA
3013 * task_non_contending() can start the "inactive timer" (if the 0-lag
3014 * time is in the future). If the task switches back to dl before
3015 * the "inactive timer" fires, it can continue to consume its current
3016 * runtime using its current deadline. If it stays outside of
3017 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
3018 * will reset the task parameters.
a649f237 3019 */
209a0cbd 3020 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2f7a0f58 3021 task_non_contending(&p->dl);
209a0cbd 3022
6c24849f
JL
3023 /*
3024 * In case a task is setscheduled out from SCHED_DEADLINE we need to
3025 * keep track of that on its cpuset (for correct bandwidth tracking).
3026 */
3027 dec_dl_tasks_cs(p);
3028
e117cb52
JL
3029 if (!task_on_rq_queued(p)) {
3030 /*
3031 * Inactive timer is armed. However, p is leaving DEADLINE and
3032 * might migrate away from this rq while continuing to run on
3033 * some other class. We need to remove its contribution from
3034 * this rq running_bw now, or sub_rq_bw (below) will complain.
3035 */
3036 if (p->dl.dl_non_contending)
3037 sub_running_bw(&p->dl, &rq->dl);
794a56eb 3038 sub_rq_bw(&p->dl, &rq->dl);
e117cb52 3039 }
8fd27231 3040
209a0cbd
LA
3041 /*
3042 * We cannot use inactive_task_timer() to invoke sub_running_bw()
3043 * at the 0-lag time, because the task could have been migrated
3044	 * while SCHED_OTHER in the meantime.
3045 */
3046 if (p->dl.dl_non_contending)
3047 p->dl.dl_non_contending = 0;
a5e7be3b 3048
1baca4ce
JL
3049 /*
3050 * Since this might be the only -deadline task on the rq,
3051 * this is the right place to try to pull some other one
97fb7a0a 3052 * from an overloaded CPU, if any.
1baca4ce 3053 */
cd660911
WL
3054 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
3055 return;
3056
02d8ec94 3057 deadline_queue_pull_task(rq);
aab03e05
DF
3058}
3059
1baca4ce
JL
3060/*
3061 * When switching to -deadline, we may overload the rq, then
3062 * we try to push someone off, if possible.
3063 */
aab03e05
DF
3064static void switched_to_dl(struct rq *rq, struct task_struct *p)
3065{
209a0cbd
LA
3066 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
3067 put_task_struct(p);
98b0a857 3068
6c24849f
JL
3069 /*
3070 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
3071 * track of that on its cpuset (for correct bandwidth tracking).
3072 */
3073 inc_dl_tasks_cs(p);
3074
98b0a857 3075 /* If p is not queued we will update its parameters at next wakeup. */
8fd27231 3076 if (!task_on_rq_queued(p)) {
794a56eb 3077 add_rq_bw(&p->dl, &rq->dl);
98b0a857 3078
8fd27231
LA
3079 return;
3080 }
72f9f3fd 3081
98b0a857 3082 if (rq->curr != p) {
1baca4ce 3083#ifdef CONFIG_SMP
4b53a341 3084 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
02d8ec94 3085 deadline_queue_push_tasks(rq);
619bd4a7 3086#endif
9916e214 3087 if (dl_task(rq->curr))
e23edc86 3088 wakeup_preempt_dl(rq, p, 0);
9916e214
PZ
3089 else
3090 resched_curr(rq);
d7d60709
VD
3091 } else {
3092 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
aab03e05
DF
3093 }
3094}
3095
1baca4ce
JL
3096/*
3097 * If the scheduling parameters of a -deadline task changed,
3098 * a push or pull operation might be needed.
3099 */
aab03e05
DF
3100static void prio_changed_dl(struct rq *rq, struct task_struct *p,
3101 int oldprio)
3102{
7ea98dfa
VS
3103 if (!task_on_rq_queued(p))
3104 return;
3105
aab03e05 3106#ifdef CONFIG_SMP
7ea98dfa
VS
3107 /*
3108 * This might be too much, but unfortunately
3109 * we don't have the old deadline value, and
3110	 * we can't tell whether the task is raising
3111	 * or lowering its prio, so...
3112 */
3113 if (!rq->dl.overloaded)
3114 deadline_queue_pull_task(rq);
1baca4ce 3115
7ea98dfa 3116 if (task_current(rq, p)) {
1baca4ce
JL
3117 /*
3118		 * If we now have an earlier deadline task than p,
3119 * then reschedule, provided p is still on this
3120 * runqueue.
3121 */
9916e214 3122 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
8875125e 3123 resched_curr(rq);
7ea98dfa 3124 } else {
1baca4ce 3125 /*
7ea98dfa
VS
3126 * Current may not be deadline in case p was throttled but we
3127 * have just replenished it (e.g. rt_mutex_setprio()).
3128 *
3129 * Otherwise, if p was given an earlier deadline, reschedule.
1baca4ce 3130 */
7ea98dfa
VS
3131 if (!dl_task(rq->curr) ||
3132 dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
3133 resched_curr(rq);
801ccdbf 3134 }
7ea98dfa
VS
3135#else
3136 /*
3137	 * We don't know if p has an earlier or later deadline, so let's blindly
3138 * set a (maybe not needed) rescheduling point.
3139 */
3140 resched_curr(rq);
3141#endif
aab03e05 3142}
aab03e05 3143
530bfad1
HJ
3144#ifdef CONFIG_SCHED_CORE
3145static int task_is_throttled_dl(struct task_struct *p, int cpu)
3146{
3147 return p->dl.dl_throttled;
3148}
3149#endif
3150
43c31ac0
PZ
3151DEFINE_SCHED_CLASS(dl) = {
3152
aab03e05
DF
3153 .enqueue_task = enqueue_task_dl,
3154 .dequeue_task = dequeue_task_dl,
3155 .yield_task = yield_task_dl,
3156
e23edc86 3157 .wakeup_preempt = wakeup_preempt_dl,
aab03e05
DF
3158
3159 .pick_next_task = pick_next_task_dl,
3160 .put_prev_task = put_prev_task_dl,
03b7fad1 3161 .set_next_task = set_next_task_dl,
aab03e05
DF
3162
3163#ifdef CONFIG_SMP
6e2df058 3164 .balance = balance_dl,
21f56ffe 3165 .pick_task = pick_task_dl,
aab03e05 3166 .select_task_rq = select_task_rq_dl,
209a0cbd 3167 .migrate_task_rq = migrate_task_rq_dl,
1baca4ce
JL
3168 .set_cpus_allowed = set_cpus_allowed_dl,
3169 .rq_online = rq_online_dl,
3170 .rq_offline = rq_offline_dl,
1baca4ce 3171 .task_woken = task_woken_dl,
a7c81556 3172 .find_lock_rq = find_lock_later_rq,
aab03e05
DF
3173#endif
3174
aab03e05
DF
3175 .task_tick = task_tick_dl,
3176 .task_fork = task_fork_dl,
aab03e05
DF
3177
3178 .prio_changed = prio_changed_dl,
3179 .switched_from = switched_from_dl,
3180 .switched_to = switched_to_dl,
6e998916
SG
3181
3182 .update_curr = update_curr_dl,
530bfad1
HJ
3183#ifdef CONFIG_SCHED_CORE
3184 .task_is_throttled = task_is_throttled_dl,
3185#endif
aab03e05 3186};
acb32132 3187
26762423
PL
3188/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
3189static u64 dl_generation;
3190
06a76fe0
NP
3191int sched_dl_global_validate(void)
3192{
3193 u64 runtime = global_rt_runtime();
3194 u64 period = global_rt_period();
3195 u64 new_bw = to_ratio(period, runtime);
26762423 3196 u64 gen = ++dl_generation;
06a76fe0 3197 struct dl_bw *dl_b;
a57415f5 3198 int cpu, cpus, ret = 0;
06a76fe0
NP
3199 unsigned long flags;
3200
3201 /*
3202	 * Here we want to check that the bandwidth is not being set to a
3203	 * value smaller than the currently allocated bandwidth in
3204 * any of the root_domains.
06a76fe0
NP
3205 */
3206 for_each_possible_cpu(cpu) {
3207 rcu_read_lock_sched();
26762423
PL
3208
3209 if (dl_bw_visited(cpu, gen))
3210 goto next;
3211
06a76fe0 3212 dl_b = dl_bw_of(cpu);
a57415f5 3213 cpus = dl_bw_cpus(cpu);
06a76fe0
NP
3214
3215 raw_spin_lock_irqsave(&dl_b->lock, flags);
a57415f5 3216 if (new_bw * cpus < dl_b->total_bw)
06a76fe0
NP
3217 ret = -EBUSY;
3218 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3219
26762423 3220next:
06a76fe0
NP
3221 rcu_read_unlock_sched();
3222
3223 if (ret)
3224 break;
3225 }
3226
3227 return ret;
3228}
3229
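A worked version of the check above (editor's addition, not kernel code), assuming the usual defaults of sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000 and the BW_SHIFT == 20 fixed point:

/* Editor's sketch: the global-limit check with default rt bandwidth. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t runtime  = 950000ULL * 1000;		/* 950 ms in ns */
	uint64_t period   = 1000000ULL * 1000;		/*   1 s  in ns */
	uint64_t new_bw   = (runtime << 20) / period;	/* ~0.95 of a CPU == 996147 */
	int      cpus     = 4;
	/* pretend three ~10% reservations were already admitted in this root domain */
	uint64_t total_bw = 3 * ((100ULL << 20) / 1000);

	/* the new global limit must still cover what is already allocated */
	printf("%s\n", (new_bw * (uint64_t)cpus < total_bw) ? "-EBUSY" : "ok");
	return 0;
}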
ba4f7bc1 3230static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
06a76fe0
NP
3231{
3232 if (global_rt_runtime() == RUNTIME_INF) {
3233 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
6a9d623a 3234 dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
06a76fe0
NP
3235 } else {
3236 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
3237 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
6a9d623a
VP
3238 dl_rq->max_bw = dl_rq->extra_bw =
3239 to_ratio(global_rt_period(), global_rt_runtime());
06a76fe0
NP
3240 }
3241}
3242
3243void sched_dl_do_global(void)
3244{
3245 u64 new_bw = -1;
26762423 3246 u64 gen = ++dl_generation;
06a76fe0
NP
3247 struct dl_bw *dl_b;
3248 int cpu;
3249 unsigned long flags;
3250
06a76fe0
NP
3251 if (global_rt_runtime() != RUNTIME_INF)
3252 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
3253
06a76fe0
NP
3254 for_each_possible_cpu(cpu) {
3255 rcu_read_lock_sched();
26762423
PL
3256
3257 if (dl_bw_visited(cpu, gen)) {
3258 rcu_read_unlock_sched();
3259 continue;
3260 }
3261
06a76fe0
NP
3262 dl_b = dl_bw_of(cpu);
3263
3264 raw_spin_lock_irqsave(&dl_b->lock, flags);
3265 dl_b->bw = new_bw;
3266 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3267
3268 rcu_read_unlock_sched();
3269 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
3270 }
3271}
3272
3273/*
3274 * We must be sure that accepting a new task (or allowing changing the
3275 * parameters of an existing one) is consistent with the bandwidth
3276 * constraints. If yes, this function also accordingly updates the currently
3277 * allocated bandwidth to reflect the new situation.
3278 *
3279 * This function is called while holding p's rq->lock.
3280 */
3281int sched_dl_overflow(struct task_struct *p, int policy,
3282 const struct sched_attr *attr)
3283{
06a76fe0
NP
3284 u64 period = attr->sched_period ?: attr->sched_deadline;
3285 u64 runtime = attr->sched_runtime;
3286 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
60ffd5ed
LA
3287 int cpus, err = -1, cpu = task_cpu(p);
3288 struct dl_bw *dl_b = dl_bw_of(cpu);
3289 unsigned long cap;
06a76fe0 3290
794a56eb
JL
3291 if (attr->sched_flags & SCHED_FLAG_SUGOV)
3292 return 0;
3293
06a76fe0
NP
3294 /* !deadline task may carry old deadline bandwidth */
3295 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
3296 return 0;
3297
3298 /*
3299	 * Whether a task enters, leaves, or stays -deadline but changes
3300	 * its parameters, we may need to update the total
3301	 * allocated bandwidth of the container accordingly.
3302 */
3303 raw_spin_lock(&dl_b->lock);
60ffd5ed
LA
3304 cpus = dl_bw_cpus(cpu);
3305 cap = dl_bw_capacity(cpu);
3306
06a76fe0 3307 if (dl_policy(policy) && !task_has_dl_policy(p) &&
60ffd5ed 3308 !__dl_overflow(dl_b, cap, 0, new_bw)) {
06a76fe0 3309 if (hrtimer_active(&p->dl.inactive_timer))
8c0944ce 3310 __dl_sub(dl_b, p->dl.dl_bw, cpus);
06a76fe0
NP
3311 __dl_add(dl_b, new_bw, cpus);
3312 err = 0;
3313 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
60ffd5ed 3314 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
06a76fe0
NP
3315 /*
3316 * XXX this is slightly incorrect: when the task
3317 * utilization decreases, we should delay the total
3318 * utilization change until the task's 0-lag point.
3319 * But this would require to set the task's "inactive
3320 * timer" when the task is not inactive.
3321 */
8c0944ce 3322 __dl_sub(dl_b, p->dl.dl_bw, cpus);
06a76fe0
NP
3323 __dl_add(dl_b, new_bw, cpus);
3324 dl_change_utilization(p, new_bw);
3325 err = 0;
3326 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
3327 /*
3328		 * Do not decrease the total deadline utilization here;
3329		 * switched_from_dl() will take care of it at the correct
3330 * (0-lag) time.
3331 */
3332 err = 0;
3333 }
3334 raw_spin_unlock(&dl_b->lock);
3335
3336 return err;
3337}
3338
3339/*
3340 * This function initializes the sched_dl_entity of a newly becoming
3341 * SCHED_DEADLINE task.
3342 *
3343 * Only the static values are considered here, the actual runtime and the
3344 * absolute deadline will be properly calculated when the task is enqueued
3345 * for the first time with its new policy.
3346 */
3347void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3348{
3349 struct sched_dl_entity *dl_se = &p->dl;
3350
3351 dl_se->dl_runtime = attr->sched_runtime;
3352 dl_se->dl_deadline = attr->sched_deadline;
3353 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
f9509153 3354 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
06a76fe0
NP
3355 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3356 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
3357}
3358
3359void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
3360{
3361 struct sched_dl_entity *dl_se = &p->dl;
3362
3363 attr->sched_priority = p->rt_priority;
3364 attr->sched_runtime = dl_se->dl_runtime;
3365 attr->sched_deadline = dl_se->dl_deadline;
3366 attr->sched_period = dl_se->dl_period;
f9509153
QP
3367 attr->sched_flags &= ~SCHED_DL_FLAGS;
3368 attr->sched_flags |= dl_se->flags;
06a76fe0
NP
3369}
3370
3371/*
3372 * This function validates the new parameters of a -deadline task.
3373 * We ask for the deadline to be non-zero and greater than or
3374 * equal to the runtime, and for the period to be zero or greater
3375 * than or equal to the deadline. Furthermore, we have to be sure that
3376 * user parameters are above the internal resolution of 1us (we
3377 * check sched_runtime only since it is always the smaller one) and
3378 * below 2^63 ns (we have to check both sched_deadline and
3379 * sched_period, as the latter can be zero).
3380 */
3381bool __checkparam_dl(const struct sched_attr *attr)
3382{
b4098bfc
PZ
3383 u64 period, max, min;
3384
794a56eb
JL
3385 /* special dl tasks don't actually use any parameter */
3386 if (attr->sched_flags & SCHED_FLAG_SUGOV)
3387 return true;
3388
06a76fe0
NP
3389 /* deadline != 0 */
3390 if (attr->sched_deadline == 0)
3391 return false;
3392
3393 /*
3394 * Since we truncate DL_SCALE bits, make sure we're at least
3395 * that big.
3396 */
3397 if (attr->sched_runtime < (1ULL << DL_SCALE))
3398 return false;
3399
3400 /*
3401 * Since we use the MSB for wrap-around and sign issues, make
3402 * sure it's not set (mind that period can be equal to zero).
3403 */
3404 if (attr->sched_deadline & (1ULL << 63) ||
3405 attr->sched_period & (1ULL << 63))
3406 return false;
3407
b4098bfc
PZ
3408 period = attr->sched_period;
3409 if (!period)
3410 period = attr->sched_deadline;
3411
06a76fe0 3412 /* runtime <= deadline <= period (if period != 0) */
b4098bfc 3413 if (period < attr->sched_deadline ||
06a76fe0
NP
3414 attr->sched_deadline < attr->sched_runtime)
3415 return false;
3416
b4098bfc
PZ
3417 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
3418 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
3419
3420 if (period < min || period > max)
3421 return false;
3422
06a76fe0
NP
3423 return true;
3424}
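The checks above are easy to misread because the period may legitimately be zero. Below is a standalone sketch that mirrors them in user space, assuming DL_SCALE is 10 (so the minimum runtime is 1024 ns, roughly 1us) and the default period limits declared near the top of this file (100us to about 4.2s); the SCHED_FLAG_SUGOV special case is omitted, and the function name is invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DL_SCALE	10				/* assumed, ~1us resolution */
#define PERIOD_MIN_NS	(100ULL * 1000)			/* sysctl default: 100 us   */
#define PERIOD_MAX_NS	((1ULL << 22) * 1000)		/* sysctl default: ~4.2 s   */

/* Mirrors __checkparam_dl(): runtime <= deadline <= period, sane bounds. */
static bool checkparam_dl_sketch(uint64_t runtime, uint64_t deadline, uint64_t period)
{
	if (!deadline)
		return false;
	if (runtime < (1ULL << DL_SCALE))		/* below internal resolution */
		return false;
	if ((deadline | period) & (1ULL << 63))		/* MSB reserved for wrap/sign */
		return false;
	if (!period)
		period = deadline;
	if (period < deadline || deadline < runtime)
		return false;
	return period >= PERIOD_MIN_NS && period <= PERIOD_MAX_NS;
}

int main(void)
{
	/* 10ms/50ms/100ms: accepted */
	printf("%d\n", checkparam_dl_sketch(10000000, 50000000, 100000000));
	/* 500ns runtime is below the 1us resolution: rejected */
	printf("%d\n", checkparam_dl_sketch(500, 50000000, 100000000));
	/* deadline larger than period: rejected */
	printf("%d\n", checkparam_dl_sketch(10000000, 200000000, 100000000));
	return 0;
}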

/*
 * This function clears the sched_dl_entity static params.
 */
static void __dl_clear_params(struct sched_dl_entity *dl_se)
{
	dl_se->dl_runtime = 0;
	dl_se->dl_deadline = 0;
	dl_se->dl_period = 0;
	dl_se->flags = 0;
	dl_se->dl_bw = 0;
	dl_se->dl_density = 0;

	dl_se->dl_throttled = 0;
	dl_se->dl_yielded = 0;
	dl_se->dl_non_contending = 0;
	dl_se->dl_overrun = 0;
	dl_se->dl_server = 0;

#ifdef CONFIG_RT_MUTEXES
	dl_se->pi_se = dl_se;
#endif
}

void init_dl_entity(struct sched_dl_entity *dl_se)
{
	RB_CLEAR_NODE(&dl_se->rb_node);
	init_dl_task_timer(dl_se);
	init_dl_inactive_task_timer(dl_se);
	__dl_clear_params(dl_se);
}

bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	if (dl_se->dl_runtime != attr->sched_runtime ||
	    dl_se->dl_deadline != attr->sched_deadline ||
	    dl_se->dl_period != attr->sched_period ||
	    dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
		return true;

	return false;
}

#ifdef CONFIG_SMP
int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
				 const struct cpumask *trial)
{
	unsigned long flags, cap;
	struct dl_bw *cur_dl_b;
	int ret = 1;

	rcu_read_lock_sched();
	cur_dl_b = dl_bw_of(cpumask_any(cur));
	cap = __dl_bw_capacity(trial);
	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
	if (__dl_overflow(cur_dl_b, cap, 0, 0))
		ret = 0;
	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
	rcu_read_unlock_sched();

	return ret;
}

enum dl_bw_request {
	dl_bw_req_check_overflow = 0,
	dl_bw_req_alloc,
	dl_bw_req_free
};

static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
{
	unsigned long flags;
	struct dl_bw *dl_b;
	bool overflow = false;

	rcu_read_lock_sched();
	dl_b = dl_bw_of(cpu);
	raw_spin_lock_irqsave(&dl_b->lock, flags);

	if (req == dl_bw_req_free) {
		__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
	} else {
		unsigned long cap = dl_bw_capacity(cpu);

		overflow = __dl_overflow(dl_b, cap, 0, dl_bw);

		if (req == dl_bw_req_alloc && !overflow) {
			/*
			 * We reserve space in the destination
			 * root_domain, as we can't fail after this point.
			 * We will free resources in the source root_domain
			 * later on (see set_cpus_allowed_dl()).
			 */
			__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
		}
	}

	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
	rcu_read_unlock_sched();

	return overflow ? -EBUSY : 0;
}

int dl_bw_check_overflow(int cpu)
{
	return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
}

int dl_bw_alloc(int cpu, u64 dl_bw)
{
	return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
}

void dl_bw_free(int cpu, u64 dl_bw)
{
	dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
}
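dl_bw_alloc() reserves bandwidth on the destination before a task is actually moved, so a later failure has to be undone with dl_bw_free(). The following self-contained model shows that reserve-then-rollback pattern; the struct and helper names are invented for illustration and the overflow rule is deliberately simplified relative to __dl_overflow().

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy per-domain accounting: total reserved utilization vs. capacity,
 * both in 2^20 fixed point, loosely mimicking dl_bw/total_bw above.
 */
struct dl_bw_model {
	uint64_t total_bw;
	uint64_t cap;
};

static int model_dl_bw_alloc(struct dl_bw_model *b, uint64_t bw)
{
	if (b->total_bw + bw > b->cap)
		return -EBUSY;		/* reservation would overflow */
	b->total_bw += bw;
	return 0;
}

static void model_dl_bw_free(struct dl_bw_model *b, uint64_t bw)
{
	b->total_bw -= bw;
}

int main(void)
{
	struct dl_bw_model dest = { .total_bw = 0, .cap = 1 << 20 };
	uint64_t task_bw = 104857;	/* ~10% of one CPU */

	if (model_dl_bw_alloc(&dest, task_bw) == 0) {
		/* ... suppose a later step fails: roll the reservation back */
		model_dl_bw_free(&dest, task_bw);
	}
	printf("total_bw=%llu\n", (unsigned long long)dest.total_bw);
	return 0;
}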
#endif

#ifdef CONFIG_SCHED_DEBUG
void print_dl_stats(struct seq_file *m, int cpu)
{
	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
}
#endif /* CONFIG_SCHED_DEBUG */