linux.git: kernel/sched/rt.c

/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

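/*
 * A note on the loop above (editorial, not from the original source):
 * hrtimer_forward() advances the timer expiry in whole periods and
 * returns how many periods it skipped over.  In the common case the
 * timer fires on time, overrun is 1, one period's worth of runtime is
 * replenished, and the second pass returns 0 and breaks out.  If the
 * callback was delayed across several period boundaries, overrun is
 * larger and a single do_sched_rt_period_timer() call replenishes them
 * all at once.
 */
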
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

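/*
 * Where def_rt_bandwidth gets its values (sketch; the actual call site is
 * sched_init() in kernel/sched/core.c, outside this file):
 *
 *      init_rt_bandwidth(&def_rt_bandwidth,
 *                        global_rt_period(), global_rt_runtime());
 *
 * With the default sysctls (sched_rt_period_us = 1000000,
 * sched_rt_runtime_us = 950000) RT tasks get at most 0.95s of CPU time
 * out of every 1s period.
 */
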
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
        struct rt_prio_array *array;
        int i;

        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->highest_prio.next = MAX_RT_PRIO;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
#endif

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

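/*
 * The always-set MAX_RT_PRIO bit is a sentinel: the bitmap holds
 * MAX_RT_PRIO+1 bits, so sched_find_first_bit() is guaranteed to find a
 * set bit, and a result of MAX_RT_PRIO simply means "no RT task queued"
 * without needing a separate emptiness check.
 */
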
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

void free_rt_sched_group(struct task_group *tg)
{
        int i;

        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);

        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth,
                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq, cpu_rq(i));
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}
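
/*
 * Layout recap (editorial): tg->rt_rq and tg->rt_se are nr_cpu_ids-sized
 * arrays of pointers.  On every CPU a group owns one rt_rq (the queue its
 * children run on) and one sched_rt_entity that represents the whole
 * group on its parent's rt_rq.  Note the bandwidth passed above is 0:
 * a new group cannot run RT tasks until an administrator grants it
 * runtime (e.g. via the cpu cgroup's rt_runtime_us file).
 */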

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
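
/*
 * Reader side of the ordering above: pull_rt_task() checks
 * rt_overloaded() (rto_count) first and only then walks rto_mask, so
 * publishing the mask before bumping the count ensures a non-zero count
 * is never seen while the corresponding mask bit is still missing.
 */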

static void update_rt_migration(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded) {
                        rt_set_overload(rq_of_rt_rq(rt_rq));
                        rt_rq->overloaded = 1;
                }
        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}
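
/*
 * Both helpers redirect rt_rq to the root rt_rq (&rq->rt) before
 * accounting: overload state and migratory counts are per-runqueue
 * properties, not per-group ones, so entities inside groups are counted
 * at the top level where the push/pull logic looks for them.
 */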

static inline int has_pushable_tasks(struct rq *rq)
{
        return !plist_head_empty(&rq->rt.pushable_tasks);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else
                rq->rt.highest_prio.next = MAX_RT_PRIO;
}
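
/*
 * pushable_tasks is a priority-sorted list (plist), so plist_first_entry()
 * always yields the highest-priority task that could be pushed away.
 * Caching its prio in highest_prio.next lets other CPUs' pull_rt_task()
 * judge whether a pull is worthwhile without taking this rq's lock.
 */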

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;

        return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)                                 \
        for (iter = container_of(&task_groups, typeof(*iter), list);   \
             (iter = next_task_group(iter)) &&                         \
             (rt_rq = iter->rt_rq[cpu_of(rq)]);)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
        list_add_rcu(&rt_rq->leaf_rt_rq_list,
                        &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
        list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct sched_rt_entity *rt_se;

        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, false);
                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se;
        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

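/*
 * rt_nr_boosted counts entities whose effective prio differs from their
 * normal prio, i.e. tasks boosted by priority inheritance.  A throttled
 * group that still contains boosted tasks is deliberately not treated as
 * throttled by rt_rq_throttled() above, so bandwidth enforcement cannot
 * stall a lock owner that a higher-priority waiter is blocked on.
 */
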
#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
        for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_running)
                resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
        int i, weight, more = 0;
        u64 rt_period;

        weight = cpumask_weight(rd->span);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                raw_spin_lock(&iter->rt_runtime_lock);
                /*
                 * Either all rqs have inf runtime and there's nothing to steal
                 * or __disable_runtime() below sets a specific rq to inf to
                 * indicate it's been disabled and disallow stealing.
                 */
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;

                /*
                 * From runqueues with spare time, take 1/n part of their
                 * spare time, but no more than our period.
                 */
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
                                raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
next:
                raw_spin_unlock(&iter->rt_runtime_lock);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        return more;
}
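
/*
 * Worked example (illustrative numbers): on a 4-CPU root domain
 * (weight = 4) with runtime = 950ms and period = 1s, suppose a neighbour
 * has used only 150ms of its budget.  Its spare time is 800ms, so we
 * would take 800/4 = 200ms -- but the cap kicks in first and we only take
 * 1000 - 950 = 50ms, topping our rt_runtime up to the full period and
 * breaking out of the loop early.
 */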

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
        struct root_domain *rd = rq->rd;
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * Either we're all inf and nobody needs to borrow, or we're
                 * already disabled and thus have nothing to do, or we have
                 * exactly the right amount of runtime to take out.
                 */
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);

                /*
                 * Calculate the difference between what we started out with
                 * and what we currently have, that's the amount of runtime
                 * we lent and now have to reclaim.
                 */
                want = rt_b->rt_runtime - rt_rq->rt_runtime;

                /*
                 * Greedy reclaim, take back as much as we can.
                 */
                for_each_cpu(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;

                        /*
                         * Can't reclaim from ourselves or disabled runqueues.
                         */
                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;

                        raw_spin_lock(&iter->rt_runtime_lock);
                        if (want > 0) {
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
                                want -= diff;
                        } else {
                                iter->rt_runtime -= want;
                                want -= want;
                        }
                        raw_spin_unlock(&iter->rt_runtime_lock);

                        if (!want)
                                break;
                }

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We cannot be left wanting - that would mean some runtime
                 * leaked out of the system.
                 */
                BUG_ON(want);
balanced:
                /*
                 * Disable all the borrow logic by pretending we have inf
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void disable_runtime(struct rq *rq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        __disable_runtime(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        /*
         * Reset each runqueue's bandwidth settings
         */
        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void enable_runtime(struct rq *rq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        __enable_runtime(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;

        switch (action) {
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                disable_runtime(cpu_rq(cpu));
                return NOTIFY_OK;

        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                enable_runtime(cpu_rq(cpu));
                return NOTIFY_OK;

        default:
                return NOTIFY_DONE;
        }
}
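
/*
 * Hotplug summary: before a CPU goes down, disable_runtime() claws back
 * everything its runqueues lent out and marks them RUNTIME_INF, so nobody
 * keeps borrowing from a dead CPU; when the CPU comes (back) online,
 * enable_runtime() restores the configured budget with a clean slate.
 */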

static int balance_runtime(struct rt_rq *rt_rq)
{
        int more = 0;

        if (!sched_feat(RT_RUNTIME_SHARE))
                return more;

        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
        }

        return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
        return 0;
}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1, throttled = 0;
        const struct cpumask *span;

        span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * FIXME: isolated CPUs should really leave the root task group,
         * whether they are isolcpus or were isolated via cpusets, lest
         * the timer run on a CPU which does not service all runqueues,
         * potentially leaving other CPUs indefinitely throttled.  If
         * isolation is really required, the user will turn the throttle
         * off to kill the perturbations it causes anyway.  Meanwhile,
         * this maintains functionality for boot and/or troubleshooting.
         */
        if (rt_b == &root_task_group.rt_bandwidth)
                span = cpu_online_mask;
#endif
        for_each_cpu(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                raw_spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        u64 runtime;

                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;

                                /*
                                 * Force a clock update if the CPU was idle,
                                 * lest wakeup -> unthrottle time accumulate.
                                 */
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
                                        rq->skip_clock_update = -1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                } else if (rt_rq->rt_nr_running) {
                        idle = 0;
                        if (!rt_rq_throttled(rt_rq))
                                enqueue = 1;
                }
                if (rt_rq->rt_throttled)
                        throttled = 1;

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                raw_spin_unlock(&rq->lock);
        }

        if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
                return 1;

        return idle;
}
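
/*
 * Replenishment in numbers (illustrative): with runtime = 950ms and
 * period = 1s, a throttled rt_rq that accumulated rt_time = 1.2s has
 * min(1.2s, 1 * 950ms) subtracted, leaving 250ms.  Since 250ms < runtime
 * the queue is unthrottled and re-enqueued, with a small debt carried
 * into the new period.
 */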

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio.curr;
#endif

        return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (runtime >= sched_rt_period(rt_rq))
                return 0;

        balance_runtime(rt_rq);
        runtime = sched_rt_runtime(rt_rq);
        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_time > runtime) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                /*
                 * Don't actually throttle groups that have no runtime assigned
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
                        static bool once = false;

                        rt_rq->rt_throttled = 1;

                        if (!once) {
                                once = true;
                                printk_sched("sched: RT throttling activated\n");
                        }
                } else {
                        /*
                         * In case we did anyway, make it go away,
                         * replenishment is a joke, since it will replenish us
                         * with exactly 0 ns.
                         */
                        rt_rq->rt_time = 0;
                }

                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}
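
/*
 * Practical effect, with the default sysctls mentioned earlier: a runaway
 * SCHED_FIFO spinner gets throttled after 950ms of every 1s period,
 * leaving ~50ms for non-RT work.  Writing -1 to
 * /proc/sys/kernel/sched_rt_runtime_us makes the runtime RUNTIME_INF and
 * disables the throttle entirely.
 */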

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;

        if (curr->sched_class != &rt_sched_class)
                return;

        delta_exec = rq->clock_task - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq->clock_task;
        cpuacct_charge(curr, delta_exec);

        sched_rt_avg_update(rq, delta_exec);

        if (!rt_bandwidth_enabled())
                return;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);

                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        rt_rq->rt_time += delta_exec;
                        if (sched_rt_runtime_exceeded(rt_rq))
                                resched_task(curr);
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                }
        }
}
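
/*
 * The final loop follows rt_se->parent upwards, so delta_exec is charged
 * to the task's own group rt_rq *and* every ancestor's rt_rq; any level
 * of the hierarchy can therefore exhaust its budget and throttle the
 * whole subtree below it.
 */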

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (rq->online && prio < prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (rq->online && rt_rq->highest_prio.curr != prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (prio < prev_prio)
                rt_rq->highest_prio.curr = prio;

        inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (rt_rq->rt_nr_running) {

                WARN_ON(prio < prev_prio);

                /*
                 * This may have been our highest task, and therefore
                 * we may have some recomputation to do
                 */
                if (prio == prev_prio) {
                        struct rt_prio_array *array = &rt_rq->active;

                        rt_rq->highest_prio.curr =
                                sched_find_first_bit(array->bitmap);
                }

        } else
                rt_rq->highest_prio.curr = MAX_RT_PRIO;

        dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        int prio = rt_se_prio(rt_se);

        WARN_ON(!rt_prio(prio));
        rt_rq->rt_nr_running++;

        inc_rt_prio(rt_rq, prio);
        inc_rt_migration(rt_se, rt_rq);
        inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;

        dec_rt_prio(rt_rq, rt_se_prio(rt_se));
        dec_rt_migration(rt_se, rt_rq);
        dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        /*
         * Don't enqueue the group if it's throttled, or when empty.
         * The latter is a consequence of the former when a child group
         * gets throttled and the current group doesn't have any other
         * active members.
         */
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;

        if (!rt_rq->rt_nr_running)
                list_add_leaf_rt_rq(rt_rq);

        if (head)
                list_add(&rt_se->run_list, queue);
        else
                list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
        if (!rt_rq->rt_nr_running)
                list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
        struct sched_rt_entity *back = NULL;

        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
        }

        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se);
        }
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        dequeue_rt_stack(rt_se);

        for_each_sched_rt_entity(rt_se) {
                struct rt_rq *rt_rq = group_rt_rq(rt_se);

                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se, false);
        }
}
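
/*
 * How the stack walk works: for_each_sched_rt_entity() only iterates
 * upwards (child to parent), so dequeue_rt_stack() first records the
 * reverse order in the ->back pointers, then dequeues from the topmost
 * ancestor down.  Re-enqueueing afterwards rebuilds each level's queue
 * position with the child's new state already in effect.
 */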

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (flags & ENQUEUE_WAKEUP)
                rt_se->timeout = 0;

        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);

        inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);

        dequeue_pushable_task(rq, p);

        dec_nr_running(rq);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
        if (on_rt_rq(rt_se)) {
                struct rt_prio_array *array = &rt_rq->active;
                struct list_head *queue = array->queue + rt_se_prio(rt_se);

                if (head)
                        list_move(&rt_se->run_list, queue);
                else
                        list_move_tail(&rt_se->run_list, queue);
        }
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se, head);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;
        int cpu;

        cpu = task_cpu(p);

        if (p->nr_cpus_allowed == 1)
                goto out;

        /* For anything but wake ups, just return the task_cpu */
        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = ACCESS_ONCE(rq->curr); /* unlocked access */

        /*
         * If the current task on @p's runqueue is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues. If the woken
         * task is a higher priority, then it will stay on this CPU
         * and the lower prio task should be moved to another CPU.
         * Even though this will probably make the lower prio task
         * lose its cache, we do not want to bounce a higher task
         * around just because it gave up its CPU, perhaps for a
         * lock?
         *
         * For equal prio tasks, we just let the scheduler sort it out.
         *
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away.
         *
         * This test is optimistic, if we get it wrong the load-balancer
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
            (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio) &&
            (p->nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);

                if (target != -1)
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
        if (rq->curr->nr_cpus_allowed == 1)
                return;

        if (p->nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;

        if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
                return;

        /*
         * There appears to be other cpus that can accept
         * current and none to run 'p', so let's reschedule
         * to try and push current away:
         */
        requeue_task_rt(rq, p, 1);
        resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * If:
         *
         * - the newly woken task is of equal priority to the current task
         * - the newly woken task is non-migratable while current is migratable
         * - current will be preempted on the next reschedule
         *
         * we should check to see if current can readily move to a different
         * cpu. If so, we will reschedule to allow the push logic to try
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
        if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
                check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (!rt_rq->rt_nr_running)
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock_task;

        return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct task_struct *p = _pick_next_task_rt(rq);

        /* The running task is never eligible for pushing */
        if (p)
                dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
        /*
         * We detect this state here so that we can avoid taking the RQ
         * lock again later if there is no need to push
         */
        rq->post_schedule = has_pushable_tasks(rq);
#endif

        return p;
}
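
/*
 * Picking is O(1) regardless of how many tasks are queued:
 * sched_find_first_bit() locates the highest populated priority and the
 * head of that list is taken.  With group scheduling the chosen entity
 * may itself be a group, in which case the do/while descends into the
 * child rt_rq and repeats until it reaches a task.
 */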

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
        if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct task_struct *next = NULL;
        struct sched_rt_entity *rt_se;
        struct rt_prio_array *array;
        struct rt_rq *rt_rq;
        int idx;

        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio <= idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p;

                        if (!rt_entity_is_task(rt_se))
                                continue;

                        p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
                        }
                }
                if (!next) {
                        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
                        goto next_idx;
                }
        }

        return next;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu = task_cpu(task);

        /* Make sure the mask is initialized first */
        if (unlikely(!lowest_mask))
                return -1;

        if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */

        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpumask_test_cpu(cpu, lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (!cpumask_test_cpu(this_cpu, lowest_mask))
                this_cpu = -1; /* Skip this_cpu opt if not among lowest */

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        int best_cpu;

                        /*
                         * "this_cpu" is cheaper to preempt than a
                         * remote processor.
                         */
                        if (this_cpu != -1 &&
                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return this_cpu;
                        }

                        best_cpu = cpumask_first_and(lowest_mask,
                                                     sched_domain_span(sd));
                        if (best_cpu < nr_cpu_ids) {
                                rcu_read_unlock();
                                return best_cpu;
                        }
                }
        }
        rcu_read_unlock();

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        if (this_cpu != -1)
                return this_cpu;

        cpu = cpumask_any(lowest_mask);
        if (cpu < nr_cpu_ids)
                return cpu;
        return -1;
}
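
/*
 * Two-stage selection, in short: cpupri_find() (kernel/sched/cpupri.c)
 * answers "which CPUs currently run something lower-priority than this
 * task?" in roughly O(1) by bucketing CPUs per priority level.  Within
 * that mask, this function prefers the task's last CPU (cache-hot), then
 * this_cpu, then the first match in each successively wider
 * SD_WAKE_AFFINE domain, and only then an arbitrary CPU.
 */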

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the mean time, task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpumask_test_cpu(lowest_rq->cpu,
                                                       tsk_cpus_allowed(task)) ||
                                     task_running(rq, task) ||
                                     !task->on_rq)) {

                                double_unlock_balance(rq, lowest_rq);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio.curr > task->prio)
                        break;

                /* try again */
                double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
        struct task_struct *p;

        if (!has_pushable_tasks(rq))
                return NULL;

        p = plist_first_entry(&rq->rt.pushable_tasks,
                              struct task_struct, pushable_tasks);

        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
        BUG_ON(p->nr_cpus_allowed <= 1);

        BUG_ON(!p->on_rq);
        BUG_ON(!rt_task(p));

        return p;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_pushable_task(rq);
        if (!next_task)
                return 0;

retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that the next_task slipped in with a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock
                 * so it is possible that next_task has migrated.
                 *
                 * We need to make sure that the task is still on the same
                 * run-queue and is also still the next task eligible for
                 * pushing.
                 */
                task = pick_next_pushable_task(rq);
                if (task_cpu(next_task) == rq->cpu && task == next_task) {
                        /*
                         * The task hasn't migrated, and is still the next
                         * eligible task, but we failed to find a run-queue
                         * to push it to. Do not retry in this case, since
                         * other cpus will pull from us when ready.
                         */
                        goto out;
                }

                if (!task)
                        /* No more tasks, just exit */
                        goto out;

                /*
                 * Something has shifted, try again.
                 */
                put_task_struct(next_task);
                next_task = task;
                goto retry;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);
        ret = 1;

        resched_task(lowest_rq->curr);

        double_unlock_balance(rq, lowest_rq);

out:
        put_task_struct(next_task);

        return ret;
}

static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT */
        while (push_rt_task(rq))
                ;
}

static int pull_rt_task(struct rq *this_rq)
{
        int this_cpu = this_rq->cpu, ret = 0, cpu;
        struct task_struct *p;
        struct rq *src_rq;

        if (likely(!rt_overloaded(this_rq)))
                return 0;

        for_each_cpu(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);

                /*
                 * Don't bother taking the src_rq->lock if the next highest
                 * task is known to be lower-priority than our current task.
                 * This may look racy, but if this value is about to go
                 * logically higher, the src_rq will push this task away.
                 * And if it's going logically lower, we do not care.
                 */
                if (src_rq->rt.highest_prio.next >=
                    this_rq->rt.highest_prio.curr)
                        continue;

                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * alter this_rq
                 */
                double_lock_balance(this_rq, src_rq);

                /*
                 * Are there still pullable RT tasks?
                 */
                if (src_rq->rt.rt_nr_running <= 1)
                        goto skip;

                p = pick_next_highest_task_rt(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->on_rq);

                        /*
                         * There's a chance that p is higher in priority
                         * than what's currently running on its cpu.
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
                         * current task on the run queue
                         */
                        if (p->prio < src_rq->curr->prio)
                                goto skip;

                        ret = 1;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         */
                }
skip:
                double_unlock_balance(this_rq, src_rq);
        }

        return ret;
}

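/*
 * Push/pull in one paragraph: a runqueue with more than one runnable RT
 * task is "overloaded" and tries to *push* its next-highest pushable task
 * to the lowest-priority CPU cpupri can find; a runqueue whose own
 * priority just dropped *pulls* the second-highest task from overloaded
 * neighbours that beat what it is about to run.  Together these keep the
 * N highest-priority RT tasks spread across N CPUs.
 */
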
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (rq->rt.highest_prio.curr > prev->prio)
                pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
        push_rt_tasks(rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            has_pushable_tasks(rq) &&
            p->nr_cpus_allowed > 1 &&
            rt_task(rq->curr) &&
            (rq->curr->nr_cpus_allowed < 2 ||
             rq->curr->prio <= p->prio))
                push_rt_tasks(rq);
}

static void set_cpus_allowed_rt(struct task_struct *p,
                                const struct cpumask *new_mask)
{
        struct rq *rq;
        int weight;

        BUG_ON(!rt_task(p));

        if (!p->on_rq)
                return;

        weight = cpumask_weight(new_mask);

        /*
         * Only update if the process changed whether it can migrate or not.
         */
        if ((p->nr_cpus_allowed > 1) == (weight > 1))
                return;

        rq = task_rq(p);

        /*
         * The process used to be able to migrate OR it can now migrate
         */
        if (weight <= 1) {
                if (!task_current(rq, p))
                        dequeue_pushable_task(rq, p);
                BUG_ON(!rq->rt.rt_nr_migratory);
                rq->rt.rt_nr_migratory--;
        } else {
                if (!task_current(rq, p))
                        enqueue_pushable_task(rq, p);
                rq->rt.rt_nr_migratory++;
        }

        update_rt_migration(&rq->rt);
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_set_overload(rq);

        __enable_runtime(rq);

        cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_clear_overload(rq);

        __disable_runtime(rq);

        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
        /*
         * If there are other RT tasks then we will reschedule
         * and the scheduling of the other RT tasks will handle
         * the balancing. But if we are the last RT task
         * we may need to handle the pulling of RT tasks
         * now.
         */
        if (!p->on_rq || rq->rt.rt_nr_running)
                return;

        if (pull_rt_task(rq))
                resched_task(rq->curr);
}

void init_sched_rt_class(void)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
                                        GFP_KERNEL, cpu_to_node(i));
        }
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
        int check_resched = 1;

        /*
         * If we are already running, then there's nothing
         * that needs to be done. But if we are not running
         * we may need to preempt the current running task.
         * If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
        if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
                if (rq->rt.overloaded && push_rt_task(rq) &&
                    /* Don't resched if we changed runqueues */
                    rq != task_rq(p))
                        check_resched = 0;
#endif /* CONFIG_SMP */
                if (check_resched && p->prio < rq->curr->prio)
                        resched_task(rq->curr);
        }
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
        if (!p->on_rq)
                return;

        if (rq->curr == p) {
#ifdef CONFIG_SMP
                /*
                 * If our priority decreases while running, we
                 * may need to pull tasks to this runqueue.
                 */
                if (oldprio < p->prio)
                        pull_rt_task(rq);
                /*
                 * If there's a higher priority task waiting to run
                 * then reschedule. Note, the above pull_rt_task
                 * can release the rq lock and p could migrate.
                 * Only reschedule if p is still on the same runqueue.
                 */
                if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
                        resched_task(p);
#else
                /* For UP simply resched on drop of prio */
                if (oldprio < p->prio)
                        resched_task(p);
#endif /* CONFIG_SMP */
        } else {
                /*
                 * This task is not running, but if it is
                 * greater than the current running task
                 * then reschedule.
                 */
                if (p->prio < rq->curr->prio)
                        resched_task(rq->curr);
        }
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
        unsigned long soft, hard;

        /* max may change after cur was read, this will be fixed next tick */
        soft = task_rlimit(p, RLIMIT_RTTIME);
        hard = task_rlimit_max(p, RLIMIT_RTTIME);

        if (soft != RLIM_INFINITY) {
                unsigned long next;

                if (p->rt.watchdog_stamp != jiffies) {
                        p->rt.timeout++;
                        p->rt.watchdog_stamp = jiffies;
                }

                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                if (p->rt.timeout > next)
                        p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
        }
}
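
/*
 * RLIMIT_RTTIME background (editorial): rt.timeout counts scheduler ticks
 * the task has run without sleeping (it is reset on wakeup in
 * enqueue_task_rt).  Once it passes the soft limit converted to ticks,
 * the expiry armed here lets the posix cpu-timer code deliver SIGXCPU;
 * per setrlimit(2), reaching the hard limit is fatal (SIGKILL).
 */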

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
        struct sched_rt_entity *rt_se = &p->rt;

        update_curr_rt(rq);

        watchdog(rq, p);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->rt.time_slice)
                return;

        p->rt.time_slice = RR_TIMESLICE;

        /*
         * Requeue to the end of queue if we (and all of our ancestors) are
         * not the only element on the queue
         */
        for_each_sched_rt_entity(rt_se) {
                if (rt_se->run_list.prev != rt_se->run_list.next) {
                        requeue_task_rt(rq, p, 0);
                        set_tsk_need_resched(p);
                        return;
                }
        }
}
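
/*
 * Slice detail: RR_TIMESLICE is defined in a shared header (at this point
 * in history, include/linux/sched/rt.h) as 100ms expressed in jiffies.
 * The loop above only rotates the task to the tail if some level of its
 * hierarchy has a sibling queued; a lone SCHED_RR task simply gets a
 * fresh slice and keeps running, since requeueing would be a no-op.
 */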

static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock_task;

        /* The running task is never eligible for pushing */
        dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
        /*
         * Time slice is 0 for SCHED_FIFO tasks
         */
        if (task->policy == SCHED_RR)
                return RR_TIMESLICE;
        else
                return 0;
}

const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,

        .set_cpus_allowed       = set_cpus_allowed_rt,
        .rq_online              = rq_online_rt,
        .rq_offline             = rq_offline_rt,
        .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_woken             = task_woken_rt,
        .switched_from          = switched_from_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,

        .get_rr_interval        = get_rr_interval_rt,

        .prio_changed           = prio_changed_rt,
        .switched_to            = switched_to_rt,
};
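
/*
 * The .next pointer chains the scheduling classes in strict priority
 * order (stop, rt, fair, idle at this point in history); the core
 * scheduler walks the chain until a class supplies a runnable task.
 * From userspace a task enters this class through sched_setscheduler();
 * a minimal, illustrative snippet (not part of this file):
 *
 *      struct sched_param sp = { .sched_priority = 10 };
 *
 *      if (sched_setscheduler(0, SCHED_FIFO, &sp))  // 0 == calling task
 *              perror("sched_setscheduler");
 *
 * This requires CAP_SYS_NICE or a suitable RLIMIT_RTPRIO.
 */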

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

void print_rt_stats(struct seq_file *m, int cpu)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        rcu_read_lock();
        for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
                print_rt_rq(m, cpu, rt_rq);
        rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */