/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched_fair.c)
 */

#ifdef CONFIG_SMP
static int select_task_rq_idle(struct task_struct *p, int sync)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
{
	resched_task(rq->idle);
}

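/*
 * The idle task is always runnable, so when the runqueue has nothing
 * else to offer we simply hand back the per-CPU idle thread kept in
 * rq->idle:
 */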
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
	/* adjust the active tasks as we might go into a long sleep */
	calc_load_account_active(rq);
	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
	spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	spin_lock_irq(&rq->lock);
}

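/*
 * The idle task carries no per-task scheduling state, so there is
 * nothing to update when it is switched out:
 */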
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

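/*
 * Idle tasks are pinned to their CPU and represent no load, so the
 * balancing callbacks below never move anything and report zero
 * load moved:
 */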
#ifdef CONFIG_SMP
static unsigned long
load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return 0;
}

static int
move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	return 0;
}
#endif /* CONFIG_SMP */

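/*
 * The idle task has no timeslice to account, so the periodic
 * scheduler tick has nothing to do here:
 */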
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

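/*
 * The idle task is never queued and keeps no runtime statistics, so
 * nothing needs refreshing when it becomes the current task:
 */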
static void set_curr_task_idle(struct rq *rq)
{
}

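/*
 * A task switched into the idle class should never keep the CPU from
 * anyone else, so trigger a reschedule right away:
 */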
static void switched_to_idle(struct rq *rq, struct task_struct *p,
			     int running)
{
	/* Can this actually happen?? */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

static void prio_changed_idle(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/* This can happen for hot plug CPUs */

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
static const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,

	.load_balance		= load_balance_idle,
	.move_one_task		= move_one_task_idle,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,

	/* no .task_new for idle tasks */
};