/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <[email protected]>
 *               Shaohua Li <[email protected]>
 *               Adam Belay <[email protected]>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static void (*pm_idle_old)(void);

static int enabled_devices;
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif
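
/*
 * cpuidle_kick_cpus() backs cpuidle_uninstall_idle_handler() below:
 * cpu_idle_wait() returns only once every CPU has left its current pass
 * through the idle loop, so afterwards no CPU can still be executing
 * cpuidle_idle_call() through a stale pm_idle pointer.
 */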

static int __cpuidle_register_device(struct cpuidle_device *dev);

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;

	/* check if the device is ready */
	if (!dev || !dev->enabled) {
		if (pm_idle_old)
			pm_idle_old();
		else
#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
			default_idle();
#else
			local_irq_enable();
#endif
		return;
	}

#if 0
	/* shows regressions, re-enable for 2.6.29 */
	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif
	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_state = target_state;
	dev->last_residency = target_state->enter(dev, target_state);
	/* the driver may have entered a different state; charge that one */
	if (dev->last_state)
		target_state = dev->last_state;

	target_state->time += (unsigned long long)dev->last_residency;
	target_state->usage++;

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev);
	trace_power_end(0);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		pm_idle = cpuidle_idle_call;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
		pm_idle = pm_idle_old;
		cpuidle_kick_cpus();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
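
/*
 * Typical external usage of the pause/resume pair (a sketch; see the
 * kerneldoc on cpuidle_enable_device()/cpuidle_disable_device() below):
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reconfigure dev->states[] ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */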

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;

	if (dev->enabled)
		return 0;
	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		return -EINVAL;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states[i].usage = 0;
		dev->states[i].time = 0;
	}
	dev->last_residency = 0;
	dev->last_state = NULL;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
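
/*
 * The smp_wmb() above orders the statistics resets and the cleared
 * last_state ahead of the dev->enabled store, so the idle path in
 * cpuidle_idle_call() never observes a half-initialized device once
 * it sees dev->enabled == 1.
 */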

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_curr_driver || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
{
	ktime_t t1, t2;
	s64 diff;
	int ret;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	ret = (int) diff;
	return ret;
}

static void poll_idle_init(struct cpuidle_device *dev)
{
	struct cpuidle_state *state = &dev->states[0];

	cpuidle_set_statedata(state, NULL);

	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = CPUIDLE_FLAG_POLL;
	state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_device *dev) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
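
/*
 * The poll state above is the shallowest option: it never stops the CPU,
 * it just spins in cpu_relax() with interrupts enabled until a reschedule
 * is needed, and reports the measured residency in microseconds (clamped
 * to INT_MAX). With exit_latency and target_residency both zero, a
 * governor can always fall back to it when latency constraints rule out
 * every real C-state.
 */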

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

	if (!sys_dev)
		return -EINVAL;
	if (!try_module_get(cpuidle_curr_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);

	/* state 0 is reserved for the poll state on archs with cpu_relax() */
	poll_idle_init(dev);

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(sys_dev))) {
		module_put(cpuidle_curr_driver->owner);
		return ret;
	}

	dev->registered = 1;
	return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
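
/*
 * A cpuidle driver typically calls this once per CPU after filling in the
 * per-CPU state table. A minimal sketch (my_enter_c1 and the parameter
 * values are made-up placeholders, not part of this file):
 *
 *	dev->cpu = cpu;
 *	dev->state_count = 2;
 *	dev->states[1].enter = my_enter_c1;
 *	dev->states[1].exit_latency = 10;	// worst-case exit, in us
 *	dev->states[1].target_residency = 20;	// break-even point, in us
 *	ret = cpuidle_register_device(dev);
 */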

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(sys_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_curr_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
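
/*
 * Kernel code expresses such latency constraints through PM QoS; a sketch
 * of this era's requirement API ("mydrv" and the 50us bound are
 * illustrative values only):
 *
 *	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv", 50);
 *	...
 *	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv");
 *
 * Each change fires the notifier chain, so the IPI above pulls every CPU
 * out of idle and the governor re-selects under the new bound.
 */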

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	pm_idle_old = pm_idle;

	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

core_initcall(cpuidle_init);