// SPDX-License-Identifier: GPL-2.0
/*
 * cpuidle-powernv - idle state cpuidle driver.
 * Adapted from drivers/cpuidle/cpuidle-pseries
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/clockchips.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
#include <asm/runlatch.h>
#include <asm/cpuidle.h>

/*
 * Expose only those hardware idle states via the cpuidle framework
 * whose exit latency is below POWERNV_THRESHOLD_LATENCY_NS.
 */
#define POWERNV_THRESHOLD_LATENCY_NS	200000

static struct cpuidle_driver powernv_idle_driver = {
	.name  = "powernv_idle",
	.owner = THIS_MODULE,
};

static int max_idle_state __read_mostly;
static struct cpuidle_state *cpuidle_state_table __read_mostly;

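/*
 * PSSCR (Processor Stop Status and Control Register) value/mask pairs,
 * one per cpuidle state, used to request a specific stop level on
 * ISA v3.0 (POWER9) processors.
 */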
struct stop_psscr_table {
	u64 val;
	u64 mask;
};

static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly;

static u64 default_snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly;

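/*
 * Return the number of timebase ticks the snooze loop may poll before
 * giving up: the target residency of the next enabled deeper idle
 * state, or default_snooze_timeout if no deeper state is usable.
 */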
static u64 get_snooze_timeout(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv,
			      int index)
{
	int i;

	if (unlikely(!snooze_timeout_en))
		return default_snooze_timeout;

	for (i = index + 1; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;

		return s->target_residency * tb_ticks_per_usec;
	}

	return default_snooze_timeout;
}

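/*
 * Busy-poll with TIF_POLLING_NRFLAG set, so a remote CPU can wake this
 * thread by setting need_resched() rather than sending an IPI. Exit
 * when a task becomes runnable or the snooze timeout expires.
 */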
static int snooze_loop(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv,
		       int index)
{
	u64 snooze_exit_time;

	set_thread_flag(TIF_POLLING_NRFLAG);

	local_irq_enable();

	snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
	ppc64_runlatch_off();
	HMT_very_low();
	while (!need_resched()) {
		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
			/*
			 * Task has not woken up but we are exiting the polling
			 * loop anyway. Require a barrier after polling is
			 * cleared to order subsequent test of need_resched().
			 */
			clear_thread_flag(TIF_POLLING_NRFLAG);
			smp_mb();
			break;
		}
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);

	local_irq_disable();

	return index;
}

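/*
 * Enter the hardware "nap" state via the common POWER7/POWER8 idle
 * entry code; the thread is woken by an interrupt.
 */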
static int nap_loop(struct cpuidle_device *dev,
		    struct cpuidle_driver *drv,
		    int index)
{
	power7_idle_type(PNV_THREAD_NAP);

	return index;
}

/* Register for fastsleep only in oneshot mode of broadcast */
#ifdef CONFIG_TICK_ONESHOT
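/*
 * Fastsleep stops the decrementer, so local timer wakeups are offloaded
 * to the broadcast tick device beforehand (CPUIDLE_FLAG_TIMER_STOP).
 * Clearing LPCR_PECE1 prevents the decrementer from being treated as a
 * powersave exit condition while sleeping.
 */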
static int fastsleep_loop(struct cpuidle_device *dev,
			  struct cpuidle_driver *drv,
			  int index)
{
	unsigned long old_lpcr = mfspr(SPRN_LPCR);
	unsigned long new_lpcr;

	if (unlikely(system_state < SYSTEM_RUNNING))
		return index;

	new_lpcr = old_lpcr;
	/*
	 * Do not exit powersave upon decrementer as we've set up the timer
	 * offload.
	 */
	new_lpcr &= ~LPCR_PECE1;

	mtspr(SPRN_LPCR, new_lpcr);

	power7_idle_type(PNV_THREAD_SLEEP);

	mtspr(SPRN_LPCR, old_lpcr);

	return index;
}
#endif

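/*
 * Enter an ISA v3.0 "stop" state. The PSSCR value and mask that select
 * the requested stop level were recorded in stop_psscr_table[] at probe
 * time.
 */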
static int stop_loop(struct cpuidle_device *dev,
		     struct cpuidle_driver *drv,
		     int index)
{
	power9_idle_type(stop_psscr_table[index].val,
			 stop_psscr_table[index].mask);
	return index;
}

/*
 * Statically defined idle states: only snooze. Platform idle states
 * discovered from the device tree are appended at probe time.
 */
static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = snooze_loop },
};

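/*
 * CPU hotplug callbacks: enable the per-CPU cpuidle device when a CPU
 * comes online and disable it when the CPU goes down, under the cpuidle
 * pause/resume lock.
 */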
static int powernv_cpuidle_cpu_online(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_enable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}

static int powernv_cpuidle_cpu_dead(unsigned int cpu)
{
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (dev && cpuidle_get_driver()) {
		cpuidle_pause_and_lock();
		cpuidle_disable_device(dev);
		cpuidle_resume_and_unlock();
	}
	return 0;
}

/*
 * powernv_cpuidle_driver_init()
 */
static int powernv_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &powernv_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < max_idle_state; ++idle_state) {
		/* Is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	/*
	 * On the PowerNV platform cpu_present may be less than cpu_possible
	 * in cases when firmware detects the CPU, but it is not available to
	 * the OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not
	 * hot-pluggable at run time and hence cpu_devices are not created
	 * for those CPUs by the generic topology_init().
	 *
	 * drv->cpumask defaults to cpu_possible_mask in
	 * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where
	 * cpu_devices are not created for CPUs in cpu_possible_mask that
	 * cannot be hot-added later at run time.
	 *
	 * Calling cpuidle_register_device() on a CPU without a cpu_device
	 * is incorrect, so pass a correct CPU mask to the generic cpuidle
	 * driver.
	 */
	drv->cpumask = (struct cpumask *)cpu_present_mask;

	return 0;
}

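/*
 * Fill in one entry of the cpuidle state table and record the PSSCR
 * value/mask needed to request that state via the stop instruction.
 */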
static inline void add_powernv_state(int index, const char *name,
				     unsigned int flags,
				     int (*idle_fn)(struct cpuidle_device *,
						    struct cpuidle_driver *,
						    int),
				     unsigned int target_residency,
				     unsigned int exit_latency,
				     u64 psscr_val, u64 psscr_mask)
{
	strlcpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN);
	strlcpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN);
	powernv_states[index].flags = flags;
	powernv_states[index].target_residency = target_residency;
	powernv_states[index].exit_latency = exit_latency;
	powernv_states[index].enter = idle_fn;
	/* For power8 and below psscr_* will be 0 */
	stop_psscr_table[index].val = psscr_val;
	stop_psscr_table[index].mask = psscr_mask;
}

/*
 * Returns 0 if prop1_len == prop2_len; otherwise logs a warning and
 * returns -1.
 */
static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
					 const char *prop2, int prop2_len)
{
	if (prop1_len == prop2_len)
		return 0;

	pr_warn("cpuidle-powernv: array sizes don't match for %s and %s\n",
		prop1, prop2);
	return -1;
}

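/*
 * Walk the platform idle states discovered from the device tree and
 * append each eligible one to the cpuidle state table. Returns the
 * total number of states, including the statically defined snooze.
 */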
extern u32 pnv_get_supported_cpuidle_states(void);
static int powernv_add_idle_states(void)
{
	int nr_idle_states = 1; /* Snooze */
	int dt_idle_states;
	u32 has_stop_states = 0;
	int i;
	u32 supported_flags = pnv_get_supported_cpuidle_states();

	/* Currently we have snooze statically defined */
	if (nr_pnv_idle_states <= 0) {
		pr_warn("cpuidle-powernv: Only Snooze is available\n");
		goto out;
	}

	/* TODO: Count only states which are eligible for cpuidle */
	dt_idle_states = nr_pnv_idle_states;

	/*
	 * Since snooze is used as the first idle state, the maximum number
	 * of idle states allowed is CPUIDLE_STATE_MAX - 1.
	 */
	if (nr_pnv_idle_states > CPUIDLE_STATE_MAX - 1) {
		pr_warn("cpuidle-powernv: more idle states discovered than allowed\n");
		dt_idle_states = CPUIDLE_STATE_MAX - 1;
	}

	/*
	 * If the idle states use the stop instruction, probe for the PSSCR
	 * values and masks that are necessary to request each stop level.
	 */
	has_stop_states = (pnv_idle_states[0].flags &
			   (OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP));

	for (i = 0; i < dt_idle_states; i++) {
		unsigned int exit_latency, target_residency;
		bool stops_timebase = false;
		struct pnv_idle_states_t *state = &pnv_idle_states[i];

		/*
		 * Skip any platform idle state whose flags are not within
		 * the supported_flags mask.
		 */
		if ((state->flags & supported_flags) != state->flags)
			continue;
		/*
		 * If an idle state has an exit latency beyond
		 * POWERNV_THRESHOLD_LATENCY_NS, don't use it in cpuidle.
		 */
		if (state->latency_ns > POWERNV_THRESHOLD_LATENCY_NS)
			continue;
		/*
		 * Firmware passes residency and latency values in ns;
		 * cpuidle expects them in us.
		 */
		exit_latency = DIV_ROUND_UP(state->latency_ns, 1000);
		target_residency = DIV_ROUND_UP(state->residency_ns, 1000);

		if (has_stop_states && !(state->valid))
			continue;

		if (state->flags & OPAL_PM_TIMEBASE_STOP)
			stops_timebase = true;

		if (state->flags & OPAL_PM_NAP_ENABLED) {
			/* Add NAP state */
			add_powernv_state(nr_idle_states, "Nap",
					  CPUIDLE_FLAG_NONE, nap_loop,
					  target_residency, exit_latency, 0, 0);
		} else if (has_stop_states && !stops_timebase) {
			add_powernv_state(nr_idle_states, state->name,
					  CPUIDLE_FLAG_NONE, stop_loop,
					  target_residency, exit_latency,
					  state->psscr_val,
					  state->psscr_mask);
		}

		/*
		 * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
		 * within this config dependency check.
		 */
#ifdef CONFIG_TICK_ONESHOT
		else if (state->flags & OPAL_PM_SLEEP_ENABLED ||
			 state->flags & OPAL_PM_SLEEP_ENABLED_ER1) {
			/* Add FASTSLEEP state */
			add_powernv_state(nr_idle_states, "FastSleep",
					  CPUIDLE_FLAG_TIMER_STOP,
					  fastsleep_loop,
					  target_residency, exit_latency, 0, 0);
		} else if (has_stop_states && stops_timebase) {
			add_powernv_state(nr_idle_states, state->name,
					  CPUIDLE_FLAG_TIMER_STOP, stop_loop,
					  target_residency, exit_latency,
					  state->psscr_val,
					  state->psscr_mask);
		}
#endif
		else
			continue;
		nr_idle_states++;
	}
out:
	return nr_idle_states;
}

/*
 * powernv_idle_probe()
 * Choose the idle state table and probe the device tree for further
 * platform idle states.
 */
static int powernv_idle_probe(void)
{
	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_OPAL)) {
		cpuidle_state_table = powernv_states;
		/* Device tree can indicate more idle states */
		max_idle_state = powernv_add_idle_states();
		default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
		if (max_idle_state > 1)
			snooze_timeout_en = true;
	} else
		return -ENODEV;

	return 0;
}

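/*
 * Register the driver with the cpuidle core and hook up the CPU hotplug
 * callbacks so per-CPU cpuidle devices follow CPU online/offline
 * transitions.
 */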
static int __init powernv_processor_idle_init(void)
{
	int retval;

	retval = powernv_idle_probe();
	if (retval)
		return retval;

	powernv_cpuidle_driver_init();
	retval = cpuidle_register(&powernv_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of powernv driver failed.\n");
		return retval;
	}

	retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					   "cpuidle/powernv:online",
					   powernv_cpuidle_cpu_online, NULL);
	WARN_ON(retval < 0);
	retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD,
					   "cpuidle/powernv:dead", NULL,
					   powernv_cpuidle_cpu_dead);
	WARN_ON(retval < 0);
	printk(KERN_DEBUG "powernv_idle_driver registered\n");
	return 0;
}

device_initcall(powernv_processor_idle_init);