]>
Commit | Line | Data |
---|---|---|
81d549e0 LP |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* | |
3 | * PSCI CPU idle driver. | |
4 | * | |
5 | * Copyright (C) 2019 ARM Ltd. | |
6 | * Author: Lorenzo Pieralisi <[email protected]> | |
7 | */ | |
8 | ||
9 | #define pr_fmt(fmt) "CPUidle PSCI: " fmt | |
10 | ||
11 | #include <linux/cpuidle.h> | |
12 | #include <linux/cpumask.h> | |
13 | #include <linux/cpu_pm.h> | |
14 | #include <linux/kernel.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/of.h> | |
17 | #include <linux/of_device.h> | |
18 | #include <linux/psci.h> | |
ce85aef5 | 19 | #include <linux/pm_runtime.h> |
81d549e0 LP |
20 | #include <linux/slab.h> |
21 | ||
22 | #include <asm/cpuidle.h> | |
23 | ||
8554951a | 24 | #include "cpuidle-psci.h" |
81d549e0 LP |
25 | #include "dt_idle_states.h" |
26 | ||
/*
 * Per-CPU bookkeeping for the PSCI idle driver: the DT-parsed PSCI
 * suspend parameters (one per idle state) and, when a hierarchical
 * CPU topology is used in OSI mode, the attached PM-domain device.
 */
struct psci_cpuidle_data {
	u32 *psci_states;	/* PSCI power_state values, indexed by state idx */
	struct device *dev;	/* from psci_dt_attach_cpu(); may be NULL */
};

static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);

/* Domain idle state chosen for this CPU; 0 means "none selected". */
static DEFINE_PER_CPU(u32, domain_state);
/* Record the domain idle state to use on this CPU's next idle entry. */
static inline void psci_set_domain_state(u32 state)
{
	__this_cpu_write(domain_state, state);
}
39 | ||
/* Read back the domain idle state recorded for this CPU (0 if none). */
static inline u32 psci_get_domain_state(void)
{
	return __this_cpu_read(domain_state);
}
44 | ||
/*
 * Suspend the CPU via psci_cpu_suspend_enter() with PSCI parameter @state
 * for idle state index @idx; the CPU_PM_CPU_IDLE_ENTER_PARAM() helper
 * wraps the call with the CPU PM notifier handling.
 */
static inline int psci_enter_state(int idx, u32 state)
{
	return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state);
}
49 | ||
/*
 * Idle-entry handler used for the deepest state when the CPU belongs to
 * a hierarchical PM-domain topology (OSI mode).  The runtime PM reference
 * on the CPU's PM-domain device is dropped around the suspend call so the
 * domain may power off; a deeper, domain-selected state is presumably
 * published via psci_set_domain_state() by the PM-domain code elsewhere
 * (NOTE(review): the non-zero writer is outside this file — confirm).
 */
static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int idx)
{
	struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
	u32 *states = data->psci_states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	pm_runtime_put_sync_suspend(pd_dev);

	/* Prefer a domain-selected state; fall back to the CPU's own state. */
	state = psci_get_domain_state();
	if (!state)
		state = states[idx];

	ret = psci_enter_state(idx, state);

	/* Retake the PM-domain reference now that we are awake again. */
	pm_runtime_get_sync(pd_dev);

	/* Clear the domain state to start fresh when back from idle. */
	psci_set_domain_state(0);
	return ret;
}
9ffeb6d0 | 74 | |
81d549e0 LP |
75 | static int psci_enter_idle_state(struct cpuidle_device *dev, |
76 | struct cpuidle_driver *drv, int idx) | |
77 | { | |
8554951a | 78 | u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states); |
9ffeb6d0 | 79 | |
a0cf3194 | 80 | return psci_enter_state(idx, state[idx]); |
81d549e0 LP |
81 | } |
82 | ||
/*
 * Template driver: one copy is kmemdup()'d per CPU in psci_idle_init_cpu()
 * so that each CPU carries its own DT-derived idle state table.
 */
static struct cpuidle_driver psci_idle_driver __initdata = {
	.name = "psci_idle",
	.owner = THIS_MODULE,
	/*
	 * PSCI idle states rely on architectural WFI to
	 * be represented as state index 0.
	 */
	.states[0] = {
		.enter			= psci_enter_idle_state,
		.exit_latency		= 1,
		.target_residency	= 1,
		.power_usage		= UINT_MAX,
		.name			= "WFI",
		.desc			= "ARM WFI",
	}
};
99 | ||
/* DT match table: "arm,idle-state" nodes get the default enter handler. */
static const struct of_device_id psci_idle_state_match[] __initconst = {
	{ .compatible = "arm,idle-state",
	  .data = psci_enter_idle_state },
	{ },
};
105 | ||
9ffeb6d0 LP |
106 | static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state) |
107 | { | |
108 | int err = of_property_read_u32(np, "arm,psci-suspend-param", state); | |
109 | ||
110 | if (err) { | |
111 | pr_warn("%pOF missing arm,psci-suspend-param property\n", np); | |
112 | return err; | |
113 | } | |
114 | ||
115 | if (!psci_power_state_is_valid(*state)) { | |
116 | pr_warn("Invalid PSCI power state %#x\n", *state); | |
117 | return -EINVAL; | |
118 | } | |
119 | ||
120 | return 0; | |
121 | } | |
122 | ||
a0cf3194 UH |
/*
 * Parse the DT idle states of @cpu_node into a freshly allocated PSCI
 * power_state table and stash it in @cpu's per-cpu psci_cpuidle_data.
 *
 * @state_count is the number of DT idle states found by the caller
 * (dt_init_idle_driver()); index 0 is reserved for WFI and carries no
 * PSCI parameter.  With OSI support, the CPU is additionally attached
 * to its PM domain and the deepest state is rerouted to the
 * domain-aware enter handler.
 *
 * Returns 0 on success; on any failure the state table is freed and a
 * negative errno is returned.
 */
static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
					struct device_node *cpu_node,
					unsigned int state_count, int cpu)
{
	int i, ret = 0;
	u32 *psci_states;
	struct device_node *state_node;
	struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);

	state_count++; /* Add WFI state too */
	psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL);
	if (!psci_states)
		return -ENOMEM;

	/* Slot 0 (WFI) stays zero; DT state j fills table index j + 1. */
	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
		of_node_put(state_node);

		if (ret)
			goto free_mem;

		pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
	}

	/* Fewer DT nodes than the caller counted: treat as a setup error. */
	if (i != state_count) {
		ret = -ENODEV;
		goto free_mem;
	}

	/* Currently limit the hierarchical topology to be used in OSI mode. */
	if (psci_has_osi_support()) {
		data->dev = psci_dt_attach_cpu(cpu);
		if (IS_ERR(data->dev)) {
			ret = PTR_ERR(data->dev);
			goto free_mem;
		}

		/*
		 * Using the deepest state for the CPU to trigger a potential
		 * selection of a shared state for the domain, assumes the
		 * domain states are all deeper states.
		 */
		if (data->dev)
			drv->states[state_count - 1].enter =
				psci_enter_domain_idle_state;
	}

	/* Idle states parsed correctly, store them in the per-cpu struct. */
	data->psci_states = psci_states;
	return 0;

free_mem:
	kfree(psci_states);
	return ret;
}
182 | ||
a0cf3194 UH |
183 | static __init int psci_cpu_init_idle(struct cpuidle_driver *drv, |
184 | unsigned int cpu, unsigned int state_count) | |
9ffeb6d0 LP |
185 | { |
186 | struct device_node *cpu_node; | |
187 | int ret; | |
188 | ||
189 | /* | |
190 | * If the PSCI cpu_suspend function hook has not been initialized | |
191 | * idle states must not be enabled, so bail out | |
192 | */ | |
193 | if (!psci_ops.cpu_suspend) | |
194 | return -EOPNOTSUPP; | |
195 | ||
196 | cpu_node = of_cpu_device_node_get(cpu); | |
197 | if (!cpu_node) | |
198 | return -ENODEV; | |
199 | ||
a0cf3194 | 200 | ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu); |
9ffeb6d0 LP |
201 | |
202 | of_node_put(cpu_node); | |
203 | ||
204 | return ret; | |
205 | } | |
206 | ||
81d549e0 LP |
/*
 * Build and register a cpuidle driver instance for one CPU.
 *
 * Fails with -ENODEV when the CPU's enable-method is not "psci" or no DT
 * idle states beyond WFI exist; on any failure after allocation the
 * kmemdup()'d driver is freed before returning.
 */
static int __init psci_idle_init_cpu(int cpu)
{
	struct cpuidle_driver *drv;
	struct device_node *cpu_node;
	const char *enable_method;
	int ret = 0;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	/*
	 * Check whether the enable-method for the cpu is PSCI, fail
	 * if it is not.
	 */
	enable_method = of_get_property(cpu_node, "enable-method", NULL);
	if (!enable_method || (strcmp(enable_method, "psci")))
		ret = -ENODEV;

	of_node_put(cpu_node);
	if (ret)
		return ret;

	/* Per-CPU copy of the template so each CPU owns its state table. */
	drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/*
	 * Initialize idle states data, starting at index 1, since
	 * by default idle state 0 is the quiescent state reached
	 * by the cpu by executing the wfi instruction.
	 *
	 * If no DT idle states are detected (ret == 0) let the driver
	 * initialization fail accordingly since there is no reason to
	 * initialize the idle driver if only wfi is supported, the
	 * default architectural back-end already executes wfi
	 * on idle entry.
	 */
	ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
	if (ret <= 0) {
		ret = ret ? : -ENODEV;
		goto out_kfree_drv;
	}

	/*
	 * Initialize PSCI idle states.
	 */
	ret = psci_cpu_init_idle(drv, cpu, ret);
	if (ret) {
		pr_err("CPU %d failed to PSCI idle\n", cpu);
		goto out_kfree_drv;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto out_kfree_drv;

	return 0;

out_kfree_drv:
	kfree(drv);
	return ret;
}
272 | ||
273 | /* | |
274 | * psci_idle_init - Initializes PSCI cpuidle driver | |
275 | * | |
276 | * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails | |
277 | * to register cpuidle driver then rollback to cancel all CPUs | |
278 | * registration. | |
279 | */ | |
280 | static int __init psci_idle_init(void) | |
281 | { | |
282 | int cpu, ret; | |
283 | struct cpuidle_driver *drv; | |
284 | struct cpuidle_device *dev; | |
285 | ||
286 | for_each_possible_cpu(cpu) { | |
287 | ret = psci_idle_init_cpu(cpu); | |
288 | if (ret) | |
289 | goto out_fail; | |
290 | } | |
291 | ||
292 | return 0; | |
293 | ||
294 | out_fail: | |
295 | while (--cpu >= 0) { | |
296 | dev = per_cpu(cpuidle_devices, cpu); | |
297 | drv = cpuidle_get_cpu_driver(dev); | |
298 | cpuidle_unregister(drv); | |
299 | kfree(drv); | |
300 | } | |
301 | ||
302 | return ret; | |
303 | } | |
304 | device_initcall(psci_idle_init); |