/*
 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
 *
 * Created by:  Nicolas Pitre, October 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:   (C) 2012  ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <linux/arm-cci.h>

#include "spc.h"

/* SCC conf registers */
#define RESET_CTRL			0x018
#define RESET_A15_NCORERESET(cpu)	(1 << (2 + (cpu)))
#define RESET_A7_NCORERESET(cpu)	(1 << (16 + (cpu)))

#define A15_CONF			0x400
#define A7_CONF				0x500
#define SYS_INFO			0x700
#define SPC_BASE			0xb00

static void __iomem *scc;

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number, which trips lockdep
 * debugging.
 */
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

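/*
 * TC2 couples a 2-CPU Cortex-A15 cluster with a 3-CPU Cortex-A7
 * cluster, which is where the two limits below come from.
 */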
#define TC2_CLUSTERS			2
#define TC2_MAX_CPUS_PER_CLUSTER	3

static unsigned int tc2_nr_cpus[TC2_CLUSTERS];

/* Keep per-cpu usage count to cope with unordered up/down requests */
static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];

#define tc2_cluster_unused(cluster) \
	(!tc2_pm_use_count[0][cluster] && \
	 !tc2_pm_use_count[1][cluster] && \
	 !tc2_pm_use_count[2][cluster])

static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&tc2_pm_lock);

	if (tc2_cluster_unused(cluster))
		ve_spc_powerdown(cluster, false);

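	/*
	 * First "up" request for this CPU: latch the resume address and
	 * arm its wakeup IRQ, so that a pending interrupt makes the SPC
	 * power the core up and drop it into mcpm_entry_point.
	 */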
	tc2_pm_use_count[cpu][cluster]++;
	if (tc2_pm_use_count[cpu][cluster] == 1) {
		ve_spc_set_resume_addr(cluster, cpu,
				       virt_to_phys(mcpm_entry_point));
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	} else if (tc2_pm_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_enable();

	return 0;
}

static void tc2_pm_down(u64 residency)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&tc2_pm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	tc2_pm_use_count[cpu][cluster]--;
	if (tc2_pm_use_count[cpu][cluster] == 0) {
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
		if (tc2_cluster_unused(cluster)) {
			ve_spc_powerdown(cluster, true);
			ve_spc_global_wakeup_irq(true);
			last_man = true;
		}
	} else if (tc2_pm_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	/*
	 * If the CPU is committed to power down, make sure the power
	 * controller is in charge of waking it up upon IRQ: cut the
	 * IRQ lines from the GIC CPU interface to the CPU by disabling
	 * the GIC CPU interface, so that WFI cannot complete behind
	 * the power controller's back.
	 */
	if (!skip_wfi)
		gic_cpu_if_down();

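	/*
	 * The last man tears the whole cluster down: it must flush all
	 * cache levels and detach from the CCI interconnect, while every
	 * other CPU only flushes to the level of unification.
	 */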
	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&tc2_pm_lock);

		if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
			/*
			 * On the Cortex-A15 we need to disable
			 * L2 prefetching before flushing the cache.
			 */
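			/*
			 * The CP15 write below (c15, c0, 3 is the A15's L2
			 * prefetch control register) is followed by ISB/DSB
			 * so it takes effect before the flush starts.
			 */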
			asm volatile(
			"mcr	p15, 1, %0, c15, c0, 3 \n\t"
			"isb	\n\t"
			"dsb	"
			: : "r" (0x400) );
		}

		v7_exit_coherency_flush(all);

		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/*
		 * If we were the last man, undo any setup done previously.
		 */
		if (last_man) {
			ve_spc_powerdown(cluster, false);
			ve_spc_global_wakeup_irq(false);
		}

		arch_spin_unlock(&tc2_pm_lock);

		v7_exit_coherency_flush(louis);
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
}

static void tc2_pm_power_down(void)
{
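	/* No expected-residency hint; tc2_pm_down() does not use it yet. */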
	tc2_pm_down(0);
}

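/*
 * RESET_CTRL exposes active-low nCORERESET bits: a clear bit means the
 * corresponding core is still held in reset.
 */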
static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
{
	u32 mask = cluster ?
		  RESET_A7_NCORERESET(cpu)
		: RESET_A15_NCORERESET(cpu);

	return !(readl_relaxed(scc + RESET_CTRL) & mask);
}

#define POLL_MSEC 10
#define TIMEOUT_MSEC 1000

static int tc2_pm_power_down_finish(unsigned int cpu, unsigned int cluster)
{
	unsigned tries;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
		/*
		 * Only examine the hardware state if the target CPU has
		 * caught up at least as far as tc2_pm_down():
		 */
		if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) {
			pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
				 __func__, cpu, cluster,
				 readl_relaxed(scc + RESET_CTRL));

			/*
			 * We need the CPU to reach WFI, but the power
			 * controller may put the cluster in reset and
			 * power it off as soon as that happens, before
			 * we have a chance to see STANDBYWFI.
			 *
			 * So we need to check for both conditions:
			 */
			if (tc2_core_in_reset(cpu, cluster) ||
			    ve_spc_cpu_in_wfi(cpu, cluster))
				return 0; /* success: the CPU is halted */
		}

		/* Otherwise, wait and retry: */
		msleep(POLL_MSEC);
	}

	return -ETIMEDOUT; /* timeout */
}

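/*
 * Unlike a plain power-down, a suspended CPU is expected to come back:
 * latch the resume address first so that wakeup re-enters the kernel
 * through mcpm_entry_point.
 */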
static void tc2_pm_suspend(u64 residency)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
	tc2_pm_down(residency);
}

static void tc2_pm_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long flags;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	local_irq_save(flags);
	arch_spin_lock(&tc2_pm_lock);

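	/*
	 * If the whole cluster was down, this CPU is the first one back
	 * up: revoke the cluster powerdown request and the global wakeup
	 * IRQ that were armed on the way down.
	 */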
	if (tc2_cluster_unused(cluster)) {
		ve_spc_powerdown(cluster, false);
		ve_spc_global_wakeup_irq(false);
	}

	if (!tc2_pm_use_count[cpu][cluster])
		tc2_pm_use_count[cpu][cluster] = 1;

	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_restore(flags);
}

static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.power_up		= tc2_pm_power_up,
	.power_down		= tc2_pm_power_down,
	.power_down_finish	= tc2_pm_power_down_finish,
	.suspend		= tc2_pm_suspend,
	.powered_up		= tc2_pm_powered_up,
};

static bool __init tc2_pm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
		pr_err("%s: boot CPU is out of bounds!\n", __func__);
		return false;
	}
	tc2_pm_use_count[cpu][cluster] = 1;
	return true;
}

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 */
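/*
 * The MCPM entry code calls this with the affinity level to set up in r0
 * and no stack available, hence the __naked, pure-assembly body: for the
 * cluster level (r0 == 1) it tail-calls cci_enable_port_for_self,
 * otherwise it simply returns.
 */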
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
"	cmp	r0, #1 \n"
"	bxne	lr \n"
"	b	cci_enable_port_for_self ");
}

static int __init tc2_pm_init(void)
{
	int ret, irq;
	u32 a15_cluster_id, a7_cluster_id, sys_info;
	struct device_node *np;

	/*
	 * The power management-related features are hidden behind
	 * SCC registers. We need to extract runtime information like
	 * cluster ids and the number of CPUs actually available in
	 * each cluster.
	 */
	np = of_find_compatible_node(NULL, NULL,
				     "arm,vexpress-scc,v2p-ca15_a7");
	scc = of_iomap(np, 0);
	if (!scc)
		return -ENODEV;

	a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
	a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
	if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
		return -EINVAL;

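	/*
	 * SYS_INFO advertises how many cores are actually fitted:
	 * the A15 count in bits [19:16], the A7 count in bits [23:20].
	 */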
	sys_info = readl_relaxed(scc + SYS_INFO);
	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;

	irq = irq_of_parse_and_map(np, 0);

	/*
	 * A subset of the SCC registers is also used to communicate
	 * with the SPC (power controller). We need to be able to
	 * drive it very early in the boot process to power up
	 * processors, so we initialize the SPC driver here.
	 */
	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
	if (ret)
		return ret;

	if (!cci_probed())
		return -ENODEV;

	if (!tc2_pm_usage_count_init())
		return -EINVAL;

	ret = mcpm_platform_register(&tc2_pm_power_ops);
	if (!ret) {
		mcpm_sync_init(tc2_pm_power_up_setup);
		pr_info("TC2 power management initialized\n");
	}
	return ret;
}

early_initcall(tc2_pm_init);