]>
Commit | Line | Data |
---|---|---|
825482ad JCD |
1 | /* |
2 | * QEMU support -- ARM Power Control specific functions. | |
3 | * | |
4 | * Copyright (c) 2016 Jean-Christophe Dubois | |
5 | * | |
6 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
7 | * See the COPYING file in the top-level directory. | |
8 | * | |
9 | */ | |
10 | ||
11 | #include "qemu/osdep.h" | |
a9c94277 MA |
12 | #include "cpu.h" |
13 | #include "cpu-qom.h" | |
825482ad JCD |
14 | #include "internals.h" |
15 | #include "arm-powerctl.h" | |
03dd024f | 16 | #include "qemu/log.h" |
062ba099 | 17 | #include "qemu/main-loop.h" |
825482ad JCD |
18 | |
#ifndef DEBUG_ARM_POWERCTL
#define DEBUG_ARM_POWERCTL 0
#endif

/*
 * Debug tracing helper: prints to stderr, prefixed with the calling
 * function's name. Compiled out (the constant-false condition is
 * folded away) unless DEBUG_ARM_POWERCTL is defined non-zero.
 *
 * Uses the standard C99 variadic-macro form with ##__VA_ARGS__
 * (GNU/Clang extension for swallowing the comma when no arguments
 * follow fmt) instead of the GNU-only named-variadic `args...` form.
 */
#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_ARM_POWERCTL) { \
            fprintf(stderr, "[ARM]%s: " fmt , __func__, ##__VA_ARGS__); \
        } \
    } while (0)
29 | ||
30 | CPUState *arm_get_cpu_by_id(uint64_t id) | |
31 | { | |
32 | CPUState *cpu; | |
33 | ||
34 | DPRINTF("cpu %" PRId64 "\n", id); | |
35 | ||
36 | CPU_FOREACH(cpu) { | |
37 | ARMCPU *armcpu = ARM_CPU(cpu); | |
38 | ||
39 | if (armcpu->mp_affinity == id) { | |
40 | return cpu; | |
41 | } | |
42 | } | |
43 | ||
44 | qemu_log_mask(LOG_GUEST_ERROR, | |
45 | "[ARM]%s: Requesting unknown CPU %" PRId64 "\n", | |
46 | __func__, id); | |
47 | ||
48 | return NULL; | |
49 | } | |
50 | ||
062ba099 AB |
/*
 * Parameter block handed from arm_set_cpu_on() to the target vCPU's
 * context via async_run_on_cpu(); freed by the async work function
 * once the values have been applied.
 */
struct CpuOnInfo {
    uint64_t entry;      /* guest address the new CPU starts executing at */
    uint64_t context_id; /* value delivered to the new CPU in r0 / x0 */
    uint32_t target_el;  /* exception level (1..3) to enter */
    bool target_aa64;    /* true: start in AArch64 state; false: AArch32 */
};
57 | ||
58 | ||
/*
 * Work item queued by arm_set_cpu_on(), run in the target vCPU's own
 * context: reset the CPU, configure the register width (AArch64 or
 * AArch32) of the exception levels below the entry EL, set the
 * security state, load the context id and entry point, and finally
 * mark the CPU as powered on. Frees the CpuOnInfo parameter block.
 */
static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
                                      run_on_cpu_data data)
{
    ARMCPU *target_cpu = ARM_CPU(target_cpu_state);
    struct CpuOnInfo *info = (struct CpuOnInfo *) data.host_ptr;

    /* Initialize the cpu we are turning on */
    cpu_reset(target_cpu_state);
    target_cpu_state->halted = 0;

    if (info->target_aa64) {
        if ((info->target_el < 3) && arm_feature(&target_cpu->env,
                                                 ARM_FEATURE_EL3)) {
            /*
             * As target mode is AArch64, we need to set lower
             * exception level (the requested level 2) to AArch64
             */
            target_cpu->env.cp15.scr_el3 |= SCR_RW;
        }

        if ((info->target_el < 2) && arm_feature(&target_cpu->env,
                                                 ARM_FEATURE_EL2)) {
            /*
             * As target mode is AArch64, we need to set lower
             * exception level (the requested level 1) to AArch64
             */
            target_cpu->env.cp15.hcr_el2 |= HCR_RW;
        }

        /* Enter the requested EL in AArch64 state */
        target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true);
    } else {
        /*
         * We are requested to boot in AArch32 mode: map the target EL
         * to the corresponding AArch32 CPU mode (index 0 is unused —
         * target_el is asserted to be 1..3 by the caller).
         */
        static const uint32_t mode_for_el[] = { 0,
                                                ARM_CPU_MODE_SVC,
                                                ARM_CPU_MODE_HYP,
                                                ARM_CPU_MODE_SVC };

        cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M,
                   CPSRWriteRaw);
    }

    if (info->target_el == 3) {
        /* Processor is in secure mode */
        target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
    } else {
        /* Processor is not in secure mode */
        target_cpu->env.cp15.scr_el3 |= SCR_NS;

        /*
         * If QEMU is providing the equivalent of EL3 firmware, then we need
         * to make sure a CPU targeting EL2 comes out of reset with a
         * functional HVC insn.
         */
        if (arm_feature(&target_cpu->env, ARM_FEATURE_EL3)
            && info->target_el == 2) {
            target_cpu->env.cp15.scr_el3 |= SCR_HCE;
        }
    }

    /* We check if the started CPU is now at the correct level */
    assert(info->target_el == arm_current_el(&target_cpu->env));

    /* Deliver the context id in the first argument register */
    if (info->target_aa64) {
        target_cpu->env.xregs[0] = info->context_id;
    } else {
        target_cpu->env.regs[0] = info->context_id;
    }

    /* Start the new CPU at the requested address */
    cpu_set_pc(target_cpu_state, info->entry);

    /* This work item owns the parameter block; release it */
    g_free(info);

    /* Finally set the power status */
    assert(qemu_mutex_iothread_locked());
    target_cpu->power_state = PSCI_ON;
}
136 | ||
825482ad JCD |
137 | int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, |
138 | uint32_t target_el, bool target_aa64) | |
139 | { | |
140 | CPUState *target_cpu_state; | |
141 | ARMCPU *target_cpu; | |
062ba099 AB |
142 | struct CpuOnInfo *info; |
143 | ||
144 | assert(qemu_mutex_iothread_locked()); | |
825482ad JCD |
145 | |
146 | DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64 | |
147 | "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry, | |
148 | context_id); | |
149 | ||
150 | /* requested EL level need to be in the 1 to 3 range */ | |
151 | assert((target_el > 0) && (target_el < 4)); | |
152 | ||
153 | if (target_aa64 && (entry & 3)) { | |
154 | /* | |
155 | * if we are booting in AArch64 mode then "entry" needs to be 4 bytes | |
156 | * aligned. | |
157 | */ | |
158 | return QEMU_ARM_POWERCTL_INVALID_PARAM; | |
159 | } | |
160 | ||
161 | /* Retrieve the cpu we are powering up */ | |
162 | target_cpu_state = arm_get_cpu_by_id(cpuid); | |
163 | if (!target_cpu_state) { | |
164 | /* The cpu was not found */ | |
165 | return QEMU_ARM_POWERCTL_INVALID_PARAM; | |
166 | } | |
167 | ||
168 | target_cpu = ARM_CPU(target_cpu_state); | |
062ba099 | 169 | if (target_cpu->power_state == PSCI_ON) { |
825482ad JCD |
170 | qemu_log_mask(LOG_GUEST_ERROR, |
171 | "[ARM]%s: CPU %" PRId64 " is already on\n", | |
172 | __func__, cpuid); | |
173 | return QEMU_ARM_POWERCTL_ALREADY_ON; | |
174 | } | |
175 | ||
176 | /* | |
177 | * The newly brought CPU is requested to enter the exception level | |
178 | * "target_el" and be in the requested mode (AArch64 or AArch32). | |
179 | */ | |
180 | ||
181 | if (((target_el == 3) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) || | |
182 | ((target_el == 2) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL2))) { | |
183 | /* | |
184 | * The CPU does not support requested level | |
185 | */ | |
186 | return QEMU_ARM_POWERCTL_INVALID_PARAM; | |
187 | } | |
188 | ||
189 | if (!target_aa64 && arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64)) { | |
190 | /* | |
191 | * For now we don't support booting an AArch64 CPU in AArch32 mode | |
192 | * TODO: We should add this support later | |
193 | */ | |
194 | qemu_log_mask(LOG_UNIMP, | |
195 | "[ARM]%s: Starting AArch64 CPU %" PRId64 | |
196 | " in AArch32 mode is not supported yet\n", | |
197 | __func__, cpuid); | |
198 | return QEMU_ARM_POWERCTL_INVALID_PARAM; | |
199 | } | |
200 | ||
062ba099 AB |
201 | /* |
202 | * If another CPU has powered the target on we are in the state | |
203 | * ON_PENDING and additional attempts to power on the CPU should | |
204 | * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI | |
205 | * spec) | |
206 | */ | |
207 | if (target_cpu->power_state == PSCI_ON_PENDING) { | |
208 | qemu_log_mask(LOG_GUEST_ERROR, | |
209 | "[ARM]%s: CPU %" PRId64 " is already powering on\n", | |
210 | __func__, cpuid); | |
211 | return QEMU_ARM_POWERCTL_ON_PENDING; | |
825482ad JCD |
212 | } |
213 | ||
062ba099 AB |
214 | /* To avoid racing with a CPU we are just kicking off we do the |
215 | * final bit of preparation for the work in the target CPUs | |
216 | * context. | |
217 | */ | |
218 | info = g_new(struct CpuOnInfo, 1); | |
219 | info->entry = entry; | |
220 | info->context_id = context_id; | |
221 | info->target_el = target_el; | |
222 | info->target_aa64 = target_aa64; | |
825482ad | 223 | |
062ba099 AB |
224 | async_run_on_cpu(target_cpu_state, arm_set_cpu_on_async_work, |
225 | RUN_ON_CPU_HOST_PTR(info)); | |
548ebcaf | 226 | |
825482ad JCD |
227 | /* We are good to go */ |
228 | return QEMU_ARM_POWERCTL_RET_SUCCESS; | |
229 | } | |
230 | ||
ea824b97 PM |
231 | static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state, |
232 | run_on_cpu_data data) | |
233 | { | |
234 | ARMCPU *target_cpu = ARM_CPU(target_cpu_state); | |
235 | ||
236 | /* Initialize the cpu we are turning on */ | |
237 | cpu_reset(target_cpu_state); | |
238 | target_cpu_state->halted = 0; | |
239 | ||
240 | /* Finally set the power status */ | |
241 | assert(qemu_mutex_iothread_locked()); | |
242 | target_cpu->power_state = PSCI_ON; | |
243 | } | |
244 | ||
245 | int arm_set_cpu_on_and_reset(uint64_t cpuid) | |
246 | { | |
247 | CPUState *target_cpu_state; | |
248 | ARMCPU *target_cpu; | |
249 | ||
250 | assert(qemu_mutex_iothread_locked()); | |
251 | ||
252 | /* Retrieve the cpu we are powering up */ | |
253 | target_cpu_state = arm_get_cpu_by_id(cpuid); | |
254 | if (!target_cpu_state) { | |
255 | /* The cpu was not found */ | |
256 | return QEMU_ARM_POWERCTL_INVALID_PARAM; | |
257 | } | |
258 | ||
259 | target_cpu = ARM_CPU(target_cpu_state); | |
260 | if (target_cpu->power_state == PSCI_ON) { | |
261 | qemu_log_mask(LOG_GUEST_ERROR, | |
262 | "[ARM]%s: CPU %" PRId64 " is already on\n", | |
263 | __func__, cpuid); | |
264 | return QEMU_ARM_POWERCTL_ALREADY_ON; | |
265 | } | |
266 | ||
267 | /* | |
268 | * If another CPU has powered the target on we are in the state | |
269 | * ON_PENDING and additional attempts to power on the CPU should | |
270 | * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI | |
271 | * spec) | |
272 | */ | |
273 | if (target_cpu->power_state == PSCI_ON_PENDING) { | |
274 | qemu_log_mask(LOG_GUEST_ERROR, | |
275 | "[ARM]%s: CPU %" PRId64 " is already powering on\n", | |
276 | __func__, cpuid); | |
277 | return QEMU_ARM_POWERCTL_ON_PENDING; | |
278 | } | |
279 | ||
280 | async_run_on_cpu(target_cpu_state, arm_set_cpu_on_and_reset_async_work, | |
281 | RUN_ON_CPU_NULL); | |
282 | ||
283 | /* We are good to go */ | |
284 | return QEMU_ARM_POWERCTL_RET_SUCCESS; | |
285 | } | |
286 | ||
062ba099 AB |
287 | static void arm_set_cpu_off_async_work(CPUState *target_cpu_state, |
288 | run_on_cpu_data data) | |
289 | { | |
290 | ARMCPU *target_cpu = ARM_CPU(target_cpu_state); | |
291 | ||
292 | assert(qemu_mutex_iothread_locked()); | |
293 | target_cpu->power_state = PSCI_OFF; | |
294 | target_cpu_state->halted = 1; | |
295 | target_cpu_state->exception_index = EXCP_HLT; | |
296 | } | |
297 | ||
825482ad JCD |
298 | int arm_set_cpu_off(uint64_t cpuid) |
299 | { | |
300 | CPUState *target_cpu_state; | |
301 | ARMCPU *target_cpu; | |
302 | ||
062ba099 AB |
303 | assert(qemu_mutex_iothread_locked()); |
304 | ||
825482ad JCD |
305 | DPRINTF("cpu %" PRId64 "\n", cpuid); |
306 | ||
307 | /* change to the cpu we are powering up */ | |
308 | target_cpu_state = arm_get_cpu_by_id(cpuid); | |
309 | if (!target_cpu_state) { | |
310 | return QEMU_ARM_POWERCTL_INVALID_PARAM; | |
311 | } | |
312 | target_cpu = ARM_CPU(target_cpu_state); | |
062ba099 | 313 | if (target_cpu->power_state == PSCI_OFF) { |
825482ad JCD |
314 | qemu_log_mask(LOG_GUEST_ERROR, |
315 | "[ARM]%s: CPU %" PRId64 " is already off\n", | |
316 | __func__, cpuid); | |
317 | return QEMU_ARM_POWERCTL_IS_OFF; | |
318 | } | |
319 | ||
062ba099 AB |
320 | /* Queue work to run under the target vCPUs context */ |
321 | async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work, | |
322 | RUN_ON_CPU_NULL); | |
825482ad JCD |
323 | |
324 | return QEMU_ARM_POWERCTL_RET_SUCCESS; | |
325 | } | |
326 | ||
062ba099 AB |
327 | static void arm_reset_cpu_async_work(CPUState *target_cpu_state, |
328 | run_on_cpu_data data) | |
329 | { | |
330 | /* Reset the cpu */ | |
331 | cpu_reset(target_cpu_state); | |
332 | } | |
333 | ||
825482ad JCD |
334 | int arm_reset_cpu(uint64_t cpuid) |
335 | { | |
336 | CPUState *target_cpu_state; | |
337 | ARMCPU *target_cpu; | |
338 | ||
062ba099 AB |
339 | assert(qemu_mutex_iothread_locked()); |
340 | ||
825482ad JCD |
341 | DPRINTF("cpu %" PRId64 "\n", cpuid); |
342 | ||
343 | /* change to the cpu we are resetting */ | |
344 | target_cpu_state = arm_get_cpu_by_id(cpuid); | |
345 | if (!target_cpu_state) { | |
346 | return QEMU_ARM_POWERCTL_INVALID_PARAM; | |
347 | } | |
348 | target_cpu = ARM_CPU(target_cpu_state); | |
062ba099 AB |
349 | |
350 | if (target_cpu->power_state == PSCI_OFF) { | |
825482ad JCD |
351 | qemu_log_mask(LOG_GUEST_ERROR, |
352 | "[ARM]%s: CPU %" PRId64 " is off\n", | |
353 | __func__, cpuid); | |
354 | return QEMU_ARM_POWERCTL_IS_OFF; | |
355 | } | |
356 | ||
062ba099 AB |
357 | /* Queue work to run under the target vCPUs context */ |
358 | async_run_on_cpu(target_cpu_state, arm_reset_cpu_async_work, | |
359 | RUN_ON_CPU_NULL); | |
825482ad JCD |
360 | |
361 | return QEMU_ARM_POWERCTL_RET_SUCCESS; | |
362 | } |