/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "qemu/log.h"

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUFeatures fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int err = 0, fdarray[3];
    uint32_t midr, id_pfr0;
    uint64_t features = 0;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcf->dtb_compatible = "arm,arm-v7";

    err |= read_sys_reg32(fdarray[2], &midr, ARM_CP15_REG32(0, 0, 0, 0));
    err |= read_sys_reg32(fdarray[2], &id_pfr0, ARM_CP15_REG32(0, 0, 1, 0));

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                          ARM_CP15_REG32(0, 0, 2, 0));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                          ARM_CP15_REG32(0, 0, 2, 1));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                          ARM_CP15_REG32(0, 0, 2, 2));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                          ARM_CP15_REG32(0, 0, 2, 3));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                          ARM_CP15_REG32(0, 0, 2, 4));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                          ARM_CP15_REG32(0, 0, 2, 5));
    if (read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                       ARM_CP15_REG32(0, 0, 2, 7))) {
        /*
         * Older kernels don't support reading ID_ISAR6. This register was
         * only introduced in ARMv8, so we can assume that it is zero on a
         * CPU that a kernel this old is running on.
         */
        ahcf->isar.id_isar6 = 0;
    }

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                          ARM_CP15_REG32(0, 0, 1, 2));

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                          KVM_REG_ARM | KVM_REG_SIZE_U32 |
                          KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR0);
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                          KVM_REG_ARM | KVM_REG_SIZE_U32 |
                          KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1);
    /*
     * FIXME: There is not yet a way to read MVFR2.
     * Fortunately there is not yet anything in there that affects migration.
     */

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                          ARM_CP15_REG32(0, 0, 1, 4));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                          ARM_CP15_REG32(0, 0, 1, 5));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                          ARM_CP15_REG32(0, 0, 1, 6));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                          ARM_CP15_REG32(0, 0, 1, 7));
    if (read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                       ARM_CP15_REG32(0, 0, 2, 6))) {
        /*
         * Older kernels don't support reading ID_MMFR4 (a register
         * new in v8); assume it's zero.
         */
        ahcf->isar.id_mmfr4 = 0;
    }

    /*
     * There is no way to read DBGDIDR, because currently 32-bit KVM
     * doesn't implement debug at all. Leave it at zero.
     */

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* Now we've retrieved all the register information, we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM-supporting CPU is at least a v7
     * with VFPv3, virtualization extensions, and the generic
     * timers; this in turn implies most of the other feature
     * bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7VE);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(ahcf->isar.mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }

    ahcf->features = features;

    return true;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (i.e. is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All coprocessor registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

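/*
 * For example, kvm_arm_cpreg_level(KVM_REG_ARM_TIMER_CNT) returns
 * KVM_PUT_FULL_STATE, so the virtual counter is only written back to the
 * kernel when kvm_arch_put_registers() is invoked with a full-state level
 * (write_list_to_kvmstate() compares each register's level against its
 * 'level' argument); every other coprocessor register defaults to
 * KVM_PUT_RUNTIME_STATE and is written on every sync.
 */
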
#define ARM_CPU_ID_MPIDR 0, 0, 0, 5

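/*
 * ARM_CPU_ID_MPIDR packs the (opc1, CRn, CRm, opc2) arguments expected by
 * ARM_CP15_REG32(), in the same order as the other ID register reads in
 * this file: ARM_CP15_REG32(ARM_CPU_ID_MPIDR) names the 32-bit MPIDR,
 * i.e. cp15 with opc1=0, CRn=c0, CRm=c0, opc2=5.
 */
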
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t v;
    uint32_t mpidr;
    struct kvm_one_reg r;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by QEMU.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM_CP15_REG32(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;

    /* Check whether userspace can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

#define COREREG(KERNELNAME, QEMUFIELD)                        \
    {                                                         \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                      \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME),  \
        offsetof(CPUARMState, QEMUFIELD)                      \
    }

#define VFPSYSREG(R)                                          \
    {                                                         \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |    \
        KVM_REG_ARM_VFP_##R,                                  \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])         \
    }

/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
#define COREREG64(KERNELNAME, QEMUFIELD)                      \
    {                                                         \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                      \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME),  \
        offsetoflow32(CPUARMState, QEMUFIELD)                 \
    }

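/*
 * For example, COREREG(usr_regs.uregs[0], regs[0]) expands to
 *   { KVM_REG_ARM | KVM_REG_SIZE_U32 |
 *     KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.uregs[0]),
 *     offsetof(CPUARMState, regs[0]) },
 * pairing the kernel's name for a core register with the offset of the
 * QEMU field that mirrors it, so the table below can be walked generically
 * with KVM_GET_ONE_REG/KVM_SET_ONE_REG.
 */
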
static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[BANK_USRSYS]),
    COREREG(usr_regs.uregs[14], banked_r14[BANK_USRSYS]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[BANK_SVC]),
    COREREG(svc_regs[1], banked_r14[BANK_SVC]),
    COREREG64(svc_regs[2], banked_spsr[BANK_SVC]),
    COREREG(abt_regs[0], banked_r13[BANK_ABT]),
    COREREG(abt_regs[1], banked_r14[BANK_ABT]),
    COREREG64(abt_regs[2], banked_spsr[BANK_ABT]),
    COREREG(und_regs[0], banked_r13[BANK_UND]),
    COREREG(und_regs[1], banked_r14[BANK_UND]),
    COREREG64(und_regs[2], banked_spsr[BANK_UND]),
    COREREG(irq_regs[0], banked_r13[BANK_IRQ]),
    COREREG(irq_regs[1], banked_r14[BANK_IRQ]),
    COREREG64(irq_regs[2], banked_spsr[BANK_IRQ]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[BANK_FIQ]),
    COREREG(fiq_regs[6], banked_r14[BANK_FIQ]),
    COREREG64(fiq_regs[7], banked_spsr[BANK_FIQ]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};

int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_spsr[bn] = env->spsr;
    env->banked_r14[r14_bank_number(mode)] = env->regs[14];

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)aa32_vfp_dreg(env, i);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return EINVAL;
    }

    /*
     * Setting VCPU events should be triggered after syncing the registers
     * to avoid overwriting potential changes made by KVM upon calling
     * KVM_SET_VCPU_EVENTS ioctl
     */
    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff, CPSRWriteRaw);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->spsr = env->banked_spsr[bn];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)aa32_vfp_dreg(env, i);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return 0;
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return false;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return false;
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_pmu_init(CPUState *cs)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}