/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/arm/arm.h"
#include "qemu/log.h"

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUFeatures fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int i, ret, fdarray[3];
    uint32_t midr, id_pfr0, mvfr1;
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;
    struct kvm_one_reg idregs[] = {
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | ENCODE_CP_REG(15, 0, 0, 0, 0, 0, 0),
            .addr = (uintptr_t)&midr,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | ENCODE_CP_REG(15, 0, 0, 0, 1, 0, 0),
            .addr = (uintptr_t)&id_pfr0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
            .addr = (uintptr_t)&mvfr1,
        },
    };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcf->dtb_compatible = "arm,arm-v7";

    for (i = 0; i < ARRAY_SIZE(idregs); i++) {
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
        if (ret) {
            break;
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (ret) {
        return false;
    }

    /* Now we've retrieved all the register information we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM supporting CPU is at least a v7
     * with VFPv3, virtualization extensions, and the generic
     * timers; this in turn implies most of the other feature
     * bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7VE);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(mvfr1, 20, 4) == 1) {
        set_feature(&features, ARM_FEATURE_VFP_FP16);
    }
    if (extract32(mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcf->features = features;

    return true;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All coprocessor registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

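/* Return the level (KVM_PUT_*_STATE) at which a given coprocessor register
 * needs to be written back to the kernel; anything not listed in
 * non_runtime_cpregs[] above defaults to KVM_PUT_RUNTIME_STATE.
 */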
int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

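/* CP15 encoding of the MPIDR register, for use with ARM_CP15_REG32()
 * in kvm_arch_init_vcpu() below.
 */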
#define ARM_CPU_ID_MPIDR 0, 0, 0, 5

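/* Set up the in-kernel vcpu: choose its init features, issue the
 * KVM_ARM_VCPU_INIT ioctl, and then query the kernel for the state
 * (VFP-D32 support, MPIDR, the cpreg list) that QEMU's CPU model needs.
 */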
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t v;
    uint32_t mpidr;
    struct kvm_one_reg r;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM_CP15_REG32(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;

    /* Check whether userspace can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

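/* Table-driven mapping between kernel KVM_REG_* register IDs and the
 * offsets of the backing fields in CPUARMState, used below to sync the
 * core and VFP system registers in kvm_arch_get/put_registers().
 */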
typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
#define COREREG64(KERNELNAME, QEMUFIELD)                     \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetoflow32(CPUARMState, QEMUFIELD)                \
    }

static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[BANK_USRSYS]),
    COREREG(usr_regs.uregs[14], banked_r14[BANK_USRSYS]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[BANK_SVC]),
    COREREG(svc_regs[1], banked_r14[BANK_SVC]),
    COREREG64(svc_regs[2], banked_spsr[BANK_SVC]),
    COREREG(abt_regs[0], banked_r13[BANK_ABT]),
    COREREG(abt_regs[1], banked_r14[BANK_ABT]),
    COREREG64(abt_regs[2], banked_spsr[BANK_ABT]),
    COREREG(und_regs[0], banked_r13[BANK_UND]),
    COREREG(und_regs[1], banked_r14[BANK_UND]),
    COREREG64(und_regs[2], banked_spsr[BANK_UND]),
    COREREG(irq_regs[0], banked_r13[BANK_IRQ]),
    COREREG(irq_regs[1], banked_r14[BANK_IRQ]),
    COREREG64(irq_regs[2], banked_spsr[BANK_IRQ]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[BANK_FIQ]),
    COREREG(fiq_regs[6], banked_r14[BANK_FIQ]),
    COREREG64(fiq_regs[7], banked_spsr[BANK_FIQ]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};

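/* Copy register state from QEMU's CPUARMState into the in-kernel vcpu:
 * the core registers and CPSR via the table above, then the VFP
 * registers, FPSCR, vcpu events and the coprocessor tuple list.
 */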
int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_r14[bn] = env->regs[14];
    env->banked_spsr[bn] = env->spsr;

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)aa32_vfp_dreg(env, i);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    /* Note that we do not call write_cpustate_to_list()
     * here, so we are only writing the tuple list back to
     * KVM. This is safe because nothing can change the
     * CPUARMState cp15 fields (in particular gdb accesses cannot)
     * and so there are no changes to sync. In fact syncing would
     * be wrong at this point: for a constant register where TCG and
     * KVM disagree about its value, the preceding write_list_to_cpustate()
     * would not have had any effect on the CPUARMState value (since the
     * register is read-only), and a write_cpustate_to_list() here would
     * then try to write the TCG value back into KVM -- this would either
     * fail or incorrectly change the value the guest sees.
     *
     * If we ever want to allow the user to modify cp15 registers via
     * the gdb stub, we would need to be more clever here (for instance
     * tracking the set of registers kvm_arch_get_registers() successfully
     * managed to update the CPUARMState with, and only allowing those
     * to be written back up into the kernel).
     */
    if (!write_list_to_kvmstate(cpu, level)) {
        return EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

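/* The inverse of kvm_arch_put_registers(): read the core, VFP and
 * coprocessor register state out of the in-kernel vcpu and update
 * QEMU's CPUARMState to match.
 */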
int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff, CPSRWriteRaw);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->regs[14] = env->banked_r14[bn];
    env->spsr = env->banked_spsr[bn];

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)aa32_vfp_dreg(env, i);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return 0;
}

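/* Guest debug (software and hardware breakpoints) is not yet implemented
 * for 32-bit KVM hosts; the stubs below just log the omission.
 */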
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return false;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return false;
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_pmu_init(CPUState *cs)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}