/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "config-host.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"

static bool have_guest_debug;

/**
 * kvm_arm_init_debug()
 * @cs: CPUState
 *
 * Check for guest debug capabilities.
 *
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);
}

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

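/* Probe the host CPU by creating a scratch VM and vCPU, and fill in
 * @ahcc with the preferred KVM target and the QEMU feature bits it
 * implies. Returns true on success, false if no scratch vCPU could
 * be created.
 */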
bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     * For AArch64 we currently don't care about ID registers at
     * all; we just want to know the CPU type.
     */
    int fdarray[3];
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is their preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;
    ahcc->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);

    ahcc->features = features;

    return true;
}

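/* MPIDR_EL1 system register encoding: op0=3, op1=0, CRn=0, CRm=0, op2=5 */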
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5

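/* Initialise the KVM vCPU: select the init features, issue the
 * KVM_ARM_VCPU_INIT ioctl, adopt the MPIDR that KVM assigned, probe
 * guest debug support and build the coprocessor/system register list.
 */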
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

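/* Look @regidx up in non_runtime_cpregs[]: returns the KVM_PUT_* state
 * level at which the register must be written back to KVM, defaulting
 * to KVM_PUT_RUNTIME_STATE for registers not in the table.
 */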
int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

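/* Build a KVM register ID for a field of struct kvm_regs:
 * KVM_REG_ARM_CORE_REG(x) encodes the offset of field x, so e.g.
 * AARCH64_CORE_REG(regs.pc) names the guest PC as a 64-bit core reg.
 */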
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

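/* Push QEMU's CPU state out to KVM: the core registers (Xn, SP, PC,
 * pstate, ELR_EL1, the SPSR banks and the FP/SIMD state) are written
 * by hand with KVM_SET_ONE_REG; all remaining system registers go via
 * the cpreg list.
 */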
int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM SPSRs 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        int rd = i << 1;
        uint64_t fp_val[2];
#ifdef HOST_WORDS_BIGENDIAN
        fp_val[0] = env->vfp.regs[rd + 1];
        fp_val[1] = env->vfp.regs[rd];
#else
        fp_val[1] = env->vfp.regs[rd + 1];
        fp_val[0] = env->vfp.regs[rd];
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

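/* Refill QEMU's CPU state from KVM: the mirror image of
 * kvm_arch_put_registers(), reading the core and FP/SIMD registers
 * with KVM_GET_ONE_REG and syncing the rest via the cpreg list.
 */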
int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        env->uncached_cpsr = val & CPSR_M;
        cpsr_write(env, val, 0xffffffff);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        uint64_t fp_val[2];
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
            int rd = i << 1;
#ifdef HOST_WORDS_BIGENDIAN
            env->vfp.regs[rd + 1] = fp_val[0];
            env->vfp.regs[rd] = fp_val[1];
#else
            env->vfp.regs[rd + 1] = fp_val[1];
            env->vfp.regs[rd] = fp_val[0];
#endif
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;

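/* Software breakpoints work by saving the original instruction at the
 * breakpoint address and replacing it with BRK; removal checks that
 * the BRK is still in place before restoring the saved instruction.
 */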
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */

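/* Decode a debug exit: returns true if the exception was caused by a
 * software breakpoint QEMU knows about, so the debug exit should be
 * handled by QEMU; false if it should go back to the guest.
 */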
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we don't handle this here, it may really be an exception
     * for the guest to handle.
     */
    qemu_log_mask(LOG_UNIMP,
                  "%s: re-injecting exception not yet implemented"
                  " (0x%"PRIx32", %"PRIx64")\n",
                  __func__, hsr_ec, env->pc);

    return false;
}