/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"

static bool have_guest_debug;

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time when debugging kernels
 * you never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/* The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
int max_hw_bps, max_hw_wps;
GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

/**
 * kvm_arm_init_debug() - check for guest debug capabilities
 * @cs: CPUState
 *
 * kvm_check_extension returns the number of debug registers we have
 * or 0 if we have none.
 */
static void kvm_arm_init_debug(CPUState *cs)
{
    have_guest_debug = kvm_check_extension(cs->kvm_state,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

/**
 * insert_hw_breakpoint()
 * @addr: address of breakpoint
 *
 * See ARM ARM D2.9.1 for details but here we are only going to create
 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 * together to match address and context or vmid). The hardware is
 * capable of fancier matching but that will require exposing that
 * fanciness to GDB's interface.
 *
 * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 *
 *  31    24 23    20 19   16 15 14 13 12    9 8   5 4    3 2   1 0
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 * +------+------+-------+-----+----+------+-----+------+-----+---+
 *
 * BT: Breakpoint type (0 = unlinked address match)
 * LBN: Linked BP number (0 = unused)
 * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
 * BAS: Byte Address Select (RES1 for AArch64)
 * E: Enable bit
 *
 * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
 *
 *  63  53 52       49 48       2  1 0
 * +------+-----------+----------+-----+
 * | RESS | VA[52:49] | VA[48:2] | 0 0 |
 * +------+-----------+----------+-----+
 *
 * Depending on the addressing mode bits the top bits of the register
 * are a sign extension of the highest applicable VA bit. Some
 * versions of GDB don't do it correctly so we ensure they are correct
 * here so future PC comparisons will work properly.
 */
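/*
 * A worked example (illustrative values, not from the ARM ARM): for
 * an address with bit 52 set but the top bits clear, e.g.
 * 0x0010000000001234, sextract64(addr, 0, 53) sign-extends bit 52
 * upwards and stores 0xfff0000000001234 in BVR, matching what the
 * CPU will report as the PC. The BCR built below ends up as 0x1e7
 * (E=1, PMC=0b11, BAS=0xf).
 */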

static int insert_hw_breakpoint(target_ulong addr)
{
    HWBreakpoint brk = {
        .bcr = 0x1,                 /* BCR E=1, enable */
        .bvr = sextract64(addr, 0, 53)
    };

    if (cur_hw_bps >= max_hw_bps) {
        return -ENOBUFS;
    }

    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);   /* PMC = 11 */
    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);   /* BAS = RES1 */

    g_array_append_val(hw_breakpoints, brk);

    return 0;
}

/**
 * delete_hw_breakpoint()
 * @pc: address of breakpoint
 *
 * Delete a breakpoint and shuffle any above down
 */

static int delete_hw_breakpoint(target_ulong pc)
{
    int i;
    for (i = 0; i < hw_breakpoints->len; i++) {
        HWBreakpoint *brk = get_hw_bp(i);
        if (brk->bvr == pc) {
            g_array_remove_index(hw_breakpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}

/**
 * insert_hw_watchpoint()
 * @addr: address of watch point
 * @len: size of area
 * @type: type of watch point
 *
 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 * stuff if we want to. The watch points can be linked with the break
 * points above to make them context aware. However for simplicity
 * currently we only deal with simple read/write watch points.
 *
 * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 *
 *  31 29 28    24 23  21 20 19 16 15 14 13 12     5 4   3 2   1 0
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 *
 * MASK: num bits addr mask (0 = none, 01/10 = reserved, 11 = 3 bits (8 bytes))
 * WT: 0 - unlinked, 1 - linked (not currently used)
 * LBN: Linked BP number (not currently used)
 * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
 * BAS: Byte Address Select
 * LSC: Load/Store control (01: load, 10: store, 11: both)
 * E: Enable
 *
 * The bottom 2 bits of the value register are masked. Therefore to
 * break on any sizes smaller than an unaligned word you need to set
 * MASK=0, BAS=bit per byte in question. For larger regions (which must
 * be a power of 2 in size) you need to mask the address as required
 * and set BAS=0xff.
 */
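/*
 * A worked example (illustrative values): a 2-byte watchpoint at
 * addr 0x1003 gives off = addr & 0x7 = 3 and bas = (1 << 2) - 1 =
 * 0b11, so deposit32(wcr, 5 + off, 8 - off, bas) selects the byte
 * lanes covering 0x1003-0x1004 while WVR holds the aligned base
 * 0x1000. A 16-byte watchpoint instead uses MASK = ctz64(16) = 4
 * with BAS = 0xff and WVR masked down to a 16-byte boundary.
 */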

static int insert_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    HWWatchpoint wp = {
        .wcr = 1, /* E=1, enable */
        .wvr = addr & (~0x7ULL),
        .details = { .vaddr = addr, .len = len }
    };

    if (cur_hw_wps >= max_hw_wps) {
        return -ENOBUFS;
    }

    /*
     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
     * valid whether EL3 is implemented or not
     */
    wp.wcr = deposit32(wp.wcr, 1, 2, 3);

    switch (type) {
    case GDB_WATCHPOINT_READ:
        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
        wp.details.flags = BP_MEM_READ;
        break;
    case GDB_WATCHPOINT_WRITE:
        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
        wp.details.flags = BP_MEM_WRITE;
        break;
    case GDB_WATCHPOINT_ACCESS:
        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
        wp.details.flags = BP_MEM_ACCESS;
        break;
    default:
        g_assert_not_reached();
    }
    if (len <= 8) {
        /* we align the address and set the bits in BAS */
        int off = addr & 0x7;
        int bas = (1 << len) - 1;

        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
    } else {
        /* For ranges above 8 bytes we need to be a power of 2 */
        if (is_power_of_2(len)) {
            int bits = ctz64(len);

            wp.wvr &= ~((1 << bits) - 1);
            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
        } else {
            return -ENOBUFS;
        }
    }

    g_array_append_val(hw_watchpoints, wp);
    return 0;
}


static bool check_watchpoint_in_range(int i, target_ulong addr)
{
    HWWatchpoint *wp = get_hw_wp(i);
    uint64_t addr_top, addr_bottom = wp->wvr;
    int bas = extract32(wp->wcr, 5, 8);
    int mask = extract32(wp->wcr, 24, 4);

    if (mask) {
        addr_top = addr_bottom + (1 << mask);
    } else {
        /* BAS must be contiguous but can offset against the base
         * address in DBGWVR */
        addr_bottom = addr_bottom + ctz32(bas);
        addr_top = addr_bottom + clo32(bas);
    }

    if (addr >= addr_bottom && addr <= addr_top) {
        return true;
    }

    return false;
}

/**
 * delete_hw_watchpoint()
 * @addr: address of watchpoint
 *
 * Delete a watchpoint and shuffle any above down
 */

static int delete_hw_watchpoint(target_ulong addr,
                                target_ulong len, int type)
{
    int i;
    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            g_array_remove_index(hw_watchpoints, i);
            return 0;
        }
    }
    return -ENOENT;
}


int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}


void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;
    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int i;

    for (i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}

static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
{
    int i;

    for (i = 0; i < cur_hw_wps; i++) {
        if (check_watchpoint_in_range(i, addr)) {
            return &get_hw_wp(i)->details;
        }
    }
    return NULL;
}

static bool kvm_arm_pmu_set_attr(CPUState *cs, struct kvm_device_attr *attr)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_HAS_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("PMU: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
        return false;
    }

    return true;
}

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to init PMU");
        abort();
    }
}

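/*
 * Note on the device attribute ABI used below: kvm_device_attr.addr
 * carries a userspace pointer, so the irq number is passed
 * indirectly and the kernel reads it through &irq while handling
 * KVM_SET_DEVICE_ATTR.
 */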
void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_pmu_set_attr(cs, &attr)) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

static inline void unset_feature(uint64_t *features, int feature)
{
    *features &= ~(1ULL << feature);
}

static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUFeatures fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    uint64_t features = 0;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a very old kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values.  This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);
    set_feature(&features, ARM_FEATURE_PMU);

    ahcf->features = features;

    return true;
}

#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        unset_feature(&env->features, ARM_FEATURE_PMU);
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    kvm_arm_init_debug(cs);

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (i.e. is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
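
/*
 * KVM_REG_ARM_TIMER_CNT is listed because rewriting the virtual
 * counter on every runtime sync would make guest time jump around;
 * it is only written back on a full state write such as migration.
 */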

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

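/*
 * In the kernel headers KVM_REG_ARM_CORE_REG(x) is
 * offsetof(struct kvm_regs, x) / sizeof(__u32), so e.g.
 * AARCH64_CORE_REG(regs.pc) names the PC slot of struct kvm_regs
 * tagged with a 64-bit transfer size.
 */
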
int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* Advanced SIMD and FP registers. */
    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef HOST_WORDS_BIGENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        reg.addr = (uintptr_t)fp_val;
#else
        reg.addr = (uintptr_t)q;
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    /* Advanced SIMD and FP registers */
    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)q;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
#ifdef HOST_WORDS_BIGENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;

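/*
 * 0xd4200000 is BRK #0 (the imm16 field sits in bits [20:5]).
 * Insertion stashes the original instruction in bp->saved_insn and
 * writes BRK in its place; removal checks the BRK is still present
 * before restoring the saved instruction.
 */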
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */

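/*
 * The EC field occupies ESR_ELx bits [31:26], so syn_get_ec() amounts
 * to debug_exit->hsr >> 26; EC_AA64_BKPT (0x3c), for example,
 * indicates a BRK executed in AArch64 state.
 */
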
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    cc->do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}