/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"

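/* Set a single ARM_FEATURE_* bit in a feature bitmask word */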
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int i, ret, fdarray[3];
    uint32_t midr, id_pfr0, id_isar0, mvfr1;
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl; however,
     * we know such kernels will only support creating one kind of guest
     * CPU, which is their preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;
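    /* The ID registers we fetch from the scratch VCPU via KVM_GET_ONE_REG:
     * MIDR, ID_PFR0, ID_ISAR0 and MVFR1, which between them determine the
     * feature bits we set below.
     */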
    struct kvm_one_reg idregs[] = {
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 0, 0, 0),
            .addr = (uintptr_t)&midr,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 1, 0, 0),
            .addr = (uintptr_t)&id_pfr0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 2, 0, 0),
            .addr = (uintptr_t)&id_isar0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
            .addr = (uintptr_t)&mvfr1,
        },
    };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcc->dtb_compatible = "arm,arm-v7";

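    /* fdarray[] holds the KVM, VM and VCPU file descriptors in that
     * order, so fdarray[2] is the scratch VCPU we query here.
     */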
    for (i = 0; i < ARRAY_SIZE(idregs); i++) {
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
        if (ret) {
            break;
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (ret) {
        return false;
    }

    /* Now we've retrieved all the register information, we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM-supporting CPU is at least a v7
     * with VFPv3, LPAE and the generic timers; this in turn implies
     * most of the other feature bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_LPAE);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

    switch (extract32(id_isar0, 24, 4)) {
    case 1:
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    case 2:
        set_feature(&features, ARM_FEATURE_ARM_DIV);
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    default:
        break;
    }

    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(mvfr1, 20, 4) == 1) {
        set_feature(&features, ARM_FEATURE_VFP_FP16);
    }
    if (extract32(mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcc->features = features;

    return true;
}

static bool reg_syncs_via_tuple_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (i.e. is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

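/* qsort() comparator for uint64_t register indexes. A plain
 * subtraction would be truncated to int and could yield the wrong
 * sign for large values, so compare explicitly.
 */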
static int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct kvm_vcpu_init init;
    int i, ret, arraylen;
    uint64_t v;
    struct kvm_one_reg r;
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    init.target = cpu->kvm_target;
    memset(init.features, 0, sizeof(init.features));
    if (cpu->start_powered_off) {
        init.features[0] = 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    ret = kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
    if (ret) {
        return ret;
    }
    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /* Populate the cpreg list based on the kernel's idea
     * of what registers exist (and throw away the TCG-created list).
     */
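    /* Calling KVM_GET_REG_LIST with n == 0 is expected to fail with
     * E2BIG, after updating rl.n to the number of registers the kernel
     * has; we can then allocate a buffer of exactly the right size and
     * repeat the call.
     */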
    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

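    /* First pass: count the registers we will sync via the tuple list,
     * rejecting any whose size we cannot handle.
     */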
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!reg_syncs_via_tuple_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

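    /* Second pass: record the indexes of those registers */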
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!reg_syncs_via_tuple_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

    /* Save a copy of the initial register values so that we can
     * feed it back to the kernel on VCPU reset.
     */
    cpu->cpreg_reset_values = g_memdup(cpu->cpreg_values,
                                       cpu->cpreg_array_len *
                                       sizeof(cpu->cpreg_values[0]));

out:
    g_free(rlp);
    return ret;
}

typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

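/* COREREG and VFPSYSREG pair a kernel ONE_REG id with the offset of
 * the matching CPUARMState field, so the get/put loops below can
 * transfer each register with a single KVM_GET/SET_ONE_REG ioctl.
 */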
#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

297 | ||
298 | static const Reg regs[] = { | |
299 | /* R0_usr .. R14_usr */ | |
300 | COREREG(usr_regs.uregs[0], regs[0]), | |
301 | COREREG(usr_regs.uregs[1], regs[1]), | |
302 | COREREG(usr_regs.uregs[2], regs[2]), | |
303 | COREREG(usr_regs.uregs[3], regs[3]), | |
304 | COREREG(usr_regs.uregs[4], regs[4]), | |
305 | COREREG(usr_regs.uregs[5], regs[5]), | |
306 | COREREG(usr_regs.uregs[6], regs[6]), | |
307 | COREREG(usr_regs.uregs[7], regs[7]), | |
308 | COREREG(usr_regs.uregs[8], usr_regs[0]), | |
309 | COREREG(usr_regs.uregs[9], usr_regs[1]), | |
310 | COREREG(usr_regs.uregs[10], usr_regs[2]), | |
311 | COREREG(usr_regs.uregs[11], usr_regs[3]), | |
312 | COREREG(usr_regs.uregs[12], usr_regs[4]), | |
313 | COREREG(usr_regs.uregs[13], banked_r13[0]), | |
314 | COREREG(usr_regs.uregs[14], banked_r14[0]), | |
315 | /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */ | |
316 | COREREG(svc_regs[0], banked_r13[1]), | |
317 | COREREG(svc_regs[1], banked_r14[1]), | |
318 | COREREG(svc_regs[2], banked_spsr[1]), | |
319 | COREREG(abt_regs[0], banked_r13[2]), | |
320 | COREREG(abt_regs[1], banked_r14[2]), | |
321 | COREREG(abt_regs[2], banked_spsr[2]), | |
322 | COREREG(und_regs[0], banked_r13[3]), | |
323 | COREREG(und_regs[1], banked_r14[3]), | |
324 | COREREG(und_regs[2], banked_spsr[3]), | |
325 | COREREG(irq_regs[0], banked_r13[4]), | |
326 | COREREG(irq_regs[1], banked_r14[4]), | |
327 | COREREG(irq_regs[2], banked_spsr[4]), | |
328 | /* R8_fiq .. R14_fiq and SPSR_fiq */ | |
329 | COREREG(fiq_regs[0], fiq_regs[0]), | |
330 | COREREG(fiq_regs[1], fiq_regs[1]), | |
331 | COREREG(fiq_regs[2], fiq_regs[2]), | |
332 | COREREG(fiq_regs[3], fiq_regs[3]), | |
333 | COREREG(fiq_regs[4], fiq_regs[4]), | |
334 | COREREG(fiq_regs[5], banked_r13[5]), | |
335 | COREREG(fiq_regs[6], banked_r14[5]), | |
336 | COREREG(fiq_regs[7], banked_spsr[5]), | |
337 | /* R15 */ | |
338 | COREREG(usr_regs.uregs[15], regs[15]), | |
339 | /* VFP system registers */ | |
340 | VFPSYSREG(FPSID), | |
341 | VFPSYSREG(MVFR1), | |
342 | VFPSYSREG(MVFR0), | |
343 | VFPSYSREG(FPEXC), | |
344 | VFPSYSREG(FPINST), | |
345 | VFPSYSREG(FPINST2), | |
346 | }; | |
347 | ||
int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_r14[bn] = env->regs[14];
    env->banked_spsr[bn] = env->spsr;

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
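    /* The 32 double registers d0..d31 have consecutive ONE_REG ids,
     * so we can walk through them by incrementing r.id.
     */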
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* Note that we do not call write_cpustate_to_list()
     * here, so we are only writing the tuple list back to
     * KVM. This is safe because nothing can change the
     * CPUARMState cp15 fields (in particular gdb accesses cannot)
     * and so there are no changes to sync. In fact syncing would
     * be wrong at this point: for a constant register where TCG and
     * KVM disagree about its value, the preceding write_list_to_cpustate()
     * would not have had any effect on the CPUARMState value (since the
     * register is read-only), and a write_cpustate_to_list() here would
     * then try to write the TCG value back into KVM -- this would either
     * fail or incorrectly change the value the guest sees.
     *
     * If we ever want to allow the user to modify cp15 registers via
     * the gdb stub, we would need to be more clever here (for instance
     * tracking the set of registers kvm_arch_get_registers() successfully
     * managed to update the CPUARMState with, and only allowing those
     * to be written back up into the kernel).
     */
    if (!write_list_to_kvmstate(cpu)) {
        return -EINVAL;
    }

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->regs[14] = env->banked_r14[bn];
    env->spsr = env->banked_spsr[bn];

    /* VFP registers */
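    /* d0..d31 again, relying on the ONE_REG ids being consecutive */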
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    return 0;
}

void kvm_arch_reset_vcpu(CPUState *cs)
{
    /* Feed the kernel back its initial register state */
    ARMCPU *cpu = ARM_CPU(cs);

    memmove(cpu->cpreg_values, cpu->cpreg_reset_values,
            cpu->cpreg_array_len * sizeof(cpu->cpreg_values[0]));

    if (!write_list_to_kvmstate(cpu)) {
        abort();
    }
}