/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
 * Authors: Sanjay Lal <[email protected]>
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "kvm_mips.h"

#define DEBUG_KVM 0

#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};
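
/*
 * Note (comment added for clarity, not in the original sources): the generic
 * KVM layer walks this KVM_CAP_LAST_INFO-terminated list at start-up and
 * refuses to start if the host kernel lacks any capability named in it; MIPS
 * currently lists no hard requirements beyond the baseline KVM API.
 */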

static void kvm_mips_update_state(void *opaque, int running, RunState state);

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}

int kvm_arch_init(KVMState *s)
{
    /* MIPS has 128 signals, i.e. a 16 byte wide sigmask */
    kvm_set_sigmask_len(s, 16);

    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;

    qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);

    DPRINTF("%s\n", __func__);
    return ret;
}

void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        fprintf(stderr, "Warning: FPU not supported with KVM, disabling\n");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);
    }

    DPRINTF("%s\n", __func__);
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    DPRINTF("%s: %#x\n", __func__, env->CP0_Cause & (1 << (2 + CP0Ca_IP)));
    return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}

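/*
 * For illustration (comment added here, not in the original sources):
 * CP0Ca_IP is the bit position of Cause.IP0, so bit (2 + CP0Ca_IP) is
 * Cause.IP2, the first external hardware interrupt line. With CP0Ca_IP == 8
 * the check above reduces to "CP0_Cause & 0x400", i.e. "is hardware IRQ 2
 * asserted?", which matches the fixed intr.irq = 2 injected in
 * kvm_arch_pre_run() below.
 */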

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    int r;
    struct kvm_mips_interrupt intr;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            cpu_mips_io_interrupts_pending(cpu)) {
        intr.cpu = -1;
        intr.irq = 2;
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
        if (r < 0) {
            error_report("%s: cpu %d: failed to inject IRQ %x",
                         __func__, cs->cpu_index, intr.irq);
        }
    }
}

void kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    DPRINTF("%s\n", __func__);
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret;

    DPRINTF("%s\n", __func__);
    switch (run->exit_reason) {
    default:
        error_report("%s: unknown exit reason %d",
                     __func__, run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    DPRINTF("%s\n", __func__);
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr)
{
    DPRINTF("%s\n", __func__);
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    DPRINTF("%s\n", __func__);
    return 1;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = -1;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}

int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = current_cpu;
    CPUState *dest_cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = dest_cs->cpu_index;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}
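
/*
 * Illustrative (hypothetical) caller, not part of this file: a board or
 * interrupt-controller model raising and lowering guest hardware IRQ 2
 * (Cause.IP2) would typically route its qemu_irq handler here, with "level"
 * tracking the state of the modelled pin:
 *
 *     static void example_cpu_set_irq(void *opaque, int n_IRQ, int level)
 *     {
 *         MIPSCPU *cpu = opaque;
 *
 *         kvm_mips_set_interrupt(cpu, 2, level);
 *     }
 *
 * kvm_mips_set_ipi_interrupt() is the cross-CPU variant: intr.cpu carries
 * the destination VCPU index rather than -1.
 */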

#define MIPS_CP0_32(_R, _S)                                     \
    (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)                                     \
    (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)

/* CP0_Count control */
#define KVM_REG_MIPS_COUNT_CTL          (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
                                         0x20000 | 0)
#define KVM_REG_MIPS_COUNT_CTL_DC       0x00000001      /* master disable */
/* CP0_Count resume monotonic nanoseconds */
#define KVM_REG_MIPS_COUNT_RESUME       (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
                                         0x20000 | 1)
/* CP0_Count rate in Hz */
#define KVM_REG_MIPS_COUNT_HZ           (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
                                         0x20000 | 2)
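
/*
 * Worked example (added for illustration): CP0 registers live in register
 * space 0x10000 and are indexed as 8 * reg + sel. KVM_REG_MIPS_CP0_STATUS
 * above therefore expands to
 *
 *     KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * 12 + 0)
 *
 * i.e. MIPS architecture, 32-bit value, CP0 space, index 96, which is how
 * the kernel identifies CP0_Status (register 12, select 0).
 */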

static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    uint64_t val64 = *addr;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t val64 = *addr;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
                                         uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    int ret;
    uint64_t val64 = 0;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
    if (ret >= 0) {
        *addr = val64;
    }
    return ret;
}

static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    int ret;
    uint64_t val64 = 0;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
    if (ret >= 0) {
        *addr = val64;
    }
    return ret;
}

static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
                                         uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
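
/*
 * Typical use of the accessors above (illustration only, mirroring the calls
 * in kvm_mips_get_cp0_registers() further down). 32-bit and target_ulong
 * values are staged through a local uint64_t, so the buffer handed to the
 * kernel is always at least 64 bits wide regardless of the QEMU-side storage
 * type:
 *
 *     int32_t wired;
 *     if (kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &wired) < 0) {
 *         ... report the error ...
 *     }
 */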

/*
 * We freeze the KVM timer when either the VM clock is stopped or the state is
 * saved (the state is dirty).
 */

/*
 * Save the state of the KVM timer when VM clock is stopped or state is synced
 * to QEMU.
 */
static int kvm_mips_save_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err, ret = 0;

    /* freeze KVM timer */
    err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

/*
 * Restore the state of the KVM timer when VM clock is restarted or state is
 * synced to KVM.
 */
static int kvm_mips_restore_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /* check the timer is frozen */
    err_dc = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}

/*
 * Handle the VM clock being started or stopped
 */
static void kvm_mips_update_state(void *opaque, int running, RunState state)
{
    CPUState *cs = opaque;
    int ret;
    uint64_t count_resume;

    /*
     * If state is already dirty (synced to QEMU) then the KVM timer state is
     * already saved and can be restored when it is synced back to KVM.
     */
    if (!running) {
        if (!cs->kvm_vcpu_dirty) {
            ret = kvm_mips_save_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed saving count\n");
            }
        }
    } else {
        /* Set clock restore time to now */
        count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        ret = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                     &count_resume);
        if (ret < 0) {
            fprintf(stderr, "Failed setting COUNT_RESUME\n");
            return;
        }

        if (!cs->kvm_vcpu_dirty) {
            ret = kvm_mips_restore_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed restoring count\n");
            }
        }
    }
}
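
/*
 * Summary (comment added for clarity, describing the functions above): when
 * the VM clock stops, kvm_mips_save_count() freezes the KVM timer
 * (COUNT_CTL.DC = 1) and snapshots CP0_Cause and CP0_Count into env; when
 * the clock restarts, COUNT_RESUME is set to "now" and
 * kvm_mips_restore_count() writes the snapshot back before clearing
 * COUNT_CTL.DC. If the VCPU state is already dirty in QEMU, the same
 * save/restore happens via kvm_mips_get_cp0_registers() and
 * kvm_mips_put_cp0_registers() instead.
 */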

static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

static int kvm_mips_get_cp0_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state was already saved when it was stopped */
    if (runstate_is_running()) {
        err = kvm_mips_save_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->active_tc.gpr[i];
    }

    regs.hi = env->active_tc.HI[0];
    regs.lo = env->active_tc.LO[0];
    regs.pc = env->active_tc.PC;

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_cp0_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;
    struct kvm_regs regs;
    int i;

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs.gpr[i];
    }

    env->active_tc.HI[0] = regs.hi;
    env->active_tc.LO[0] = regs.lo;
    env->active_tc.PC = regs.pc;

    /* Also propagate any error from the CP0 register sync */
    ret = kvm_mips_get_cp0_registers(cs);

    return ret;
}