/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
#include "exec/memattrs.h"

#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)
static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {

static void kvm_mips_update_state(void *opaque, int running, RunState state);

unsigned long kvm_arch_vcpu_id(CPUState *cs)
int kvm_arch_init(MachineState *ms, KVMState *s)
    /* MIPS has 128 signals, so the sigmask handed to KVM is 16 bytes wide */
    kvm_set_sigmask_len(s, 16);

    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);

    DPRINTF("%s\n", __func__);

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
int kvm_arch_init_vcpu(CPUState *cs)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;

    qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);

    if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
            /* mark unsupported so it gets disabled on reset */

    if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
            /* mark unsupported so it gets disabled on reset */

    DPRINTF("%s\n", __func__);
void kvm_mips_reset_vcpu(MIPSCPU *cpu)
    CPUMIPSState *env = &cpu->env;

    if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        fprintf(stderr, "Warning: KVM does not support FPU, disabling\n");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);

    if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        fprintf(stderr, "Warning: KVM does not support MSA, disabling\n");
        env->CP0_Config3 &= ~(1 << CP0C3_MSAP);

    DPRINTF("%s\n", __func__);
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
    DPRINTF("%s\n", __func__);

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
    DPRINTF("%s\n", __func__);
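/*
 * Only Cause.IP2 (hardware interrupt 0) is tested below: that is where
 * QEMU's MIPS boards are expected to route their I/O interrupt line.
 */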
static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
    CPUMIPSState *env = &cpu->env;

    return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
    MIPSCPU *cpu = MIPS_CPU(cs);
    struct kvm_mips_interrupt intr;

    qemu_mutex_lock_iothread();

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            cpu_mips_io_interrupts_pending(cpu)) {
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
            error_report("%s: cpu %d: failed to inject IRQ %x",
                         __func__, cs->cpu_index, intr.irq);

    qemu_mutex_unlock_iothread();
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
    return MEMTXATTRS_UNSPECIFIED;

int kvm_arch_process_async_events(CPUState *cs)

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
    DPRINTF("%s\n", __func__);
    switch (run->exit_reason) {
        error_report("%s: unknown exit reason %d",
                     __func__, run->exit_reason);

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
    DPRINTF("%s\n", __func__);

void kvm_arch_init_irq_routing(KVMState *s)
int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
    CPUState *cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
    CPUState *cs = current_cpu;
    CPUState *dest_cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {

    intr.cpu = dest_cs->cpu_index;

    DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
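/*
 * Note that the KVM_INTERRUPT ioctl above is issued on the calling vcpu
 * (current_cpu), while intr.cpu names the destination vcpu of the IPI.
 */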
#define MIPS_CP0_32(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
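/*
 * Each CP0 one-reg ID encodes the register number and select field as
 * 8 * reg + sel; e.g. Status is CP0 register 12, select 0, so
 * KVM_REG_MIPS_CP0_STATUS carries the value 96 in its low bits.
 */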
#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID           MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_CONFIG         MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1        MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2        MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3        MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4        MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5        MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)
static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
    struct kvm_one_reg cp0reg = {
        .addr = (uintptr_t)addr

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);

static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
    struct kvm_one_reg cp0reg = {
        .addr = (uintptr_t)addr

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
    uint64_t val64 = *addr;
    struct kvm_one_reg cp0reg = {
        .addr = (uintptr_t)&val64

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
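/*
 * The "ulreg" accessors go through a uint64_t temporary so that a 32-bit
 * target_ulong value is widened to the 64-bit width KVM expects for these
 * registers.
 */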
static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
    struct kvm_one_reg cp0reg = {
        .addr = (uintptr_t)addr

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);

static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
    struct kvm_one_reg cp0reg = {
        .addr = (uintptr_t)addr

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);

static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
    struct kvm_one_reg cp0reg = {
        .addr = (uintptr_t)addr

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);

static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
    struct kvm_one_reg cp0reg = {
        .addr = (uintptr_t)addr

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);

static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
    struct kvm_one_reg cp0reg = {
        .addr = (uintptr_t)&val64

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);

static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
    struct kvm_one_reg cp0reg = {
        .addr = (uintptr_t)addr

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);

static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
    struct kvm_one_reg cp0reg = {
        .addr = (uintptr_t)addr

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
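/*
 * These masks name the Config bits that QEMU may modify via
 * kvm_mips_change_one_reg() below; bits outside the masks are left exactly
 * as KVM reports them.
 */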
static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
                                          int32_t *addr, int32_t mask)
    err = kvm_mips_get_one_reg(cs, reg_id, &tmp);

    /* only change bits in mask */
    change = (*addr ^ tmp) & mask;

    return kvm_mips_put_one_reg(cs, reg_id, &tmp);
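/*
 * Read-modify-write of a CP0 register: the current value is fetched from KVM,
 * only the bits selected by mask may be taken from QEMU's copy, and the merged
 * value is written back, so bits outside mask stay under KVM's control.
 * E.g. with tmp = 0xf0, *addr = 0x0f and mask = 0x03, change is 0x03, i.e.
 * only the low two bits are folded into the value written back.
 */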
/*
 * We freeze the KVM timer when either the VM clock is stopped or the state is
 * saved (the state is dirty).
 */

/*
 * Save the state of the KVM timer when the VM clock is stopped or the state is
 * synced to QEMU.
 */
static int kvm_mips_save_count(CPUState *cs)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;

    /* freeze KVM timer */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);

    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);

    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
/*
 * Restore the state of the KVM timer when the VM clock is restarted or the
 * state is synced back to KVM.
 */
static int kvm_mips_restore_count(CPUState *cs)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err_dc, err, ret = 0;

    /* check the timer is frozen */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);

    /* resume KVM timer */
    count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
    err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
/*
 * Handle the VM clock being started or stopped
 */
static void kvm_mips_update_state(void *opaque, int running, RunState state)
    CPUState *cs = opaque;
    uint64_t count_resume;

    /*
     * If state is already dirty (synced to QEMU) then the KVM timer state is
     * already saved and can be restored when it is synced back to KVM.
     */
    if (!cs->kvm_vcpu_dirty) {
        ret = kvm_mips_save_count(cs);
            fprintf(stderr, "Failed saving count\n");

    /* Set clock restore time to now */
    count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                  &count_resume);
        fprintf(stderr, "Failed setting COUNT_RESUME\n");

    if (!cs->kvm_vcpu_dirty) {
        ret = kvm_mips_restore_count(cs);
            fprintf(stderr, "Failed restoring count\n");
static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;

    /* Only put FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                        &env->active_fpu.fcr0);
                DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);

        err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
            DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);

        /*
         * FPU register state is a subset of MSA vector state, so don't put
         * FPU registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                    err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                        &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                    DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);

    /* Only put MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);

        err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
            DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
                DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
static int kvm_mips_get_fpu_registers(CPUState *cs)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);

        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
        restore_fp_status(env);

        /*
         * FPU register state is a subset of MSA vector state, so don't save
         * FPU registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                        &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);

        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
        restore_msa_fp_status(env);

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
static int kvm_mips_get_cp0_registers(CPUState *cs)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;

    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
        DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
        DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
        DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
        DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
        DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
        DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
        DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
        DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
        DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
        DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);

    /* If VM clock stopped then state was already saved when it was stopped */
    if (runstate_is_running()) {
        err = kvm_mips_save_count(cs);

    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
        DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
        DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
        DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
        DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
        DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
        DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
        DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
        DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
        DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
int kvm_arch_put_registers(CPUState *cs, int level)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    struct kvm_regs regs;

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];

    regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
    regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
    regs.pc = (int64_t)(target_long)env->active_tc.PC;
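    /*
     * The (int64_t)(target_long) casts sign-extend 32-bit guest values:
     * struct kvm_regs holds 64-bit fields, and the KVM MIPS ABI stores
     * 32-bit register contents sign-extended.
     */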
    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);

    ret = kvm_mips_put_cp0_registers(cs, level);

    ret = kvm_mips_put_fpu_registers(cs, level);
int kvm_arch_get_registers(CPUState *cs)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    struct kvm_regs regs;

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);

    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs.gpr[i];

    env->active_tc.HI[0] = regs.hi;
    env->active_tc.LO[0] = regs.lo;
    env->active_tc.PC = regs.pc;

    kvm_mips_get_cp0_registers(cs);
    kvm_mips_get_fpu_registers(cs);
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)

int kvm_arch_release_virq_post(int virq)

int kvm_arch_msi_data_to_gsi(uint32_t data)