/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
	{ "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
	{ "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
	{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
	{NULL}
};
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}

	return 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}
static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/*
	 * Add a wired entry to the TLB, it is used to map the commpage to
	 * the Guest kernel
	 */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}

	return 0;
}
void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}
static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);

	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	unsigned long npages = 0;
	int i;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		if (mem->slot == 0)
			npages = mem->memory_size >> PAGE_SHIFT;

		if (npages) {
			kvm->arch.guest_pmap_npages = npages;
			kvm->arch.guest_pmap =
			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

			if (!kvm->arch.guest_pmap) {
				kvm_err("Failed to allocate guest PMAP");
				return;
			}

			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
				  npages, kvm->arch.guest_pmap);

			/* Now setup the page table */
			for (i = 0; i < npages; i++)
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
		}
	}
}
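/*
 * Editorial sketch, not part of the original file: guest_pmap is a flat
 * gfn -> pfn table of guest_pmap_npages entries. Once the TLB fault path
 * (tlb.c, via the kvm_mips_gfn_to_pfn hook installed in kvm_mips_init()
 * below) has pinned a page, a lookup is a plain array index, roughly:
 *
 *	unsigned long pfn = kvm->arch.guest_pmap[gfn];
 *
 *	if (pfn == KVM_INVALID_PAGE)
 *		...	(not mapped yet; the fault handler must fill it)
 *
 * The exact fault-path details live in tlb.c and are assumptions here.
 */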
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size, offset;
	void *gebase;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x200;

	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Copy L1 Guest Exception handler to correct offset */

	/* TLB Refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General Exception Entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);
	}

	/* General handler, relocate to unmapped space for sanity's sake */
	offset = 0x2000;
	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
		  gebase + offset,
		  mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_uninit_cpu:
	kvm_vcpu_uninit(vcpu);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}
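/*
 * Editorial summary of the gebase block built above, derived from the
 * memcpy() offsets rather than stated anywhere authoritative:
 *
 *	gebase + 0x0000                     TLB refill entry (EXL = 0)
 *	gebase + 0x0180                     general exception entry
 *	gebase + 0x0200 + n*VECTORSPACING   vectored interrupt entries, n = 0..7
 *	gebase + 0x2000                     relocated guest exception handler
 *
 * The low entries are identical copies of mips32_exception that transfer
 * control to the real handler installed at gebase + 0x2000.
 */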
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	lose_fpu(1);

	local_irq_disable();
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_guest_enter();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	r = __kvm_mips_vcpu_run(run, vcpu);

	/* Re-enable HTW before enabling interrupts */
	htw_start();

	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (waitqueue_active(&dvcpu->wq))
		wake_up_interruptible(&dvcpu->wq);

	return 0;
}
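/*
 * Editorial sketch, not part of the original file: userland reaches the
 * function above through the KVM_INTERRUPT vcpu ioctl. Assuming "vcpu_fd"
 * is a descriptor returned by KVM_CREATE_VCPU, raising I/O interrupt 2 on
 * the calling vcpu itself looks roughly like:
 *
 *	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 *
 * Passing a negative irq (-2/-3/-4) dequeues the interrupt instead.
 */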
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}
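/*
 * Editorial sketch, not part of the original file: from userland the pair
 * of functions here is driven via KVM_GET_ONE_REG/KVM_SET_ONE_REG. Reading
 * the guest PC, with "vcpu_fd" assumed to come from KVM_CREATE_VCPU:
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * The size field encoded in reg.id (KVM_REG_SIZE_U64 here) selects which
 * of the put_user()/copy_to_user() arms above performs the transfer.
 */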
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		if (copy_from_user(vs, uaddr, 16))
			return -EFAULT;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_CP0_COMPARE:
	case KVM_REG_MIPS_CP0_CAUSE:
	case KVM_REG_MIPS_CP0_CONFIG:
	case KVM_REG_MIPS_CP0_CONFIG1:
	case KVM_REG_MIPS_CP0_CONFIG2:
	case KVM_REG_MIPS_CP0_CONFIG3:
	case KVM_REG_MIPS_CP0_CONFIG4:
	case KVM_REG_MIPS_CP0_CONFIG5:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	default:
		return -EINVAL;
	}
	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
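/*
 * Editorial sketch, not part of the original file: userland opts a vcpu in
 * to guest FPU support with KVM_ENABLE_CAP before the first KVM_RUN,
 * roughly as follows ("vcpu_fd" again assumed from KVM_CREATE_VCPU):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * Note that flags and args[0] must be zero or the function above rejects
 * the request.
 */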
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))
			return -EFAULT;
		return 0;
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;

			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}
out:
	return r;
}
/* Get (and clear) the dirty memory log for a memory slot. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
			 ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;

out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
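/*
 * Editorial sketch, not part of the original file: userland polls dirty
 * pages per memslot with KVM_GET_DIRTY_LOG. With "vm_fd" assumed to come
 * from KVM_CREATE_VM and "bitmap" sized to one bit per page of the slot:
 *
 *	struct kvm_dirty_log log = {
 *		.slot = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * Each call reports pages dirtied since the previous call and, via the
 * function above, clears the kernel-side bitmap.
 */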
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}
int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_MIPS_FPU:
		r = !!cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}
static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
	uint32_t status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t cause = vcpu->arch.host_cp0_cause;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* re-enable HTW before enabling interrupts */
	htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);

	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case T_INT:
		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case T_COP_UNUSABLE:
		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case T_TLB_MOD:
		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case T_TLB_ST_MISS:
		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case T_TLB_LD_MISS:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case T_ADDR_ERR_ST:
		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case T_ADDR_ERR_LD:
		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case T_SYSCALL:
		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case T_RES_INST:
		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case T_BREAK:
		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case T_TRAP:
		++vcpu->stat.trap_inst_exits;
		trace_kvm_exit(vcpu, TRAP_INST_EXITS);
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case T_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		trace_kvm_exit(vcpu, MSA_FPE_EXITS);
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case T_FPE:
		++vcpu->stat.fpe_exits;
		trace_kvm_exit(vcpu, FPE_EXITS);
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case T_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	default:
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
		}
	}

	if (ret == RESUME_GUEST) {
		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	htw_stop();

	return ret;
}
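/*
 * Editorial note on the return encoding above, derived from the code
 * rather than an authoritative spec: an error exit packs an errno into the
 * upper bits alongside RESUME_HOST, e.g.:
 *
 *	ret = (-EINTR << 2) | RESUME_HOST;
 *
 * so a caller can recover both pieces with something like:
 *
 *	int errcode = ret >> 2;			(-EINTR in the example)
 *	int to_host = ret & RESUME_FLAG_HOST;
 */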
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
	}

	preempt_enable();
}
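/*
 * Editorial note, derived from the surrounding code rather than stated in
 * it: vcpu->arch.fpu_inuse is a two-bit live-state tracker for guest
 * register context, assuming the KVM_MIPS_FPU_* flag meanings:
 *
 *	KVM_MIPS_FPU_FPU set -> guest FPU registers are live in hardware
 *	KVM_MIPS_FPU_MSA set -> guest MSA (vector) registers are live too
 *
 * kvm_own_fpu()/kvm_own_msa() set the bits after restoring context;
 * kvm_lose_fpu() saves state and clears them; kvm_drop_fpu() just clears.
 */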
#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
				KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
	case KVM_MIPS_FPU_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
		break;
	case 0:
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
		break;
	default:
		break;
	}

	preempt_enable();
}
#endif
/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
		disable_msa();
		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
	}
	if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
	}
	preempt_enable();
}
/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * FPU & MSA get disabled in root context (hardware) when it is disabled
	 * in guest context (software), but the register state in the hardware
	 * may still be in use. This is why we explicitly re-enable the hardware
	 * before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();

		__kvm_save_msa(&vcpu->arch);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
			clear_c0_status(ST0_CU1 | ST0_FR);
		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
	}
	preempt_enable();
}
/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};
int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	/*
	 * On MIPS, kernel modules are executed from "mapped space", which
	 * requires TLBs. The TLB handling code is statically linked with
	 * the rest of the kernel (tlb.c) to avoid the possibility of
	 * double faulting. The issue is that the TLB code references
	 * routines that are part of the KVM module, which are only
	 * available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	return 0;
}
void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);