/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include "kvm_mips_comm.h"
#define SYNCI_TEMPLATE  0x041f0000
#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
#define SYNCI_OFFSET(x) ((x) & 0xffff)

#define LW_TEMPLATE     0x8c000000
#define CLEAR_TEMPLATE  0x00000020
#define SW_TEMPLATE     0xac000000
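
/*
 * Each template is a bare opcode with its register and offset fields
 * zeroed; the real fields are OR'd in at patch time:
 *
 *   SYNCI_TEMPLATE  synci 0($zero)           (REGIMM, rt = 0x1f)
 *   LW_TEMPLATE     lw $zero, 0($zero)
 *   CLEAR_TEMPLATE  add $zero, $zero, $zero  (SPECIAL, funct = ADD)
 *   SW_TEMPLATE     sw $zero, 0($zero)
 */
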
int
kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
                           struct kvm_vcpu *vcpu)
{
        int result = 0;
        unsigned long kseg0_opc;
        uint32_t synci_inst = 0x0;

        /* Replace the CACHE instruction with a NOP (0x0 encodes sll $0, $0, 0) */
        kseg0_opc =
            CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
                       (vcpu, (unsigned long) opc));
        memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
        mips32_SyncICache(kseg0_opc, 32);

        return result;
}
/*
 * Address-based CACHE instructions are transformed into synci(s). A little
 * heavy for just D-cache invalidates, but it avoids an expensive trap.
 */
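/*
 * For example, a hit-type CACHE op with base $a0 (reg 4) and offset 0x20
 * would be rewritten as "synci 0x20($a0)":
 *   0x041f0000 | (4 << 21) | 0x20 == 0x049f0020
 */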
int
kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
                        struct kvm_vcpu *vcpu)
{
        int result = 0;
        unsigned long kseg0_opc;
        uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;

        base = SYNCI_BASE(inst);
        offset = SYNCI_OFFSET(inst);
        synci_inst |= (base << 21);
        synci_inst |= offset;

        kseg0_opc =
            CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
                       (vcpu, (unsigned long) opc));
        memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
        mips32_SyncICache(kseg0_opc, 32);

        return result;
}
int
kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
{
        int32_t rt, rd, sel;
        uint32_t mfc0_inst;
        unsigned long kseg0_opc, flags;

        rt = (inst >> 16) & 0x1f;
        rd = (inst >> 11) & 0x1f;
        sel = inst & 0x7;
        if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
                /* ErrCtl reads as 0: patch in "add rt, $zero, $zero" */
                mfc0_inst = CLEAR_TEMPLATE;
                mfc0_inst |= ((rt & 0x1f) << 11);  /* rd field is bits 15:11 */
        } else {
                mfc0_inst = LW_TEMPLATE;
                mfc0_inst |= ((rt & 0x1f) << 16);
                mfc0_inst |= offsetof(struct kvm_mips_commpage, cop0) +
                             offsetof(struct mips_coproc, reg[rd][sel]);
        }
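
        /*
         * The patched lw uses $zero as its base register, so the offset
         * alone addresses the commpage, which KVM maps at guest virtual
         * address 0.
         */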
        if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
                kseg0_opc =
                    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
                               (vcpu, (unsigned long) opc));
                memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
                mips32_SyncICache(kseg0_opc, 32);
        } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
                /* Patch KSEG2/3 text in place, with interrupts disabled */
                local_irq_save(flags);
                memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
                mips32_SyncICache((unsigned long) opc, 32);
                local_irq_restore(flags);
        } else {
                kvm_err("%s: Invalid address: %p\n", __func__, opc);
                return -EFAULT;
        }

        return 0;
}
int
kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
{
        int32_t rt, rd, sel;
        uint32_t mtc0_inst = SW_TEMPLATE;
        unsigned long kseg0_opc, flags;

        rt = (inst >> 16) & 0x1f;
        rd = (inst >> 11) & 0x1f;
        sel = inst & 0x7;

        /* Store the guest's rt into the commpage copy of the CP0 register */
        mtc0_inst |= ((rt & 0x1f) << 16);
        mtc0_inst |= offsetof(struct kvm_mips_commpage, cop0) +
                     offsetof(struct mips_coproc, reg[rd][sel]);
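
        /*
         * e.g. "mtc0 $26, $12, 0" (guest Status) would become
         * "sw $26, off($zero)", where off is the commpage offset of
         * cop0.reg[12][0].
         */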
        if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
                kseg0_opc =
                    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
                               (vcpu, (unsigned long) opc));
                memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
                mips32_SyncICache(kseg0_opc, 32);
        } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
                /* Patch KSEG2/3 text in place, with interrupts disabled */
                local_irq_save(flags);
                memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
                mips32_SyncICache((unsigned long) opc, 32);
                local_irq_restore(flags);
        } else {
                kvm_err("%s: Invalid address: %p\n", __func__, opc);
                return -EFAULT;
        }

        return 0;
}
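
/*
 * Call-path sketch (an annotation, not guaranteed by this file alone):
 * when the guest traps on one of these privileged instructions, the CP0
 * emulation path decodes it and calls the matching kvm_mips_trans_*()
 * helper to rewrite the guest text, so later executions avoid the trap.
 */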