/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"

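/*
 * Translate a guest virtual address to a guest physical address for
 * trap-and-emulate mode. Only the unmapped kernel segments (CKSEG0/CKSEG1)
 * can be translated directly by stripping the segment bits; any other
 * address would need a TLB lookup and is reported as KVM_INVALID_ADDR here.
 */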
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        uint32_t kseg = KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1))
                gpa = CPHYSADDR(gva);
        else {
                printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

#ifdef DEBUG
        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
#endif

        return gpa;
}

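/*
 * Exit handler for a Coprocessor Unusable exception taken while the guest
 * was running. If coprocessor 1 (the FPU) caused the fault, an FPU exception
 * is delivered to the guest; otherwise the trapping instruction itself is
 * emulated. EMULATE_WAIT (e.g. from a WAIT instruction) exits to userspace
 * with KVM_EXIT_INTR.
 */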
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
                er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
        else
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}

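/*
 * Exit handler for a TLB Modified exception (a store hit a valid but
 * not-dirty entry). Faults on guest user or KSEG23 addresses are handled by
 * kvm_mips_handle_tlbmod(); KSEG0 faults are unexpected without HIGHMEM and
 * any other address is illegal, so both of those exit to userspace.
 */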
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
#ifdef DEBUG
                kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);
#endif
                er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * XXXKYMA: The guest kernel does not expect to get this fault
                 * when we are not using HIGHMEM. Need to address this in a
                 * HIGHMEM kernel.
                 */
                printk("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                       cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        } else {
                printk("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                       cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

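/*
 * Exit handler for a TLB miss/invalid on a store. Commpage accesses from
 * guest kernel mode get a mapping injected directly; user and KSEG23
 * addresses go through the generic TLB miss emulation, which either passes
 * the fault on to the guest or refills the shadow host TLB from the guest
 * TLB; KSEG0 faults are handled entirely by KVM.
 */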
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
#ifdef DEBUG
                kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);
#endif
                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * All KSEG0 faults are handled by KVM, as the guest kernel
                 * does not expect to ever get them.
                 */
                if (kvm_mips_handle_kseg0_tlb_fault
                    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

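/*
 * Exit handler for a TLB miss/invalid on a load. The cases mirror the store
 * handler above: commpage, user/KSEG23 and KSEG0 addresses are serviced,
 * anything else is an internal error.
 */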
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
#ifdef DEBUG
                kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
                          vcpu->arch.pc, badvaddr);
#endif

                /*
                 * User Address (UA) fault. This could happen if
                 * (1) the TLB entry is not present/valid in both the guest
                 *     and the shadow host TLBs, in which case we pass the
                 *     fault on to the guest kernel and let it handle it;
                 * (2) the TLB entry is present in the guest TLB but not in
                 *     the shadow, in which case we inject the entry from the
                 *     guest TLB into the shadow host TLB.
                 */
                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault
                    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else {
                printk("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
                       cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

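/*
 * Exit handler for an Address Error on a store. From guest kernel mode a
 * store to CKSEG0/CKSEG1 is treated as an MMIO access: the instruction is
 * emulated and the access is completed in userspace via KVM_EXIT_MMIO.
 * Anything else is an internal error.
 */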
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
#ifdef DEBUG
                kvm_debug("Emulate Store to MMIO space\n");
#endif
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        printk("Emulate Store to MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                printk("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
                       cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

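/*
 * Exit handler for an Address Error on a load, handled the same way as the
 * store case above: loads from CKSEG0/CKSEG1 are emulated as MMIO, anything
 * else is an internal error.
 */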
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
#ifdef DEBUG
                kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
#endif
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        printk("Emulate Load from MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                printk("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
                       cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                er = EMULATE_FAIL;
        }
        return ret;
}

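/* Deliver a guest SYSCALL exception: re-inject it into the guest kernel. */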
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

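/*
 * Exit handler for a Reserved Instruction exception; kvm_mips_handle_ri()
 * either emulates the instruction or delivers the exception to the guest.
 */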
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

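/* Deliver a guest BREAK (breakpoint) exception to the guest kernel. */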
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
        return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        uint32_t config1;
        int vcpu_id = vcpu->vcpu_id;

        /*
         * Arch specific stuff, set up config registers properly so that the
         * guest will come up as expected; for now we simulate a MIPS 24Kc.
         */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
        kvm_write_c0_guest_config(cop0,
                                  MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
                                  (MMU_TYPE_R4000 << CP0C0_MT));

        /* Read the cache characteristics from the host Config1 Register */
        config1 = (read_c0_config1() & ~0x7f);

        /* Set up MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* We unset some bits that we aren't emulating */
        config1 &= ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
                     (1 << CP0C1_WR) | (1 << CP0C1_CA));
        kvm_write_c0_guest_config1(cop0, config1);

        kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
        /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
        kvm_write_c0_guest_config3(cop0,
                                   MIPS_CONFIG3 | (0 << CP0C3_VInt) |
                                   (1 << CP0C3_ULRI));

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        /* Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));

        return 0;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,

        .vm_init = kvm_trap_emul_vm_init,
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
};

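/*
 * Install the trap-and-emulate callback table. The caller (presumably the
 * arch initialisation code) keeps the returned pointer and dispatches every
 * VM exit through it.
 */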
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;
        return 0;
}