// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * This file is derived from arch/powerpc/kvm/44x.c.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
/* #define EXIT_DEBUG */

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, num_2M_pages),
	STATS_DESC_ICOUNTER(VM, num_1G_pages)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, sum_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, light_exits),
	STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, isi_exits),
	STATS_DESC_COUNTER(VCPU, dsi_exits),
	STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
	STATS_DESC_COUNTER(VCPU, dec_exits),
	STATS_DESC_COUNTER(VCPU, ext_intr_exits),
	STATS_DESC_COUNTER(VCPU, halt_successful_wait),
	STATS_DESC_COUNTER(VCPU, dbell_exits),
	STATS_DESC_COUNTER(VCPU, gdbell_exits),
	STATS_DESC_COUNTER(VCPU, ld),
	STATS_DESC_COUNTER(VCPU, st),
	STATS_DESC_COUNTER(VCPU, pf_storage),
	STATS_DESC_COUNTER(VCPU, pf_instruc),
	STATS_DESC_COUNTER(VCPU, sp_storage),
	STATS_DESC_COUNTER(VCPU, sp_instruc),
	STATS_DESC_COUNTER(VCPU, queue_intr),
	STATS_DESC_COUNTER(VCPU, ld_slow),
	STATS_DESC_COUNTER(VCPU, st_slow),
	STATS_DESC_COUNTER(VCPU, pthru_all),
	STATS_DESC_COUNTER(VCPU, pthru_host),
	STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}
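
/*
 * Map an interrupt vector offset to the delivery priority used as a bit
 * number in vcpu->arch.pending_exceptions.
 */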
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}
void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);

#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
}
EXPORT_SYMBOL(kvmppc_core_queue_syscall);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, srr1_flags);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, srr1_flags);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, srr1_flags);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * sorts of guest.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
	 * which ends up doing a smp_send_reschedule(), which will
	 * pull the guest all the way out to the host, meaning that
	 * we will call kvmppc_core_prepare_to_enter() before entering
	 * the guest again, and that will handle the external_oneshot
	 * flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
				    ulong dar, ulong dsisr)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, dsisr);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
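
/*
 * Try to deliver one pending interrupt priority.  DEC and external
 * interrupts are gated on MSR_EE and the guest critical section; the
 * other priorities are delivered unconditionally.  Returns non-zero if
 * the corresponding vector was injected.
 */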
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}
/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL:
		/*
		 * External interrupts get cleared by userspace
		 * except when set by the KVM_INTERRUPT ioctl with
		 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
		 */
		if (vcpu->arch.external_oneshot) {
			vcpu->arch.external_oneshot = 0;
			return true;
		}
		return false;
	}

	return true;
}
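
/*
 * Scan the pending exception bitmap in priority order, deliver what can
 * be taken now, and then tell the guest (via the shared page) whether
 * anything is still pending.
 */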
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
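
/*
 * Resolve a guest physical address to a host pfn.  The magic (shared)
 * page is special-cased: it is backed by the kernel page holding
 * vcpu->arch.shared rather than by a memslot.
 */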
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
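
/*
 * Translate an effective address via the guest MMU.  If the relevant
 * relocation bit (IR for instructions, DR for data) is off, fall back
 * to a 1:1 real-mode mapping limited to KVM_PAM.
 */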
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}
/*
 * Returns prefixed instructions with the prefix in the high 32 bits
 * of *inst and suffix in the low 32 bits.  This is the same convention
 * as used in HEIR, vcpu->arch.last_inst and vcpu->arch.emul_inst.
 * Like vcpu->arch.last_inst but unlike vcpu->arch.emul_inst, each
 * half of the value needs byte-swapping if the guest endianness is
 * different from the host endianness.
 */
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, unsigned long *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;
	u32 iw;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), &iw, false);
	if (r != EMULATE_DONE)
		return EMULATE_AGAIN;
	/*
	 * If [H]SRR1 indicates that the instruction that caused the
	 * current interrupt is a prefixed instruction, get the suffix.
	 */
	if (kvmppc_get_msr(vcpu) & SRR1_PREFIXED) {
		u32 suffix;
		pc += 4;
		r = kvmppc_ld(vcpu, &pc, sizeof(u32), &suffix, false);
		if (r != EMULATE_DONE)
			return EMULATE_AGAIN;
		*inst = ((u64)iw << 32) | suffix;
	} else {
		*inst = iw;
	}
	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = kvmppc_get_pid(vcpu);
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
				val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_get_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, kvmppc_get_tar(vcpu));
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, kvmppc_get_ebbhr(vcpu));
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, kvmppc_get_ebbrr(vcpu));
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, kvmppc_get_bescr(vcpu));
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, kvmppc_get_ic(vcpu));
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
				kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_set_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			kvmppc_set_fpscr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_TAR:
			kvmppc_set_tar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_EBBHR:
			kvmppc_set_ebbhr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_EBBRR:
			kvmppc_set_ebbrr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_BESCR:
			kvmppc_set_bescr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_IC:
			kvmppc_set_ic(vcpu, set_reg_val(id, *val));
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}
int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm->arch.kvm_ops->free_memslot(slot);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      const struct kvm_memory_slot *old,
				      struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, old, new, change);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, old, new, change);
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->age_gfn(kvm, range);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
}
int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

#ifdef CONFIG_KVM_XICS
	/*
	 * Free the XIVE and XICS devices which are not directly freed by the
	 * device 'release' method
	 */
	kfree(kvm->arch.xive_devices.native);
	kvm->arch.xive_devices.native = NULL;
	kfree(kvm->arch.xive_devices.xics_on_xive);
	kvm->arch.xive_devices.xics_on_xive = NULL;
	kfree(kvm->arch.xics_device);
	kvm->arch.xics_device = NULL;
#endif /* CONFIG_KVM_XICS */
}
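
/*
 * H_LOGICAL_CI_LOAD hcall: emulate a cache-inhibited load of 1, 2, 4 or
 * 8 bytes by reading the in-kernel MMIO bus and returning the value in
 * GPR4.
 */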
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;
	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;
	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;
	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;
	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
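
/*
 * H_LOGICAL_CI_STORE hcall: emulate a cache-inhibited store of 1, 2, 4
 * or 8 bytes by writing the value from GPR6 to the in-kernel MMIO bus.
 */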
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;
	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;
	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;
	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;
	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}
#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xics_on_xive())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}
int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}
#endif /* CONFIG_KVM_XICS */
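
/*
 * Module init: register with the KVM core and, depending on the kernel
 * configuration, initialise the PR backend and register the XICS/XIVE
 * in-kernel interrupt controller device ops.
 */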
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
		if (kvmppc_xive_native_supported())
			kvm_register_device_ops(&kvm_xive_native_ops,
						KVM_DEV_TYPE_XIVE);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif

	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);
/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif