1 /* SPDX-License-Identifier: GPL-2.0-only */
6 * Derived from book3s_rmhandlers.S and other files, which are:
8 * Copyright SUSE Linux Products GmbH 2009
13 #include <asm/ppc_asm.h>
14 #include <asm/code-patching-asm.h>
15 #include <asm/kvm_asm.h>
19 #include <asm/ptrace.h>
20 #include <asm/hvcall.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/exception-64s.h>
23 #include <asm/kvm_book3s_asm.h>
24 #include <asm/book3s/64/mmu-hash.h>
25 #include <asm/export.h>
28 #include <asm/thread_info.h>
29 #include <asm/asm-compat.h>
30 #include <asm/feature-fixups.h>
31 #include <asm/cpuidle.h>
33 /* Values in HSTATE_NAPPING(r13) */
34 #define NAPPING_CEDE 1
35 #define NAPPING_NOVCPU 2
36 #define NAPPING_UNSPLIT 3
38 /* Stack frame offsets for kvmppc_hv_entry */
40 #define STACK_SLOT_TRAP (SFS-4)
41 #define STACK_SLOT_TID (SFS-16)
42 #define STACK_SLOT_PSSCR (SFS-24)
43 #define STACK_SLOT_PID (SFS-32)
44 #define STACK_SLOT_IAMR (SFS-40)
45 #define STACK_SLOT_CIABR (SFS-48)
46 #define STACK_SLOT_DAWR0 (SFS-56)
47 #define STACK_SLOT_DAWRX0 (SFS-64)
48 #define STACK_SLOT_HFSCR (SFS-72)
49 #define STACK_SLOT_AMR (SFS-80)
50 #define STACK_SLOT_UAMOR (SFS-88)
51 #define STACK_SLOT_FSCR (SFS-96)
54 * Call kvmppc_hv_entry in real mode.
55 * Must be called with interrupts hard-disabled.
59 * LR = return address to continue at after eventually re-enabling MMU
61 _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
63 std r0, PPC_LR_STKOFF(r1)
66 std r10, HSTATE_HOST_MSR(r13)
67 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
72 mtmsrd r0,1 /* clear RI in MSR */
78 ld r4, HSTATE_KVM_VCPU(r13)
81 /* Back from guest - restore host state and return to caller */
84 /* Restore host DABR and DABRX */
85 ld r5,HSTATE_DABR(r13)
89 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
92 ld r3,PACA_SPRG_VDSO(r13)
93 mtspr SPRN_SPRG_VDSO_WRITE,r3
95 /* Reload the host's PMU registers */
96 bl kvmhv_load_host_pmu
99 * Reload DEC. HDEC interrupts were disabled when
100 * we reloaded the host's LPCR value.
102 ld r3, HSTATE_DECEXP(r13)
107 /* hwthread_req may have got set by cede or no vcpu, so clear it */
109 stb r0, HSTATE_HWTHREAD_REQ(r13)
112 * For external interrupts we need to call the Linux
113 * handler to process the interrupt. We do that by jumping
114 * to absolute address 0x500 for external interrupts.
115 * The [h]rfid at the end of the handler will return to
116 * the book3s_hv_interrupts.S code. For other interrupts
117 * we do the rfid to get back to the book3s_hv_interrupts.S
120 ld r8, 112+PPC_LR_STKOFF(r1)
122 ld r7, HSTATE_HOST_MSR(r13)
124 /* Return the trap number on this thread as the return value */
127 /* RFI into the highmem handler */
131 mtmsrd r6, 1 /* Clear RI in MSR */
136 kvmppc_primary_no_guest:
137 /* We handle this much like a ceded vcpu */
138 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
139 /* HDEC may be larger than DEC for arch >= v3.00, but since the */
140 /* HDEC value came from DEC in the first place, it will fit */
144 * Make sure the primary has finished the MMU switch.
145 * We should never get here on a secondary thread, but
146 * check it for robustness' sake.
148 ld r5, HSTATE_KVM_VCORE(r13)
149 65: lbz r0, VCORE_IN_GUEST(r5)
156 /* set our bit in napping_threads */
157 ld r5, HSTATE_KVM_VCORE(r13)
158 lbz r7, HSTATE_PTID(r13)
161 addi r6, r5, VCORE_NAPPING_THREADS
166 /* order napping_threads update vs testing entry_exit_map */
169 lwz r7, VCORE_ENTRY_EXIT(r5)
171 bge kvm_novcpu_exit /* another thread already exiting */
172 li r3, NAPPING_NOVCPU
173 stb r3, HSTATE_NAPPING(r13)
175 li r3, 0 /* Don't wake on privileged (OS) doorbell */
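/*
 * Illustrative sketch (not the kernel's code; helper and field names are
 * assumptions) of the ordering above: we advertise ourselves in
 * napping_threads, then re-check entry_exit before committing to nap, so
 * an exiting thread either sees our bit or we see its exit in progress.
 *
 *	atomic_set_bit(ptid, &vc->napping_threads);	// lwarx/stwcx. loop
 *	smp_mb();					// order vs. entry_exit check
 *	if (vc->entry_exit_map >= 0x100)		// some thread already exiting
 *		goto kvm_novcpu_exit;
 *	local_paca->kvm_hstate.napping = NAPPING_NOVCPU;
 */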
180 * Entered from kvm_start_guest if kvm_hstate.napping is set
186 ld r1, HSTATE_HOST_R1(r13)
187 ld r5, HSTATE_KVM_VCORE(r13)
189 stb r0, HSTATE_NAPPING(r13)
191 /* check the wake reason */
192 bl kvmppc_check_wake_reason
195 * Restore volatile registers since we could have called
196 * a C routine in kvmppc_check_wake_reason.
199 ld r5, HSTATE_KVM_VCORE(r13)
201 /* see if any other thread is already exiting */
202 lwz r0, VCORE_ENTRY_EXIT(r5)
206 /* clear our bit in napping_threads */
207 lbz r7, HSTATE_PTID(r13)
210 addi r6, r5, VCORE_NAPPING_THREADS
216 /* See if the wake reason means we need to exit */
220 /* See if our timeslice has expired (HDEC is negative) */
223 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
227 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
228 ld r4, HSTATE_KVM_VCPU(r13)
230 beq kvmppc_primary_no_guest
232 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
233 addi r3, r4, VCPU_TB_RMENTRY
234 bl kvmhv_start_timing
239 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
240 ld r4, HSTATE_KVM_VCPU(r13)
243 addi r3, r4, VCPU_TB_RMEXIT
244 bl kvmhv_accumulate_time
247 stw r12, STACK_SLOT_TRAP(r1)
248 bl kvmhv_commence_exit
250 b kvmhv_switch_to_host
253 * We come in here when woken from the Linux offline idle code.
255 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
257 _GLOBAL(idle_kvm_start_guest)
258 ld r4,PACAEMERGSP(r13)
264 subi r1,r4,STACK_FRAME_OVERHEAD
268 * Could avoid this and pass it through in r3. For now,
269 * code expects it to be in SRR1.
274 stb r0,PACA_FTRACE_ENABLED(r13)
276 li r0,KVM_HWTHREAD_IN_KVM
277 stb r0,HSTATE_HWTHREAD_STATE(r13)
279 /* kvm cede / napping does not come through here */
280 lbz r0,HSTATE_NAPPING(r13)
287 stb r0, HSTATE_NAPPING(r13)
292 * We weren't napping due to cede, so this must be a secondary
293 * thread being woken up to run a guest, or being woken up due
294 * to a stray IPI. (Or due to some machine check or hypervisor
295 * maintenance interrupt while the core is in KVM.)
298 /* Check the wake reason in SRR1 to see why we got here */
299 bl kvmppc_check_wake_reason
301 * kvmppc_check_wake_reason could invoke a C routine, but we
302 * have no volatile registers to restore when we return.
308 /* get vcore pointer, NULL if we have nothing to run */
309 ld r5,HSTATE_KVM_VCORE(r13)
311 /* if we have no vcore to run, go back to sleep */
314 kvm_secondary_got_guest:
316 /* Set HSTATE_DSCR(r13) to something sensible */
317 ld r6, PACA_DSCR_DEFAULT(r13)
318 std r6, HSTATE_DSCR(r13)
320 /* On thread 0 of a subcore, set HDEC to max */
321 lbz r4, HSTATE_PTID(r13)
324 lis r6,0x7fff /* MAX_INT@h */
326 /* and set per-LPAR registers, if doing dynamic micro-threading */
327 ld r6, HSTATE_SPLIT_MODE(r13)
330 ld r0, KVM_SPLIT_RPR(r6)
332 ld r0, KVM_SPLIT_PMMAR(r6)
334 ld r0, KVM_SPLIT_LDBAR(r6)
338 /* Order load of vcpu after load of vcore */
340 ld r4, HSTATE_KVM_VCPU(r13)
343 /* Back from the guest, go back to nap */
344 /* Clear our vcpu and vcore pointers so we don't come back in early */
346 std r0, HSTATE_KVM_VCPU(r13)
348 * Once we clear HSTATE_KVM_VCORE(r13), the code in
349 * kvmppc_run_core() is going to assume that all our vcpu
350 * state is visible in memory. This lwsync makes sure
354 std r0, HSTATE_KVM_VCORE(r13)
357 * All secondaries exiting guest will fall through this path.
358 * Before proceeding, just check for HMI interrupt and
359 * invoke opal hmi handler. By now we are sure that the
360 * primary thread on this core/subcore has already made partition
361 * switch/TB resync and we are good to call opal hmi handler.
363 cmpwi r12, BOOK3S_INTERRUPT_HMI
366 li r3,0 /* NULL argument */
367 bl hmi_exception_realmode
369 * At this point we have finished executing in the guest.
370 * We need to wait for hwthread_req to become zero, since
371 * we may not turn on the MMU while hwthread_req is non-zero.
372 * While waiting we also need to check if we get given a vcpu to run.
375 lbz r3, HSTATE_HWTHREAD_REQ(r13)
379 li r0, KVM_HWTHREAD_IN_KERNEL
380 stb r0, HSTATE_HWTHREAD_STATE(r13)
381 /* need to recheck hwthread_req after a barrier, to avoid race */
383 lbz r3, HSTATE_HWTHREAD_REQ(r13)
388 * Jump to idle_return_gpr_loss, which returns to the
389 * idle_kvm_start_guest caller.
393 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
395 /* set up r3 for return */
398 addi r1, r1, STACK_FRAME_OVERHEAD
408 ld r5, HSTATE_KVM_VCORE(r13)
411 ld r3, HSTATE_SPLIT_MODE(r13)
414 lbz r0, KVM_SPLIT_DO_NAP(r3)
420 b kvm_secondary_got_guest
422 54: li r0, KVM_HWTHREAD_IN_KVM
423 stb r0, HSTATE_HWTHREAD_STATE(r13)
427 * Here the primary thread is trying to return the core to
428 * whole-core mode, so we need to nap.
432 * When secondaries are napping in kvm_unsplit_nap() with
433 * hwthread_req = 1, the HMI is ignored even though the subcores have
434 * already exited the guest. Hence the HMI keeps waking the secondaries
435 * up from nap in a loop, and they always go back to nap since no
436 * vcore is assigned to them. This makes it impossible for the primary
437 * thread to get hold of the secondary threads, resulting in a soft
438 * lockup in the KVM path.
440 * Let us check if HMI is pending and handle it before we go to nap.
442 cmpwi r12, BOOK3S_INTERRUPT_HMI
444 li r3, 0 /* NULL argument */
445 bl hmi_exception_realmode
448 * Ensure that secondary doesn't nap when it has
449 * its vcore pointer set.
451 sync /* matches smp_mb() before setting split_info.do_nap */
452 ld r0, HSTATE_KVM_VCORE(r13)
455 /* clear any pending message */
457 lis r6, (PPC_DBELL_SERVER << (63-36))@h
459 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
460 /* Set kvm_split_mode.napped[tid] = 1 */
461 ld r3, HSTATE_SPLIT_MODE(r13)
463 lhz r4, PACAPACAINDEX(r13)
464 clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */
465 addi r4, r4, KVM_SPLIT_NAPPED
467 /* Check the do_nap flag again after setting napped[] */
469 lbz r0, KVM_SPLIT_DO_NAP(r3)
472 li r3, NAPPING_UNSPLIT
473 stb r3, HSTATE_NAPPING(r13)
474 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
476 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
483 /******************************************************************************
487 *****************************************************************************/
489 .global kvmppc_hv_entry
494 * R4 = vcpu pointer (or NULL)
499 * all other volatile GPRS = free
500 * Does not preserve non-volatile GPRs or CR fields
503 std r0, PPC_LR_STKOFF(r1)
506 /* Save R1 in the PACA */
507 std r1, HSTATE_HOST_R1(r13)
509 li r6, KVM_GUEST_MODE_HOST_HV
510 stb r6, HSTATE_IN_GUEST(r13)
512 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
513 /* Store initial timestamp */
516 addi r3, r4, VCPU_TB_RMENTRY
517 bl kvmhv_start_timing
521 ld r5, HSTATE_KVM_VCORE(r13)
522 ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
525 * POWER7/POWER8 host -> guest partition switch code.
526 * We don't have to lock against concurrent tlbies,
527 * but we do have to coordinate across hardware threads.
529 /* Set bit in entry map iff exit map is zero. */
531 lbz r6, HSTATE_PTID(r13)
533 addi r8, r5, VCORE_ENTRY_EXIT
535 cmpwi r3, 0x100 /* any threads starting to exit? */
536 bge secondary_too_late /* if so we're too late to the party */
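/*
 * Rough C equivalent of the entry/exit map update above (field names are
 * assumptions): the low byte of entry_exit_map is a bitmap of threads that
 * have entered, the next byte is the exit map, and a thread may only join
 * while no thread has started exiting.
 *
 *	do {
 *		old = vc->entry_exit_map;
 *		if (old >= 0x100)		// exit map non-zero: too late
 *			goto secondary_too_late;
 *		new = old | (1u << ptid);	// set our bit in the entry map
 *	} while (!try_cmpxchg(&vc->entry_exit_map, old, new));
 */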
541 /* Primary thread switches to guest partition. */
547 li r0,LPID_RSVD /* switch to reserved LPID */
550 mtspr SPRN_SDR1,r6 /* switch to partition page table */
554 /* See if we need to flush the TLB. */
555 mr r3, r9 /* kvm pointer */
556 lhz r4, PACAPACAINDEX(r13) /* physical cpu number */
557 li r5, 0 /* nested vcpu pointer */
558 bl kvmppc_check_need_tlb_flush
560 ld r5, HSTATE_KVM_VCORE(r13)
562 /* Add timebase offset onto timebase */
563 22: ld r8,VCORE_TB_OFFSET(r5)
566 std r8, VCORE_TB_OFFSET_APPL(r5)
567 mftb r6 /* current host timebase */
569 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
570 mftb r7 /* check if lower 24 bits overflowed */
575 addis r8,r8,0x100 /* if so, increment upper 40 bits */
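/*
 * What the TBU40 dance above does, as a hedged C-style sketch: SPRN_TBU40
 * writes only the upper 40 bits of the timebase, so after applying the
 * guest's timebase offset we check whether the (untouched) low 24 bits
 * wrapped underneath us and, if so, re-add the lost carry.
 *
 *	tb  = mftb();				// current host timebase
 *	new = tb + vc->tb_offset;
 *	mtspr(SPRN_TBU40, new);			// sets bits 0..39 only
 *	if ((mftb() & 0xffffff) < (tb & 0xffffff))
 *		mtspr(SPRN_TBU40, new + (1UL << 24));	// low 24 bits wrapped
 */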
578 /* Load guest PCR value to select appropriate compat mode */
579 37: ld r7, VCORE_PCR(r5)
580 LOAD_REG_IMMEDIATE(r6, PCR_MASK)
588 /* DPDES and VTB are shared between threads */
589 ld r8, VCORE_DPDES(r5)
593 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
595 /* Mark the subcore state as inside guest */
596 bl kvmppc_subcore_enter_guest
598 ld r5, HSTATE_KVM_VCORE(r13)
599 ld r4, HSTATE_KVM_VCPU(r13)
601 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
603 /* Do we have a guest vcpu to run? */
605 beq kvmppc_primary_no_guest
607 /* Increment yield count if they have a VPA */
611 li r6, LPPACA_YIELDCOUNT
616 stb r6, VCPU_VPA_DIRTY(r4)
619 /* Save purr/spurr */
622 std r5,HSTATE_PURR(r13)
623 std r6,HSTATE_SPURR(r13)
629 /* Save host values of some registers */
633 mfspr r7, SPRN_DAWRX0
635 std r5, STACK_SLOT_CIABR(r1)
636 std r6, STACK_SLOT_DAWR0(r1)
637 std r7, STACK_SLOT_DAWRX0(r1)
638 std r8, STACK_SLOT_IAMR(r1)
640 std r5, STACK_SLOT_FSCR(r1)
641 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
644 std r5, STACK_SLOT_AMR(r1)
646 std r6, STACK_SLOT_UAMOR(r1)
649 /* Set partition DABR */
650 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
651 lwz r5,VCPU_DABRX(r4)
656 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
658 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
661 END_FTR_SECTION_IFCLR(CPU_FTR_TM)
663 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
667 li r5, 0 /* don't preserve non-vol regs */
668 bl kvmppc_restore_tm_hv
670 ld r4, HSTATE_KVM_VCPU(r13)
674 /* Load guest PMU registers; r4 = vcpu pointer here */
676 bl kvmhv_load_guest_pmu
678 /* Load up FP, VMX and VSX registers */
679 ld r4, HSTATE_KVM_VCPU(r13)
682 ld r14, VCPU_GPR(R14)(r4)
683 ld r15, VCPU_GPR(R15)(r4)
684 ld r16, VCPU_GPR(R16)(r4)
685 ld r17, VCPU_GPR(R17)(r4)
686 ld r18, VCPU_GPR(R18)(r4)
687 ld r19, VCPU_GPR(R19)(r4)
688 ld r20, VCPU_GPR(R20)(r4)
689 ld r21, VCPU_GPR(R21)(r4)
690 ld r22, VCPU_GPR(R22)(r4)
691 ld r23, VCPU_GPR(R23)(r4)
692 ld r24, VCPU_GPR(R24)(r4)
693 ld r25, VCPU_GPR(R25)(r4)
694 ld r26, VCPU_GPR(R26)(r4)
695 ld r27, VCPU_GPR(R27)(r4)
696 ld r28, VCPU_GPR(R28)(r4)
697 ld r29, VCPU_GPR(R29)(r4)
698 ld r30, VCPU_GPR(R30)(r4)
699 ld r31, VCPU_GPR(R31)(r4)
701 /* Switch DSCR to guest value */
706 /* Skip next section on POWER7 */
708 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
709 /* Load up POWER8-specific registers */
711 lwz r6, VCPU_PSPB(r4)
717 * Handle broken DAWR case by not writing it. This means we
718 * can still store the DAWR register for migration.
720 LOAD_REG_ADDR(r5, dawr_force_enable)
724 ld r5, VCPU_DAWR0(r4)
725 ld r6, VCPU_DAWRX0(r4)
727 mtspr SPRN_DAWRX0, r6
729 ld r7, VCPU_CIABR(r4)
734 ld r8, VCPU_EBBHR(r4)
737 ld r5, VCPU_EBBRR(r4)
738 ld r6, VCPU_BESCR(r4)
739 lwz r7, VCPU_GUEST_PID(r4)
745 /* POWER8-only registers */
746 ld r5, VCPU_TCSCR(r4)
748 ld r7, VCPU_CSIGR(r4)
757 ld r5, VCPU_SPRG0(r4)
758 ld r6, VCPU_SPRG1(r4)
759 ld r7, VCPU_SPRG2(r4)
760 ld r8, VCPU_SPRG3(r4)
766 /* Load up DAR and DSISR */
768 lwz r6, VCPU_DSISR(r4)
772 /* Restore AMR and UAMOR, set AMOR to all 1s */
780 /* Restore state of CTRL run bit; assume 1 on entry */
788 /* Secondary threads wait for primary to have done partition switch */
789 ld r5, HSTATE_KVM_VCORE(r13)
790 lbz r6, HSTATE_PTID(r13)
793 lbz r0, VCORE_IN_GUEST(r5)
797 20: lwz r3, VCORE_ENTRY_EXIT(r5)
800 lbz r0, VCORE_IN_GUEST(r5)
811 * Set the decrementer to the guest decrementer.
813 ld r8,VCPU_DEC_EXPIRES(r4)
814 /* r8 is a host timebase value here, convert to guest TB */
815 ld r5,HSTATE_KVM_VCORE(r13)
816 ld r6,VCORE_TB_OFFSET_APPL(r5)
822 /* Check if HDEC expires soon */
825 cmpdi r3, 512 /* 1 microsecond */
828 /* Clear out and reload the SLB */
834 /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
835 lwz r5,VCPU_SLB_MAX(r4)
840 1: ld r8,VCPU_SLB_E(r6)
843 addi r6,r6,VCPU_SLB_SIZE
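/*
 * The guest SLB reload loop above, sketched in C (field names approximate
 * the vcpu struct; slb_max is 0 for radix guests, so nothing is loaded):
 *
 *	for (i = 0; i < vcpu->arch.slb_max; i++)
 *		asm volatile("slbmte %0,%1" : :
 *			     "r" (vcpu->arch.slb[i].origv),	// VSID half
 *			     "r" (vcpu->arch.slb[i].orige));	// ESID half
 */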
847 deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */
848 /* Check if we can deliver an external or decrementer interrupt now */
849 ld r0, VCPU_PENDING_EXC(r4)
853 bl kvmppc_guest_entry_inject_int
854 ld r4, HSTATE_KVM_VCPU(r13)
863 /* r11 = vcpu->arch.msr & ~MSR_HV */
864 rldicl r11, r11, 63 - MSR_HV_LG, 1
865 rotldi r11, r11, 1 + MSR_HV_LG
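/*
 * The two rotates above are just "clear MSR_HV" done without loading a
 * 64-bit mask constant: the first rldicl rotates the HV bit into the top
 * position and clears it via the mask, the second rotate puts everything
 * back. Hedged C equivalent:
 *
 *	guest_msr &= ~MSR_HV;		// the guest never runs with HV = 1
 */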
876 * R10: value for HSRR0
877 * R11: value for HSRR1
882 stb r0,VCPU_CEDED(r4) /* cancel cede */
886 /* Activate guest mode, so faults get handled by KVM */
887 li r9, KVM_GUEST_MODE_GUEST_HV
888 stb r9, HSTATE_IN_GUEST(r13)
890 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
891 /* Accumulate timing */
892 addi r3, r4, VCPU_TB_GUEST
893 bl kvmhv_accumulate_time
901 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
904 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
909 ld r1, VCPU_GPR(R1)(r4)
910 ld r5, VCPU_GPR(R5)(r4)
911 ld r8, VCPU_GPR(R8)(r4)
912 ld r9, VCPU_GPR(R9)(r4)
913 ld r10, VCPU_GPR(R10)(r4)
914 ld r11, VCPU_GPR(R11)(r4)
915 ld r12, VCPU_GPR(R12)(r4)
916 ld r13, VCPU_GPR(R13)(r4)
920 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
922 ld r6, VCPU_GPR(R6)(r4)
923 ld r7, VCPU_GPR(R7)(r4)
928 ld r0, VCPU_GPR(R0)(r4)
929 ld r2, VCPU_GPR(R2)(r4)
930 ld r3, VCPU_GPR(R3)(r4)
931 ld r4, VCPU_GPR(R4)(r4)
937 stw r12, STACK_SLOT_TRAP(r1)
940 stw r12, VCPU_TRAP(r4)
941 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
942 addi r3, r4, VCPU_TB_RMEXIT
943 bl kvmhv_accumulate_time
945 11: b kvmhv_switch_to_host
952 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
953 12: stw r12, VCPU_TRAP(r4)
955 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
956 addi r3, r4, VCPU_TB_RMEXIT
957 bl kvmhv_accumulate_time
961 /******************************************************************************
965 *****************************************************************************/
968 * We come here from the first-level interrupt handlers.
970 .globl kvmppc_interrupt_hv
974 * R9 = HSTATE_IN_GUEST
975 * R12 = (guest CR << 32) | interrupt vector
977 * guest R12 saved in shadow VCPU SCRATCH0
978 * guest R13 saved in SPRN_SCRATCH0
979 * guest R9 saved in HSTATE_SCRATCH2
981 /* We're now back in the host but in guest MMU context */
982 cmpwi r9,KVM_GUEST_MODE_HOST_HV
983 beq kvmppc_bad_host_intr
984 li r9, KVM_GUEST_MODE_HOST_HV
985 stb r9, HSTATE_IN_GUEST(r13)
987 ld r9, HSTATE_KVM_VCPU(r13)
991 std r0, VCPU_GPR(R0)(r9)
992 std r1, VCPU_GPR(R1)(r9)
993 std r2, VCPU_GPR(R2)(r9)
994 std r3, VCPU_GPR(R3)(r9)
995 std r4, VCPU_GPR(R4)(r9)
996 std r5, VCPU_GPR(R5)(r9)
997 std r6, VCPU_GPR(R6)(r9)
998 std r7, VCPU_GPR(R7)(r9)
999 std r8, VCPU_GPR(R8)(r9)
1000 ld r0, HSTATE_SCRATCH2(r13)
1001 std r0, VCPU_GPR(R9)(r9)
1002 std r10, VCPU_GPR(R10)(r9)
1003 std r11, VCPU_GPR(R11)(r9)
1004 ld r3, HSTATE_SCRATCH0(r13)
1005 std r3, VCPU_GPR(R12)(r9)
1006 /* CR is in the high half of r12 */
1010 ld r3, HSTATE_CFAR(r13)
1011 std r3, VCPU_CFAR(r9)
1012 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1014 ld r4, HSTATE_PPR(r13)
1015 std r4, VCPU_PPR(r9)
1016 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1018 /* Restore R1/R2 so we can handle faults */
1019 ld r1, HSTATE_HOST_R1(r13)
1022 mfspr r10, SPRN_SRR0
1023 mfspr r11, SPRN_SRR1
1024 std r10, VCPU_SRR0(r9)
1025 std r11, VCPU_SRR1(r9)
1026 /* trap is in the low half of r12, clear CR from the high half */
1028 andi. r0, r12, 2 /* need to read HSRR0/1? */
1030 mfspr r10, SPRN_HSRR0
1031 mfspr r11, SPRN_HSRR1
1033 1: std r10, VCPU_PC(r9)
1034 std r11, VCPU_MSR(r9)
1038 std r3, VCPU_GPR(R13)(r9)
1041 stw r12,VCPU_TRAP(r9)
1044 * Now that we have saved away SRR0/1 and HSRR0/1,
1045 * interrupts are recoverable in principle, so set MSR_RI.
1046 * This becomes important for relocation-on interrupts from
1047 * the guest, which we can get in radix mode on POWER9.
1052 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1053 addi r3, r9, VCPU_TB_RMINTR
1055 bl kvmhv_accumulate_time
1056 ld r5, VCPU_GPR(R5)(r9)
1057 ld r6, VCPU_GPR(R6)(r9)
1058 ld r7, VCPU_GPR(R7)(r9)
1059 ld r8, VCPU_GPR(R8)(r9)
1062 /* Save HEIR (HV emulation assist reg) in emul_inst
1063 if this is an HEI (HV emulation interrupt, e40) */
1064 li r3,KVM_INST_FETCH_FAILED
1065 stw r3,VCPU_LAST_INST(r9)
1066 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1069 11: stw r3,VCPU_HEIR(r9)
1071 /* these are volatile across C function calls */
1074 std r3, VCPU_CTR(r9)
1075 std r4, VCPU_XER(r9)
1077 /* Save more register state */
1080 std r3, VCPU_DAR(r9)
1081 stw r4, VCPU_DSISR(r9)
1083 /* If this is a page table miss then see if it's theirs or ours */
1084 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1086 std r3, VCPU_FAULT_DAR(r9)
1087 stw r4, VCPU_FAULT_DSISR(r9)
1088 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1091 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1092 /* For softpatch interrupt, go off and do TM instruction emulation */
1093 cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
1097 /* See if this is a leftover HDEC interrupt */
1098 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1104 bge fast_guest_return
1106 /* See if this is an hcall we can handle in real mode */
1107 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1108 beq hcall_try_real_mode
1110 /* Hypervisor doorbell - exit only if host IPI flag set */
1111 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1113 lbz r0, HSTATE_HOST_IPI(r13)
1115 beq maybe_reenter_guest
1118 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
1119 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
1121 mfspr r3, SPRN_HFSCR
1122 std r3, VCPU_HFSCR(r9)
1125 /* External interrupt ? */
1126 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1127 beq kvmppc_guest_external
1128 /* See if it is a machine check */
1129 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1130 beq machine_check_realmode
1131 /* Or a hypervisor maintenance interrupt */
1132 cmpwi r12, BOOK3S_INTERRUPT_HMI
1135 guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1137 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1138 addi r3, r9, VCPU_TB_RMEXIT
1140 bl kvmhv_accumulate_time
1144 * Possibly flush the link stack here, before we do a blr in
1145 * kvmhv_switch_to_host.
1148 patch_site 1b patch__call_kvm_flush_link_stack
1150 /* For hash guest, read the guest SLB and save it away */
1152 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1157 andis. r0,r8,SLB_ESID_V@h
1159 add r8,r8,r6 /* put index in */
1161 std r8,VCPU_SLB_E(r7)
1162 std r3,VCPU_SLB_V(r7)
1163 addi r7,r7,VCPU_SLB_SIZE
1167 /* Finally clear out the SLB */
1172 stw r5,VCPU_SLB_MAX(r9)
1174 /* load host SLB entries */
1175 ld r8,PACA_SLBSHADOWPTR(r13)
1177 .rept SLB_NUM_BOLTED
1178 li r3, SLBSHADOW_SAVEAREA
1182 andis. r7,r5,SLB_ESID_V@h
1189 stw r12, STACK_SLOT_TRAP(r1)
1192 /* Do this before kvmhv_commence_exit so we know TB is guest TB */
1193 ld r3, HSTATE_KVM_VCORE(r13)
1198 /* r5 is a guest timebase value here, convert to host TB */
1199 ld r4,VCORE_TB_OFFSET_APPL(r3)
1201 std r5,VCPU_DEC_EXPIRES(r9)
1203 /* Increment exit count, poke other threads to exit */
1205 bl kvmhv_commence_exit
1207 ld r9, HSTATE_KVM_VCPU(r13)
1209 /* Stop others sending VCPU interrupts to this physical CPU */
1211 stw r0, VCPU_CPU(r9)
1212 stw r0, VCPU_THREAD_CPU(r9)
1214 /* Save guest CTRL register, set runlatch to 1 */
1216 stw r6,VCPU_CTRL(r9)
1223 * Save the guest PURR/SPURR
1228 ld r8,VCPU_SPURR(r9)
1229 std r5,VCPU_PURR(r9)
1230 std r6,VCPU_SPURR(r9)
1235 * Restore host PURR/SPURR and add guest times
1236 * so that the time in the guest gets accounted.
1238 ld r3,HSTATE_PURR(r13)
1239 ld r4,HSTATE_SPURR(r13)
1247 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1248 /* Save POWER8-specific registers */
1252 std r5, VCPU_IAMR(r9)
1253 stw r6, VCPU_PSPB(r9)
1254 std r7, VCPU_FSCR(r9)
1258 std r7, VCPU_TAR(r9)
1259 mfspr r8, SPRN_EBBHR
1260 std r8, VCPU_EBBHR(r9)
1261 mfspr r5, SPRN_EBBRR
1262 mfspr r6, SPRN_BESCR
1265 std r5, VCPU_EBBRR(r9)
1266 std r6, VCPU_BESCR(r9)
1267 stw r7, VCPU_GUEST_PID(r9)
1268 std r8, VCPU_WORT(r9)
1269 mfspr r5, SPRN_TCSCR
1271 mfspr r7, SPRN_CSIGR
1273 std r5, VCPU_TCSCR(r9)
1274 std r6, VCPU_ACOP(r9)
1275 std r7, VCPU_CSIGR(r9)
1276 std r8, VCPU_TACR(r9)
1278 ld r5, STACK_SLOT_FSCR(r1)
1280 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1282 * Restore various registers to 0, where non-zero values
1283 * set by the guest could disrupt the host.
1288 mtspr SPRN_TCSCR, r0
1289 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1292 mtspr SPRN_MMCRS, r0
1294 /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
1295 ld r8, STACK_SLOT_IAMR(r1)
1298 8: /* Power7 jumps back in here */
1302 std r6,VCPU_UAMOR(r9)
1303 ld r5,STACK_SLOT_AMR(r1)
1304 ld r6,STACK_SLOT_UAMOR(r1)
1306 mtspr SPRN_UAMOR, r6
1308 /* Switch DSCR back to host value */
1310 ld r7, HSTATE_DSCR(r13)
1311 std r8, VCPU_DSCR(r9)
1314 /* Save non-volatile GPRs */
1315 std r14, VCPU_GPR(R14)(r9)
1316 std r15, VCPU_GPR(R15)(r9)
1317 std r16, VCPU_GPR(R16)(r9)
1318 std r17, VCPU_GPR(R17)(r9)
1319 std r18, VCPU_GPR(R18)(r9)
1320 std r19, VCPU_GPR(R19)(r9)
1321 std r20, VCPU_GPR(R20)(r9)
1322 std r21, VCPU_GPR(R21)(r9)
1323 std r22, VCPU_GPR(R22)(r9)
1324 std r23, VCPU_GPR(R23)(r9)
1325 std r24, VCPU_GPR(R24)(r9)
1326 std r25, VCPU_GPR(R25)(r9)
1327 std r26, VCPU_GPR(R26)(r9)
1328 std r27, VCPU_GPR(R27)(r9)
1329 std r28, VCPU_GPR(R28)(r9)
1330 std r29, VCPU_GPR(R29)(r9)
1331 std r30, VCPU_GPR(R30)(r9)
1332 std r31, VCPU_GPR(R31)(r9)
1335 mfspr r3, SPRN_SPRG0
1336 mfspr r4, SPRN_SPRG1
1337 mfspr r5, SPRN_SPRG2
1338 mfspr r6, SPRN_SPRG3
1339 std r3, VCPU_SPRG0(r9)
1340 std r4, VCPU_SPRG1(r9)
1341 std r5, VCPU_SPRG2(r9)
1342 std r6, VCPU_SPRG3(r9)
1348 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1351 END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1353 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
1357 li r5, 0 /* don't preserve non-vol regs */
1358 bl kvmppc_save_tm_hv
1360 ld r9, HSTATE_KVM_VCPU(r13)
1364 /* Increment yield count if they have a VPA */
1365 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1368 li r4, LPPACA_YIELDCOUNT
1373 stb r3, VCPU_VPA_DIRTY(r9)
1375 /* Save PMU registers if requested */
1376 /* r8 and cr0.eq are live here */
1379 beq 21f /* if no VPA, save PMU stuff anyway */
1380 lbz r4, LPPACA_PMCINUSE(r8)
1381 21: bl kvmhv_save_guest_pmu
1382 ld r9, HSTATE_KVM_VCPU(r13)
1384 /* Restore host values of some registers */
1386 ld r5, STACK_SLOT_CIABR(r1)
1387 ld r6, STACK_SLOT_DAWR0(r1)
1388 ld r7, STACK_SLOT_DAWRX0(r1)
1389 mtspr SPRN_CIABR, r5
1391 * If the DAWR doesn't work, it's ok to write these here as
1392 * these values should always be zero
1394 mtspr SPRN_DAWR0, r6
1395 mtspr SPRN_DAWRX0, r7
1396 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1399 * POWER7/POWER8 guest -> host partition switch code.
1400 * We don't have to lock against tlbies but we do
1401 * have to coordinate the hardware threads.
1402 * Here STACK_SLOT_TRAP(r1) contains the trap number.
1404 kvmhv_switch_to_host:
1405 /* Secondary threads wait for primary to do partition switch */
1406 ld r5,HSTATE_KVM_VCORE(r13)
1407 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1408 lbz r3,HSTATE_PTID(r13)
1412 13: lbz r3,VCORE_IN_GUEST(r5)
1418 /* Primary thread waits for all the secondaries to exit guest */
1419 15: lwz r3,VCORE_ENTRY_EXIT(r5)
1420 rlwinm r0,r3,32-8,0xff
1426 /* Did we actually switch to the guest at all? */
1427 lbz r6, VCORE_IN_GUEST(r5)
1431 /* Primary thread switches back to host partition */
1432 lwz r7,KVM_HOST_LPID(r4)
1433 ld r6,KVM_HOST_SDR1(r4)
1434 li r8,LPID_RSVD /* switch to reserved LPID */
1437 mtspr SPRN_SDR1,r6 /* switch to host page table */
1442 /* DPDES and VTB are shared between threads */
1443 mfspr r7, SPRN_DPDES
1445 std r7, VCORE_DPDES(r5)
1446 std r8, VCORE_VTB(r5)
1447 /* clear DPDES so we don't get guest doorbells in the host */
1449 mtspr SPRN_DPDES, r8
1450 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1452 /* Subtract timebase offset from timebase */
1453 ld r8, VCORE_TB_OFFSET_APPL(r5)
1457 std r0, VCORE_TB_OFFSET_APPL(r5)
1458 mftb r6 /* current guest timebase */
1460 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1461 mftb r7 /* check if lower 24 bits overflowed */
1466 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1471 * If this is an HMI, we called kvmppc_realmode_hmi_handler
1472 * above, which may or may not have already called
1473 * kvmppc_subcore_exit_guest. Fortunately, all that
1474 * kvmppc_subcore_exit_guest does is clear a flag, so calling
1475 * it again here is benign even if kvmppc_realmode_hmi_handler
1476 * has already called it.
1478 bl kvmppc_subcore_exit_guest
1480 30: ld r5,HSTATE_KVM_VCORE(r13)
1481 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1484 ld r0, VCORE_PCR(r5)
1485 LOAD_REG_IMMEDIATE(r6, PCR_MASK)
1490 /* Signal secondary CPUs to continue */
1492 stb r0,VCORE_IN_GUEST(r5)
1493 19: lis r8,0x7fff /* MAX_INT@h */
1496 16: ld r8,KVM_HOST_LPCR(r4)
1500 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1501 /* Finish timing, if we have a vcpu */
1502 ld r4, HSTATE_KVM_VCPU(r13)
1506 bl kvmhv_accumulate_time
1509 /* Unset guest mode */
1510 li r0, KVM_GUEST_MODE_NONE
1511 stb r0, HSTATE_IN_GUEST(r13)
1513 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
1514 ld r0, SFS+PPC_LR_STKOFF(r1)
1520 .global kvm_flush_link_stack
1521 kvm_flush_link_stack:
1522 /* Save LR into r0 */
1525 /* Flush the link stack. On Power8 it's up to 32 entries in size. */
1530 /* And on Power9 it's up to 64. */
1535 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1541 kvmppc_guest_external:
1542 /* External interrupt, first check for host_ipi. If this is
1543 * set, we know the host wants us out so let's do it now
1548 * Restore the active volatile registers after returning from
1551 ld r9, HSTATE_KVM_VCPU(r13)
1552 li r12, BOOK3S_INTERRUPT_EXTERNAL
1555 * kvmppc_read_intr return codes:
1557 * Exit to host (r3 > 0)
1558 * 1 An interrupt is pending that needs to be handled by the host
1559 * Exit guest and return to host by branching to guest_exit_cont
1561 * 2 Passthrough that needs completion in the host
1562 * Exit guest and return to host by branching to guest_exit_cont
1563 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1564 * to indicate to the host to complete handling the interrupt
1566 * Before returning to the guest, we check if any CPU is heading out
1567 * to the host and if so, we head out also. If no CPUs are heading
1568 * out, we fall through to the "return to guest" cases (r3 <= 0) below.
1570 * Return to guest (r3 <= 0)
1571 * 0 No external interrupt is pending
1572 * -1 A guest wakeup IPI (which has now been cleared)
1573 * In either case, we return to guest to deliver any pending
1576 * -2 A PCI passthrough external interrupt was handled
1577 * (interrupt was delivered directly to guest)
1578 * Return to guest to deliver any pending guest interrupts.
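/*
 * The dispatch on those return codes, as a hedged C-style sketch (labels
 * mirror the branches below):
 *
 *	ret = kvmppc_read_intr();
 *	if (ret == 2) {				// passthrough, host completes it
 *		vcpu->arch.trap = BOOK3S_INTERRUPT_HV_RM_HARD;
 *		goto guest_exit_cont;
 *	}
 *	if (ret == 1)				// host interrupt pending
 *		goto guest_exit_cont;
 *	goto maybe_reenter_guest;		// 0 / -1 / -2: back towards the guest
 */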
1584 /* Return code = 2 */
1585 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1586 stw r12, VCPU_TRAP(r9)
1589 1: /* Return code <= 1 */
1593 /* Return code <= 0 */
1594 maybe_reenter_guest:
1595 ld r5, HSTATE_KVM_VCORE(r13)
1596 lwz r0, VCORE_ENTRY_EXIT(r5)
1599 blt deliver_guest_interrupt
1602 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1604 * Softpatch interrupt for transactional memory emulation cases
1605 * on POWER9 DD2.2. This is early in the guest exit path - we
1606 * haven't saved registers or done a treclaim yet.
1609 /* Save instruction image in HEIR */
1611 stw r3, VCPU_HEIR(r9)
1614 * The cases we want to handle here are those where the guest
1615 * is in real suspend mode and is trying to transition to
1616 * transactional mode.
1618 lbz r0, HSTATE_FAKE_SUSPEND(r13)
1619 cmpwi r0, 0 /* keep exiting guest if in fake suspend */
1621 rldicl r3, r11, 64 - MSR_TS_S_LG, 62
1622 cmpwi r3, 1 /* or if not in suspend state */
1625 /* Call C code to do the emulation */
1627 bl kvmhv_p9_tm_emulation_early
1629 ld r9, HSTATE_KVM_VCPU(r13)
1630 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
1632 beq guest_exit_cont /* continue exiting if not handled */
1634 ld r11, VCPU_MSR(r9)
1635 b fast_interrupt_c_return /* go back to guest if handled */
1636 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1639 * Check whether an HDSI is an HPTE not found fault or something else.
1640 * If it is an HPTE not found fault that is due to the guest accessing
1641 * a page that they have mapped but which we have paged out, then
1642 * we continue on with the guest exit path. In all other cases,
1643 * reflect the HDSI to the guest as a DSI.
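/*
 * Hedged C-style sketch of the decision below (helper names are
 * illustrative, not the kernel's):
 *
 *	if (!(hdsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)))
 *		reflect_to_guest_as_dsi();		// not an HPT miss at all
 *	slb_v = (guest_msr & MSR_DR) ? slb_lookup(hdar)	// relocation on: real SLB
 *				     : kvm->arch.vrma_slb_v;	// relocation off: VRMA
 *	ret = kvmppc_hpte_hv_fault(...);		// search the hash table
 *	if (ret == 0)		retry_the_instruction();
 *	else if (ret == -1)	exit_to_host();		// handle in kernel mode
 *	else if (ret == -2)	fetch_insn_for_mmio_emulation();
 *	else			reflect_to_guest_as_dsi();	// ret is the DSISR value
 */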
1647 mfspr r6, SPRN_HDSISR
1648 /* HPTE not found fault or protection fault? */
1649 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
1650 beq 1f /* if not, send it to the guest */
1651 andi. r0, r11, MSR_DR /* data relocation enabled? */
1654 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
1655 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
1656 bne 7f /* if no SLB entry found */
1657 4: std r4, VCPU_FAULT_DAR(r9)
1658 stw r6, VCPU_FAULT_DSISR(r9)
1660 /* Search the hash table. */
1661 mr r3, r9 /* vcpu pointer */
1662 li r7, 1 /* data fault */
1663 bl kvmppc_hpte_hv_fault
1664 ld r9, HSTATE_KVM_VCPU(r13)
1666 ld r11, VCPU_MSR(r9)
1667 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1668 cmpdi r3, 0 /* retry the instruction */
1670 cmpdi r3, -1 /* handle in kernel mode */
1672 cmpdi r3, -2 /* MMIO emulation; need instr word */
1675 /* Synthesize a DSI (or DSegI) for the guest */
1676 ld r4, VCPU_FAULT_DAR(r9)
1678 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
1679 mtspr SPRN_DSISR, r6
1680 7: mtspr SPRN_DAR, r4
1681 mtspr SPRN_SRR0, r10
1682 mtspr SPRN_SRR1, r11
1684 bl kvmppc_msr_interrupt
1685 fast_interrupt_c_return:
1686 6: ld r7, VCPU_CTR(r9)
1693 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
1694 ld r5, KVM_VRMA_SLB_V(r5)
1697 /* If this is for emulated MMIO, load the instruction word */
1698 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
1700 /* Set guest mode to 'jump over instruction' so if lwz faults
1701 * we'll just continue at the next IP. */
1702 li r0, KVM_GUEST_MODE_SKIP
1703 stb r0, HSTATE_IN_GUEST(r13)
1705 /* Do the access with MSR:DR enabled */
1707 ori r4, r3, MSR_DR /* Enable paging for data */
1712 /* Store the result */
1713 stw r8, VCPU_LAST_INST(r9)
1715 /* Unset guest mode. */
1716 li r0, KVM_GUEST_MODE_HOST_HV
1717 stb r0, HSTATE_IN_GUEST(r13)
1721 * Similarly for an HISI, reflect it to the guest as an ISI unless
1722 * it is an HPTE not found fault for a page that we have paged out.
1725 andis. r0, r11, SRR1_ISI_NOPT@h
1727 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
1730 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
1731 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
1732 bne 7f /* if no SLB entry found */
1734 /* Search the hash table. */
1735 mr r3, r9 /* vcpu pointer */
1738 li r7, 0 /* instruction fault */
1739 bl kvmppc_hpte_hv_fault
1740 ld r9, HSTATE_KVM_VCPU(r13)
1742 ld r11, VCPU_MSR(r9)
1743 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1744 cmpdi r3, 0 /* retry the instruction */
1745 beq fast_interrupt_c_return
1746 cmpdi r3, -1 /* handle in kernel mode */
1749 /* Synthesize an ISI (or ISegI) for the guest */
1751 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE
1752 7: mtspr SPRN_SRR0, r10
1753 mtspr SPRN_SRR1, r11
1755 bl kvmppc_msr_interrupt
1756 b fast_interrupt_c_return
1758 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
1759 ld r5, KVM_VRMA_SLB_V(r6)
1763 * Try to handle an hcall in real mode.
1764 * Returns to the guest if we handle it, or continues on up to
1765 * the kernel if we can't (i.e. if we don't have a handler for
1766 * it, or if the handler returns H_TOO_HARD).
1768 * r5 - r8 contain hcall args,
1769 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
1771 hcall_try_real_mode:
1772 ld r3,VCPU_GPR(R3)(r9)
1774 /* sc 1 from userspace - reflect to guest syscall */
1775 bne sc_1_fast_return
1777 cmpldi r3,hcall_real_table_end - hcall_real_table
1779 /* See if this hcall is enabled for in-kernel handling */
1781 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
1782 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
1784 ld r0, KVM_ENABLED_HCALLS(r4)
1785 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
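/*
 * The enabled_hcalls test above, in C terms (hcall numbers are multiples
 * of 4, so the bitmap is indexed by hcall_nr / 4; field name assumed from
 * the asm-offsets used here):
 *
 *	idx = hcall_nr / 4;
 *	if (!((kvm->arch.enabled_hcalls[idx / 64] >> (idx % 64)) & 1))
 *		goto guest_exit_cont;		// not enabled for in-kernel handling
 */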
1789 /* Get pointer to handler, if any, and call it */
1790 LOAD_REG_ADDR(r4, hcall_real_table)
1796 mr r3,r9 /* get vcpu pointer */
1797 ld r4,VCPU_GPR(R4)(r9)
1800 beq hcall_real_fallback
1801 ld r4,HSTATE_KVM_VCPU(r13)
1802 std r3,VCPU_GPR(R3)(r4)
1810 li r10, BOOK3S_INTERRUPT_SYSCALL
1811 bl kvmppc_msr_interrupt
1815 /* We've attempted a real mode hcall, but it has been punted back
1816 * to userspace. We need to restore some clobbered volatiles
1817 * before resuming the pass-it-to-qemu path */
1818 hcall_real_fallback:
1819 li r12,BOOK3S_INTERRUPT_SYSCALL
1820 ld r9, HSTATE_KVM_VCPU(r13)
1824 .globl hcall_real_table
1826 .long 0 /* 0 - unused */
1827 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
1828 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
1829 .long DOTSYM(kvmppc_h_read) - hcall_real_table
1830 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
1831 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
1832 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
1833 #ifdef CONFIG_SPAPR_TCE_IOMMU
1834 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
1835 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
1840 .long 0 /* 0x24 - H_SET_SPRG0 */
1841 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
1842 .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
1856 #ifdef CONFIG_KVM_XICS
1857 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
1858 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
1859 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
1860 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
1861 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
1863 .long 0 /* 0x64 - H_EOI */
1864 .long 0 /* 0x68 - H_CPPR */
1865 .long 0 /* 0x6c - H_IPI */
1866 .long 0 /* 0x70 - H_IPOLL */
1867 .long 0 /* 0x74 - H_XIRR */
1895 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
1896 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
1912 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
1916 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
1917 #ifdef CONFIG_SPAPR_TCE_IOMMU
1918 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
1919 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
2035 #ifdef CONFIG_KVM_XICS
2036 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2038 .long 0 /* 0x2fc - H_XIRR_X*/
2040 .long DOTSYM(kvmppc_rm_h_random) - hcall_real_table
2041 .globl hcall_real_table_end
2042 hcall_real_table_end:
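/*
 * How the table above is consumed (hedged sketch; names illustrative):
 * each entry is a 32-bit offset from hcall_real_table, and 0 means "no
 * real-mode handler, let the virtual-mode / QEMU path deal with it".
 *
 *	s32 off = hcall_real_table[hcall_nr / 4];
 *	if (off == 0)
 *		goto guest_exit_cont;			// no real-mode handler
 *	handler = (void *)((char *)hcall_real_table + off);
 *	ret = handler(vcpu, vcpu->arch.regs.gpr[4]);
 *	if (ret == H_TOO_HARD)
 *		goto hcall_real_fallback;		// punt to virtual mode
 *	vcpu->arch.regs.gpr[3] = ret;			// result back to the guest
 */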
2044 _GLOBAL(kvmppc_h_set_xdabr)
2045 EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
2046 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2048 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2051 6: li r3, H_PARAMETER
2054 _GLOBAL(kvmppc_h_set_dabr)
2055 EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
2056 li r5, DABRX_USER | DABRX_KERNEL
2060 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2061 std r4,VCPU_DABR(r3)
2062 stw r5, VCPU_DABRX(r3)
2063 mtspr SPRN_DABRX, r5
2064 /* Work around P7 bug where DABR can get corrupted on mtspr */
2065 1: mtspr SPRN_DABR,r4
2074 LOAD_REG_ADDR(r11, dawr_force_enable)
2081 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2082 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
2083 rlwimi r5, r4, 2, DAWRX_WT
2085 std r4, VCPU_DAWR0(r3)
2086 std r5, VCPU_DAWRX0(r3)
2088 * If we came in through the real mode hcall handler then it is necessary
2089 * to write the registers since the return path won't. Otherwise it is
2090 * sufficient to store them in the vcpu struct, as they will be loaded
2091 * next time the vcpu is run.
2094 andi. r6, r6, MSR_DR /* in real mode? */
2096 mtspr SPRN_DAWR0, r4
2097 mtspr SPRN_DAWRX0, r5
2101 _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
2103 std r11,VCPU_MSR(r3)
2105 stb r0,VCPU_CEDED(r3)
2106 sync /* order setting ceded vs. testing prodded */
2107 lbz r5,VCPU_PRODDED(r3)
2109 bne kvm_cede_prodded
2110 li r12,0 /* set trap to 0 to say hcall is handled */
2111 stw r12,VCPU_TRAP(r3)
2113 std r0,VCPU_GPR(R3)(r3)
2116 * Set our bit in the bitmask of napping threads unless all the
2117 * other threads are already napping, in which case we send this
2120 ld r5,HSTATE_KVM_VCORE(r13)
2121 lbz r6,HSTATE_PTID(r13)
2122 lwz r8,VCORE_ENTRY_EXIT(r5)
2126 addi r6,r5,VCORE_NAPPING_THREADS
2133 /* order napping_threads update vs testing entry_exit_map */
2136 stb r0,HSTATE_NAPPING(r13)
2137 lwz r7,VCORE_ENTRY_EXIT(r5)
2139 bge 33f /* another thread already exiting */
2142 * Although not specifically required by the architecture, POWER7
2143 * preserves the following registers in nap mode, even if an SMT mode
2144 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2145 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2147 /* Save non-volatile GPRs */
2148 std r14, VCPU_GPR(R14)(r3)
2149 std r15, VCPU_GPR(R15)(r3)
2150 std r16, VCPU_GPR(R16)(r3)
2151 std r17, VCPU_GPR(R17)(r3)
2152 std r18, VCPU_GPR(R18)(r3)
2153 std r19, VCPU_GPR(R19)(r3)
2154 std r20, VCPU_GPR(R20)(r3)
2155 std r21, VCPU_GPR(R21)(r3)
2156 std r22, VCPU_GPR(R22)(r3)
2157 std r23, VCPU_GPR(R23)(r3)
2158 std r24, VCPU_GPR(R24)(r3)
2159 std r25, VCPU_GPR(R25)(r3)
2160 std r26, VCPU_GPR(R26)(r3)
2161 std r27, VCPU_GPR(R27)(r3)
2162 std r28, VCPU_GPR(R28)(r3)
2163 std r29, VCPU_GPR(R29)(r3)
2164 std r30, VCPU_GPR(R30)(r3)
2165 std r31, VCPU_GPR(R31)(r3)
2170 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2173 END_FTR_SECTION_IFCLR(CPU_FTR_TM)
2175 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
2177 ld r3, HSTATE_KVM_VCPU(r13)
2179 li r5, 0 /* don't preserve non-vol regs */
2180 bl kvmppc_save_tm_hv
2186 * Set DEC to the smaller of DEC and HDEC, so that we wake
2187 * no later than the end of our timeslice (HDEC interrupts
2188 * don't wake us from nap).
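/*
 * i.e. (hedged sketch):
 *
 *	dec  = mfspr(SPRN_DEC);
 *	hdec = mfspr(SPRN_HDEC);
 *	if (hdec < dec)
 *		mtspr(SPRN_DEC, hdec);	// wake by the end of the timeslice at latest
 */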
2199 /* save expiry time of guest decrementer */
2201 ld r4, HSTATE_KVM_VCPU(r13)
2202 ld r5, HSTATE_KVM_VCORE(r13)
2203 ld r6, VCORE_TB_OFFSET_APPL(r5)
2204 subf r3, r6, r3 /* convert to host TB value */
2205 std r3, VCPU_DEC_EXPIRES(r4)
2207 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2208 ld r4, HSTATE_KVM_VCPU(r13)
2209 addi r3, r4, VCPU_TB_CEDE
2210 bl kvmhv_accumulate_time
2213 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2215 /* Go back to host stack */
2216 ld r1, HSTATE_HOST_R1(r13)
2219 * Take a nap until a decrementer, external or doorbell interrupt
2220 * occurs, with PECE1 and PECE0 set in LPCR.
2221 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
2222 * Also clear the runlatch bit before napping.
2225 mfspr r0, SPRN_CTRLF
2227 mtspr SPRN_CTRLT, r0
2230 stb r0,HSTATE_HWTHREAD_REQ(r13)
2232 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
2234 ori r5, r5, LPCR_PECEDH
2235 rlwimi r5, r3, 0, LPCR_PECEDP
2236 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2238 kvm_nap_sequence: /* desired LPCR value in r5 */
2239 li r3, PNV_THREAD_NAP
2243 bl isa206_idle_insn_mayloss
2245 mfspr r0, SPRN_CTRLF
2247 mtspr SPRN_CTRLT, r0
2252 stb r0, PACA_FTRACE_ENABLED(r13)
2254 li r0, KVM_HWTHREAD_IN_KVM
2255 stb r0, HSTATE_HWTHREAD_STATE(r13)
2257 lbz r0, HSTATE_NAPPING(r13)
2258 cmpwi r0, NAPPING_CEDE
2260 cmpwi r0, NAPPING_NOVCPU
2261 beq kvm_novcpu_wakeup
2262 cmpwi r0, NAPPING_UNSPLIT
2263 beq kvm_unsplit_wakeup
2264 twi 31,0,0 /* Nap state must not be zero */
2272 /* Woken by external or decrementer interrupt */
2274 /* get vcpu pointer */
2275 ld r4, HSTATE_KVM_VCPU(r13)
2277 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2278 addi r3, r4, VCPU_TB_RMINTR
2279 bl kvmhv_accumulate_time
2282 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2285 END_FTR_SECTION_IFCLR(CPU_FTR_TM)
2287 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
2291 li r5, 0 /* don't preserve non-vol regs */
2292 bl kvmppc_restore_tm_hv
2294 ld r4, HSTATE_KVM_VCPU(r13)
2298 /* load up FP state */
2301 /* Restore guest decrementer */
2302 ld r3, VCPU_DEC_EXPIRES(r4)
2303 ld r5, HSTATE_KVM_VCORE(r13)
2304 ld r6, VCORE_TB_OFFSET_APPL(r5)
2305 add r3, r3, r6 /* convert host TB to guest TB value */
2311 ld r14, VCPU_GPR(R14)(r4)
2312 ld r15, VCPU_GPR(R15)(r4)
2313 ld r16, VCPU_GPR(R16)(r4)
2314 ld r17, VCPU_GPR(R17)(r4)
2315 ld r18, VCPU_GPR(R18)(r4)
2316 ld r19, VCPU_GPR(R19)(r4)
2317 ld r20, VCPU_GPR(R20)(r4)
2318 ld r21, VCPU_GPR(R21)(r4)
2319 ld r22, VCPU_GPR(R22)(r4)
2320 ld r23, VCPU_GPR(R23)(r4)
2321 ld r24, VCPU_GPR(R24)(r4)
2322 ld r25, VCPU_GPR(R25)(r4)
2323 ld r26, VCPU_GPR(R26)(r4)
2324 ld r27, VCPU_GPR(R27)(r4)
2325 ld r28, VCPU_GPR(R28)(r4)
2326 ld r29, VCPU_GPR(R29)(r4)
2327 ld r30, VCPU_GPR(R30)(r4)
2328 ld r31, VCPU_GPR(R31)(r4)
2330 /* Check the wake reason in SRR1 to see why we got here */
2331 bl kvmppc_check_wake_reason
2334 * Restore volatile registers since we could have called a
2335 * C routine in kvmppc_check_wake_reason
2337 * r3 tells us whether we need to return to host or not
2338 * WARNING: it gets checked further down:
2339 * should not modify r3 until this check is done.
2341 ld r4, HSTATE_KVM_VCPU(r13)
2343 /* clear our bit in vcore->napping_threads */
2344 34: ld r5,HSTATE_KVM_VCORE(r13)
2345 lbz r7,HSTATE_PTID(r13)
2348 addi r6,r5,VCORE_NAPPING_THREADS
2354 stb r0,HSTATE_NAPPING(r13)
2356 /* See if the wake reason saved in r3 means we need to exit */
2357 stw r12, VCPU_TRAP(r4)
2361 b maybe_reenter_guest
2363 /* cede when already previously prodded case */
2366 stb r0,VCPU_PRODDED(r3)
2367 sync /* order testing prodded vs. clearing ceded */
2368 stb r0,VCPU_CEDED(r3)
2372 /* we've ceded but we want to give control to the host */
2374 ld r9, HSTATE_KVM_VCPU(r13)
2377 /* Try to do machine check recovery in real mode */
2378 machine_check_realmode:
2379 mr r3, r9 /* get vcpu pointer */
2380 bl kvmppc_realmode_machine_check
2382 /* all machine checks go to virtual mode for further handling */
2383 ld r9, HSTATE_KVM_VCPU(r13)
2384 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
2388 * Call C code to handle a HMI in real mode.
2389 * Only the primary thread does the call, secondary threads are handled
2390 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
2391 * r9 points to the vcpu on entry
2394 lbz r0, HSTATE_PTID(r13)
2397 bl kvmppc_realmode_hmi_handler
2398 ld r9, HSTATE_KVM_VCPU(r13)
2399 li r12, BOOK3S_INTERRUPT_HMI
2403 * Check the reason we woke from nap, and take appropriate action.
2405 * 0 if nothing needs to be done
2406 * 1 if something happened that needs to be handled by the host
2407 * -1 if there was a guest wakeup (IPI or msgsnd)
2408 * -2 if we handled a PCI passthrough interrupt (returned by
2409 * kvmppc_read_intr only)
2411 * Also sets r12 to the interrupt vector for any interrupt that needs
2412 * to be handled now by the host (0x500 for external interrupt), or zero.
2413 * Modifies all volatile registers (since it may call a C function).
2414 * This routine calls kvmppc_read_intr, a C function, if an external
2415 * interrupt is pending.
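/*
 * Hedged sketch of the decode below: the wake reason lives in a 4-bit
 * field of SRR1 (POWER8 encoding shown; POWER7 uses a 3-bit field).
 *
 *	switch ((srr1 >> 18) & 0xf) {
 *	case 0x6:				// decrementer
 *	case 0x5: return 0;			// privileged doorbell: nothing to do
 *	case 0x3: r12 = BOOK3S_INTERRUPT_H_DOORBELL;
 *		  clear_hv_doorbell();		// msgclr; handled explicitly on exit
 *		  return host_ipi_pending() ? 1 : -1;
 *	case 0xa: r12 = BOOK3S_INTERRUPT_HMI;  return 1;
 *	case 0x8: return kvmppc_read_intr();	// external: 0/-1/-2/1/2 as above
 *	default:  return 1;			// anything else: let the host look
 *	}
 */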
2417 kvmppc_check_wake_reason:
2420 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2422 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2423 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2424 cmpwi r6, 8 /* was it an external interrupt? */
2425 beq 7f /* if so, see what it was */
2428 cmpwi r6, 6 /* was it the decrementer? */
2431 cmpwi r6, 5 /* privileged doorbell? */
2433 cmpwi r6, 3 /* hypervisor doorbell? */
2435 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2436 cmpwi r6, 0xa /* Hypervisor maintenance ? */
2438 li r3, 1 /* anything else, return 1 */
2441 /* hypervisor doorbell */
2442 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
2445 * Clear the doorbell as we will invoke the handler
2446 * explicitly in the guest exit path.
2448 lis r6, (PPC_DBELL_SERVER << (63-36))@h
2450 /* see if it's a host IPI */
2452 lbz r0, HSTATE_HOST_IPI(r13)
2455 /* if not, return -1 */
2459 /* Woken up due to Hypervisor maintenance interrupt */
2460 4: li r12, BOOK3S_INTERRUPT_HMI
2464 /* external interrupt - create a stack frame so we can call C */
2466 std r0, PPC_LR_STKOFF(r1)
2467 stdu r1, -PPC_MIN_STKFRM(r1)
2470 li r12, BOOK3S_INTERRUPT_EXTERNAL
2475 * A return code of 2 means a PCI passthrough interrupt, but
2476 * we need to return to the host to complete handling the
2477 * interrupt. The trap reason is expected in r12 by the guest
2480 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
2482 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
2483 addi r1, r1, PPC_MIN_STKFRM
2488 * Save away FP, VMX and VSX registers.
2490 * N.B. r30 and r31 are volatile across this function,
2491 * thus it is not callable from C.
2498 #ifdef CONFIG_ALTIVEC
2500 oris r8,r8,MSR_VEC@h
2501 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2505 oris r8,r8,MSR_VSX@h
2506 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2509 addi r3,r3,VCPU_FPRS
2511 #ifdef CONFIG_ALTIVEC
2513 addi r3,r31,VCPU_VRS
2515 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2517 mfspr r6,SPRN_VRSAVE
2518 stw r6,VCPU_VRSAVE(r31)
2523 * Load up FP, VMX and VSX registers
2525 * N.B. r30 and r31 are volatile across this function,
2526 * thus it is not callable from C.
2533 #ifdef CONFIG_ALTIVEC
2535 oris r8,r8,MSR_VEC@h
2536 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2540 oris r8,r8,MSR_VSX@h
2541 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2544 addi r3,r4,VCPU_FPRS
2546 #ifdef CONFIG_ALTIVEC
2548 addi r3,r31,VCPU_VRS
2550 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2552 lwz r7,VCPU_VRSAVE(r31)
2553 mtspr SPRN_VRSAVE,r7
2558 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2560 * Save transactional state and TM-related registers.
2561 * Called with r3 pointing to the vcpu struct and r4 containing
2562 * the guest MSR value.
2563 * r5 is non-zero iff non-volatile register state needs to be maintained.
2564 * If r5 == 0, this can modify all checkpointed registers, but
2565 * restores r1 and r2 before exit.
2567 _GLOBAL_TOC(kvmppc_save_tm_hv)
2568 EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
2569 /* See if we need to handle fake suspend mode */
2572 END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
2574 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
2576 beq __kvmppc_save_tm
2578 /* The following code handles the fake_suspend = 1 case */
2580 std r0, PPC_LR_STKOFF(r1)
2581 stdu r1, -PPC_MIN_STKFRM(r1)
2586 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
2589 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
2592 bl pnv_power9_force_smt4_catch
2593 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
2596 /* We have to treclaim here because that's the only way to do S->N */
2597 li r3, TM_CAUSE_KVM_RESCHED
2601 * We were in fake suspend, so we are not going to save the
2602 * register state as the guest checkpointed state (since
2603 * we already have it), therefore we can now use any volatile GPR.
2604 * In fact treclaim in fake suspend state doesn't modify
2609 bl pnv_power9_force_smt4_release
2610 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
2614 mfspr r3, SPRN_PSSCR
2615 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
2616 li r0, PSSCR_FAKE_SUSPEND
2618 mtspr SPRN_PSSCR, r3
2620 /* Don't save TEXASR, use value from last exit in real suspend state */
2621 ld r9, HSTATE_KVM_VCPU(r13)
2622 mfspr r5, SPRN_TFHAR
2623 mfspr r6, SPRN_TFIAR
2624 std r5, VCPU_TFHAR(r9)
2625 std r6, VCPU_TFIAR(r9)
2627 addi r1, r1, PPC_MIN_STKFRM
2628 ld r0, PPC_LR_STKOFF(r1)
2633 * Restore transactional state and TM-related registers.
2634 * Called with r3 pointing to the vcpu struct
2635 * and r4 containing the guest MSR value.
2636 * r5 is non-zero iff non-volatile register state needs to be maintained.
2637 * This potentially modifies all checkpointed registers.
2638 * It restores r1 and r2 from the PACA.
2640 _GLOBAL_TOC(kvmppc_restore_tm_hv)
2641 EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
2643 * If we are doing TM emulation for the guest on a POWER9 DD2,
2644 * then we don't actually do a trechkpt -- we either set up
2645 * fake-suspend mode, or emulate a TM rollback.
2648 b __kvmppc_restore_tm
2649 END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
2651 std r0, PPC_LR_STKOFF(r1)
2654 stb r0, HSTATE_FAKE_SUSPEND(r13)
2656 /* Turn on TM so we can restore TM SPRs */
2659 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG
2663 * The user may change these outside of a transaction, so they must
2664 * always be context switched.
2666 ld r5, VCPU_TFHAR(r3)
2667 ld r6, VCPU_TFIAR(r3)
2668 ld r7, VCPU_TEXASR(r3)
2669 mtspr SPRN_TFHAR, r5
2670 mtspr SPRN_TFIAR, r6
2671 mtspr SPRN_TEXASR, r7
2673 rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
2674 beqlr /* TM not active in guest */
2676 /* Make sure the failure summary is set */
2677 oris r7, r7, (TEXASR_FS)@h
2678 mtspr SPRN_TEXASR, r7
2680 cmpwi r5, 1 /* check for suspended state */
2682 stb r5, HSTATE_FAKE_SUSPEND(r13)
2683 b 9f /* and return */
2684 10: stdu r1, -PPC_MIN_STKFRM(r1)
2685 /* guest is in transactional state, so simulate rollback */
2686 bl kvmhv_emulate_tm_rollback
2688 addi r1, r1, PPC_MIN_STKFRM
2689 9: ld r0, PPC_LR_STKOFF(r1)
2692 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2695 * We come here if we get any exception or interrupt while we are
2696 * executing host real mode code while in guest MMU context.
2697 * r12 is (CR << 32) | vector
2698 * r13 points to our PACA
2699 * r12 is saved in HSTATE_SCRATCH0(r13)
2700 * r9 is saved in HSTATE_SCRATCH2(r13)
2701 * r13 is saved in HSPRG1
2702 * cfar is saved in HSTATE_CFAR(r13)
2703 * ppr is saved in HSTATE_PPR(r13)
2705 kvmppc_bad_host_intr:
2707 * Switch to the emergency stack, but start half-way down in
2708 * case we were already on it.
2712 ld r1, PACAEMERGSP(r13)
2713 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
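/*
 * Stack choice above, as a hedged sketch: start half-way down the
 * emergency stack so that, if we faulted while already running on it,
 * we don't overwrite the frame we were using.
 *
 *	sp = local_paca->emergency_sp - THREAD_SIZE/2 - INT_FRAME_SIZE;
 */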
2726 mfspr r3, SPRN_HSRR0
2727 mfspr r4, SPRN_HSRR1
2729 mfspr r6, SPRN_HDSISR
2731 1: mfspr r3, SPRN_SRR0
2734 mfspr r6, SPRN_DSISR
2739 ld r9, HSTATE_SCRATCH2(r13)
2740 ld r12, HSTATE_SCRATCH0(r13)
2745 ld r5, HSTATE_CFAR(r13)
2746 std r5, ORIG_GPR3(r1)
2750 lbz r6, PACAIRQSOFTMASK(r13)
2756 LOAD_REG_IMMEDIATE(3, 0x7265677368657265) /* "regshere" stack frame marker */
2757 std r3, STACK_FRAME_OVERHEAD-16(r1)
2760 * XXX On POWER7 and POWER8, we just spin here since we don't
2761 * know what the other threads are doing (and we don't want to
2762 * coordinate with them) - but at least we now have register state
2763 * in memory that we might be able to look at from another CPU.
2768 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
2769 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
2770 * r11 has the guest MSR value (in/out)
2771 * r9 has a vcpu pointer (in)
2772 * r0 is used as a scratch register
2774 kvmppc_msr_interrupt:
2775 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
2776 cmpwi r0, 2 /* Check if we are in transactional state.. */
2777 ld r11, VCPU_INTR_MSR(r9)
2779 /* ... if transactional, change to suspended */
2781 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
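/*
 * The same transition in C terms (hedged; the TS field is 0 = none,
 * 1 = suspended, 2 = transactional):
 *
 *	ts = (old_msr >> MSR_TS_S_LG) & 3;
 *	if (ts == 2)
 *		ts = 1;			// interrupts take T -> S, never stay T
 *	new_msr = vcpu->arch.intr_msr;
 *	new_msr = (new_msr & ~MSR_TS_MASK) | (ts << MSR_TS_S_LG);
 */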
2785 * Load up guest PMU state. R3 points to the vcpu struct.
2787 _GLOBAL(kvmhv_load_guest_pmu)
2788 EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
2792 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
2793 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
2796 ld r3, VCPU_MMCR(r4)
2797 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
2798 cmpwi r5, MMCR0_PMAO
2799 beql kvmppc_fix_pmao
2800 END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
2801 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
2802 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
2803 lwz r6, VCPU_PMC + 8(r4)
2804 lwz r7, VCPU_PMC + 12(r4)
2805 lwz r8, VCPU_PMC + 16(r4)
2806 lwz r9, VCPU_PMC + 20(r4)
2813 ld r3, VCPU_MMCR(r4)
2814 ld r5, VCPU_MMCR + 8(r4)
2815 ld r6, VCPU_MMCRA(r4)
2816 ld r7, VCPU_SIAR(r4)
2817 ld r8, VCPU_SDAR(r4)
2818 mtspr SPRN_MMCR1, r5
2819 mtspr SPRN_MMCRA, r6
2823 ld r5, VCPU_MMCR + 24(r4)
2824 ld r6, VCPU_SIER + 8(r4)
2825 ld r7, VCPU_SIER + 16(r4)
2826 mtspr SPRN_MMCR3, r5
2827 mtspr SPRN_SIER2, r6
2828 mtspr SPRN_SIER3, r7
2829 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
2831 ld r5, VCPU_MMCR + 16(r4)
2832 ld r6, VCPU_SIER(r4)
2833 mtspr SPRN_MMCR2, r5
2835 BEGIN_FTR_SECTION_NESTED(96)
2836 lwz r7, VCPU_PMC + 24(r4)
2837 lwz r8, VCPU_PMC + 28(r4)
2838 ld r9, VCPU_MMCRS(r4)
2839 mtspr SPRN_SPMC1, r7
2840 mtspr SPRN_SPMC2, r8
2841 mtspr SPRN_MMCRS, r9
2842 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
2843 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2844 mtspr SPRN_MMCR0, r3
2850 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
2852 _GLOBAL(kvmhv_load_host_pmu)
2853 EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
2855 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
2857 beq 23f /* skip if not */
2859 ld r3, HSTATE_MMCR0(r13)
2860 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
2861 cmpwi r4, MMCR0_PMAO
2862 beql kvmppc_fix_pmao
2863 END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
2864 lwz r3, HSTATE_PMC1(r13)
2865 lwz r4, HSTATE_PMC2(r13)
2866 lwz r5, HSTATE_PMC3(r13)
2867 lwz r6, HSTATE_PMC4(r13)
2868 lwz r8, HSTATE_PMC5(r13)
2869 lwz r9, HSTATE_PMC6(r13)
2876 ld r3, HSTATE_MMCR0(r13)
2877 ld r4, HSTATE_MMCR1(r13)
2878 ld r5, HSTATE_MMCRA(r13)
2879 ld r6, HSTATE_SIAR(r13)
2880 ld r7, HSTATE_SDAR(r13)
2881 mtspr SPRN_MMCR1, r4
2882 mtspr SPRN_MMCRA, r5
2886 ld r8, HSTATE_MMCR2(r13)
2887 ld r9, HSTATE_SIER(r13)
2888 mtspr SPRN_MMCR2, r8
2890 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2892 ld r5, HSTATE_MMCR3(r13)
2893 ld r6, HSTATE_SIER2(r13)
2894 ld r7, HSTATE_SIER3(r13)
2895 mtspr SPRN_MMCR3, r5
2896 mtspr SPRN_SIER2, r6
2897 mtspr SPRN_SIER3, r7
2898 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
2899 mtspr SPRN_MMCR0, r3
2905 * Save guest PMU state into the vcpu struct.
2906 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
2908 _GLOBAL(kvmhv_save_guest_pmu)
2909 EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
2914 * POWER8 seems to have a hardware bug where setting
2915 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
2916 * when some counters are already negative doesn't seem
2917 * to cause a performance monitor alert (and hence interrupt).
2918 * The effect of this is that when saving the PMU state,
2919 * if there is no PMU alert pending when we read MMCR0
2920 * before freezing the counters, but one becomes pending
2921 * before we read the counters, we lose it.
2922 * To work around this, we need a way to freeze the counters
2923 * before reading MMCR0. Normally, freezing the counters
2924 * is done by writing MMCR0 (to set MMCR0[FC]) which
2925 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
2926 * we can also freeze the counters using MMCR2, by writing
2927 * 1s to all the counter freeze condition bits (there are
2928 * 9 bits each for 6 counters).
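/*
 * The workaround, sketched in C (hedged; SPR accessors illustrative):
 *
 *	saved_mmcr2 = mfspr(SPRN_MMCR2);
 *	mtspr(SPRN_MMCR2, -1UL);	// every freeze condition set: counters stop
 *	isync();
 *	saved_mmcr0 = mfspr(SPRN_MMCR0);// a PMAO that became pending is still seen
 *	mtspr(SPRN_MMCR0, MMCR0_FC);	// now freeze via MMCR0, disable PM ints
 *	// ...read the PMCs etc.; saved_mmcr0/saved_mmcr2 go into the vcpu struct
 */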
2930 li r3, -1 /* set all freeze bits */
2932 mfspr r10, SPRN_MMCR2
2933 mtspr SPRN_MMCR2, r3
2935 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2937 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
2938 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
2939 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
2940 mfspr r6, SPRN_MMCRA
2941 /* Clear MMCRA in order to disable SDAR updates */
2943 mtspr SPRN_MMCRA, r7
2945 cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */
2947 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
2949 21: mfspr r5, SPRN_MMCR1
2952 std r4, VCPU_MMCR(r9)
2953 std r5, VCPU_MMCR + 8(r9)
2954 std r6, VCPU_MMCRA(r9)
2956 std r10, VCPU_MMCR + 16(r9)
2957 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2959 mfspr r5, SPRN_MMCR3
2960 mfspr r6, SPRN_SIER2
2961 mfspr r7, SPRN_SIER3
2962 std r5, VCPU_MMCR + 24(r9)
2963 std r6, VCPU_SIER + 8(r9)
2964 std r7, VCPU_SIER + 16(r9)
2965 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
2966 std r7, VCPU_SIAR(r9)
2967 std r8, VCPU_SDAR(r9)
2974 stw r3, VCPU_PMC(r9)
2975 stw r4, VCPU_PMC + 4(r9)
2976 stw r5, VCPU_PMC + 8(r9)
2977 stw r6, VCPU_PMC + 12(r9)
2978 stw r7, VCPU_PMC + 16(r9)
2979 stw r8, VCPU_PMC + 20(r9)
2982 std r5, VCPU_SIER(r9)
2983 BEGIN_FTR_SECTION_NESTED(96)
2984 mfspr r6, SPRN_SPMC1
2985 mfspr r7, SPRN_SPMC2
2986 mfspr r8, SPRN_MMCRS
2987 stw r6, VCPU_PMC + 24(r9)
2988 stw r7, VCPU_PMC + 28(r9)
2989 std r8, VCPU_MMCRS(r9)
2991 mtspr SPRN_MMCRS, r4
2992 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
2993 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2997 * This works around a hardware bug on POWER8E processors, where
2998 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
2999 * performance monitor interrupt. Instead, when we need to have
3000 * an interrupt pending, we have to arrange for a counter to overflow.
3004 mtspr SPRN_MMCR2, r3
3005 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
3006 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
3007 mtspr SPRN_MMCR0, r3
3014 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3016 * Start timing an activity
3017 * r3 = pointer to time accumulation struct, r4 = vcpu
3020 ld r5, HSTATE_KVM_VCORE(r13)
3021 ld r6, VCORE_TB_OFFSET_APPL(r5)
3023 subf r5, r6, r5 /* subtract current timebase offset */
3024 std r3, VCPU_CUR_ACTIVITY(r4)
3025 std r5, VCPU_ACTIVITY_START(r4)
3029 * Accumulate time to one activity and start another.
3030 * r3 = pointer to new time accumulation struct, r4 = vcpu
3032 kvmhv_accumulate_time:
3033 ld r5, HSTATE_KVM_VCORE(r13)
3034 ld r8, VCORE_TB_OFFSET_APPL(r5)
3035 ld r5, VCPU_CUR_ACTIVITY(r4)
3036 ld r6, VCPU_ACTIVITY_START(r4)
3037 std r3, VCPU_CUR_ACTIVITY(r4)
3039 subf r7, r8, r7 /* subtract current timebase offset */
3040 std r7, VCPU_ACTIVITY_START(r4)
3044 ld r8, TAS_SEQCOUNT(r5)
3047 std r8, TAS_SEQCOUNT(r5)
3049 ld r7, TAS_TOTAL(r5)
3051 std r7, TAS_TOTAL(r5)
3057 3: std r3, TAS_MIN(r5)
3063 std r8, TAS_SEQCOUNT(r5)
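/*
 * The TAS_SEQCOUNT bumps around the update above form a seqlock-style
 * write side, sketched here in C (field names mirror the TAS_* offsets;
 * a reader retries if it sees an odd count or the count changed):
 *
 *	acc->seqcount++;		// odd: update in progress
 *	smp_wmb();
 *	acc->total += delta;
 *	if (delta < acc->min) acc->min = delta;
 *	if (delta > acc->max) acc->max = delta;
 *	smp_wmb();
 *	acc->seqcount++;		// even again: update complete
 */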