2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * Derived from book3s_hv_rmhandlers.S, which is:
18 #include <asm/ppc_asm.h>
19 #include <asm/asm-offsets.h>
20 #include <asm/export.h>
22 #include <asm/cputable.h>
24 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
25 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
28 * Save transactional state and TM-related registers.
30 * - r3 pointing to the vcpu struct
31 * - r4 containing the MSR with current TS bits:
32 * (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR).
33 * - r5 containing a flag indicating that non-volatile registers
35 * If r5 == 0, this can modify all checkpointed registers, but
36 * restores r1, r2 before exit. If r5 != 0, this restores the
37 * MSR TM/FP/VEC/VSX bits to their state on entry.
39 _GLOBAL(__kvmppc_save_tm)
/*
 * __kvmppc_save_tm: reclaim the guest's in-progress transaction and save
 * the checkpointed register state into the vcpu struct (r3).
 * In:   r3 = vcpu, r4 = MSR with current TS bits, r5 = preserve-nv flag.
 * NOTE(review): this excerpt elides many source lines (original numbering
 * jumps, e.g. 42->51, 88->91); comments below describe only what the
 * visible instructions establish.
 */
/* Save caller's LR slot and make a switch frame on the stack.
 * (r0 presumably holds LR via an elided mflr -- TODO confirm.) */
41 std r0, PPC_LR_STKOFF(r1)
42 stdu r1, -SWITCH_FRAME_SIZE(r1)
/* Build an MSR image in r8 with TM set plus VEC/VSX enabled so the
 * checkpointed FP/vector state can be accessed. */
51 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
53 oris r8, r8, (MSR_VEC | MSR_VSX)@h
/* Extract MSR[TS] from the guest MSR in r4; zero means no live
 * transaction, so skip the whole reclaim path. */
56 rldicl. r4, r4, 64 - MSR_TS_S_LG, 62
57 beq 1f /* TM not active in guest. */
/* Stash stack pointer and vcpu pointer in PACA scratch slots: treclaim
 * will clobber all GPRs, and these two must be recoverable afterwards. */
59 std r1, HSTATE_SCRATCH2(r13)
60 std r3, HSTATE_SCRATCH1(r13)
62 /* Save CR on the stack - even if r5 == 0 we need to get cr7 back. */
66 /* Save DSCR so we can restore it to avoid running with user value */
71 * We are going to do treclaim., which will modify all checkpointed
72 * registers. Save the non-volatile registers on the stack if
73 * preservation of non-volatile state has been requested.
78 /* MSR[TS] will be 0 (non-transactional) once we do treclaim. */
/* r10 = MSR value to run with after treclaim (TS field cleared),
 * parked in the stack frame for the epilogue. */
80 rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
81 SAVE_GPR(10, r1) /* final MSR value */
83 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
85 /* Emulation of the treclaim instruction needs TEXASR before treclaim */
/* P9 TM-HV-assist only: preserve pre-treclaim TEXASR (r6, loaded by an
 * elided mfspr -- TODO confirm) for the treclaim emulation code. */
87 std r6, VCPU_ORIG_TEXASR(r3)
88 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
91 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
/* Failure cause recorded in TEXASR by the (elided) treclaim. */
95 li r3, TM_CAUSE_KVM_RESCHED
97 /* All GPRs are volatile at this point. */
100 /* Temporarily store r13 and r9 so we have some regs to play with */
/* Park post-treclaim r9, then reload r9 = vcpu from the PACA scratch
 * slot saved above; r9 is the vcpu base for all the stores below. */
103 std r9, PACATMSCRATCH(r13)
104 ld r9, HSTATE_SCRATCH1(r13)
106 /* Save away PPR soon so we don't run with user value. */
/* Checkpointed r0 into the vcpu's TM GPR array. */
107 std r0, VCPU_GPRS_TM(0)(r9)
111 /* Reload stack pointer. */
112 std r1, VCPU_GPRS_TM(1)(r9)
113 ld r1, HSTATE_SCRATCH2(r13)
115 /* Set MSR RI now we have r1 and r13 back. */
116 std r2, VCPU_GPRS_TM(2)(r9)
120 /* Reload TOC pointer. */
123 /* Save all but r0-r2, r9 & r13 */
/* Assembler loop body (the .rept/.endr directives are elided here):
 * store each remaining checkpointed GPR except r9/r13, which are still
 * being used as scratch/PACA pointers. */
126 .if (reg != 9) && (reg != 13)
127 std reg, VCPU_GPRS_TM(reg)(r9)
131 /* ... now save r13 */
/* r4 presumably reloaded with checkpointed r13 by an elided
 * instruction -- TODO confirm against the full source. */
133 std r4, VCPU_GPRS_TM(13)(r9)
134 /* ... and save r9 */
/* Checkpointed r9 was parked in PACATMSCRATCH above; fetch and store. */
135 ld r4, PACATMSCRATCH(r13)
136 std r4, VCPU_GPRS_TM(9)(r9)
138 /* Restore host DSCR and CR values, after saving guest values */
/* r6/r7 hold the guest's checkpointed CR/DSCR at this point
 * (producers elided) -- save them into the vcpu. */
141 stw r6, VCPU_CR_TM(r9)
142 std r7, VCPU_DSCR_TM(r9)
148 /* Save away checkpointed SPRs. */
149 std r0, VCPU_PPR_TM(r9)
/* Checkpointed LR/CTR/AMR/TAR/XER, moved into GPRs by elided mfspr/mflr
 * instructions, stored into the vcpu TM area. */
155 std r5, VCPU_LR_TM(r9)
156 std r7, VCPU_CTR_TM(r9)
157 std r8, VCPU_AMR_TM(r9)
158 std r10, VCPU_TAR_TM(r9)
159 std r11, VCPU_XER_TM(r9)
/* r3 = destination buffers for the checkpointed FP and vector state;
 * the store_fp_state/store_vr_state calls themselves are elided. */
162 addi r3, r9, VCPU_FPRS_TM
164 addi r3, r9, VCPU_VRS_TM
166 mfspr r6, SPRN_VRSAVE
167 stw r6, VCPU_VRSAVE_TM(r9)
169 /* Restore non-volatile registers if requested to */
175 * We need to save these SPRs after the treclaim so that the software
176 * error code is recorded correctly in the TEXASR. Also the user may
177 * change these outside of a transaction, so they must always be
180 mfspr r7, SPRN_TEXASR
181 std r7, VCPU_TEXASR(r9)
/* TFHAR/TFIAR (r5/r6, producers elided) saved last, post-treclaim. */
184 std r5, VCPU_TFHAR(r9)
185 std r6, VCPU_TFIAR(r9)
187 /* Restore MSR state if requested */
/* Epilogue: tear down the frame and recover LR from the caller slot. */
191 addi r1, r1, SWITCH_FRAME_SIZE
192 ld r0, PPC_LR_STKOFF(r1)
197 * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it can
198 * be invoked from C function by PR KVM only.
200 _GLOBAL(_kvmppc_save_tm_pr)
/*
 * C-callable PR-KVM wrapper around __kvmppc_save_tm.
 * In: r3 = vcpu, r4 = MSR with current TS bits (per the header comment).
 * Forces r5 = 1 so the callee preserves non-volatile registers, as a
 * C caller requires. NOTE(review): several lines (incl. the bl to
 * __kvmppc_save_tm and the TAR mfspr) are elided in this excerpt.
 */
/* Standard prologue: save LR slot, allocate a minimal stack frame. */
202 std r0, PPC_LR_STKOFF(r1)
203 stdu r1, -PPC_MIN_STKFRM(r1)
/* Park r8 (presumably TAR, loaded by an elided mfspr -- see the
 * matching "save TAR" comment in _kvmppc_restore_tm_pr) in the frame. */
206 std r8, PPC_MIN_STKFRM-8(r1)
208 li r5, 1 /* preserve non-volatile registers */
/* After the (elided) call: recover the parked r8/TAR value. */
211 ld r8, PPC_MIN_STKFRM-8(r1)
/* Epilogue: drop the frame and reload LR from the caller's slot. */
214 addi r1, r1, PPC_MIN_STKFRM
215 ld r0, PPC_LR_STKOFF(r1)
219 EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
222 * Restore transactional state and TM-related registers.
224 * - r3 pointing to the vcpu struct.
225 * - r4 is the guest MSR with desired TS bits:
226 * For HV KVM, it is VCPU_MSR
227 * For PR KVM, it is provided by caller
228 * - r5 containing a flag indicating that non-volatile registers
230 * If r5 == 0, this potentially modifies all checkpointed registers, but
231 * restores r1, r2 from the PACA before exit.
232 * If r5 != 0, this restores the MSR TM/FP/VEC/VSX bits to their state on entry.
234 _GLOBAL(__kvmppc_restore_tm)
/*
 * __kvmppc_restore_tm: reload the guest's checkpointed state from the
 * vcpu struct (r3) and re-establish the transaction (via an elided
 * trechkpt), leaving MSR[TS] as requested in r4.
 * In:   r3 = vcpu, r4 = guest MSR with desired TS bits,
 *       r5 = preserve-nv flag.
 * NOTE(review): many source lines are elided in this excerpt; comments
 * describe only the visible instructions.
 */
/* Save caller's LR slot (r0 presumably holds LR via elided mflr). */
236 std r0, PPC_LR_STKOFF(r1)
240 /* Turn on TM/FP/VSX/VMX so we can restore them. */
247 oris r5, r5, (MSR_VEC | MSR_VSX)@h
251 * The user may change these outside of a transaction, so they must
252 * always be context switched.
/* TFHAR/TFIAR/TEXASR are unconditionally restored from the vcpu;
 * the mtspr for TFHAR/TFIAR are elided here, TEXASR's is visible. */
254 ld r5, VCPU_TFHAR(r3)
255 ld r6, VCPU_TFIAR(r3)
256 ld r7, VCPU_TEXASR(r3)
259 mtspr SPRN_TEXASR, r7
/* Test the desired MSR[TS] bits (r5 presumably reloaded from the guest
 * MSR by an elided mr/ld); zero => nothing to re-checkpoint. */
262 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
263 beq 9f /* TM not active in guest */
265 /* Make sure the failure summary is set, otherwise we'll program check
266 * when we trechkpt. It's possible that this might have been not set
267 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
/* Force TEXASR[FS] before trechkpt so it cannot program-check. */
270 oris r7, r7, (TEXASR_FS)@h
271 mtspr SPRN_TEXASR, r7
274 * Make a stack frame and save non-volatile registers if requested.
/* Frame + stash r1 in PACA scratch: trechkpt will clobber all GPRs. */
276 stdu r1, -SWITCH_FRAME_SIZE(r1)
277 std r1, HSTATE_SCRATCH2(r13)
288 /* MSR[TS] will be 1 (suspended) once we do trechkpt */
/* r10 = MSR image to run with after trechkpt (TS = suspended),
 * parked in the stack frame for the epilogue. */
290 rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
291 SAVE_GPR(10, r1) /* final MSR value */
294 * We need to load up the checkpointed state for the guest.
295 * We need to do this early as it will blow away any GPRs, VSRs and
/* r31 now holds the vcpu pointer (copy made by an elided instruction);
 * r3 becomes the source buffer for the FP/VR load calls (elided). */
300 addi r3, r31, VCPU_FPRS_TM
302 addi r3, r31, VCPU_VRS_TM
305 lwz r7, VCPU_VRSAVE_TM(r3)
306 mtspr SPRN_VRSAVE, r7
/* Load checkpointed LR/CR/CTR/AMR/TAR/XER values; the mtspr/mtlr
 * consumers are elided in this excerpt. NOTE(review): these use r3 as
 * the vcpu base -- presumably restored from r31 by an elided mr. */
308 ld r5, VCPU_LR_TM(r3)
309 lwz r6, VCPU_CR_TM(r3)
310 ld r7, VCPU_CTR_TM(r3)
311 ld r8, VCPU_AMR_TM(r3)
312 ld r9, VCPU_TAR_TM(r3)
313 ld r10, VCPU_XER_TM(r3)
322 * Load up PPR and DSCR values but don't put them in the actual SPRs
323 * till the last moment to avoid running with userspace PPR and DSCR for
/* Parked in non-volatile r29/r30 so they survive until just before
 * the (elided) trechkpt. */
326 ld r29, VCPU_DSCR_TM(r3)
327 ld r30, VCPU_PPR_TM(r3)
329 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
333 /* Load GPRs r0-r28 */
/* Assembler loop body (.rept/.endr elided): reload each checkpointed
 * GPR from the vcpu TM GPR array, r31 still the vcpu base. */
336 ld reg, VCPU_GPRS_TM(reg)(r31)
343 /* Load final GPRs */
/* r29-r31 last, since they were in use as scratch above. */
344 ld 29, VCPU_GPRS_TM(29)(r31)
345 ld 30, VCPU_GPRS_TM(30)(r31)
346 ld 31, VCPU_GPRS_TM(31)(r31)
348 /* TM checkpointed state is now setup. All GPRs are now volatile. */
351 /* Now let's get back the state we need. */
/* Recover our stack pointer from the PACA scratch slot saved above. */
354 ld r1, HSTATE_SCRATCH2(r13)
358 /* Set the MSR RI since we have our registers back. */
362 /* Restore TOC pointer and CR */
367 /* Restore non-volatile registers if requested to. */
/* Common epilogue: drop the frame, recover LR. */
372 5: addi r1, r1, SWITCH_FRAME_SIZE
373 ld r0, PPC_LR_STKOFF(r1)
/* Early-out path when the guest had no active transaction. */
376 9: /* Restore MSR bits if requested */
382 * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that it
383 * can be invoked from C function by PR KVM only.
385 _GLOBAL(_kvmppc_restore_tm_pr)
/*
 * C-callable PR-KVM wrapper around __kvmppc_restore_tm.
 * In: r3 = vcpu, r4 = guest MSR with desired TS bits (per the header
 * comment). Saves/restores TAR across the call since trechkpt replaces
 * it with the guest's checkpointed value. NOTE(review): the r5=1
 * preserve-nv setup and the TAR mfspr/mtspr lines appear to be elided
 * in this excerpt -- confirm against the full source.
 */
/* Standard prologue: save LR slot, allocate a minimal stack frame. */
387 std r0, PPC_LR_STKOFF(r1)
388 stdu r1, -PPC_MIN_STKFRM(r1)
390 /* save TAR so that it can be recovered later */
392 std r8, PPC_MIN_STKFRM-8(r1)
395 bl __kvmppc_restore_tm
/* Recover the host TAR value parked in the frame above. */
397 ld r8, PPC_MIN_STKFRM-8(r1)
/* Epilogue: drop the frame and reload LR from the caller's slot. */
400 addi r1, r1, PPC_MIN_STKFRM
401 ld r0, PPC_LR_STKOFF(r1)
405 EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
406 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */