// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This file is derived from arch/powerpc/kvm/44x_emulate.c.
 */
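/*
 * Emulation of e500/e500mc privileged instructions and SPR accesses:
 * guest-executed cache locking, TLB management and doorbell instructions
 * trap to KVM and are handled here, as are mfspr/mtspr accesses to SPRs
 * that are not mapped directly to hardware.
 */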
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/dbell.h>
#include <asm/reg_booke.h>

#include "booke.h"
#include "e500.h"
#define XOP_DCBTLS  166
#define XOP_MSGSND  206
#define XOP_MSGCLR  238
#define XOP_MFTMR   366
#define XOP_TLBIVAX 786
#define XOP_TLBSX   914
#define XOP_TLBRE   946
#define XOP_TLBWE   978
#define XOP_TLBILX  18
#define XOP_EHPRIV  270
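/*
 * e500mc guests use msgsnd/msgclr to raise and clear inter-processor
 * doorbells; dbell2prio() maps the message type field of the payload in
 * rB to the corresponding Book E interrupt priority.
 */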
#ifdef CONFIG_KVM_E500MC
static int dbell2prio(ulong param)
{
	int msg = param & PPC_DBELL_TYPE_MASK;
	int prio = -1;

	switch (msg) {
	case PPC_DBELL_TYPE(PPC_DBELL):
		prio = BOOKE_IRQPRIO_DBELL;
		break;
	case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
		prio = BOOKE_IRQPRIO_DBELL_CRIT;
		break;
	}

	return prio;
}
static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
{
	ulong param = vcpu->arch.regs.gpr[rb];
	int prio = dbell2prio(param);

	if (prio < 0)
		return EMULATE_FAIL;

	clear_bit(prio, &vcpu->arch.pending_exceptions);

	return EMULATE_DONE;
}
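/*
 * msgsnd: deliver the doorbell to every vcpu whose PIR matches the PIR
 * field of the payload, or to all vcpus for a broadcast message, and
 * kick the targets so they notice the pending exception.
 */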
static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
{
	ulong param = vcpu->arch.regs.gpr[rb];
	int prio = dbell2prio(param);
	int pir = param & PPC_DBELL_PIR_MASK;
	int i;
	struct kvm_vcpu *cvcpu;

	if (prio < 0)
		return EMULATE_FAIL;

	kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
		int cpir = cvcpu->arch.shared->pir;
		if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
			set_bit(prio, &cvcpu->arch.pending_exceptions);
			kvm_vcpu_kick(cvcpu);
		}
	}

	return EMULATE_DONE;
}
#endif
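/*
 * ehpriv with the debug opcode is used as the software breakpoint
 * instruction for guest debugging: hand it to userspace as a
 * KVM_EXIT_DEBUG without advancing the guest PC.
 */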
static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;

	switch (get_oc(inst)) {
	case EHPRIV_OC_DEBUG:
		run->exit_reason = KVM_EXIT_DEBUG;
		run->debug.arch.address = vcpu->arch.regs.nip;
		run->debug.arch.status = 0;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		emulated = EMULATE_EXIT_USER;
		*advance = 0;
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}
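/*
 * dcbtls: the guest may not pin lines in the host cache, so report
 * every lock attempt as failed via L1CSR0[CUL] (cache unable to lock).
 */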
static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Always fail to lock the cache */
	vcpu_e500->l1csr0 |= L1CSR0_CUL;

	return EMULATE_DONE;
}
static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
				  int rt)
{
	/* Expose one thread per vcpu */
	if (get_tmrn(inst) == TMRN_TMCFG0) {
		kvmppc_set_gpr(vcpu, rt,
			       1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}
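/*
 * Top-level instruction emulation entry point for e500: decode the
 * primary and extended opcode and dispatch to the handlers above or to
 * the TLB emulation helpers; anything not handled here falls back to
 * the generic Book E emulation.
 */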
int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rt = get_rt(inst);
	gva_t ea;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case XOP_DCBTLS:
			emulated = kvmppc_e500_emul_dcbtls(vcpu);
			break;
#ifdef CONFIG_KVM_E500MC
		case XOP_MSGSND:
			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
			break;
		case XOP_MSGCLR:
			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
			break;
#endif
		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;
		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;
		case XOP_TLBSX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
			break;
		case XOP_TLBILX: {
			int type = rt & 0x3;

			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
			break;
		}
		case XOP_TLBIVAX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
			break;
		case XOP_MFTMR:
			emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
			break;
		case XOP_EHPRIV:
			emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
							   advance);
			break;
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
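/*
 * mtspr emulation: writes to the MAS, PID, cache and power-management
 * control registers are kept in vcpu state; anything unknown is passed
 * on to the generic Book E mtspr handler.
 */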
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	case SPRN_PID1:
		vcpu_e500->pid[1] = spr_val;
		break;
	case SPRN_PID2:
		vcpu_e500->pid[2] = spr_val;
		break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val;
		break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val;
		break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val;
		break;
	case SPRN_MAS3:
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val;
		break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val;
		break;
	case SPRN_MAS7:
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
#endif
	case SPRN_L1CSR0:
		vcpu_e500->l1csr0 = spr_val;
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		vcpu_e500->l1csr1 = spr_val;
		vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
		break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val;
		break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val;
		break;
	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;
	case SPRN_PWRMGTCR0:
		/*
		 * Guest relies on host power management configurations
		 * Treat the request as a general store
		 */
		vcpu->arch.pwrmgtcr0 = spr_val;
		break;
	case SPRN_BUCSR:
		/*
		 * If we are here, it means that we have already flushed the
		 * branch predictor, so just return to guest.
		 */
		break;
	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
		break;
#endif
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
		break;
	case SPRN_IVOR37:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
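/*
 * mfspr emulation: the mirror of the above, returning the values cached
 * in vcpu state; TLB configuration and page-size registers report the
 * geometry of the virtualized MMU.
 */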
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		*spr_val = vcpu_e500->pid[0];
		break;
	case SPRN_PID1:
		*spr_val = vcpu_e500->pid[1];
		break;
	case SPRN_PID2:
		*spr_val = vcpu_e500->pid[2];
		break;
	case SPRN_MAS0:
		*spr_val = vcpu->arch.shared->mas0;
		break;
	case SPRN_MAS1:
		*spr_val = vcpu->arch.shared->mas1;
		break;
	case SPRN_MAS2:
		*spr_val = vcpu->arch.shared->mas2;
		break;
	case SPRN_MAS3:
		*spr_val = (u32)vcpu->arch.shared->mas7_3;
		break;
	case SPRN_MAS4:
		*spr_val = vcpu->arch.shared->mas4;
		break;
	case SPRN_MAS6:
		*spr_val = vcpu->arch.shared->mas6;
		break;
	case SPRN_MAS7:
		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
		break;
#endif
	case SPRN_DECAR:
		*spr_val = vcpu->arch.decar;
		break;
	case SPRN_TLB0CFG:
		*spr_val = vcpu->arch.tlbcfg[0];
		break;
	case SPRN_TLB1CFG:
		*spr_val = vcpu->arch.tlbcfg[1];
		break;
	case SPRN_TLB0PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[0];
		break;
	case SPRN_TLB1PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[1];
		break;
	case SPRN_L1CSR0:
		*spr_val = vcpu_e500->l1csr0;
		break;
	case SPRN_L1CSR1:
		*spr_val = vcpu_e500->l1csr1;
		break;
	case SPRN_HID0:
		*spr_val = vcpu_e500->hid0;
		break;
	case SPRN_HID1:
		*spr_val = vcpu_e500->hid1;
		break;
	case SPRN_SVR:
		*spr_val = vcpu_e500->svr;
		break;
	case SPRN_MMUCSR0:
		*spr_val = 0;
		break;
	case SPRN_MMUCFG:
		*spr_val = vcpu->arch.mmucfg;
		break;
	case SPRN_EPTCFG:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		/*
		 * Legacy Linux guests access EPTCFG register even if the E.PT
		 * category is disabled in the VM. Give them a chance to live.
		 */
		*spr_val = vcpu->arch.eptcfg;
		break;
	case SPRN_PWRMGTCR0:
		*spr_val = vcpu->arch.pwrmgtcr0;
		break;
	/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
		break;
	case SPRN_IVOR34:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
		break;
#endif
	case SPRN_IVOR35:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
		break;
	case SPRN_IVOR37:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	return emulated;
}