/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_NESTED_H
#define __ARM64_KVM_NESTED_H

#include <linux/bitfield.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_pgtable.h>
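
/*
 * Nested virt is only usable when the host has FEAT_NV support and the
 * vCPU was created with the KVM_ARM_VCPU_HAS_EL2 feature. The
 * __KVM_NVHE_HYPERVISOR__ check constant-folds this to false when
 * compiled into the nVHE hyp object.
 */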
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
{
	return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
		cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
}

/* Translation helpers from non-VHE EL2 to EL1 */
static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
{
	return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
}
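
/*
 * Build a TCR_EL1 value giving the EL1&0 regime the same TTBR0 view the
 * guest set up in TCR_EL2: TG0/ORGN0/IRGN0/T0SZ are copied as-is, PS is
 * converted to IPS, TBI maps to TBI0, and EPD1 disables the TTBR1_EL1
 * walk (non-VHE EL2 only has a single VA range).
 */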
static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
{
	return TCR_EPD1_MASK |				/* disable TTBR1_EL1 */
	       ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
	       tcr_el2_ps_to_tcr_el1_ips(tcr) |
	       (tcr & TCR_EL2_TG0_MASK) |
	       (tcr & TCR_EL2_ORGN0_MASK) |
	       (tcr & TCR_EL2_IRGN0_MASK) |
	       (tcr & TCR_EL2_T0SZ_MASK);
}
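
/*
 * CPTR_EL2 uses trap bits (set to trap) while the CPACR format uses
 * enable bits (set to avoid the trap), hence TFP and TZ are inverted
 * into FPEN and ZEN. TCPAC and TAM occupy the same bit positions in
 * both formats and are copied through unchanged.
 */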
static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
{
	u64 cpacr_el1 = CPACR_EL1_RES1;

	if (cptr_el2 & CPTR_EL2_TTA)
		cpacr_el1 |= CPACR_EL1_TTA;
	if (!(cptr_el2 & CPTR_EL2_TFP))
		cpacr_el1 |= CPACR_EL1_FPEN;
	if (!(cptr_el2 & CPTR_EL2_TZ))
		cpacr_el1 |= CPACR_EL1_ZEN;

	cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);

	return cpacr_el1;
}

static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
{
	/* Only preserve the minimal set of bits we support */
	val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
		SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
	val |= SCTLR_EL1_RES1;

	return val;
}

static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
{
	/* Clear the ASID field (bits [63:48]) */
	return ttbr0 & ~GENMASK_ULL(63, 48);
}

extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern bool forward_debug_exception(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);

union tlbi_info;
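
/*
 * Iterate over all shadow stage-2 MMUs whose VMID matches @vmid and
 * apply the given TLBI callback to each, as needed when emulating a
 * trapped guest TLBI instruction.
 */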
extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
				       const union tlbi_info *info,
				       void (*)(struct kvm_s2_mmu *,
						const union tlbi_info *));
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);

extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);
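
/*
 * Result of walking the guest hypervisor's own stage-2 page tables for
 * a given IPA; filled in by kvm_walk_nested_s2() and read via the
 * accessors below.
 */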
struct kvm_s2_trans {
	phys_addr_t output;
	unsigned long block_size;
	bool writable;
	bool readable;
	int level;
	u32 esr;
	u64 desc;
};

static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
{
	return trans->output;
}

static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
{
	return trans->block_size;
}

static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
{
	return trans->esr;
}

static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
{
	return trans->readable;
}

static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
{
	return trans->writable;
}

static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
{
	/* Bit 54 is the execute-never (XN) bit of the descriptor */
	return !(trans->desc & BIT(54));
}

extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
			      struct kvm_s2_trans *result);
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
				    struct kvm_s2_trans *trans);
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block);
extern void kvm_nested_s2_flush(struct kvm *kvm);

unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);
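
/*
 * Predicates for trapped TLBI instructions: an encoding is only
 * supported when the features it relies on (FEAT_XS for the nXS forms,
 * FEAT_TLBIOS for the outer-shareable forms, FEAT_TLBIRANGE for the
 * range forms) are exposed to the guest.
 */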
static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL1))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL2))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	/* IPAS2 encodings are stage-2 operations, not S1E2 ones */
	if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu);

#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
#else
static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	/* We really should never execute this... */
	WARN_ON_ONCE(1);
	*elr = 0xbad9acc0debadbad;
	return false;
}
#endif

#define KVM_NV_GUEST_MAP_SZ	(KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)

/* Stash the level of the guest's S2 translation in the prot software bits */
static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
{
	return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
}

/* Adjust alignment for the contiguous bit as per StageOA() */
#define contiguous_bit_shift(d, wi, l)			\
	({						\
		u8 shift = 0;				\
							\
		if ((d) & PTE_CONT) {			\
			switch (BIT((wi)->pgshift)) {	\
			case SZ_4K:			\
				shift = 4;		\
				break;			\
			case SZ_16K:			\
				shift = (l) == 2 ? 5 : 7;	\
				break;			\
			case SZ_64K:			\
				shift = 5;		\
				break;			\
			}				\
		}					\
							\
		shift;					\
	})
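
/*
 * Map a PS/PARange encoding (as found in ID_AA64MMFR0_EL1.PARange or
 * VTCR_EL2.PS) to the corresponding output address size in bits.
 */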
static inline unsigned int ps_to_output_size(unsigned int ps)
{
	switch (ps) {
	case 0: return 32;
	case 1: return 36;
	case 2: return 40;
	case 3: return 42;
	case 4: return 44;
	case 5:
	default:
		return 48;
	}
}

#endif /* __ARM64_KVM_NESTED_H */