/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>
#include <asm/sysreg.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
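
/*
 * Illustrative sketch only: how a hypothetical caller might decode an exit
 * code returned by the hyp entry code. A pending SError is folded into any
 * exit by setting bit 31, so it must be checked independently of the code.
 *
 *	int ret = ...exit code from the hyp run loop...;
 *
 *	if (ARM_SERROR_PENDING(ret))
 *		... handle the deferred SError first ...
 *	if (ARM_EXCEPTION_IS_TRAP(ret))
 *		... a guest trap, with the SError bit masked off ...
 */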

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR, 	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP, 		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
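
/*
 * Illustrative sketch only: the SMCCC function ID the host would place in
 * x0 to ask the hypervisor to run a vCPU. The token name is real; the
 * variable is made up.
 *
 *	u32 func_id = KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run);
 */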

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init	0

#ifndef __ASSEMBLY__

#include <linux/mm.h>

enum __kvm_host_smccc_func {
	/* Hypercalls available only prior to pKVM finalisation */
	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
	__KVM_HOST_SMCCC_FUNC___pkvm_init = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

	/* Hypercalls available after pKVM finalisation */
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_relax_perms_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_wrprotect_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_test_clear_young_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_mkyoung_guest,
	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
	__KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
};
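
/*
 * Illustrative sketch only: on nVHE these hypercalls reach EL2 as SMCCC
 * fast calls issued via HVC, conceptually like the snippet below. The real
 * path goes through the kvm_call_hyp_nvhe() machinery rather than a direct
 * arm_smccc_1_1_hvc() invocation.
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context), &res);
 */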

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)
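
/*
 * Illustrative sketch only: declaring a per-CPU variable that exists in
 * both the VHE and nVHE hyp objects. "my_hyp_data" is a made-up name.
 *
 *	DECLARE_KVM_HYP_PER_CPU(unsigned long, my_hyp_data);
 */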

/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];		\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
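
/*
 * Illustrative sketch only: the result must be checked against NULL, since
 * the nVHE percpu pages may not exist yet at early boot. "my_hyp_data" is
 * a made-up symbol name.
 *
 *	unsigned long *p = this_cpu_ptr_nvhe_sym(my_hyp_data);
 *
 *	if (p)
 *		... the hyp percpu copy is available ...
 */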

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif
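
/*
 * Illustrative sketch only: from kernel proper, the same expression picks
 * the VHE or nVHE copy of a symbol at runtime. kvm_host_data is declared
 * elsewhere (asm/kvm_host.h).
 *
 *	struct kvm_host_data *hd = this_cpu_ptr_hyp_sym(kvm_host_data);
 */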

struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	unsigned long stack_pa;
	phys_addr_t pgd_pa;
	unsigned long hcr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
	unsigned long tmp;
};

/*
 * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
 * hyp_panic() in non-protected mode.
 *
 * @stack_base:			hyp VA of the hyp_stack base.
 * @overflow_stack_base:	hyp VA of the hyp_overflow_stack base.
 * @fp:				hyp FP where the backtrace begins.
 * @pc:				hyp PC where the backtrace begins.
 */
struct kvm_nvhe_stacktrace_info {
	unsigned long stack_base;
	unsigned long overflow_stack_base;
	unsigned long fp;
	unsigned long pc;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
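
/*
 * Illustrative sketch only: on non-VHE the hypervisor cannot use kernel
 * image addresses, so symbols are translated to their linear-map alias
 * before being handed across.
 *
 *	void *vec = kvm_ksym_ref(__kvm_hyp_vector);
 */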

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
					  phys_addr_t ipa,
					  int level);
extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
					phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

#define __kvm_at(at_op, addr)						\
( { 									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	" __msr_s(at_op, "%3") "\n"				\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )

asmlinkage void __noreturn hyp_panic(void);
asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst);
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
					      u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);

#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
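
/*
 * Illustrative sketch only: a hyp exception handler might fetch the
 * current vCPU pointer like this; the register choice is hypothetical.
 *
 *	get_vcpu_ptr x0, x1	// x0 = vcpu, x1 = host context (scratch)
 */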

/*
 * KVM extable for unexpected exceptions.
 * Create a struct kvm_exception_table_entry output to a section that can be
 * mapped by EL2. The table is not sorted.
 *
 * The caller must ensure:
 * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
 * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
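
/*
 * Illustrative sketch only: guest entry/exit pairs these macros, saving
 * the host's callee-saved registers before running the guest and restoring
 * them afterwards. "x1" holding the host context is a hypothetical choice.
 *
 *	save_callee_saved_regs x1	// x1 = host cpu context
 *	... run the guest ...
 *	restore_callee_saved_regs x1
 */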

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp,	sp_el0
	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp,	  [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */