/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>
#include <asm/sysreg.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)     !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ         0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP        2
#define ARM_EXCEPTION_IL          3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE    HVC_STUB_ERR

#define kvm_arm_exception_type                                  \
        {ARM_EXCEPTION_IRQ,             "IRQ"           },      \
        {ARM_EXCEPTION_EL1_SERROR,      "SERROR"        },      \
        {ARM_EXCEPTION_TRAP,            "TRAP"          },      \
        {ARM_EXCEPTION_HYP_GONE,        "HYP_GONE"      }

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE     (2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)                                           \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
                           ARM_SMCCC_SMC_64,                            \
                           ARM_SMCCC_OWNER_VENDOR_HYP,                  \
                           (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init                    0

#ifndef __ASSEMBLY__

#include <linux/mm.h>

enum __kvm_host_smccc_func {
        /* Hypercalls available only prior to pKVM finalisation */
        /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
        __KVM_HOST_SMCCC_FUNC___pkvm_init = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
        __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
        __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
        __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
        __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
        __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
        __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

        /* Hypercalls available after pKVM finalisation */
        __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_guest,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_relax_perms_guest,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_wrprotect_guest,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_test_clear_young_guest,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_mkyoung_guest,
        __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
        __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
        __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
        __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
        __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
        __KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
        __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
        __KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
        __KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
        __KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
        __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
        __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
        __KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
};
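/*
 * Illustrative sketch (not part of the upstream header): the host reaches
 * these entry points via an SMCCC fast call whose function ID is built by
 * KVM_HOST_SMCCC_FUNC(). An nVHE hypercall such as __kvm_flush_vm_context
 * conceptually boils down to:
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context), &res);
 *	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
 *
 * See kvm_call_hyp_nvhe() in asm/kvm_host.h for the real plumbing.
 */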

#define DECLARE_KVM_VHE_SYM(sym)        extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)       extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)                \
        DECLARE_KVM_VHE_SYM(sym);               \
        DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)      \
        DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)     \
        DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)      \
        DECLARE_KVM_VHE_PER_CPU(type, sym);     \
        DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)      per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)                                          \
        ({                                                                      \
                unsigned long base, off;                                        \
                base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];              \
                off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
                      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
                base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
        })
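/*
 * Usage sketch (illustrative; kvm_host_data is declared via
 * DECLARE_KVM_HYP_PER_CPU in asm/kvm_host.h):
 *
 *	struct kvm_host_data *hd = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu);
 *
 *	if (!hd)
 *		return -ENOMEM;		// hyp percpu memory not allocated yet
 */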

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)    sym
#define CHOOSE_HYP_SYM(sym)     CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)             __nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)       (&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)   (&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)     sym
#define CHOOSE_HYP_SYM(sym)     CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)            __vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)       (&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)   (&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)             (is_kernel_in_hyp_mode()        \
                                           ? CHOOSE_VHE_SYM(sym)        \
                                           : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)       (is_kernel_in_hyp_mode()        \
                                           ? this_cpu_ptr(&sym)         \
                                           : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)   (is_kernel_in_hyp_mode()        \
                                           ? per_cpu_ptr(&sym, cpu)     \
                                           : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)     sym
#define CHOOSE_NVHE_SYM(sym)    kvm_nvhe_sym(sym)

#endif
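/*
 * Illustrative expansions, as seen from kernel proper (kvm_nvhe_sym() is
 * defined in asm/hyp_image.h and prefixes the name with __kvm_nvhe_):
 *
 *	CHOOSE_VHE_SYM(__kvm_hyp_vector)   ->  __kvm_hyp_vector
 *	CHOOSE_NVHE_SYM(__kvm_hyp_vector)  ->  __kvm_nvhe___kvm_hyp_vector
 */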

struct kvm_nvhe_init_params {
        unsigned long mair_el2;
        unsigned long tcr_el2;
        unsigned long tpidr_el2;
        unsigned long stack_hyp_va;
        unsigned long stack_pa;
        phys_addr_t pgd_pa;
        unsigned long hcr_el2;
        unsigned long vttbr;
        unsigned long vtcr;
        unsigned long tmp;
};
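/*
 * Illustrative note (not in the upstream header): the host fills one of
 * these per CPU (see cpu_prepare_hyp_mode() in arch/arm64/kvm/arm.c) and
 * hands its physical address to the __kvm_hyp_init hypercall, roughly:
 *
 *	struct kvm_nvhe_init_params *params;
 *	struct arm_smccc_res res;
 *
 *	params = this_cpu_ptr_nvhe_sym(kvm_init_params);
 *	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
 *			  virt_to_phys(params), &res);
 */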

/*
 * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
 * hyp_panic() in non-protected mode.
 *
 * @stack_base:                 hyp VA of the hyp_stack base.
 * @overflow_stack_base:        hyp VA of the hyp_overflow_stack base.
 * @fp:                         hyp FP where the backtrace begins.
 * @pc:                         hyp PC where the backtrace begins.
 */
struct kvm_nvhe_stacktrace_info {
        unsigned long stack_base;
        unsigned long overflow_stack_base;
        unsigned long fp;
        unsigned long pc;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)                                               \
        ({                                                              \
                void *val = (ptr);                                      \
                if (!is_kernel_in_hyp_mode())                           \
                        val = lm_alias((ptr));                          \
                val;                                                    \
         })
#define kvm_ksym_ref_nvhe(sym)  kvm_ksym_ref(kvm_nvhe_sym(sym))
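/*
 * Usage sketch (illustrative): code that hands a hyp symbol's address
 * around from kernel context goes through the linear alias on nVHE, e.g.:
 *
 *	void *vector = kvm_ksym_ref(__kvm_hyp_vector);
 */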

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init          CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector        CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs    CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
                                     int level);
extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
                                         phys_addr_t ipa,
                                         int level);
extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
                                        phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);

#define __KVM_EXTABLE(from, to)                                         \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"


#define __kvm_at(at_op, addr)                                           \
( {                                                                     \
        int __kvm_at_err = 0;                                           \
        u64 spsr, elr;                                                  \
        asm volatile(                                                   \
        "       mrs     %1, spsr_el2\n"                                 \
        "       mrs     %2, elr_el2\n"                                  \
        "1:     " __msr_s(at_op, "%3") "\n"                             \
        "       isb\n"                                                  \
        "       b       9f\n"                                           \
        "2:     msr     spsr_el2, %1\n"                                 \
        "       msr     elr_el2, %2\n"                                  \
        "       mov     %w0, %4\n"                                      \
        "9:\n"                                                          \
        __KVM_EXTABLE(1b, 2b)                                           \
        : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)                \
        : "r" (addr), "i" (-EFAULT));                                   \
        __kvm_at_err;                                                   \
} )
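/*
 * Usage sketch (illustrative): a guarded stage-1 translation from EL2,
 * along the lines of the hyp fault-handling code; a non-zero return means
 * the AT instruction itself took an exception:
 *
 *	if (__kvm_at(OP_AT_S1E1R, far))
 *		return false;
 *	par = read_sysreg_par();
 */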

asmlinkage void __noreturn hyp_panic(void);
asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
        __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_get_kimage_voffset(struct alt_instr *alt,
        __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
        __le32 *origptr, __le32 *updptr, int nr_inst);
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
        u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);

#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
        adr_this_cpu \reg, kvm_host_data, \tmp
        add     \reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
        get_host_ctxt \ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
        adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
        adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
        str     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Create a struct kvm_exception_table_entry output to a section that can be
 * mapped by EL2. The table is not sorted.
 *
 * The caller must ensure:
 * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
 * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro  _kvm_extable, from, to
        .pushsection    __kvm_ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
.endm
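/*
 * Usage sketch (illustrative): pair a potentially-faulting instruction
 * with its fixup label, as the hyp entry code does:
 *
 *	1:	ldr	x0, [x2]
 *		...
 *	_kvm_extable	1b, 2f
 *		...
 *	2:	// fixup path
 */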

#define CPU_XREG_OFFSET(x)      (CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET           CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET       (CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
        str     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        stp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        stp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        stp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
        // We require \ctxt is not x18-x28
        ldr     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        ldp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        ldp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
        mrs     \tmp,   sp_el0
        str     \tmp,   [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
        ldr     \tmp,     [\ctxt, #CPU_SP_EL0_OFFSET]
        msr     sp_el0, \tmp
.endm
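/*
 * Usage sketch (illustrative): a world-switch path typically saves the
 * host context on the way in and restores it on the way out, e.g.:
 *
 *	get_host_ctxt	x0, x1
 *	save_callee_saved_regs x0
 *	save_sp_el0	x0, x1
 *	...
 *	restore_sp_el0	x0, x1
 *	restore_callee_saved_regs x0
 */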

#endif

#endif /* __ARM_KVM_ASM_H__ */