/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <[email protected]>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/bitfield.h>
#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

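/*
 * Vector table group base offsets (from VBAR_ELx), plus the per-type
 * offsets within a group defined by enum exception_type below; adding
 * the two gives the vector used when injecting an exception into the
 * guest.
 */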
#define CURRENT_EL_SP_EL0_VECTOR        0x0
#define CURRENT_EL_SP_ELx_VECTOR        0x200
#define LOWER_EL_AArch64_VECTOR         0x400
#define LOWER_EL_AArch32_VECTOR         0x600

enum exception_type {
        except_type_sync        = 0,
        except_type_irq         = 0x80,
        except_type_fiq         = 0x100,
        except_type_serror      = 0x180,
};

#define kvm_exception_type_names                \
        { except_type_sync,     "SYNC"   },     \
        { except_type_irq,      "IRQ"    },     \
        { except_type_fiq,      "FIQ"    },     \
        { except_type_serror,   "SERROR" }

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
{
        u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SVE) |
                  ESR_ELx_IL;

        kvm_inject_nested_sync(vcpu, esr);
}

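/*
 * At EL2 (VHE and nVHE hyp code), HCR_EL2.RW has already been configured
 * for this vcpu, so the EL1 register width can be read back directly.
 * Outside of the hypervisor, rely on the vcpu feature flag instead.
 */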
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        if (!vcpu_has_run_once(vcpu))
                vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

        /*
         * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
         * get set in SCTLR_EL1 such that we can detect when the guest
         * MMU gets turned on and do the necessary cache maintenance
         * then.
         */
        if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                vcpu->arch.hcr_el2 |= HCR_TVM;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu->arch.hcr_el2;
}

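/*
 * WFE traps (HCR_EL2.TWE) are unconditionally disabled here. WFI traps
 * (HCR_EL2.TWI) are also disabled when interrupts may be delivered to the
 * vcpu directly by the GIC (mapped vLPIs, or nASSGIreq in use), and are
 * re-enabled otherwise.
 */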
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~HCR_TWE;
        if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
            vcpu->kvm->arch.vgic.nassgireq)
                vcpu->arch.hcr_el2 &= ~HCR_TWI;
        else
                vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= HCR_TWE;
        vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
        vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);

        return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                                  u8 reg_num)
{
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                         unsigned long val)
{
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
        switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
        case PSR_MODE_EL2h:
        case PSR_MODE_EL2t:
                return true;
        default:
                return false;
        }
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
        return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
        return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
                (ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
        return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
        return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
        return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}

static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
        /*
         * We are in a hypervisor context if the vcpu mode is EL2 or
         * E2H and TGE bits are set. The latter means we are in the user space
         * of the VHE kernel. ARMv8.1 ARM describes this as 'InHost'
         *
         * Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the
         * rest of the KVM code, and will result in a misbehaving guest.
         */
        return vcpu_is_el2_ctxt(ctxt) ||
                (__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
                __vcpu_el2_tge_is_set(ctxt);
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
        return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
}

static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
{
        return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------|
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
        const unsigned long overlap = BIT(24) | BIT(21);
        unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

        spsr &= ~overlap;

        spsr |= dit << 21;

        return spsr;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
        u32 mode;

        if (vcpu_mode_is_32bit(vcpu)) {
                mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
                return mode > PSR_AA32_MODE_USR;
        }

        mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

        return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);

        if (esr & ESR_ELx_CV)
                return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

        return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.far_el2;
}

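/*
 * HPFAR_EL2 holds bits [51:12] of the faulting IPA in its FIPA field
 * (bits [43:4]); shifting the masked value left by 8 yields the
 * page-aligned IPA.
 */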
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
        return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
        return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static inline
bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
{
        return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
{
        return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
{
        unsigned long esr = kvm_vcpu_get_esr(vcpu);

        BUG_ON(!esr_fsc_is_permission_fault(esr));
        return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
        case ESR_ELx_FSC_EXTABT:
        case ESR_ELx_FSC_SEA_TTW(-1) ... ESR_ELx_FSC_SEA_TTW(3):
        case ESR_ELx_FSC_SECC:
        case ESR_ELx_FSC_SECC_TTW(-1) ... ESR_ELx_FSC_SECC_TTW(3):
                return true;
        default:
                return false;
        }
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);
        return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
        if (kvm_vcpu_abt_iss1tw(vcpu)) {
                /*
                 * Only a permission fault on a S1PTW should be
                 * considered as a write. Otherwise, page tables baked
                 * in a read-only memslot will result in an exception
                 * being delivered in the guest.
                 *
                 * The drawback is that we end-up faulting twice if the
                 * guest is using any of HW AF/DB: a translation fault
                 * to map the page containing the PT (read only at
                 * first), then a permission fault to allow the flags
                 * to be set.
                 */
                return kvm_vcpu_trap_is_permission_fault(vcpu);
        }

        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;

        return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
        } else {
                u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
                sctlr |= SCTLR_ELx_EE;
                vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
        }
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

        if (vcpu_mode_priv(vcpu))
                return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
        else
                return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

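/*
 * Convert MMIO data between the guest's current endianness and the host's
 * native representation, truncated to the access size:
 * vcpu_data_guest_to_host() for guest writes, vcpu_data_host_to_guest()
 * for guest reads.
 */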
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                case 4:
                        return be32_to_cpu(data & 0xffffffff);
                default:
                        return be64_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                case 4:
                        return le32_to_cpu(data & 0xffffffff);
                default:
                        return le64_to_cpu(data);
                }
        }

        return data;            /* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                case 4:
                        return cpu_to_be32(data & 0xffffffff);
                default:
                        return cpu_to_be64(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                case 4:
                        return cpu_to_le32(data & 0xffffffff);
                default:
                        return cpu_to_le64(data);
                }
        }

        return data;            /* Leave LE untouched */
}

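/*
 * A deferred PC increment and a pending exception are mutually exclusive:
 * both are resolved on the next entry to the guest, so only one of the two
 * flags may be set at any time (hence the WARNs below).
 */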
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
        WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
        vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)                                        \
        do {                                                            \
                WARN_ON(vcpu_get_flag((v), INCREMENT_PC));              \
                vcpu_set_flag((v), PENDING_EXCEPTION);                  \
                vcpu_set_flag((v), e);                                  \
        } while (0)

#define __build_check_all_or_none(r, bits)                              \
        BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))

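/*
 * Translate a CPACR_ELx-format {clr,set} pair into the equivalent nVHE
 * CPTR_EL2 bits. The FPEN/ZEN/SMEN enable fields have inverted polarity
 * (setting an enable in CPACR terms clears the corresponding CPTR_EL2
 * trap, and vice versa), while TTA/TAM/TCPAC are trap bits in both
 * formats and carry across directly.
 */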
#define __cpacr_to_cptr_clr(clr, set)                                   \
        ({                                                              \
                u64 cptr = 0;                                           \
                                                                        \
                if ((set) & CPACR_ELx_FPEN)                             \
                        cptr |= CPTR_EL2_TFP;                           \
                if ((set) & CPACR_ELx_ZEN)                              \
                        cptr |= CPTR_EL2_TZ;                            \
                if ((set) & CPACR_ELx_SMEN)                             \
                        cptr |= CPTR_EL2_TSM;                           \
                if ((clr) & CPACR_ELx_TTA)                              \
                        cptr |= CPTR_EL2_TTA;                           \
                if ((clr) & CPTR_EL2_TAM)                               \
                        cptr |= CPTR_EL2_TAM;                           \
                if ((clr) & CPTR_EL2_TCPAC)                             \
                        cptr |= CPTR_EL2_TCPAC;                         \
                                                                        \
                cptr;                                                   \
        })

#define __cpacr_to_cptr_set(clr, set)                                   \
        ({                                                              \
                u64 cptr = 0;                                           \
                                                                        \
                if ((clr) & CPACR_ELx_FPEN)                             \
                        cptr |= CPTR_EL2_TFP;                           \
                if ((clr) & CPACR_ELx_ZEN)                              \
                        cptr |= CPTR_EL2_TZ;                            \
                if ((clr) & CPACR_ELx_SMEN)                             \
                        cptr |= CPTR_EL2_TSM;                           \
                if ((set) & CPACR_ELx_TTA)                              \
                        cptr |= CPTR_EL2_TTA;                           \
                if ((set) & CPTR_EL2_TAM)                               \
                        cptr |= CPTR_EL2_TAM;                           \
                if ((set) & CPTR_EL2_TCPAC)                             \
                        cptr |= CPTR_EL2_TCPAC;                         \
                                                                        \
                cptr;                                                   \
        })

#define cpacr_clear_set(clr, set)                                       \
        do {                                                            \
                BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0);                \
                BUILD_BUG_ON((clr) & CPACR_ELx_E0POE);                  \
                __build_check_all_or_none((clr), CPACR_ELx_FPEN);       \
                __build_check_all_or_none((set), CPACR_ELx_FPEN);       \
                __build_check_all_or_none((clr), CPACR_ELx_ZEN);        \
                __build_check_all_or_none((set), CPACR_ELx_ZEN);        \
                __build_check_all_or_none((clr), CPACR_ELx_SMEN);       \
                __build_check_all_or_none((set), CPACR_ELx_SMEN);       \
                                                                        \
                if (has_vhe() || has_hvhe())                            \
                        sysreg_clear_set(cpacr_el1, clr, set);          \
                else                                                    \
                        sysreg_clear_set(cptr_el2,                      \
                                         __cpacr_to_cptr_clr(clr, set), \
                                         __cpacr_to_cptr_set(clr, set));\
        } while (0)

static __always_inline void kvm_write_cptr_el2(u64 val)
{
        if (has_vhe() || has_hvhe())
                write_sysreg(val, cpacr_el1);
        else
                write_sysreg(val, cptr_el2);
}

static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
        u64 val;

        if (has_vhe()) {
                val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_EL1_SMEN_EL1EN;
        } else if (has_hvhe()) {
                val = CPACR_ELx_FPEN;

                if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
                        val |= CPACR_ELx_ZEN;
                if (cpus_have_final_cap(ARM64_SME))
                        val |= CPACR_ELx_SMEN;
        } else {
                val = CPTR_NVHE_EL2_RES1;

                if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
                        val |= CPTR_EL2_TZ;
                if (cpus_have_final_cap(ARM64_SME))
                        val &= ~CPTR_EL2_TSM;
        }

        return val;
}

static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
        u64 val = kvm_get_reset_cptr_el2(vcpu);

        kvm_write_cptr_el2(val);
}

/*
 * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
 * format if E2H isn't set.
 */
static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
{
        u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);

        if (!vcpu_el2_e2h_is_set(vcpu))
                cptr = translate_cptr_el2_to_cpacr_el1(cptr);

        return cptr;
}

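/*
 * Decode a CPACR_ELx.{FPEN,ZEN,SMEN} field taken from the guest
 * hypervisor's (sanitised) CPTR_EL2 and report whether the access traps
 * in the current vcpu context: 0b00/0b10 always trap, 0b01 traps EL0
 * accesses only (i.e. when running as host EL0, TGE set and not at EL2),
 * and 0b11 never traps.
 */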
static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
                                             unsigned int xen)
{
        switch (xen) {
        case 0b00:
        case 0b10:
                return true;
        case 0b01:
                return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu);
        case 0b11:
        default:
                return false;
        }
}

#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)                            \
        (!vcpu_has_nv(vcpu) ? false :                                           \
         ____cptr_xen_trap_enabled(vcpu,                                        \
                                   SYS_FIELD_GET(CPACR_ELx, xen,                \
                                                 vcpu_sanitised_cptr_el2(vcpu))))

static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
{
        return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN);
}

static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
{
        return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
}

static inline void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
        vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
}
#endif /* __ARM64_KVM_EMULATE_H__ */