From: Catalin Marinas
Date: Fri, 20 May 2022 17:51:54 +0000 (+0100)
Subject: Merge branch 'for-next/esr-elx-64-bit' into for-next/core
X-Git-Tag: v5.19-rc1~208^2
X-Git-Url: https://repo.jachan.dev/linux.git/commitdiff_plain/0616ea3f1b93a99264d84f3d002ae117f6526b62

Merge branch 'for-next/esr-elx-64-bit' into for-next/core

* for-next/esr-elx-64-bit:
  : Treat ESR_ELx as a 64-bit register.
  KVM: arm64: uapi: Add kvm_debug_exit_arch.hsr_high
  KVM: arm64: Treat ESR_EL2 as a 64-bit register
  arm64: Treat ESR_ELx as a 64-bit register
  arm64: compat: Do not treat syscall number as ESR_ELx for a bad syscall
  arm64: Make ESR_ELx_xVC_IMM_MASK compatible with assembly
---

0616ea3f1b93a99264d84f3d002ae117f6526b62
diff --cc arch/arm64/include/asm/exception.h
index 2add7f33b7c2,0e6535aa78c2..d94aecff9690
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@@ -57,24 -57,23 +57,24 @@@ asmlinkage void call_on_irq_stack(struc
  				  void (*func)(struct pt_regs *));
  asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
  
- void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
+ void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs);
  void do_undefinstr(struct pt_regs *regs);
  void do_bti(struct pt_regs *regs);
- void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
+ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
  			struct pt_regs *regs);
- void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
- void do_sve_acc(unsigned int esr, struct pt_regs *regs);
- void do_sme_acc(unsigned int esr, struct pt_regs *regs);
- void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs);
- void do_sysinstr(unsigned int esr, struct pt_regs *regs);
- void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
- void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
- void do_cp15instr(unsigned int esr, struct pt_regs *regs);
+ void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
+ void do_sve_acc(unsigned long esr, struct pt_regs *regs);
++void do_sme_acc(unsigned long esr, struct pt_regs *regs);
+ void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs);
+ void do_sysinstr(unsigned long esr, struct pt_regs *regs);
+ void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs);
+ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
+ void do_cp15instr(unsigned long esr, struct pt_regs *regs);
  void do_el0_svc(struct pt_regs *regs);
  void do_el0_svc_compat(struct pt_regs *regs);
- void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
- void do_serror(struct pt_regs *regs, unsigned int esr);
+ void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
+ void do_serror(struct pt_regs *regs, unsigned long esr);
  void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
- void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
+ void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
  
  #endif /* __ASM_EXCEPTION_H */
diff --cc arch/arm64/kernel/fpsimd.c
index c5677aa2e9e6,22bf0cfe236b..819979398127
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@@ -1401,77 -1024,22 +1401,77 @@@ void do_sve_acc(unsigned long esr, stru
  		WARN_ON(1); /* SVE access shouldn't have trapped */
  
  	/*
 -	 * Convert the FPSIMD state to SVE, zeroing all the state that
 -	 * is not shared with FPSIMD.  If (as is likely) the current
 -	 * state is live in the registers then do this there and
 -	 * update our metadata for the current task including
 -	 * disabling the trap, otherwise update our in-memory copy.
 +	 * Even if the task can have used streaming mode we can only
 +	 * generate SVE access traps in normal SVE mode and
 +	 * transitioning out of streaming mode may discard any
 +	 * streaming mode state.  Always clear the high bits to avoid
 +	 * any potential errors tracking what is properly initialised.
 +	 */
 +	sve_init_regs();
 +
 +	put_cpu_fpsimd_context();
 +}
 +
 +/*
 + * Trapped SME access
 + *
 + * Storage is allocated for the full SVE and SME state, the current
 + * FPSIMD register contents are migrated to SVE if SVE is not already
 + * active, and the access trap is disabled.
 + *
 + * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state()
 + * would have disabled the SME access trap for userspace during
 + * ret_to_user, making an SVE access trap impossible in that case.
 + */
- void do_sme_acc(unsigned int esr, struct pt_regs *regs)
++void do_sme_acc(unsigned long esr, struct pt_regs *regs)
 +{
 +	/* Even if we chose not to use SME, the hardware could still trap: */
 +	if (unlikely(!system_supports_sme()) || WARN_ON(is_compat_task())) {
 +		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 +		return;
 +	}
 +
 +	/*
 +	 * If this not a trap due to SME being disabled then something
 +	 * is being used in the wrong mode, report as SIGILL.
  	 */
 +	if (ESR_ELx_ISS(esr) != ESR_ELx_SME_ISS_SME_DISABLED) {
 +		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 +		return;
 +	}
 +
 +	sve_alloc(current);
 +	sme_alloc(current);
 +	if (!current->thread.sve_state || !current->thread.za_state) {
 +		force_sig(SIGKILL);
 +		return;
 +	}
 +
 +	get_cpu_fpsimd_context();
 +
 +	/* With TIF_SME userspace shouldn't generate any traps */
 +	if (test_and_set_thread_flag(TIF_SME))
 +		WARN_ON(1);
 +
  	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
  		unsigned long vq_minus_one =
 -			sve_vq_from_vl(task_get_sve_vl(current)) - 1;
 -		sve_set_vq(vq_minus_one);
 -		sve_flush_live(true, vq_minus_one);
 +			sve_vq_from_vl(task_get_sme_vl(current)) - 1;
 +		sme_set_vq(vq_minus_one);
 +
  		fpsimd_bind_task_to_cpu();
 -	} else {
 -		fpsimd_to_sve(current);
  	}
  
 +	/*
 +	 * If SVE was not already active initialise the SVE registers,
 +	 * any non-shared state between the streaming and regular SVE
 +	 * registers is architecturally guaranteed to be zeroed when
 +	 * we enter streaming mode.  We do not need to initialize ZA
 +	 * since ZA must be disabled at this point and enabling ZA is
 +	 * architecturally defined to zero ZA.
 +	 */
 +	if (system_supports_sve() && !test_thread_flag(TIF_SVE))
 +		sve_init_regs();
 +
  	put_cpu_fpsimd_context();
  }
  
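Why the width change matters: ESR_ELx is architecturally a 64-bit
register, and recent revisions of the architecture allocate syndrome
bits above bit 31 (the ISS2 field, starting at bit 32), so carrying the
syndrome in an "unsigned int" silently drops those bits. The standalone
sketch below is not kernel code: the ESR_* macros are simplified
stand-ins for the kernel's ESR_ELx_* definitions in
arch/arm64/include/asm/esr.h, and the syndrome value is hypothetical.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified layout: EC in bits [31:26], a 5-bit ISS2 field at bit 32. */
#define ESR_EC_SHIFT	26
#define ESR_EC_MASK	(UINT64_C(0x3f) << ESR_EC_SHIFT)
#define ESR_ISS2_SHIFT	32
#define ESR_ISS2_MASK	(UINT64_C(0x1f) << ESR_ISS2_SHIFT)

int main(void)
{
	/* Hypothetical syndrome: EC 0x24 (data abort) plus a non-zero ISS2 */
	uint64_t esr = (UINT64_C(0x24) << ESR_EC_SHIFT) |
		       (UINT64_C(0x1) << ESR_ISS2_SHIFT);

	uint32_t esr32 = (uint32_t)esr;	/* the old "unsigned int esr" */

	/* EC sits below bit 32, so it survives the narrow type... */
	printf("EC:   0x%" PRIx64 " vs 0x%" PRIx64 "\n",
	       (esr & ESR_EC_MASK) >> ESR_EC_SHIFT,
	       ((uint64_t)esr32 & ESR_EC_MASK) >> ESR_EC_SHIFT);

	/* ...but ISS2 sits above bit 31 and is silently lost. */
	printf("ISS2: 0x%" PRIx64 " vs 0x%" PRIx64 "\n",
	       (esr & ESR_ISS2_MASK) >> ESR_ISS2_SHIFT,
	       ((uint64_t)esr32 & ESR_ISS2_MASK) >> ESR_ISS2_SHIFT);
	return 0;
}

This prints "EC: 0x24 vs 0x24" but "ISS2: 0x1 vs 0x0". An "unsigned
long" is 64-bit on arm64 kernels (LP64) and is enough to keep the full
syndrome, which is why the series converts the handler signatures in
exception.h and fpsimd.c above rather than introducing a new type.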