/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitmap.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>

#include <asm/esr.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>

#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)

/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task most recently loaded onto
 * the CPU, or NULL if kernel mode NEON has been performed after that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to task_struct from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with local_bh_disable() unless softirqs are already masked.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if the task's fpsimd_cpu field contains the
 *   id of the current CPU and the CPU's fpsimd_last_state per-cpu variable
 *   points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared; otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */

struct fpsimd_last_state_struct {
	struct user_fpsimd_state *st;
};

static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
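
/*
 * Illustrative sketch only, not part of the original file: the "still
 * mutually in sync?" test described in the comment above reduces to the
 * two comparisons below.  The helper name is hypothetical; the real
 * check is open-coded in fpsimd_thread_switch().
 */
static inline bool fpsimd_state_in_sync(struct task_struct *tsk)
{
	return __this_cpu_read(fpsimd_last_state.st) ==
			&tsk->thread.uw.fpsimd_state &&
	       tsk->thread.fpsimd_cpu == smp_processor_id();
}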

/* Default VL for tasks that don't set it explicitly: */
static int sve_default_vl = -1;

#ifdef CONFIG_ARM64_SVE

/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
/* Set of available vector lengths, as vq_to_bit(vq): */
static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
static void __percpu *efi_sve_state;

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declarations for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;

#endif /* ! CONFIG_ARM64_SVE */

/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked.  If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    When stored, FPSIMD registers V0-V31 are encoded in
 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view.  For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced.
 *
 *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 *    irrespective of whether TIF_SVE is clear or set, since these are
 *    not vector length dependent.
 */

/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 *
 * Softirqs (and preemption) must be disabled.
 */
static void task_fpsimd_load(void)
{
	WARN_ON(!in_softirq() && !irqs_disabled());

	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_load_state(sve_pffr(&current->thread),
			       &current->thread.uw.fpsimd_state.fpsr,
			       sve_vq_from_vl(current->thread.sve_vl) - 1);
	else
		fpsimd_load_state(&current->thread.uw.fpsimd_state);
}

/*
 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 * date with respect to the CPU registers.
 *
 * Softirqs (and preemption) must be disabled.
 */
void fpsimd_save(void)
{
	struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */

	WARN_ON(!in_softirq() && !irqs_disabled());

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
			if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
				/*
				 * Can't save the user regs, so current would
				 * re-enter user with corrupt state.
				 * There's no way to recover, so kill it:
				 */
				force_signal_inject(SIGKILL, SI_KERNEL, 0);
				return;
			}

			sve_save_state(sve_pffr(&current->thread), &st->fpsr);
		} else
			fpsimd_save_state(st);
	}
}

/*
 * Helpers to translate bit indices in sve_vq_map to VQ values (and
 * vice versa).  This allows find_next_bit() to be used to find the
 * _maximum_ VQ not exceeding a certain value.
 */
static unsigned int vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}

static unsigned int bit_to_vq(unsigned int bit)
{
	if (WARN_ON(bit >= SVE_VQ_MAX))
		bit = SVE_VQ_MAX - 1;

	return SVE_VQ_MAX - bit;
}
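
/*
 * Worked example, illustrative only: with SVE_VQ_MAX == 512, a system
 * supporting VQ 1 and VQ 2 has bits 511 and 510 set in sve_vq_map.
 * find_next_bit(sve_vq_map, SVE_VQ_MAX, vq_to_bit(4)) scans upward from
 * bit 508 and returns 510, and bit_to_vq(510) == 2: the largest
 * supported VQ not exceeding the requested one.
 */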

/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(unsigned int vl)
{
	int bit;
	int max_vl = sve_max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = SVE_VL_MIN;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = SVE_VL_MIN;

	if (vl > max_vl)
		vl = max_vl;

	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
			    vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(bit_to_vq(bit));
}

#ifdef CONFIG_SYSCTL

static int sve_proc_do_default_vl(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret;
	int vl = sve_default_vl;
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = sve_max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	sve_default_vl = find_supported_vector_length(vl);
	return 0;
}

static struct ctl_table sve_default_vl_table[] = {
	{
		.procname = "sve_default_vector_length",
		.mode = 0644,
		.proc_handler = sve_proc_do_default_vl,
	},
	{ }
};

static int __init sve_sysctl_init(void)
{
	if (system_supports_sve())
		if (!register_sysctl("abi", sve_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! CONFIG_SYSCTL */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! CONFIG_SYSCTL */

#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
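
/*
 * Worked example, illustrative only: the Zn slices are laid out
 * contiguously, each SVE_SIG_ZREG_SIZE(vq) == vq * 16 bytes long, so
 * for vq == 4 (512-bit vectors) ZREG(sst, 4, 2) points 128 bytes into
 * the buffer.
 */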

/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i)
		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
		       sizeof(fst->vregs[i]));
}

/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < 32; ++i)
		memcpy(&fst->vregs[i], ZREG(sst, vq, i),
		       sizeof(fst->vregs[i]));
}

#ifdef CONFIG_ARM64_SVE

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
}
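
/*
 * Worked example, illustrative and derived from the sigcontext layout
 * macros: SVE_SIG_REGS_SIZE(vq) covers 32 Z-registers of vq * 16 bytes
 * plus 16 P-registers and FFR of vq * 2 bytes each, i.e. 546 * vq bytes.
 * At the SVE_VL_ARCH_MAX limit (vq == 16) that is 8736 bytes, the "~8KB"
 * upper bound mentioned in sve_alloc() below.
 */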

/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data.  The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability: except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by task.
 */
void sve_alloc(struct task_struct *task)
{
	if (task->thread.sve_state) {
		memset(task->thread.sve_state, 0, sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);

	/*
	 * If future SVE revisions can have larger vectors though,
	 * this may cease to be true:
	 */
	BUG_ON(!task->thread.sve_state);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace.  task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_sync_to_sve(struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SVE))
		fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace.  task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void sve_sync_to_fpsimd(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the task->thread.uw.fpsimd_state.
 *
 * This should only be called by ptrace to merge new FPSIMD register
 * values into a task for which SVE is currently active.
 * task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must already have been initialised with
 * the new FPSIMD register values to be merged in.
 */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;

	if (!test_tsk_thread_flag(task, TIF_SVE))
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);

	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));

	for (i = 0; i < 32; ++i)
		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
		       sizeof(fst->vregs[i]));
}

int sve_set_vector_length(struct task_struct *task,
			  unsigned long vl, unsigned long flags)
{
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic SVE code can
	 * work with.  A flag may be assigned in the future to allow setting
	 * of larger vector lengths without confusing older software.
	 */
	if (vl > SVE_VL_ARCH_MAX)
		vl = SVE_VL_ARCH_MAX;

	vl = find_supported_vector_length(vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task->thread.sve_vl_onexec = vl;
	else
		/* Reset VL to system default on next exec: */
		task->thread.sve_vl_onexec = 0;

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task->thread.sve_vl)
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * non-SVE thread.
	 */
	if (task == current) {
		local_bh_disable();

		fpsimd_save();
		set_thread_flag(TIF_FOREIGN_FPSTATE);
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);

	if (task == current)
		local_bh_enable();

	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
	sve_free(task);

	task->thread.sve_vl = vl;

out:
	update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}

/*
 * Encode the current vector length and flags for return.
 * This is only required for prctl(): ptrace has separate fields.
 *
 * flags are as for sve_set_vector_length().
 */
static int sve_prctl_status(unsigned long flags)
{
	int ret;

	if (flags & PR_SVE_SET_VL_ONEXEC)
		ret = current->thread.sve_vl_onexec;
	else
		ret = current->thread.sve_vl;

	if (test_thread_flag(TIF_SVE_VL_INHERIT))
		ret |= PR_SVE_VL_INHERIT;

	return ret;
}

/* PR_SVE_SET_VL */
int sve_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sve())
		return -EINVAL;

	ret = sve_set_vector_length(current, vl, flags);
	if (ret)
		return ret;

	return sve_prctl_status(flags);
}

/* PR_SVE_GET_VL */
int sve_get_current_vl(void)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_prctl_status(0);
}
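
/*
 * Illustrative userspace usage, not part of this file: a thread selects
 * its vector length through prctl(2), using the constants from
 * <linux/prctl.h>:
 *
 *	ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *	if (ret >= 0)
 *		vl = ret & PR_SVE_VL_LEN_MASK;	(the VL actually granted)
 */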

/*
 * Bitmap for temporary storage of the per-CPU set of supported vector lengths
 * during secondary boot.
 */
static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);

static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;
	unsigned long zcr;

	bitmap_zero(map, SVE_VQ_MAX);

	zcr = ZCR_ELx_LEN_MASK;
	zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
		vl = sve_get_vl();
		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(vq_to_bit(vq), map);
	}
}

void __init sve_init_vq_map(void)
{
	sve_probe_vqs(sve_vq_map);
}

/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 */
void sve_update_vq_map(void)
{
	sve_probe_vqs(sve_secondary_vq_map);
	bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
}

/* Check whether the current CPU supports all VQs in the committed set */
int sve_verify_vq_map(void)
{
	int ret = 0;

	sve_probe_vqs(sve_secondary_vq_map);
	bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
		      SVE_VQ_MAX);
	if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
			smp_processor_id());
		ret = -EINVAL;
	}

	return ret;
}

static void __init sve_efi_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI))
		return;

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system and we are returning void,
	 * so no attempt is made to handle this situation here.
	 */
	if (!sve_vl_valid(sve_max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}

/*
 * Enable SVE for EL1.
 * Intended for use by the cpufeatures code during CPU boot.
 */
void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();
}

/*
 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 * vector length.
 *
 * Use only if SVE is present.
 * This function clobbers the SVE vector length.
 */
u64 read_zcr_features(void)
{
	u64 zcr;
	unsigned int vq_max;

	/*
	 * Set the maximum possible VL, and write zeroes to all other
	 * bits to see if they stick.
	 */
	sve_kernel_enable(NULL);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

	zcr = read_sysreg_s(SYS_ZCR_EL1);
	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
	vq_max = sve_vq_from_vl(sve_get_vl());
	zcr |= vq_max - 1; /* set LEN field to maximum effective value */

	return zcr;
}

void __init sve_setup(void)
{
	u64 zcr;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);

	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);

	/*
	 * Sanity-check that the max VL we determined through CPU features
	 * corresponds properly to sve_vq_map.  If not, do our best:
	 */
	if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
		sve_max_vl = find_supported_vector_length(sve_max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	sve_default_vl = find_supported_vector_length(64);

	pr_info("SVE: maximum available vector length %u bytes per vector\n",
		sve_max_vl);
	pr_info("SVE: default vector length %u bytes per vector\n",
		sve_default_vl);

	sve_efi_setup();
}

/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */

/*
 * Trapped SVE access.
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and TIF_SVE is set so that
 * the SVE access trap will be disabled the next time this task
 * reaches ret_to_user.
 *
 * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
		return;
	}

	sve_alloc(current);

	local_bh_disable();

	fpsimd_save();
	fpsimd_to_sve(current);

	/* Force ret_to_user to reload the registers: */
	fpsimd_flush_task_state(current);
	set_thread_flag(TIF_FOREIGN_FPSTATE);

	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	local_bh_enable();
}

/*
 * Trapped FP/ASIMD access.
 */
asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}

/*
 * Raise a SIGFPE for the current process.
 */
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
	unsigned int si_code = FPE_FLTUNK;

	if (esr & ESR_ELx_FP_EXC_TFV) {
		if (esr & FPEXC_IOF)
			si_code = FPE_FLTINV;
		else if (esr & FPEXC_DZF)
			si_code = FPE_FLTDIV;
		else if (esr & FPEXC_OFF)
			si_code = FPE_FLTOVF;
		else if (esr & FPEXC_UFF)
			si_code = FPE_FLTUND;
		else if (esr & FPEXC_IXF)
			si_code = FPE_FLTRES;
	}

	send_sig_fault(SIGFPE, si_code,
		       (void __user *)instruction_pointer(regs),
		       current);
}

void fpsimd_thread_switch(struct task_struct *next)
{
	bool wrong_task, wrong_cpu;

	if (!system_supports_fpsimd())
		return;

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/*
	 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
	 * state.  For kernel threads, FPSIMD registers are never loaded
	 * and wrong_task and wrong_cpu will always be true.
	 */
	wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
					&next->thread.uw.fpsimd_state;
	wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
			       wrong_task || wrong_cpu);
}

void fpsimd_flush_thread(void)
{
	int vl, supported_vl;

	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));
	fpsimd_flush_task_state(current);

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);

		/*
		 * Reset the task vector length as required.
		 * This is where we ensure that all user tasks have a valid
		 * vector length configured: no kernel task can become a user
		 * task without an exec and hence a call to this function.
		 * By the time the first call to this function is made, all
		 * early hardware probing is complete, so sve_default_vl
		 * should be valid.
		 * If a bug causes this to go wrong, we make some noise and
		 * try to fudge thread.sve_vl to a safe value here.
		 */
		vl = current->thread.sve_vl_onexec ?
			current->thread.sve_vl_onexec : sve_default_vl;

		if (WARN_ON(!sve_vl_valid(vl)))
			vl = SVE_VL_MIN;

		supported_vl = find_supported_vector_length(vl);
		if (WARN_ON(supported_vl != vl))
			vl = supported_vl;

		current->thread.sve_vl = vl;

		/*
		 * If the task is not set to inherit, ensure that the vector
		 * length will be reset by a subsequent exec:
		 */
		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
			current->thread.sve_vl_onexec = 0;
	}

	set_thread_flag(TIF_FOREIGN_FPSTATE);

	local_bh_enable();
}

/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();
	fpsimd_save();
	local_bh_enable();
}

/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}

/*
 * Associate current's FPSIMD context with this cpu
 * Preemption must be disabled when calling this function.
 */
void fpsimd_bind_task_to_cpu(void)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	last->st = &current->thread.uw.fpsimd_state;
	current->thread.fpsimd_cpu = smp_processor_id();

	if (system_supports_sve()) {
		/* Toggle SVE trapping for userspace if needed */
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();

		/* Serialised by exception return to user */
	}
}

void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!in_softirq() && !irqs_disabled());

	last->st = st;
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'
 */
void fpsimd_restore_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_task_to_cpu();
	}

	local_bh_enable();
}

/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (!system_supports_fpsimd())
		return;

	local_bh_disable();

	current->thread.uw.fpsimd_state = *state;
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		fpsimd_to_sve(current);

	task_fpsimd_load();
	fpsimd_bind_task_to_cpu();

	clear_thread_flag(TIF_FOREIGN_FPSTATE);

	local_bh_enable();
}

/*
 * Invalidate live CPU copies of task t's FPSIMD state
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
	t->thread.fpsimd_cpu = NR_CPUS;
}

void fpsimd_flush_cpu_state(void)
{
	__this_cpu_write(fpsimd_last_state.st, NULL);
	set_thread_flag(TIF_FOREIGN_FPSTATE);
}

#ifdef CONFIG_KERNEL_MODE_NEON

DEFINE_PER_CPU(bool, kernel_neon_busy);
EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	local_bh_disable();

	__this_cpu_write(kernel_neon_busy, true);

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();

	preempt_disable();

	local_bh_enable();
}
EXPORT_SYMBOL(kernel_neon_begin);

/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	bool busy;

	if (!system_supports_fpsimd())
		return;

	busy = __this_cpu_xchg(kernel_neon_busy, false);
	WARN_ON(!busy); /* No matching kernel_neon_begin()? */

	preempt_enable();
}
EXPORT_SYMBOL(kernel_neon_end);
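
/*
 * Illustrative only, the standard calling idiom rather than code from
 * this file: a kernel-mode NEON user brackets its SIMD code with
 * kernel_neon_begin()/kernel_neon_end() and falls back to a scalar
 * implementation where SIMD is unavailable:
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		(NEON code here)
 *		kernel_neon_end();
 *	} else {
 *		(scalar fallback here)
 *	}
 */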

#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */

/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		/*
		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
		 * preserving:
		 */
		if (system_supports_sve() && likely(efi_sve_state)) {
			char *sve_state = this_cpu_ptr(efi_sve_state);

			__this_cpu_write(efi_sve_state_used, true);

			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
		} else {
			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		}

		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}

/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
		kernel_neon_end();
	} else {
		if (system_supports_sve() &&
		    likely(__this_cpu_read(efi_sve_state_used))) {
			char const *sve_state = this_cpu_ptr(efi_sve_state);

			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       sve_vq_from_vl(sve_get_vl()) - 1);

			__this_cpu_write(efi_sve_state_used, false);
		} else {
			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
		}
	}
}

#endif /* CONFIG_EFI */

#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		fpsimd_save();
		fpsimd_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
static inline void fpsimd_hotplug_init(void) { }
#endif

/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (elf_hwcap & HWCAP_FP) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!(elf_hwcap & HWCAP_ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	return sve_sysctl_init();
}
core_initcall(fpsimd_init);