// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/rseq.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(pstate),
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
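
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * in-kernel users such as the kprobe fetch code can resolve a register
 * by name and then read it from a saved pt_regs via regs_get_register()
 * from <asm/ptrace.h>; "x2" is an arbitrary example name:
 *
 *	int offs = regs_query_register_offset("x2");
 *	u64 val;
 *
 *	if (offs >= 0)
 *		val = regs_get_register(regs, offs);
 */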
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long));

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	if (regs_within_kernel_stack(regs, (unsigned long)addr))
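
/*
 * Illustrative sketch (editor's note, not part of the original file): a
 * kprobe handler could fetch the word at the top of the traced kernel
 * stack; @n = 0 names the entry the stack pointer currently points at:
 *
 *	unsigned long top = regs_get_kernel_stack_nth(regs, 0);
 *
 * Out-of-range entries read back as 0 rather than faulting, because the
 * helper validates the address with regs_within_kernel_stack() first.
 */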
/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

	if (is_compat_task()) {
		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);

		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,

	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;

void ptrace_hw_copy_thread(struct task_struct *tsk)
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
	struct perf_event *bp = ERR_PTR(-EINVAL);

	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				struct perf_event *bp)
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
	struct perf_event *bp;
	struct perf_event_attr attr;

	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		return ERR_PTR(-EINVAL);

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_len = HW_BREAKPOINT_LEN_4;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);

	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)

	attr->bp_type = type;
	attr->bp_addr += offset;

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);

	reg |= debug_monitors_arch();

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	*addr = bp ? counter_arch_bp(bp)->address : 0;

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

		bp = ptrace_hbp_create(note_type, tsk, idx);

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);

	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);

	return modify_user_hw_breakpoint(bp, &attr);

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);

	err = modify_user_hw_breakpoint(bp, &attr);

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)
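
/*
 * Layout note (editor's sketch, derived from the uapi headers): the
 * NT_ARM_HW_BREAK/NT_ARM_HW_WATCH regset payload is struct
 * user_hwdebug_state -- a u32 resource-info word plus padding, followed
 * by one { u64 addr; u32 ctrl; u32 pad; } slot per breakpoint or
 * watchpoint, which is exactly what hw_break_get()/hw_break_set() below
 * walk using PTRACE_HBP_ADDR_SZ/PTRACE_HBP_CTRL_SZ/PTRACE_HBP_PAD_SZ.
 */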
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
	unsigned int note_type = regset->core_note_type;

	ret = ptrace_hbp_get_resource_info(note_type, &info);

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		offset += PTRACE_HBP_ADDR_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		offset += PTRACE_HBP_CTRL_SZ;
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;

#endif /* CONFIG_HAVE_HW_BREAKPOINT */
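
/*
 * Illustrative sketch (editor's note, not part of the original file): a
 * tracer could query how many hardware breakpoint slots the target has
 * by reading the resource-info word of this regset (needs
 * <sys/ptrace.h>, <sys/uio.h>, <linux/elf.h> and the uapi <asm/ptrace.h>):
 *
 *	struct user_hwdebug_state dbg;
 *	struct iovec iov = { .iov_base = &dbg, .iov_len = sizeof(dbg) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov) == 0)
 *		num_slots = dbg.dbg_info & 0xff;
 */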
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return membuf_write(&to, uregs, sizeof(*uregs));
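
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * this is the regset a debugger reads for the general-purpose registers,
 * e.g. (needs <sys/ptrace.h>, <sys/uio.h>, <linux/elf.h>, <asm/ptrace.h>):
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	printf("pc=%llx sp=%llx\n", regs.pc, regs.sp);
 */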
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);

	if (!valid_user_regs(&newregs, target))

	task_pt_regs(target)->user_regs = newregs;

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
	if (!system_supports_fpsimd())

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
	if (!system_supports_fpsimd())

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));

	target->thread.uw.fpsimd_state = newstate;

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
	if (!system_supports_fpsimd())

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

static int tls_get(struct task_struct *target, const struct user_regset *regset,
	if (target == current)
		tls_preserve_current_state();

	ret = membuf_store(&to, target->thread.uw.tp_value);
	if (system_supports_tpidr2())
		ret = membuf_store(&to, target->thread.tpidr2_el0);
		ret = membuf_zero(&to, sizeof(u64));

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
	unsigned long tls[2];

	tls[0] = target->thread.uw.tp_value;
	if (system_supports_tpidr2())
		tls[1] = target->thread.tpidr2_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);

	target->thread.uw.tp_value = tls[0];
	if (system_supports_tpidr2())
		target->thread.tpidr2_el0 = tls[1];

static int fpmr_get(struct task_struct *target, const struct user_regset *regset,
	if (!system_supports_fpmr())

	if (target == current)
		fpsimd_preserve_current_state();

	return membuf_store(&to, target->thread.uw.fpmr);

static int fpmr_set(struct task_struct *target, const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
	if (!system_supports_fpmr())

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);

	target->thread.uw.fpmr = fpmr;

	fpsimd_flush_task_state(target);

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
	return membuf_store(&to, task_pt_regs(target)->syscallno);

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
	int syscallno = task_pt_regs(target)->syscallno;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);

	task_pt_regs(target)->syscallno = syscallno;
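
/*
 * Illustrative sketch (editor's note, not part of the original file): a
 * tracer stopped at syscall entry can rewrite the syscall number through
 * this regset, e.g. setting it to -1 to make the kernel skip the call:
 *
 *	int nr = -1;
 *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
 *
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 */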
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;
	fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

			header->flags |= SVE_PT_REGS_FPSIMD;
			header->flags |= SVE_PT_REGS_SVE;

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),

static unsigned int sve_size_from_header(struct user_sve_header const *header)
	return ALIGN(header->size, SVE_VQ_BYTES);
static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
	struct user_sve_header header;
	unsigned long start, end;

	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr and fpcr, which must follow contiguously in
		 * struct fpsimd_state:
		 */
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,

		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);
static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
	if (!system_supports_sve())

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
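
/*
 * Illustrative sketch (editor's note, not part of the original file): a
 * tracer can discover the target's SVE vector length by reading only the
 * regset header (struct user_sve_header from the uapi <asm/ptrace.h>):
 *
 *	struct user_sve_header hdr;
 *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov) == 0)
 *		printf("vl=%u max_vl=%u\n", hdr.vl, hdr.max_vl);
 */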
static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
	struct user_sve_header header;
	unsigned long start, end;

	if (count < sizeof(header))
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		u64 old_svcr = target->thread.svcr;

			target->thread.svcr &= ~SVCR_SM_MASK;

			target->thread.svcr |= SVCR_SM_MASK;

			/*
			 * Disable traps and ensure there is SME storage but
			 * preserve any currently set values in ZA/ZT.
			 */
			sme_alloc(target, false);
			set_tsk_thread_flag(target, TIF_SME);

		/*
		 * If we switched then invalidate any existing SVE
		 * state and ensure there's storage.
		 */
		if (target->thread.svcr != old_svcr)
			sve_alloc(target, true);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;

	/*
	 * Otherwise: no registers or full SVE case. For backwards
	 * compatibility reasons we treat empty flags as SVE registers.
	 */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {

	sve_alloc(target, true);
	if (!target->thread.sve_state) {
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing
	 * registers unmodified. Only enable SVE if we are
	 * configuring normal SVE, a system with streaming SVE may not
	 * have normal SVE.
	 */
	fpsimd_sync_to_sve(target);
	if (type == ARM64_VEC_SVE)
		set_tsk_thread_flag(target, TIF_SVE);
	target->thread.fp_type = FP_STATE_SVE;

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,

	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end);

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,

	fpsimd_flush_task_state(target);
static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
	if (!system_supports_sve())

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int ssve_get(struct task_struct *target,
		    const struct user_regset *regset,
	if (!system_supports_sme())

	return sve_get_common(target, regset, to, ARM64_VEC_SME);

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
	if (!system_supports_sme())

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
	struct user_za_header header;
	unsigned long start, end;

	if (!system_supports_sme())

	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.sme_state, end - start);

	/* Zero any trailing padding */
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
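
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * reading only struct user_za_header (uapi <asm/ptrace.h>) tells a
 * tracer whether ZA is live -- header.size > ZA_PT_ZA_OFFSET means the
 * ZA matrix data follows, otherwise only the header is populated:
 *
 *	struct user_za_header hdr;
 *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov);
 */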
static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
	struct user_za_header header;
	unsigned long start, end;

	if (!system_supports_sme())

	if (count < sizeof(header))
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {

	/*
	 * Only flush the storage if PSTATE.ZA was not already set,
	 * otherwise preserve any existing data.
	 */
	sme_alloc(target, !thread_za_enabled(&target->thread));
	if (!target->thread.sme_state)

	/* If there is no data then disable ZA */
		target->thread.svcr &= ~SVCR_ZA_MASK;

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sme_state,

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

	fpsimd_flush_task_state(target);
static int zt_get(struct task_struct *target,
		  const struct user_regset *regset,
	if (!system_supports_sme2())

	/*
	 * If PSTATE.ZA is not set then ZT will be zeroed when it is
	 * enabled so report the current register value as zero.
	 */
	if (thread_za_enabled(&target->thread))
		membuf_write(&to, thread_zt_state(&target->thread),
		membuf_zero(&to, ZT_SIG_REG_BYTES);

static int zt_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
	if (!system_supports_sme2())

	/* Ensure SVE storage in case this is first use of SME */
	sve_alloc(target, false);
	if (!target->thread.sve_state)

	if (!thread_za_enabled(&target->thread)) {
		sme_alloc(target, true);
		if (!target->thread.sme_state)

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 thread_zt_state(&target->thread),
				 0, ZT_SIG_REG_BYTES);
		target->thread.svcr |= SVCR_ZA_MASK;
		set_tsk_thread_flag(target, TIF_SME);

	fpsimd_flush_task_state(target);

#endif /* CONFIG_ARM64_SME */
#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {

	if (!system_supports_address_auth())

	return membuf_write(&to, &uregs, sizeof(uregs));

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));

static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
	return (__uint128_t)key->hi << 64 | key->lo;

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,

	pac_address_keys_from_user(keys, &user_keys);

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
	ukeys->apgakey = pac_key_to_user(&keys->apga);

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
	keys->apga = pac_key_from_user(ukeys->apgakey);

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,

	pac_generic_keys_from_user(keys, &user_keys);

#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
	long ctrl = get_tagged_addr_ctrl(target);

	if (IS_ERR_VALUE(ctrl))

	return membuf_write(&to, &ctrl, sizeof(ctrl));
static int tagged_addr_ctrl_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);

	return set_tagged_addr_ctrl(target, ctrl);
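
/*
 * Illustrative sketch (editor's note, not part of the original file):
 * this regset exposes the same per-task state as the
 * PR_SET_TAGGED_ADDR_CTRL/PR_GET_TAGGED_ADDR_CTRL prctl() calls, so a
 * checkpoint tool could read it from a stopped tracee as a single long:
 *
 *	long ctrl;
 *	struct iovec iov = { .iov_base = &ctrl, .iov_len = sizeof(ctrl) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_TAGGED_ADDR_CTRL, &iov);
 */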
enum aarch64_regset {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#ifdef CONFIG_ARM64_SVE
#ifdef CONFIG_ARM64_SME
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,

static const struct user_regset aarch64_regsets[] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,

		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,

		.core_note_type = NT_ARM_TLS,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,

	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,

		.core_note_type = NT_ARM_FPMR,
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = fpmr_get,

#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,

#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
	[REGSET_ZA] = { /* SME ZA */
		.core_note_type = NT_ARM_ZA,
		/*
		 * ZA is a single register but it's variably sized and
		 * the ptrace core requires that the size of any data
		 * be an exact multiple of the configured register
		 * size so report as though we had SVE_VQ_BYTES
		 * registers. These values aren't exposed to
		 * userspace.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
	[REGSET_ZT] = { /* SME ZT */
		.core_note_type = NT_ARM_ZT,
		.size = ZT_SIG_REG_BYTES,
		.align = sizeof(u64),
		.regset_get = zt_get,

#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)

enum compat_regset {

static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
	struct pt_regs *regs = task_pt_regs(task);

		return pstate_to_compat_psr(regs->pstate);

		return regs->orig_x0;

		return regs->regs[idx];
static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
		membuf_store(&to, compat_get_user_reg(target, i++));

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
	struct pt_regs newregs;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;

			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);

			ret = copy_from_user(&reg, ubuf, sizeof(reg));

			ubuf += sizeof(reg);

			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;

			newregs.orig_x0 = reg;

			newregs.regs[idx] = reg;

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;

	fpsimd_flush_task_state(target);

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset,
	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);

	target->thread.uw.tp_value = tls;

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)

static const struct user_regset aarch32_ptrace_regsets[] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set

		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set

		.core_note_type = NT_ARM_TLS,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,

	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)

#ifdef CONFIG_COMPAT
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)

	return put_user(tmp, ret);

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	if (off & 3 || off >= COMPAT_USER_SZ)

	if (off >= sizeof(compat_elf_gregset_t))

		newregs.pstate = compat_psr_to_pstate(val);

		newregs.orig_x0 = val;

		newregs.regs[idx] = val;

	if (!valid_user_regs(&newregs.user_regs, tsk))

	*task_pt_regs(tsk) = newregs;
#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
	return (abs(num) - 1) >> 1;
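
/*
 * Worked example (editor's note): virtual register +1 is breakpoint 0's
 * address and +2 its control word, so (abs(num) - 1) >> 1 maps both to
 * idx 0; +3/+4 map to breakpoint 1. Negative numbers name watchpoints
 * the same way: -1/-2 are watchpoint 0's address and control.
 */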
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
	u8 num_brps, num_wrps, debug_arch, wp_len;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
	int err, idx = compat_ptrace_hbp_num_to_idx(num);

		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);

		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
	int err, idx = compat_ptrace_hbp_num_to_idx(num);

		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);

		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);

	ret = put_user(kdata, data);

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
	ret = get_user(kdata, data);

		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);

	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  0, sizeof(compat_elf_gregset_t),

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    0, sizeof(compat_elf_gregset_t),

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);

		ret = compat_ptrace_request(child, request, addr,

#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;

	return &user_aarch64_view;

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);

	return ptrace_request(child, request, addr, data);

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,

static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
		regs->regs[regno] = saved_reg;
		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		ptrace_report_syscall_exit(regs, 1);
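
/*
 * Illustrative sketch (editor's note, not part of the original file): on
 * the tracer side, this is why a PTRACE_SYSCALL stop can be classified
 * by reading x7 (r12 for a compat tracee) from the NT_PRSTATUS regset:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	int is_entry = (regs.regs[7] == 0);	// 0 = entry, 1 = exit
 */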
int syscall_trace_enter(struct pt_regs *regs)
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;

void syscall_trace_exit(struct pt_regs *regs)
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
			regs->pstate &= ~PSR_AA32_E_BIT;

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * userspace.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
	regs->pstate |= PSR_MODE32_BIT;
static int valid_native_regs(struct user_pt_regs *regs)
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);

	return valid_native_regs(regs);