// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 */
#include <linux/kernel.h>
#include <linux/audit.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/seccomp.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/syscall.h>

static void init_fp_ctx(struct task_struct *target)
{
	/* The target already has context */
	if (tsk_used_math(target))
		return;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
	set_stopped_child_used_math(target);
}
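
/*
 * Note: marking the stopped child as having used math ensures the FP
 * context written through the regset handlers below is preserved and
 * actually loaded into the FPU when the child next runs.
 */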

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;
	struct pt_regs *regs = task_pt_regs(target);

	r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
	r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
	r = membuf_write(&to, &regs->csr_era, sizeof(u64));
	r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));

	return r;
}
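
/*
 * gpr_set() below must accept the same layout that gpr_get() produces:
 * regs[0..31], then orig_a0, csr_era and csr_badvaddr, one u64 each.
 */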

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;
	int a0_start = sizeof(u64) * GPR_NUM;
	int era_start = a0_start + sizeof(u64);
	int badvaddr_start = era_start + sizeof(u64);
	struct pt_regs *regs = task_pt_regs(target);

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->regs,
				 0, a0_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->orig_a0,
				  a0_start, a0_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_era,
				  era_start, era_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_badvaddr,
				  badvaddr_start, badvaddr_start + sizeof(u64));

	return err;
}

/*
 * Get the general floating-point registers.
 */
static int gfpr_get(struct task_struct *target, struct membuf *to)
{
	return membuf_write(to, &target->thread.fpu.fpr,
			    sizeof(elf_fpreg_t) * NUM_FPU_REGS);
}
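
/*
 * get_fpr64(..., 0) extracts only the low 64 bits of each (possibly
 * wider) FPR slot, which is all that belongs in the NT_PRFPREG view
 * when LSX/LASX vector state shares the same storage.
 */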
static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
{
	int i, r;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++) {
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
	}

	return r;
}

/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	save_fpu_regs(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		r = gfpr_get(target, &to);
	else
		r = gfpr_get_simd(target, &to);

	r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
	r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));

	return r;
}

static int gfpr_set(struct task_struct *target,
		    unsigned int *pos, unsigned int *count,
		    const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu.fpr,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

static int gfpr_set_simd(struct task_struct *target,
			 unsigned int *pos, unsigned int *count,
			 const void **kbuf, const void __user **ubuf)
{
	int i, err;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fcsr_start = fcc_start + sizeof(u64);
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));
	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
	else
		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcc, fcc_start,
				  fcc_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcsr, fcsr_start,
				  fcsr_start + sizeof(u32));

	return err;
}
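
/*
 * cfg_get() below exports the raw CPUCFG identification words as read
 * back by the cpucfg instruction; the regset is read-only from
 * userspace, so cfg_set() deliberately ignores writes.
 */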

static int cfg_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int i, r;
	u32 cfg_val;

	i = 0;
	while (to.left > 0) {
		cfg_val = read_cpucfg(i++);
		r = membuf_write(&to, &cfg_val, sizeof(u32));
	}

	return r;
}

/*
 * CFG registers are read-only.
 */
static int cfg_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	return 0;
}

#ifdef CONFIG_CPU_HAS_LSX

static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to, unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
			membuf_store(to, fill);
		}
	}
}
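
/*
 * The all-ones fill mirrors init_fp_ctx(), which also initialises
 * unused FP state to ~0, so a debugger reading dead vector state sees
 * a consistent 0xff pattern rather than stale data.
 */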

static int simd_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;

	save_fpu_regs(target);

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or LSX, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
	} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
		/* Copy LSX 128 Bit context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 16);
#endif
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
	}

	return 0;
}

static int simd_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	return err;
}

#endif /* CONFIG_CPU_HAS_LSX */

#ifdef CONFIG_CPU_HAS_LBT
static int lbt_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
	r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
	r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
	r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
	r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
	r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));

	return r;
}

static int lbt_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err = 0;
	const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
	const int ftop_start = eflags_start + sizeof(u32);

	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.scr0,
				  0, 4 * sizeof(target->thread.lbt.scr0));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.eflags,
				  eflags_start, ftop_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.ftop,
				  ftop_start, ftop_start + sizeof(u32));

	return err;
}
#endif /* CONFIG_CPU_HAS_LBT */

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

	for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
		if (current->thread.hbp_break[i] == bp)
			break;

	for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
		if (current->thread.hbp_watch[i] == bp)
			break;

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if (idx >= LOONGARCH_MAX_BRP)
			return ERR_PTR(-EINVAL);
		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
		bp = tsk->thread.hbp_break[idx];
		break;
	case NT_LOONGARCH_HW_WATCH:
		if (idx >= LOONGARCH_MAX_WRP)
			return ERR_PTR(-EINVAL);
		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
		bp = tsk->thread.hbp_watch[idx];
		break;
	}

	return bp;
}
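
/*
 * Both the lookup above and the update below clamp idx with
 * array_index_nospec(), so a mispredicted bounds check cannot be used
 * to speculatively index past the hbp_break/hbp_watch arrays.
 */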

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if (idx >= LOONGARCH_MAX_BRP)
			return -EINVAL;
		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
		tsk->thread.hbp_break[idx] = bp;
		break;
	case NT_LOONGARCH_HW_WATCH:
		if (idx >= LOONGARCH_MAX_WRP)
			return -EINVAL;
		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
		tsk->thread.hbp_watch[idx] = bp;
		break;
	}

	return 0;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	int err, type;
	struct perf_event *bp;
	struct perf_event_attr attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_LOONGARCH_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type;

	err = arch_bp_generic_fields(ctrl, &len, &type);
	if (err)
		return err;

	attr->bp_len = len;
	attr->bp_type = type;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
{
	u8 num;
	u64 reg = 0;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_LOONGARCH_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	*info = reg | num;

	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;

	return 0;
}

static int ptrace_hbp_get_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *mask)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*mask = bp ? counter_arch_bp(bp)->mask : 0;

	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;

	return 0;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct thread_info *ti = task_thread_info(tsk);

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
		ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
		break;
	case NT_LOONGARCH_HW_WATCH:
		decode_ctrl_reg(uctrl, &ctrl);
		break;
	default:
		return -EINVAL;
	}

	if (uctrl & CTRL_PLV_ENABLE) {
		err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
		if (err)
			return err;
		attr.disabled = 0;
		set_ti_thread_flag(ti, TIF_LOAD_WATCH);
	} else {
		attr.disabled = 1;
		clear_ti_thread_flag(ti, TIF_LOAD_WATCH);
	}

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 mask)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	info = counter_arch_bp(bp);
	info->mask = mask;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;

	/* Kernel-space address cannot be monitored by user-space */
	if ((unsigned long)addr >= XKPRANGE)
		return -EINVAL;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;

	return modify_user_hw_breakpoint(bp, &attr);
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_MASK_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)
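
/*
 * These sizes mirror one dbg_regs[] slot of struct user_watch_state_v2:
 * a u64 address, a u64 mask, a u32 ctrl word and a u32 of padding,
 * in the order the get/set handlers below walk them.
 */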

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	u64 info;
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));

	/* (address, mask, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;

		membuf_store(&to, addr);
		membuf_store(&to, mask);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0, offset, limit;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	offset = offsetof(struct user_watch_state_v2, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, mask, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
					 offset, offset + PTRACE_HBP_MASK_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
		if (ret)
			return ret;
		offset += PTRACE_HBP_MASK_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

#endif

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
#ifdef CONFIG_CPU_HAS_LBT
	REGSET_LBT,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};

static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(elf_greg_t),
		.align		= sizeof(elf_greg_t),
		.regset_get	= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_CPUCFG] = {
		.core_note_type	= NT_LOONGARCH_CPUCFG,
		.n		= 64,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= cfg_get,
		.set		= cfg_set,
	},
#ifdef CONFIG_CPU_HAS_LSX
	[REGSET_LSX] = {
		.core_note_type	= NT_LOONGARCH_LSX,
		.n		= NUM_FPU_REGS,
		.size		= 16,
		.align		= 16,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LASX
	[REGSET_LASX] = {
		.core_note_type	= NT_LOONGARCH_LASX,
		.n		= NUM_FPU_REGS,
		.size		= 32,
		.align		= 32,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LBT
	[REGSET_LBT] = {
		.core_note_type	= NT_LOONGARCH_LBT,
		.n		= 5,
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= lbt_get,
		.set		= lbt_set,
	},
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type	= NT_LOONGARCH_HW_BREAK,
		.n		= sizeof(struct user_watch_state_v2) / sizeof(u32),
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= hw_break_get,
		.set		= hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type	= NT_LOONGARCH_HW_WATCH,
		.n		= sizeof(struct user_watch_state_v2) / sizeof(u32),
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= hw_break_get,
		.set		= hw_break_set,
	},
#endif
};

static const struct user_regset_view user_loongarch64_view = {
	.name		= "loongarch64",
	.e_machine	= ELF_ARCH,
	.regsets	= loongarch64_regsets,
	.n		= ARRAY_SIZE(loongarch64_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_loongarch64_view;
}
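
/*
 * Only the native 64-bit regset view is provided here; no separate
 * compat view is defined in this file.
 */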

static inline int read_user(struct task_struct *target, unsigned long addr,
			    unsigned long __user *data)
{
	unsigned long tmp = 0;

	switch (addr) {
	case 0 ... 31:
		tmp = task_pt_regs(target)->regs[addr];
		break;
	case ARG0:
		tmp = task_pt_regs(target)->orig_a0;
		break;
	case PC:
		tmp = task_pt_regs(target)->csr_era;
		break;
	case BADVADDR:
		tmp = task_pt_regs(target)->csr_badvaddr;
		break;
	default:
		return -EIO;
	}

	return put_user(tmp, data);
}
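
/*
 * write_user() below is the exact mirror of read_user(): the same
 * PTRACE_PEEKUSR/POKEUSR register index selects the same pt_regs field
 * in both directions.
 */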

static inline int write_user(struct task_struct *target, unsigned long addr,
			     unsigned long data)
{
	switch (addr) {
	case 0 ... 31:
		task_pt_regs(target)->regs[addr] = data;
		break;
	case ARG0:
		task_pt_regs(target)->orig_a0 = data;
		break;
	case PC:
		task_pt_regs(target)->csr_era = data;
		break;
	case BADVADDR:
		task_pt_regs(target)->csr_badvaddr = data;
		break;
	default:
		return -EIO;
	}

	return 0;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = write_user(child, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
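
/*
 * ptrace_triggered() disables the breakpoint as soon as it fires,
 * which turns the hardware instruction breakpoint into the one-shot
 * trap used for single-stepping below.
 */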

static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &tsk->thread;

	bp = thread->hbp_break[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_8;
		attr.bp_type = HW_BREAKPOINT_X;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->hbp_break[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* Reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;

		csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
	}
	info = counter_arch_bp(bp);
	info->mask = TASK_SIZE - 1;

	return 0;
}
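
/*
 * With the mask set to TASK_SIZE - 1, the address compare should
 * ignore all userspace PC bits, so the child traps after executing a
 * single instruction wherever it branches (an inference from the mask
 * semantics; the source does not spell this out).
 */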

void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	set_single_step(task, task_pt_regs(task)->csr_era);
	task->thread.single_step = task_pt_regs(task)->csr_era;
	set_ti_thread_flag(ti, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#endif