// SPDX-License-Identifier: GPL-2.0-only

#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>

#include "decode-insn.h"

#define UPROBE_TRAP_NR	UINT_MAX

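/*
 * Check whether @insn is the uprobe breakpoint. With the compressed (RVC)
 * extension the breakpoint is a 16-bit c.ebreak, so only the low halfword
 * is compared.
 */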
bool is_swbp_insn(uprobe_opcode_t *insn)
{
#ifdef CONFIG_RISCV_ISA_C
	return (*insn & 0xffff) == UPROBE_SWBP_INSN;
#else
	return *insn == UPROBE_SWBP_INSN;
#endif
}

unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

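/*
 * Decode the instruction at the probed address and record how it will be
 * handled: INSN_GOOD_NO_SLOT means it can be simulated by the kernel,
 * INSN_GOOD means it must be single-stepped out of line in an XOL slot.
 */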
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	probe_opcode_t opcode;

	opcode = *(probe_opcode_t *)(&auprobe->insn[0]);

	auprobe->insn_size = GET_INSN_LENGTH(opcode);

	switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	case INSN_GOOD:
		auprobe->simulate = false;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

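/*
 * Prepare to single-step the original instruction out of line: save the
 * current trap cause, mark the task as single-stepping via UPROBE_TRAP_NR
 * and redirect the PC to the XOL slot.
 */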
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	utask->autask.saved_cause = current->thread.bad_cause;
	current->thread.bad_cause = UPROBE_TRAP_NR;

	instruction_pointer_set(regs, utask->xol_vaddr);

	return 0;
}

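/*
 * Called when the XOL single-step has completed: advance the PC to the
 * instruction following the probed address.
 */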
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);

	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);

	return 0;
}

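/*
 * If bad_cause no longer holds UPROBE_TRAP_NR, the task took an unexpected
 * trap while single-stepping in the XOL slot.
 */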
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.bad_cause != UPROBE_TRAP_NR)
		return true;

	return false;
}

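/*
 * Simulate the probed instruction instead of single-stepping it when the
 * decoder supplied a simulation handler; returning true skips the XOL step.
 */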
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * The task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);
}

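/*
 * A return instance is still alive while the stack pointer has not moved
 * above the stack value recorded when the return address was hijacked
 * (the stack grows downwards); for a chained call the two may be equal.
 */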
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;

	return regs->sp < ret->stack;
}

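/*
 * Replace the return address in ra with the uretprobe trampoline and hand
 * the original value back to the core so it can be restored later.
 */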
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long ra;

	ra = regs->ra;

	regs->ra = trampoline_vaddr;

	return ra;
}

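/*
 * The notifier path is unused on RISC-V; breakpoint and single-step events
 * are reported through uprobe_breakpoint_handler() and
 * uprobe_single_step_handler() below instead.
 */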
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

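/*
 * Called from the ebreak trap handling code: the first ebreak at the probed
 * address reports a breakpoint hit, the second one (appended to the XOL
 * slot) reports completion of the single-step.
 */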
bool uprobe_breakpoint_handler(struct pt_regs *regs)
{
	if (uprobe_pre_sstep_notifier(regs))
		return true;

	return false;
}

bool uprobe_single_step_handler(struct pt_regs *regs)
{
	if (uprobe_post_sstep_notifier(regs))
		return true;

	return false;
}

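/*
 * Fill the XOL slot: copy the probed instruction and append a 32-bit ebreak
 * behind it so that the trap taken after the out-of-line step hands control
 * back to the uprobes code.
 */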
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	/* Initialize the slot */
	void *kaddr = kmap_atomic(page);
	void *dst = kaddr + (vaddr & ~PAGE_MASK);

	memcpy(dst, src, len);

	/* Add an ebreak behind the opcode to simulate a single step */
	if (vaddr) {
		dst += GET_INSN_LENGTH(*(probe_opcode_t *)src);
		*(uprobe_opcode_t *)dst = __BUG_INSN_32;
	}

	kunmap_atomic(kaddr);

	/*
	 * We probably need flush_icache_user_page() but it needs a vma.
	 * This should work on most architectures by default. If an
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}