// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 */
#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>
#define NUM_FTRACE_TRAMPS	2
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
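
/*
 * Generate a branch (with link, if requested) from 'ip' to 'addr'. The
 * target must already be within direct branch range of 'ip'.
 */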
static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
{
	ppc_inst_t op;

	WARN_ON(!is_offset_in_branch_range(addr - ip));
	create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);

	return op;
}
static inline int ftrace_read_inst(unsigned long ip, ppc_inst_t *op)
{
	if (copy_inst_from_kernel_nofault(op, (void *)ip)) {
		pr_err("0x%lx: fetching instruction failed\n", ip);
		return -EFAULT;
	}

	return 0;
}
static inline int ftrace_validate_inst(unsigned long ip, ppc_inst_t inst)
{
	ppc_inst_t op;
	int ret;

	ret = ftrace_read_inst(ip, &op);
	if (!ret && !ppc_inst_equal(op, inst)) {
		pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
		       ip, ppc_inst_as_ulong(inst), ppc_inst_as_ulong(op));
		ret = -EINVAL;
	}

	return ret;
}
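
/*
 * Patch 'new' at 'ip', but only after verifying that 'ip' currently
 * contains the expected 'old' instruction.
 */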
static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
	int ret = ftrace_validate_inst(ip, old);

	if (!ret)
		ret = patch_instruction((u32 *)ip, new);

	return ret;
}
static int is_bl_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}
static unsigned long find_ftrace_tramp(unsigned long ip)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i])
			continue;
		else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
			return ftrace_tramps[i];

	return 0;
}
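
/*
 * Build the call instruction to be patched at rec->ip: branch directly to
 * 'addr' if it is in range, otherwise go via a module stub or one of the
 * kernel ftrace trampolines.
 */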
static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_inst_t *call_inst)
{
	unsigned long ip = rec->ip;
	unsigned long stub;

	if (is_offset_in_branch_range(addr - ip)) {
		/* Within range */
		stub = addr;
#ifdef CONFIG_MODULES
	} else if (rec->arch.mod) {
		/* Module code would be going to one of the module stubs */
		stub = (addr == (unsigned long)ftrace_caller ? rec->arch.mod->arch.tramp :
							       rec->arch.mod->arch.tramp_regs);
#endif
	} else if (core_kernel_text(ip)) {
		/* We would be branching to one of our ftrace stubs */
		stub = find_ftrace_tramp(ip);
		if (!stub) {
			pr_err("0x%lx: No ftrace stubs reachable\n", ip);
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	*call_inst = ftrace_create_branch_inst(ip, stub, 1);
	return 0;
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
	/* This should never be called since we override ftrace_replace_code() */
	WARN_ON(1);
	return -EINVAL;
}
#endif
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t old, new;
	int ret;

	/* This can only ever be called during module load */
	if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(rec->ip)))
		return -EINVAL;

	old = ppc_inst(PPC_RAW_NOP());
	ret = ftrace_get_call_inst(rec, addr, &new);
	if (ret)
		return ret;

	return ftrace_modify_code(rec->ip, old, new);
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	/*
	 * This should never be called since we override ftrace_replace_code(),
	 * as well as ftrace_init_nop()
	 */
	WARN_ON(1);
	return -EINVAL;
}
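
/*
 * Batch-convert all ftrace call sites between nop and call (or from one
 * ftrace handler to another) as directed by the core ftrace code.
 */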
void ftrace_replace_code(int enable)
{
	ppc_inst_t old, new, call_inst, new_call_inst;
	ppc_inst_t nop_inst = ppc_inst(PPC_RAW_NOP());
	unsigned long ip, new_addr, addr;
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret = 0, update;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);
		ip = rec->ip;

		if (rec->flags & FTRACE_FL_DISABLED && !(rec->flags & FTRACE_FL_ENABLED))
			continue;

		addr = ftrace_get_addr_curr(rec);
		new_addr = ftrace_get_addr_new(rec);
		update = ftrace_update_record(rec, enable);

		switch (update) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;
		case FTRACE_UPDATE_MODIFY_CALL:
			ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst);
			ret |= ftrace_get_call_inst(rec, addr, &call_inst);
			old = call_inst;
			new = new_call_inst;
			break;
		case FTRACE_UPDATE_MAKE_NOP:
			ret = ftrace_get_call_inst(rec, addr, &call_inst);
			old = call_inst;
			new = nop_inst;
			break;
		case FTRACE_UPDATE_MAKE_CALL:
			ret = ftrace_get_call_inst(rec, new_addr, &call_inst);
			old = nop_inst;
			new = call_inst;
			break;
		}

		if (!ret)
			ret = ftrace_modify_code(ip, old, new);

		if (ret)
			goto out;
	}

	return;
out:
	ftrace_bug(ret, rec);
}
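
/*
 * One-time initialization of an ftrace call site: validate the surrounding
 * instructions generated by the compiler, then nop-out the profiling call
 * until tracing is enabled.
 */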
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long addr, ip = rec->ip;
	ppc_inst_t old, new;
	int ret = 0;

	/* Verify instructions surrounding the ftrace location */
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
		/* Expect nops */
		ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP()));
		if (!ret)
			ret = ftrace_validate_inst(ip, ppc_inst(PPC_RAW_NOP()));
	} else if (IS_ENABLED(CONFIG_PPC32)) {
		/* Expected sequence: 'mflr r0', 'stw r0,4(r1)', 'bl _mcount' */
		ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
		if (!ret)
			ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)));
	} else if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
		/* Expected sequence: 'mflr r0', ['std r0,16(r1)'], 'bl _mcount' */
		ret = ftrace_read_inst(ip - 4, &old);
		if (!ret && !ppc_inst_equal(old, ppc_inst(PPC_RAW_MFLR(_R0)))) {
			ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
			ret |= ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)));
		}
	} else {
		return -EINVAL;
	}

	if (ret)
		return ret;

	if (!core_kernel_text(ip)) {
		if (!mod) {
			pr_err("0x%lx: No module provided for non-kernel address\n", ip);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	}

	/* Nop-out the ftrace location */
	new = ppc_inst(PPC_RAW_NOP());
	addr = MCOUNT_ADDR;
	if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
		/* we instead patch-in the 'mflr r0' */
		old = ppc_inst(PPC_RAW_NOP());
		new = ppc_inst(PPC_RAW_MFLR(_R0));
		ret = ftrace_modify_code(ip - 4, old, new);
	} else if (is_offset_in_branch_range(addr - ip)) {
		/* Within range */
		old = ftrace_create_branch_inst(ip, addr, 1);
		ret = ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip) || (IS_ENABLED(CONFIG_MODULES) && mod)) {
		/*
		 * We would be branching to a linker-generated stub, or to the module _mcount
		 * stub. Let's just confirm we have a 'bl' here.
		 */
		ret = ftrace_read_inst(ip, &old);
		if (ret)
			return ret;
		if (!is_bl_op(old)) {
			pr_err("0x%lx: expected (bl) != found (%08lx)\n", ip, ppc_inst_as_ulong(old));
			return -EINVAL;
		}
		ret = patch_instruction((u32 *)ip, new);
	} else {
		return -EINVAL;
	}

	return ret;
}
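
/* Redirect the ftrace_call (and ftrace_regs_call) sites to the new callback. */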
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
	ret = ftrace_modify_code(ip, old, new);

	/* Also update the regs callback function */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
		ret = ftrace_modify_code(ip, old, new);
	}

	return ret;
}
/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
void ftrace_free_init_tramp(void)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
		if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
			ftrace_tramps[i] = 0;
			break;
		}
}
static void __init add_ftrace_tramp(unsigned long tramp)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i]) {
			ftrace_tramps[i] = tramp;
			return;
		}
}
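
/*
 * Initialize the kernel ftrace trampolines (one in .text, one in .init.text)
 * so they load the ftrace handler address into r12 and branch to it via ctr.
 */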
int __init ftrace_dyn_arch_init(void)
{
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	unsigned long addr = FTRACE_REGS_ADDR;
	long reladdr;
	int i;
	u32 stub_insns[] = {
#ifdef CONFIG_PPC_KERNEL_PCREL
		/* pla r12,addr */
		PPC_PREFIX_MLS | __PPC_PRFX_R(1),
		PPC_INST_PADDI | ___PPC_RT(_R12),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#elif defined(CONFIG_PPC64)
		PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#else
		PPC_RAW_LIS(_R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
#endif
	};

	if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
		for (i = 0; i < 2; i++) {
			reladdr = addr - (unsigned long)tramp[i];

			if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
				pr_err("Address of %ps out of range of pcrel address.\n",
				       (void *)addr);
				return -1;
			}

			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= IMM_H18(reladdr);
			tramp[i][1] |= IMM_L(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else if (IS_ENABLED(CONFIG_PPC64)) {
		reladdr = addr - kernel_toc_addr();

		if (reladdr >= (long)SZ_2G || reladdr < -(long long)SZ_2G) {
			pr_err("Address of %ps out of range of kernel_toc.\n",
			       (void *)addr);
			return -1;
		}

		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][1] |= PPC_HA(reladdr);
			tramp[i][2] |= PPC_LO(reladdr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	} else {
		for (i = 0; i < 2; i++) {
			memcpy(tramp[i], stub_insns, sizeof(stub_insns));
			tramp[i][0] |= PPC_HA(addr);
			tramp[i][1] |= PPC_LO(addr);
			add_ftrace_tramp((unsigned long)tramp[i]);
		}
	}

	return 0;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
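/*
 * Entry hook for the function graph tracer: redirect the saved return
 * address (LR) to return_to_handler so that function exit can be traced.
 */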
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long sp = fregs->regs.gpr[1];
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		goto out;

	if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
		parent_ip = ppc_function_entry(return_to_handler);

	ftrace_test_recursion_unlock(bit);
out:
	fregs->regs.link = parent_ip;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */