// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
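
/*
 * Worked example (illustrative, not from the original source): for a
 * hypothetical target at 0x80100400, INSN_JAL drops the two low bits
 * (instructions are word aligned) and keeps the low 26 bits of the
 * word index: INSN_JAL(0x80100400) == 0x0c000000 | 0x00040100
 * == 0x0c040100. The CPU supplies the upper 4 bits from the PC, which
 * is why targets must lie in the current 256 MB segment, see
 * JUMP_RANGE_MASK.
 */
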
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount */
	v1 = 3;
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}
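
/*
 * Note: safe_store_code()/safe_load_code() (and the *_stack variants
 * used later) come from asm/ftrace.h; they wrap the memory access in
 * an exception-table fixup so a bad address sets "faulted" instead of
 * taking an unhandled fault while patching.
 */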
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	flush_icache_range(ip, ip + 8);

	return 0;
}
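
/*
 * The "r" variant below stores in reverse order: the delay-slot word
 * at ip + 4 is written before the branch/lui word at ip, so the call
 * only becomes live once its second half is already in place.
 */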
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#endif	/* !CONFIG_64BIT */

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *  sub sp, sp, 8	--> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount		--> b 1f (0x10000005)
 *  addiu v1, v1, low_16bit_of_mcount	--> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *					1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount		--> b 1f (0x10000004)
 *  addiu v1, v1, low_16bit_of_mcount	--> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *					1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
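
/*
 * Encoding check (illustrative): "b" is "beq $0, $0, offset", opcode
 * 0x04 << 26 == 0x10000000, with the branch offset (counted in
 * instructions) in the low 16 bits. MCOUNT_OFFSET_INSNS == 4 thus
 * gives 0x10000004, matching "b 1f (0x10000004)" in the diagram above.
 */
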
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise, a
	 * long call is needed.
	 */
	new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
						INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
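
/*
 * ftrace_call is the patchable jal site inside ftrace_caller (see
 * arch/mips/kernel/mcount.S); redirect it to the new tracer callback.
 */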
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
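
/*
 * Example (illustrative): "sw ra, 28(sp)" assembles to 0xafbf001c, so
 * (code & S_RA_SP) == S_RA_SP matches it, while the looser S_R_SP
 * pattern matches a store of any of $16..$31 relative to sp.
 */
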
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move the ip from the return address to just after the
	 * instruction "lui v1, hi_16bit_of_mcount" (offset is 24); for the
	 * kernel, to just after the instruction "move ra, at" (offset is 16).
	 */
	ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

	/*
	 * Search the text until finding the non-store instruction or the
	 * "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit the non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the next instruction in the scan (lower address) */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

328 * "parent_ra_addr" is the stack address where the return address of
329 * the caller of _mcount is saved.
331 * If gcc < 4.5, a leaf function does not save the return address
332 * in the stack address, so we "emulate" one in _mcount's stack space,
333 * and hijack it directly.
334 * For a non-leaf function, it does save the return address to its own
335 * stack space, so we can not hijack it directly, but need to find the
336 * real stack address, which is done by ftrace_get_parent_addr().
338 * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
339 * non-leaf function, the location of the return address will be saved
341 * For a leaf function, it just puts a zero into $12, so we handle
342 * it in ftrace_graph_caller() of mcount.S.
	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);

	/*
	 * If we fail to get the stack address of the non-leaf function's
	 * ra, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == NULL)
		goto out;
#endif

	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

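	/*
	 * Offset note (derived from the call-site diagram above): in-kernel
	 * sites sit two instructions (jal _mcount plus its delay slot) past
	 * the recorded ip; module sites use the longer lui/jalr sequence,
	 * hence MCOUNT_OFFSET_INSNS + 1.
	 */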
	insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	self_ra -= (MCOUNT_INSN_SIZE * insns);

	if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
		*parent_ra_addr = old_parent_ra;
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */