/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 *
 * Thanks goes to Steven Rostedt for writing the original x86 version.
 */
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#endif
/*
 * Check if the address is in kernel space
 *
 * Clone of core_kernel_text() from kernel/extable.c, but doesn't call
 * init_kernel_text(), since ftrace doesn't trace functions in init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;

	return 0;
}
#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* opcode|addr : 31..26|25..0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
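
/*
 * Worked example: for a target at 0x80100400,
 * INSN_JAL(0x80100400) = 0x0c000000 | ((0x80100400 >> 2) & 0x03ffffff)
 *                      = 0x0c000000 | 0x00040100 = 0x0c040100,
 * i.e. "jal 0x80100400". A jal can only reach targets within the
 * current 256 MB segment, which is what JUMP_RANGE_MASK guards.
 */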
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
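
/*
 * Pre-encoded at boot by ftrace_dyn_arch_init_insns():
 * insn_jal_ftrace_caller is "jal ftrace_caller+8" for kernel call
 * sites, insn_la_mcount[] is the "la v1, _mcount" sequence (a
 * lui/addiu pair on 32-bit kernels) for module call sites, and
 * insn_j_ftrace_graph_caller is "j ftrace_graph_caller".
 */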
static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* la v1, _mcount */
	v1 = 3;
	buf = (u32 *)&insn_la_mcount[0];
	UASM_i_LA(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}
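
/*
 * On 32-bit kernels a call site spans two words (the call plus its
 * delay slot), so the helpers below patch two consecutive
 * instructions and flush the same 8-byte window as above.
 */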
#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
				unsigned int new_code2)
{
	int faulted;

	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	flush_icache_range(ip, ip + 8);

	return 0;
}
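
/*
 * Like ftrace_modify_code_2(), but the words are written in reverse
 * order: the delay-slot word at ip + 4 is stored before the word at
 * ip, so a CPU racing through this code never sees the new first
 * instruction paired with a stale delay slot.
 */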
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
				 unsigned int new_code2)
{
	int faulted;

	ip += 4;
	safe_store_code(new_code2, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	ip -= 4;
	safe_store_code(new_code1, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#endif
/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *  sub sp, sp, 8	--> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *					1: offset = 5 instructions
 *
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount --> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *					1: offset = 4 instructions
 */
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
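
/*
 * INSN_B_1F encodes "b 1f" as beq zero, zero, offset: 0x10000000 is
 * the beq opcode with both register fields zero, and the low 16 bits
 * hold the branch offset in words (4 or 5, per MCOUNT_OFFSET_INSNS).
 */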
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise a
	 * long call is needed.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	/*
	 * On 32 bit MIPS platforms, gcc adds a stack adjust
	 * instruction in the delay slot after the branch to
	 * mcount and expects mcount to restore the sp on return.
	 * This is based on a legacy API and does nothing but
	 * waste instructions so it's being removed at runtime.
	 */
	return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];

#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
#else
	return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
						INSN_NOP : insn_la_mcount[1]);
#endif
}
#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
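
/*
 * ftrace_call is the patchable call slot inside ftrace_caller (see
 * mcount.S); ftrace_update_ftrace_func() rewrites it with a JAL to
 * whichever tracer callback is currently registered.
 */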
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}
int __init ftrace_dyn_arch_init(void)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */
#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
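
/*
 * Example: "sw ra, 28(sp)" encodes as 0xafbf001c and matches S_RA_SP,
 * while "sw s0, 24(sp)" encodes as 0xafb00018 and matches only S_R_SP.
 * The backward scan below stops when an instruction fails the S_R_SP
 * test (leaf function) or matches S_RA_SP (ra spilled to the stack).
 */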
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip back from the return address to just after
	 * the instruction "lui v1, hi_16bit_of_mcount" (offset is 24); for
	 * the kernel, move to just after "move at, ra" (offset is 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Search the text backwards until we find either a non-store
	 * instruction or the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the previous instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);
	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}
#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;
	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;
	/*
	 * "parent_ra_addr" is the stack address where the return address of
	 * the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly. A non-leaf function saves the return address
	 * in its own stack space, so we cannot hijack it directly and must
	 * find the real stack address; ftrace_get_parent_ra_addr() does it!
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is passed
	 * to us in $12, and for a leaf function a zero is put in $12. This
	 * is done in ftrace_graph_caller of mcount.S.
	 */
	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If getting the stack address of the non-leaf function's ra fails,
	 * stop the function graph tracer and return.
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}
	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */
	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
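
	/*
	 * E.g. for kernel text the recorded site is two instructions
	 * ("move at, ra; jal _mcount") before self_ra, i.e. self_ra - 8.
	 */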
	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif
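
/*
 * For example, arch_syscall_addr(__NR_O32_Linux + 4) above returns
 * sys_call_table[4], the O32 write(2) handler, since O32 syscall
 * numbers start at __NR_O32_Linux (4000).
 */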
#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
	if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

	return (unsigned long) &sys_ni_syscall;
}

#endif
#endif /* CONFIG_FTRACE_SYSCALLS */