/*
 * Code for replacing ftrace calls with jumps.
 *
 * Thanks go out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}
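/*
 * Note: create_branch() returns 0 when the target cannot be reached
 * with a relative branch, so a zero instruction word from the helper
 * above doubles as an "out of range" indicator.
 */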
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note: Due to modules and __init, code can disappear and change;
	 * we need to protect against faulting as well as code changing.
	 * We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old)
		return -EINVAL;

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}
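/*
 * (patch_instruction() also takes care of data/instruction cache
 * coherency for the patched word, so no explicit flush_icache_range()
 * is needed here; contrast with the two-instruction
 * probe_kernel_write() path in the PPC64 __ftrace_make_call() below.)
 */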
/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	/* use create_branch() to verify that this offset is in branch range */
	return create_branch((unsigned int *)ip, addr, 0);
}
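/*
 * (The "24 bits" is the LI field of an I-form branch: a 24-bit signed,
 * word-aligned displacement, giving a reach of about +/-32MB from the
 * call site.)
 */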
#ifdef CONFIG_MODULES
static int is_bl_op(unsigned int op)
{
	/* match the primary opcode plus the AA and LK bits: "bl" only */
	return (op & 0xfc000003) == 0x48000001;
}
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);

	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
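/*
 * Example with hypothetical values: for op == 0x4bfffff1 ("bl -16"),
 * the offset field is 0x03fffff0, the sign bit (0x02000000) is set,
 * so the offset sign-extends to -16 and the target is ip - 16.
 */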
#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[5];
	unsigned long ptr;
	unsigned long ip = rec->ip;
	unsigned long tramp;
	int offset;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);
	/*
	 * On PPC64 the trampoline looks like:
	 * 0x3d, 0x82, 0x00, 0x00,    addis   r12,r2, <high>
	 * 0x39, 0x8c, 0x00, 0x00,    addi    r12,r12, <low>
	 *   Where the bytes 2,3,6 and 7 make up the 32bit offset
	 *   to the TOC that holds the pointer
	 *   to jump to.
	 * 0xf8, 0x41, 0x00, 0x28,    std     r2,40(r1)
	 * 0xe9, 0x6c, 0x00, 0x20,    ld      r11,32(r12)
	 *   The actual address is 32 bytes from the offset
	 *   into the TOC.
	 * 0xe8, 0x4c, 0x00, 0x28,    ld      r2,40(r12)
	 */
	pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0xf8410028) ||
	    (jmp[3] != 0xe96c0020) ||
	    (jmp[4] != 0xe84c0028)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	/* The bottom half is sign-extended */
	offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
		(int)((short)jmp[1]);
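	/*
	 * (The (short) cast reproduces the sign extension the CPU applies
	 * to the addi immediate when the trampoline builds the TOC offset
	 * in r12.)
	 */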
	pr_devel(" %x ", offset);

	/* get the address this jumps to */
	tramp = mod->arch.toc + offset + 32;
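	/*
	 * (The +32 mirrors the displacement in the trampoline's
	 * "ld r11,32(r12)": the pointer lives 32 bytes past the address
	 * the addis/addi pair computed.)
	 */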
	pr_devel("toc: %lx", tramp);

	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x\n", jmp[0], jmp[1]);

#ifdef __LITTLE_ENDIAN__
	ptr = ((unsigned long)jmp[1] << 32) + jmp[0];
#else
	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
#endif

	/* This should match what was called */
	if (ptr != ppc_function_entry((void *)addr)) {
		printk(KERN_ERR "addr does not match %lx\n", ptr);
		return -EINVAL;
	}
	/*
	 * We want to nop the line, but the next line is
	 *  0xe8, 0x41, 0x00, 0x28   ld r2,40(r1)
	 * This needs to be turned into a nop too.
	 */
	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (op != 0xe8410028) {
		printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
		return -EINVAL;
	}

	/*
	 * Milton Miller pointed out that we cannot blindly do nops.
	 * If a task was preempted when calling a trace function,
	 * the nops will remove the way to restore the TOC in r2
	 * and the r2 TOC will get corrupted.
	 */

	/*
	 * Replace:
	 *   bl <tramp>  <==== will be replaced with "b 1f"
	 *   ld r2,40(r1)
	 *  1:
	 */
	op = 0x48000008;	/* b +8 */
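	/*
	 * (0x48000008 is an unconditional relative branch, primary
	 * opcode 18 with AA = LK = 0, displacing +8 bytes: it hops over
	 * the following "ld r2,40(r1)". The ld itself stays intact, so a
	 * task that was preempted and returns right after the old bl can
	 * still restore its TOC.)
	 */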
	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);
	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */
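	/*
	 * (lis/addi/mtctr/bctr is the standard PPC32 long-jump stub:
	 * unlike a relative bl, it can reach any absolute address.)
	 */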
	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);

	/* if the lower half had its sign bit set, undo the @ha rounding */
	if (tramp & 0x8000)
		tramp -= 0x10000;
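	/*
	 * Example with hypothetical values: for sym == 0xc0008000 the stub
	 * is "lis r12,0xc001; addi r12,r12,-0x8000" (@ha rounds the high
	 * half up when the low half's sign bit is set), so the OR above
	 * yields 0xc0018000 and the fixup brings it back to 0xc0008000.
	 */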
	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		printk(KERN_ERR
		       "Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}
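	/*
	 * (ftrace_call_replace() recreates the exact "bl" that should be
	 * at the call site, so ftrace_modify_code() can verify the old
	 * text before patching in the nop.)
	 */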
#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			printk(KERN_ERR "No module loaded addr=%lx\n",
			       addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			printk(KERN_ERR
			       "Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
		return -EFAULT;

	/*
	 * It should be pointing to two nops or
	 * b +8; ld r2,40(r1)
	 */
	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
	    ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
		return -EINVAL;
	}
	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op[0] = create_branch((unsigned int *)ip,
			      rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op[0]) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	/* ld r2,40(r1) */
	op[1] = 0xe8410028;
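	/*
	 * (40(r1) is the ABI-defined TOC save slot: the trampoline stores
	 * the caller's TOC there before switching r2, and this ld after
	 * the bl restores it.)
	 */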
	pr_devel("write to %lx\n", rec->ip);

	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
		return -EPERM;

	/*
	 * probe_kernel_write() does not flush the icache for us,
	 * so flush both patched instructions by hand.
	 */
	flush_icache_range(ip, ip + 8);

	return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;
	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		printk(KERN_ERR "Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}
#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		printk(KERN_ERR "No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
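/*
 * (ftrace_call is a label on the patched call instruction inside the
 * ftrace_caller assembly stub; rewriting it switches which tracer
 * callback the stub branches to.)
 */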
static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}
void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec->ip);
			return;
		}
	}
}
void arch_ftrace_update_code(int command)
{
	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();
}
int __init ftrace_dyn_arch_init(void *data)
{
	/* caller expects data to be zero */
	unsigned long *p = data;

	*p = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
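/*
 * (ftrace_graph_call is a branch site in the ftrace_caller stub whose
 * target toggles between ftrace_graph_stub, a label that simply falls
 * through, and the real ftrace_graph_caller; note link == 0 below, as
 * this is a plain "b", not a "bl".)
 */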
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_PPC64
extern void mod_return_to_handler(void);
#endif
/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread_info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;
#ifdef CONFIG_PPC64
	/* non-core kernel code needs to save and restore the TOC */
	if (REGION_ID(self_addr) != KERNEL_REGION_ID)
		return_hooker = (unsigned long)&mod_return_to_handler;
#endif

	return_hooker = ppc_function_entry((void *)return_hooker);
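	/*
	 * (On PPC64 a function symbol names a function descriptor, not
	 * code; ppc_function_entry() extracts the real entry address,
	 * which is what must be written into the saved LR slot.)
	 */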
	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " PPC_LL "%[old], 0(%[parent])\n"
		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
		"   li %[faulted], 0\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: li %[faulted], 1\n"
		"   b 3b\n"
		".previous\n"

		".section __ex_table,\"a\"\n"
			PPC_LONG_ALIGN "\n"
			PPC_LONG "1b,4b\n"
			PPC_LONG "2b,4b\n"
		".previous"

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
	);
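	/*
	 * (The __ex_table entries route a fault at label 1 or 2 to the
	 * fixup at label 4, which sets faulted and resumes at label 3,
	 * so a bad parent pointer is reported instead of oopsing. The
	 * store at label 2 already redirected *parent to the hooker,
	 * which is why the failure paths below restore the saved value.)
	 */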
	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
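/*
 * (The 64-bit sys_call_table of this vintage interleaves native and
 * compat entries, so the native handler for syscall nr sits at index
 * nr * 2.)
 */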
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */