/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>


#ifdef CONFIG_DYNAMIC_FTRACE
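/*
 * Build the branch instruction that replaces (or replaced) the mcount
 * call at 'ip'.  create_branch() encodes a relative "b" (or "bl" when
 * 'link' is set) to 'addr', and returns 0 when 'addr' is not reachable
 * with the 26-bit signed offset (roughly +/- 32MB), so callers can
 * treat a zero result as "out of range".
 */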
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}

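/*
 * Expected-value patching: read the instruction at 'ip', verify that it
 * still contains 'old', and only then write 'new'.  The distinct error
 * codes matter to the ftrace core: -EFAULT means the site could not be
 * read, -EINVAL means it did not contain what we expected, and -EPERM
 * means the write failed; ftrace_bug() reports each case differently.
 */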
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as
	 * against the code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old)
		return -EINVAL;

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	/* use create_branch() to verify that this offset is within branch range */
	return create_branch((unsigned int *)ip, addr, 0);
}

#ifdef CONFIG_MODULES

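/*
 * A PowerPC "bl" is an I-form branch: primary opcode 18 (0x48000000)
 * with AA=0 and LK=1 in the low two bits, hence the 0xfc000003 mask
 * and 0x48000001 value below.  The LI field (op & 0x03fffffc) is a
 * signed, word-aligned byte offset from the branch itself, which is
 * what find_bl_target() sign-extends and adds back to 'ip'.
 */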
static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}

#ifdef CONFIG_PPC64
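/*
 * Turn a call out through a module trampoline back into (effectively) a
 * nop.  We verify that the site is still a "bl" into a module
 * trampoline and that the trampoline really targets 'addr' before
 * touching anything; only then is the branch rewritten.
 */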
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long entry, ptr;
	unsigned long ip = rec->ip;
	void *tramp;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = (void *)find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %p", ip, tramp);

	if (!is_module_trampoline(tramp)) {
		pr_err("Not a trampoline\n");
		return -EINVAL;
	}

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we cannot simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	op = 0x48000008;	/* b +8 */

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		pr_err("Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		pr_err("Not a trampoline\n");
		return -EINVAL;
	}

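	/*
	 * Recover the absolute target from the lis/addi immediates.  addi
	 * sign-extends its operand, so the assembler's @ha value is the
	 * high half rounded up whenever bit 15 of the low half is set; the
	 * subtraction below undoes that rounding.  For example,
	 * sym = 0x12348765 is built as lis r12,0x1235 / addi r12,r12,0x8765,
	 * and (0x1235 << 16 | 0x8765) - 0x10000 gives back 0x12348765.
	 */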
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		pr_err("Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */

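/*
 * ftrace core entry point: turn the call at rec->ip back into a nop.
 * Call sites within direct branch range of 'addr' are patched in place;
 * out-of-range sites must have gone through a module trampoline and are
 * handed to the CONFIG_MODULES helpers above.
 */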
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
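/*
 * Re-enable tracing at a module call site.  __ftrace_make_nop() left a
 * "b +8" over the original bl, with the TOC-restoring "ld r2,XX(r1)"
 * still in place after it, so all that is needed here is to point the
 * branch back at the module's ftrace trampoline with a bl.
 */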
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	void *ip = (void *)rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, ip, sizeof(op)))
		return -EFAULT;

	/*
	 * We expect to see:
	 *
	 * b +8
	 * ld r2,XX(r1)
	 *
	 * The load offset is different depending on the ABI. For simplicity
	 * just mask it out when doing the compare.
	 */
	if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
		pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		pr_err("Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, the record had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

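/*
 * Redirect the global ftrace_call site (the call patched into the
 * ftrace_caller assembly stub) at the currently selected tracer
 * function.
 */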
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

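/*
 * Walk every ftrace record and apply whatever transition the core
 * requests: ftrace_update_record() tells us whether a site should be
 * left alone, turned into a call, or turned back into a nop.
 */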
static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}
}

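/*
 * Top-level arch hook: 'command' is a bitmask from the ftrace core
 * describing which of the updates above (call sites, the tracer
 * function, the graph caller) need to be applied in this pass.
 */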
void arch_ftrace_update_code(int command)
{
	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

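/*
 * ftrace_graph_call is a branch inside the ftrace_caller stub that
 * normally goes to ftrace_graph_stub, which is effectively a no-op.
 * Enabling the graph tracer retargets it at ftrace_graph_caller, and
 * disabling flips it back; both are plain unlinked branches, hence
 * link == 0 below.
 */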
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;
	unsigned long return_hooker;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out;

	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;

	parent = return_hooker;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
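/*
 * On PPC64, at the time of this code, sys_call_table interleaves the
 * native 64-bit and compat 32-bit entry points, so the native handler
 * for syscall 'nr' lives at index nr * 2.
 */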
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */