// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Matt Fleming <[email protected]>
 * Copyright (C) 2008 Paul Mundt <[email protected]>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];
/*
 * If we're trying to nop out a call to a function, we instead
 * place a call to the address after the memory table.
 *
 * 8c011060 <a>:
 * 8c011060:       02 d1           mov.l   8c01106c <a+0xc>,r1
 * 8c011062:       22 4f           sts.l   pr,@-r15
 * 8c011064:       02 c7           mova    8c011070 <a+0x10>,r0
 * 8c011066:       2b 41           jmp     @r1
 * 8c011068:       2a 40           lds     r0,pr
 * 8c01106a:       09 00           nop
 * 8c01106c:       68 24           .word 0x2468      <--- ip
 * 8c01106e:       1d 8c           .word 0x8c1d
 * 8c011070:       26 4f           lds.l   @r15+,pr  <--- ip + MCOUNT_INSN_SIZE
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code like normal.
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
        __raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
        return ftrace_nop;
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        /* Place the address in the memory table. */
        __raw_writel(addr, ftrace_replaced_code);

        /*
         * No locking needed, this must be called via kstop_machine
         * which in essence is like running on a uniprocessor machine.
         */
        return ftrace_replaced_code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Wait for any running NMIs to finish and set a flag that says
 *    we are modifying code; this is done in an atomic operation.
 * 3) Write the code.
 * 4) Clear the flag.
 * 5) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */
#define MOD_CODE_WRITE_FLAG (1 << 31)   /* set when NMI should do the write */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status;             /* holds return value of text write */
static void *mod_code_ip;               /* holds the IP to write to */
static void *mod_code_newcode;          /* holds the text to write to the IP */

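/*
 * Clear MOD_CODE_WRITE_FLAG out of nmi_running without disturbing the
 * count of NMIs currently executing, retrying the cmpxchg until it sticks.
 */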
static void clear_mod_flag(void)
{
        int old = atomic_read(&nmi_running);

        for (;;) {
                int new = old & ~MOD_CODE_WRITE_FLAG;

                if (old == new)
                        break;

                old = atomic_cmpxchg(&nmi_running, old, new);
        }
}

static void ftrace_mod_code(void)
{
        /*
         * Yes, more than one CPU process can be writing to mod_code_status.
         * (and the code itself)
         * But if one were to fail, then they all should, and if one were
         * to succeed, then they all should.
         */
        mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
                                             MCOUNT_INSN_SIZE);

        /* if we fail, then kill any new writers */
        if (mod_code_status)
                clear_mod_flag();
}

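/*
 * NMI entry hook: bump nmi_running and, if a modification is pending
 * (MOD_CODE_WRITE_FLAG is set), perform the write ourselves before any
 * possibly-patched code is executed in NMI context.
 */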
void arch_ftrace_nmi_enter(void)
{
        if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
                smp_rmb();
                ftrace_mod_code();
        }
        /* Must have previous changes seen before executions */
        smp_mb();
}

void arch_ftrace_nmi_exit(void)
{
        /* Finish all executions before clearing nmi_running */
        smp_mb();
        atomic_dec(&nmi_running);
}

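/*
 * Set MOD_CODE_WRITE_FLAG, but only from a state in which no NMI is
 * running: spin until nmi_running can be moved from 0 to the flag value
 * in a single cmpxchg.
 */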
static void wait_for_nmi_and_set_mod_flag(void)
{
        if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
                return;

        do {
                cpu_relax();
        } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
}

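/* Wait until every NMI currently in flight has finished. */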
static void wait_for_nmi(void)
{
        if (!atomic_read(&nmi_running))
                return;

        do {
                cpu_relax();
        } while (atomic_read(&nmi_running));
}

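/*
 * Perform the modification sequence described above: publish the IP and
 * code buffers, set the write flag, write the code, then clear the flag
 * and wait for in-flight NMIs to drain.
 */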
static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
        mod_code_ip = (void *)ip;
        mod_code_newcode = new_code;

        /* The buffers need to be visible before we let NMIs write them */
        smp_mb();

        wait_for_nmi_and_set_mod_flag();

        /* Make sure all running NMIs have finished before we write the code */
        smp_mb();

        ftrace_mod_code();

        /* Make sure the write happens before clearing the bit */
        smp_mb();

        clear_mod_flag();
        wait_for_nmi();

        return mod_code_status;
}

static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                              unsigned char *new_code)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note:
         * We are paranoid about modifying text, as if a bug were to happen, it
         * could cause us to read or write to someplace that could cause harm.
         * Carefully read and modify the code with probe_kernel_*(), and make
         * sure what we read is what we expected it to be before modifying it.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        /* replace the text with the new text */
        if (do_ftrace_mod_code(ip, new_code))
                return -EPERM;

        flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

        return 0;
}

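/* Re-point the patched address word at the ftrace_call site at the new tracer function. */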
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
        unsigned char old[MCOUNT_INSN_SIZE], *new;

        memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
        new = ftrace_call_replace(ip, (unsigned long)func);

        return ftrace_modify_code(ip, old, new);
}

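/* Replace the call to 'addr' at rec->ip with the branch-past-mcount nop. */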
int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace(ip);

        return ftrace_modify_code(rec->ip, old, new);
}

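/* Replace the nop at rec->ip with a call to 'addr'. */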
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace(ip);
        new = ftrace_call_replace(ip, addr);

        return ftrace_modify_code(rec->ip, old, new);
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

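/*
 * Swap the address word at 'ip': verify it currently holds old_addr and,
 * if so, write new_addr in its place.
 */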
static int ftrace_mod(unsigned long ip, unsigned long old_addr,
                      unsigned long new_addr)
{
        unsigned char code[MCOUNT_INSN_SIZE];

        if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        if (old_addr != __raw_readl((unsigned long *)code))
                return -EINVAL;

        __raw_writel(new_addr, ip);
        return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip, old_addr, new_addr;

        ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
        old_addr = (unsigned long)(&skip_trace);
        new_addr = (unsigned long)(&ftrace_graph_caller);

        return ftrace_mod(ip, old_addr, new_addr);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip, old_addr, new_addr;

        ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
        old_addr = (unsigned long)(&ftrace_graph_caller);
        new_addr = (unsigned long)(&skip_trace);

        return ftrace_mod(ip, old_addr, new_addr);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address on the stack
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
        unsigned long old;
        int faulted;
        unsigned long return_hooker = (unsigned long)&return_to_handler;

        if (unlikely(ftrace_graph_is_dead()))
                return;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * Protect against fault, even if it shouldn't
         * happen. This tool is too intrusive to
         * ignore such a protection.
         */
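        /*
         * Effectively: old = *parent; *parent = return_hooker;
         * 'faulted' becomes 0 on success, or 1 via the .fixup path if
         * either access takes a fault.
         */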
        __asm__ __volatile__(
                "1:                                     \n\t"
                "mov.l  @%2, %0                         \n\t"
                "2:                                     \n\t"
                "mov.l  %3, @%2                         \n\t"
                "mov    #0, %1                          \n\t"
                "3:                                     \n\t"
                ".section .fixup, \"ax\"                \n\t"
                "4:                                     \n\t"
                "mov.l  5f, %0                          \n\t"
                "jmp    @%0                             \n\t"
                " mov   #1, %1                          \n\t"
                ".balign 4                              \n\t"
                "5:     .long 3b                        \n\t"
                ".previous                              \n\t"
                ".section __ex_table,\"a\"              \n\t"
                ".long 1b, 4b                           \n\t"
                ".long 2b, 4b                           \n\t"
                ".previous                              \n\t"
                : "=&r" (old), "=r" (faulted)
                : "r" (parent), "r" (return_hooker)
        );

        if (unlikely(faulted)) {
                ftrace_graph_stop();
                WARN_ON(1);
                return;
        }

        if (function_graph_enter(old, self_addr, 0, NULL))
                __raw_writel(old, parent);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */