// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2004-2008 Ingo Molnar <[email protected]>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <[email protected]>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

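/*
 * Illustrative sketch (not part of the kernel source): the two macros
 * above are GNU C statement expressions, so they evaluate to the tested
 * condition while still killing ftrace on failure. That lets a caller
 * test and bail out in one step. The helper below is hypothetical and
 * only demonstrates the pattern; real callers appear later in this file.
 */
static inline int example_check_rec(void *rec)
{
	/* warns, calls ftrace_kill(), and yields a nonzero value if rec is NULL */
	if (FTRACE_WARN_ON(!rec))
		return -EINVAL;
	return 0;
}
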
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)					\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

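/*
 * Illustrative sketch (not part of the kernel source): typical use of the
 * traversal pair above. The helper name is hypothetical. Note the body of
 * the do-while runs at least once, so an "empty" list (containing only
 * ftrace_list_end) must be guarded against explicitly. Callers hold
 * ftrace_lock or run preempt-disabled so dynamically allocated ops cannot
 * be freed mid-walk.
 */
static inline int example_count_ftrace_ops(void)
{
	struct ftrace_ops *op;
	int cnt = 0;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &ftrace_list_end)	/* skip the terminating stub */
			cnt++;
	} while_for_each_ftrace_op(op);

	return cnt;
}
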
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;

	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
static bool fgraph_graph_time = true;

#else
static inline void update_function_graph_func(void) { }
#endif

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

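/*
 * Illustrative sketch (not part of the kernel source): how a user of the
 * public API typically reaches __register_ftrace_function(). The exported
 * register_ftrace_function() (defined later in this file) takes
 * ftrace_lock and calls the helper above. The callback and ops names
 * below are hypothetical.
 */
static void example_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct pt_regs *regs)
{
	/* invoked for every traced function once the ops is registered */
}

static struct ftrace_ops example_ops __maybe_unused = {
	.func	= example_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/* e.g. from module init: register_ftrace_function(&example_ops); */
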
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

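/*
 * Worked example (illustrative only; the numbers are arch and config
 * dependent): on a 64-bit machine with 4K pages and
 * CONFIG_FUNCTION_GRAPH_TRACER=y, sizeof(struct ftrace_profile) is
 * 16 (hlist_node) + 8 (ip) + 8 (counter) + 8 (time) + 8 (time_squared)
 * = 48 bytes, and the page header (next + index) is 16 bytes, so
 * PROFILES_PER_PAGE = (4096 - 16) / 48 = 85 records per page.
 */
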
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

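/*
 * Worked example (illustrative, not part of the kernel source): a quick
 * sanity check of the Welford rearrangement used above. For durations
 * {3, 5, 7} ns: n = 3, Sum x = 15, Sum x^2 = 83, so
 * n*Sum x^2 - (Sum x)^2 = 249 - 225 = 24, and 24 / (3 * 2) = 4, which
 * matches the textbook sample variance (mean 5, squared deviations
 * 4 + 0 + 4 = 8, divided by n - 1 = 2). The extra /1000 in the code is
 * only the first half of the ns^2 -> us^2 unit conversion.
 */
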
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

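/*
 * Worked example (illustrative, not part of the kernel source): with the
 * 20000-function estimate from the !CONFIG_DYNAMIC_FTRACE branch and the
 * 85-records-per-page figure sketched earlier (4K pages, 64-bit),
 * DIV_ROUND_UP(20000, 85) = 236 pages, i.e. roughly 944 KB of profile
 * records preallocated per CPU.
 */
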
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	int index = current->curr_ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
		current->ret_stack[index].subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {
		int index;

		index = current->curr_ret_stack;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_sched().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

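/*
 * Illustrative sketch (not part of the kernel source): a guarded lookup
 * built on the accessor above. The helper name is hypothetical; as with
 * the internal callers, the hash must be protected from being freed
 * (ftrace_lock held or preemption disabled) across the call.
 */
static inline bool example_ip_in_hash(struct ftrace_hash *hash,
				      unsigned long ip)
{
	return ftrace_lookup_ip(hash, ip) != NULL;
}
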
static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int i;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	return new_hash;
}

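/*
 * Worked example (illustrative, not part of the kernel source): for the
 * sizing loop above, a source hash with count = 100 starts at size = 50
 * and halves through 50, 25, 12, 6, 3, 1, giving bits = 6, i.e. 64
 * buckets (about half the entry count, ~1.6 entries per bucket). The
 * FTRACE_HASH_MAX_BITS cap keeps a huge filter from allocating more
 * than 4096 buckets.
 */
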
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}

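/*
 * Worked truth table for hash_contains_ip(), added for illustration
 * (not part of the kernel source):
 *
 *	filter_hash	notrace_hash	result
 *	-----------	------------	------
 *	empty		empty		match (trace everything)
 *	has ip		empty		match
 *	lacks ip	empty		no match
 *	empty		has ip		no match (explicitly excluded)
 *	has ip		has ip		no match (notrace wins)
 */
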
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

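/*
 * Illustration (not part of the kernel source): the comparator above is
 * written for range queries. The search key overloads ->ip as the range
 * start and ->flags as the range end (see ftrace_location_range() below),
 * so bsearch() reports a match whenever [key->ip, key->flags] overlaps
 * [rec->ip, rec->ip + MCOUNT_INSN_SIZE). E.g. with MCOUNT_INSN_SIZE == 5
 * (x86) and rec->ip == 0x1000, a key of ip = flags = 0x1003 matches:
 * 0x1003 is not below 0x1000 and not at or past 0x1005.
 */
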
04cf31a7 ME |
1560 | /** |
1561 | * ftrace_location_range - return the first address of a traced location | |
1562 | * if it touches the given ip range | |
1563 | * @start: start of range to search. | |
1564 | * @end: end of range to search (inclusive). @end points to the last byte | |
1565 | * to check. | |
1566 | * | |
1567 | * Returns rec->ip if the related ftrace location is at least partly within | |
1568 | * the given address range. That is, the first address of the instruction | |
1569 | * that is either a NOP or call to the function tracer. It checks the ftrace | |
1570 | * internal tables to determine if the address belongs or not. | |
1571 | */ | |
1572 | unsigned long ftrace_location_range(unsigned long start, unsigned long end) | |
c88fd863 SR |
1573 | { |
1574 | struct ftrace_page *pg; | |
1575 | struct dyn_ftrace *rec; | |
5855fead | 1576 | struct dyn_ftrace key; |
c88fd863 | 1577 | |
a650e02a SR |
1578 | key.ip = start; |
1579 | key.flags = end; /* overload flags, as it is unsigned long */ | |
5855fead SR |
1580 | |
1581 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | |
a650e02a SR |
1582 | if (end < pg->records[0].ip || |
1583 | start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) | |
9644302e | 1584 | continue; |
5855fead SR |
1585 | rec = bsearch(&key, pg->records, pg->index, |
1586 | sizeof(struct dyn_ftrace), | |
1587 | ftrace_cmp_recs); | |
1588 | if (rec) | |
f0cf973a | 1589 | return rec->ip; |
5855fead | 1590 | } |
c88fd863 SR |
1591 | |
1592 | return 0; | |
1593 | } | |
1594 | ||
a650e02a SR |
1595 | /** |
1596 | * ftrace_location - return true if the ip given is a traced location | |
1597 | * @ip: the instruction pointer to check | |
1598 | * | |
f0cf973a | 1599 | * Returns rec->ip if @ip given is a pointer to a ftrace location. |
a650e02a SR |
1600 | * That is, the instruction that is either a NOP or call to |
1601 | * the function tracer. It checks the ftrace internal tables to | |
1602 | * determine if the address belongs or not. | |
1603 | */ | |
f0cf973a | 1604 | unsigned long ftrace_location(unsigned long ip) |
a650e02a SR |
1605 | { |
1606 | return ftrace_location_range(ip, ip); | |
1607 | } | |
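/*
 * Usage sketch (illustrative, not from this file): code that patches
 * kernel text, such as kprobes, can ask whether an address is an
 * ftrace-managed call site before deciding how to modify it:
 *
 *	if (ftrace_location((unsigned long)addr))
 *		;	// addr is an mcount/fentry site owned by ftrace
 */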
1608 | ||
1609 | /** | |
1610 | * ftrace_text_reserved - return true if range contains an ftrace location | |
1611 | * @start: start of range to search | |
1612 | * @end: end of range to search (inclusive). @end points to the last byte to check. | |
1613 | * | |
1614 | * Returns 1 if the range from @start to @end contains a ftrace location. | |
1615 | * That is, the instruction that is either a NOP or call to | |
1616 | * the function tracer. It checks the ftrace internal tables to | |
1617 | * determine if the address belongs or not. | |
1618 | */ | |
d88471cb | 1619 | int ftrace_text_reserved(const void *start, const void *end) |
a650e02a | 1620 | { |
f0cf973a SR |
1621 | unsigned long ret; |
1622 | ||
1623 | ret = ftrace_location_range((unsigned long)start, | |
1624 | (unsigned long)end); | |
1625 | ||
1626 | return (int)!!ret; | |
a650e02a SR |
1627 | } |
1628 | ||
4fbb48cb SRRH |
1629 | /* Test if ops registered to this rec needs regs */ |
1630 | static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec) | |
1631 | { | |
1632 | struct ftrace_ops *ops; | |
1633 | bool keep_regs = false; | |
1634 | ||
1635 | for (ops = ftrace_ops_list; | |
1636 | ops != &ftrace_list_end; ops = ops->next) { | |
1637 | /* pass rec in as regs to have non-NULL val */ | |
1638 | if (ftrace_ops_test(ops, rec->ip, rec)) { | |
1639 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { | |
1640 | keep_regs = true; | |
1641 | break; | |
1642 | } | |
1643 | } | |
1644 | } | |
1645 | ||
1646 | return keep_regs; | |
1647 | } | |
1648 | ||
84b6d3e6 | 1649 | static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, |
ed926f9b SR |
1650 | int filter_hash, |
1651 | bool inc) | |
1652 | { | |
1653 | struct ftrace_hash *hash; | |
1654 | struct ftrace_hash *other_hash; | |
1655 | struct ftrace_page *pg; | |
1656 | struct dyn_ftrace *rec; | |
84b6d3e6 | 1657 | bool update = false; |
ed926f9b | 1658 | int count = 0; |
8c08f0d5 | 1659 | int all = false; |
ed926f9b SR |
1660 | |
1661 | /* Only update if the ops has been registered */ | |
1662 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
84b6d3e6 | 1663 | return false; |
ed926f9b SR |
1664 | |
1665 | /* | |
1666 | * In the filter_hash case: | |
1667 | * If the count is zero, we update all records. | |
1668 | * Otherwise we just update the items in the hash. | |
1669 | * | |
1670 | * In the notrace_hash case: | |
1671 | * We enable the update in the hash. | |
1672 | * As disabling notrace means enabling the tracing, | |
1673 | * and enabling notrace means disabling, the inc variable | |
1674 | * gets inverted. | |
1675 | */ | |
1676 | if (filter_hash) { | |
33b7f99c SRRH |
1677 | hash = ops->func_hash->filter_hash; |
1678 | other_hash = ops->func_hash->notrace_hash; | |
06a51d93 | 1679 | if (ftrace_hash_empty(hash)) |
8c08f0d5 | 1680 | all = true; |
ed926f9b SR |
1681 | } else { |
1682 | inc = !inc; | |
33b7f99c SRRH |
1683 | hash = ops->func_hash->notrace_hash; |
1684 | other_hash = ops->func_hash->filter_hash; | |
ed926f9b SR |
1685 | /* |
1686 | * If the notrace hash has no items, | |
1687 | * then there's nothing to do. | |
1688 | */ | |
06a51d93 | 1689 | if (ftrace_hash_empty(hash)) |
84b6d3e6 | 1690 | return false; |
ed926f9b SR |
1691 | } |
1692 | ||
1693 | do_for_each_ftrace_rec(pg, rec) { | |
1694 | int in_other_hash = 0; | |
1695 | int in_hash = 0; | |
1696 | int match = 0; | |
1697 | ||
b7ffffbb SRRH |
1698 | if (rec->flags & FTRACE_FL_DISABLED) |
1699 | continue; | |
1700 | ||
ed926f9b SR |
1701 | if (all) { |
1702 | /* | |
1703 | * Only the filter_hash affects all records. | |
1704 | * Update if the record is not in the notrace hash. | |
1705 | */ | |
b848914c | 1706 | if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) |
ed926f9b SR |
1707 | match = 1; |
1708 | } else { | |
06a51d93 SR |
1709 | in_hash = !!ftrace_lookup_ip(hash, rec->ip); |
1710 | in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); | |
ed926f9b SR |
1711 | |
1712 | /* | |
19eab4a4 SRRH |
1713 | * If filter_hash is set, we want to match all functions |
1714 | * that are in the hash but not in the other hash. | |
ed926f9b | 1715 | * |
19eab4a4 SRRH |
1716 | * If filter_hash is not set, then we are decrementing. |
1717 | * That means we match anything that is in the hash | |
1718 | * and also in the other_hash. That is, we need to turn | |
1719 | * off functions in the other hash because they are disabled | |
1720 | * by this hash. | |
ed926f9b SR |
1721 | */ |
1722 | if (filter_hash && in_hash && !in_other_hash) | |
1723 | match = 1; | |
1724 | else if (!filter_hash && in_hash && | |
06a51d93 | 1725 | (in_other_hash || ftrace_hash_empty(other_hash))) |
ed926f9b SR |
1726 | match = 1; |
1727 | } | |
1728 | if (!match) | |
1729 | continue; | |
1730 | ||
1731 | if (inc) { | |
1732 | rec->flags++; | |
0376bde1 | 1733 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX)) |
84b6d3e6 | 1734 | return false; |
79922b80 SRRH |
1735 | |
1736 | /* | |
1737 | * If there's only a single callback registered to a | |
1738 | * function, and the ops has a trampoline registered | |
1739 | * for it, then we can call it directly. | |
1740 | */ | |
fef5aeee | 1741 | if (ftrace_rec_count(rec) == 1 && ops->trampoline) |
79922b80 | 1742 | rec->flags |= FTRACE_FL_TRAMP; |
fef5aeee | 1743 | else |
79922b80 SRRH |
1744 | /* |
1745 | * If we are adding another function callback | |
1746 | * to this function, and the previous had a | |
bce0b6c5 SRRH |
1747 | * custom trampoline in use, then we need to go |
1748 | * back to the default trampoline. | |
79922b80 | 1749 | */ |
fef5aeee | 1750 | rec->flags &= ~FTRACE_FL_TRAMP; |
79922b80 | 1751 | |
08f6fba5 SR |
1752 | /* |
1753 | * If any ops wants regs saved for this function | |
1754 | * then all ops will get saved regs. | |
1755 | */ | |
1756 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) | |
1757 | rec->flags |= FTRACE_FL_REGS; | |
ed926f9b | 1758 | } else { |
0376bde1 | 1759 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0)) |
84b6d3e6 | 1760 | return false; |
ed926f9b | 1761 | rec->flags--; |
79922b80 | 1762 | |
4fbb48cb SRRH |
1763 | /* |
1764 | * If the rec had REGS enabled and the ops that is | |
1765 | * being removed had REGS set, then see if there is | |
1766 | * still any ops for this record that wants regs. | |
1767 | * If not, we can stop recording them. | |
1768 | */ | |
0376bde1 | 1769 | if (ftrace_rec_count(rec) > 0 && |
4fbb48cb SRRH |
1770 | rec->flags & FTRACE_FL_REGS && |
1771 | ops->flags & FTRACE_OPS_FL_SAVE_REGS) { | |
1772 | if (!test_rec_ops_needs_regs(rec)) | |
1773 | rec->flags &= ~FTRACE_FL_REGS; | |
1774 | } | |
79922b80 | 1775 | |
fef5aeee SRRH |
1776 | /* |
1777 | * If the rec had TRAMP enabled, then it needs to | |
1778 | * be cleared, as TRAMP can only be enabled if | |
1779 | * there is only a single ops attached to it. | |
1780 | * In other words, always disable it on decrementing. | |
1781 | * In the future, we may set it if rec count is | |
1782 | * decremented to one, and the ops that is left | |
1783 | * has a trampoline. | |
1784 | */ | |
1785 | rec->flags &= ~FTRACE_FL_TRAMP; | |
1786 | ||
79922b80 SRRH |
1787 | /* |
1788 | * flags will be cleared in ftrace_check_record() | |
1789 | * if rec count is zero. | |
1790 | */ | |
ed926f9b SR |
1791 | } |
1792 | count++; | |
84b6d3e6 JO |
1793 | |
1794 | /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */ | |
1795 | update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE; | |
1796 | ||
ed926f9b SR |
1797 | /* Shortcut, if we handled all records, we are done. */ |
1798 | if (!all && count == hash->count) | |
84b6d3e6 | 1799 | return update; |
ed926f9b | 1800 | } while_for_each_ftrace_rec(); |
84b6d3e6 JO |
1801 | |
1802 | return update; | |
ed926f9b SR |
1803 | } |
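/*
 * Note on the accounting above (an explanatory sketch, not from the
 * original source): the low bits of rec->flags hold a reference count
 * of how many ops trace this function, which is why the bare
 * rec->flags++ / rec->flags-- work, and why ftrace_rec_count() is
 * checked against FTRACE_REF_MAX before incrementing. Conceptually:
 *
 *	refs = ftrace_rec_count(rec);	// count kept in low flag bits
 */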
1804 | ||
84b6d3e6 | 1805 | static bool ftrace_hash_rec_disable(struct ftrace_ops *ops, |
ed926f9b SR |
1806 | int filter_hash) |
1807 | { | |
84b6d3e6 | 1808 | return __ftrace_hash_rec_update(ops, filter_hash, 0); |
ed926f9b SR |
1809 | } |
1810 | ||
84b6d3e6 | 1811 | static bool ftrace_hash_rec_enable(struct ftrace_ops *ops, |
ed926f9b SR |
1812 | int filter_hash) |
1813 | { | |
84b6d3e6 | 1814 | return __ftrace_hash_rec_update(ops, filter_hash, 1); |
ed926f9b SR |
1815 | } |
1816 | ||
84261912 SRRH |
1817 | static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, |
1818 | int filter_hash, int inc) | |
1819 | { | |
1820 | struct ftrace_ops *op; | |
1821 | ||
1822 | __ftrace_hash_rec_update(ops, filter_hash, inc); | |
1823 | ||
1824 | if (ops->func_hash != &global_ops.local_hash) | |
1825 | return; | |
1826 | ||
1827 | /* | |
1828 | * If the ops shares the global_ops hash, then we need to update | |
1829 | * all ops that are enabled and use this hash. | |
1830 | */ | |
1831 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
1832 | /* Already done */ | |
1833 | if (op == ops) | |
1834 | continue; | |
1835 | if (op->func_hash == &global_ops.local_hash) | |
1836 | __ftrace_hash_rec_update(op, filter_hash, inc); | |
1837 | } while_for_each_ftrace_op(op); | |
1838 | } | |
1839 | ||
1840 | static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, | |
1841 | int filter_hash) | |
1842 | { | |
1843 | ftrace_hash_rec_update_modify(ops, filter_hash, 0); | |
1844 | } | |
1845 | ||
1846 | static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, | |
1847 | int filter_hash) | |
1848 | { | |
1849 | ftrace_hash_rec_update_modify(ops, filter_hash, 1); | |
1850 | } | |
1851 | ||
f8b8be8a MH |
1852 | /* |
1853 | * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK | |
1854 | * or no update is needed, -EBUSY if it detects a conflict of the flag | |
1855 | * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs. | |
1856 | * Note that old_hash and new_hash have the following meanings: | |
1857 | * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected) | |
1858 | * - If the hash is EMPTY_HASH, it hits nothing | |
1859 | * - Anything else hits the recs which match the hash entries. | |
1860 | */ | |
1861 | static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, | |
1862 | struct ftrace_hash *old_hash, | |
1863 | struct ftrace_hash *new_hash) | |
1864 | { | |
1865 | struct ftrace_page *pg; | |
1866 | struct dyn_ftrace *rec, *end = NULL; | |
1867 | int in_old, in_new; | |
1868 | ||
1869 | /* Only update if the ops has been registered */ | |
1870 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
1871 | return 0; | |
1872 | ||
1873 | if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) | |
1874 | return 0; | |
1875 | ||
1876 | /* | |
1877 | * Since the IPMODIFY is a very address sensitive action, we do not | |
1878 | * allow ftrace_ops to set all functions to new hash. | |
1879 | */ | |
1880 | if (!new_hash || !old_hash) | |
1881 | return -EINVAL; | |
1882 | ||
1883 | /* Update rec->flags */ | |
1884 | do_for_each_ftrace_rec(pg, rec) { | |
546fece4 SRRH |
1885 | |
1886 | if (rec->flags & FTRACE_FL_DISABLED) | |
1887 | continue; | |
1888 | ||
f8b8be8a MH |
1889 | /* We need to update only differences of filter_hash */ |
1890 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); | |
1891 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); | |
1892 | if (in_old == in_new) | |
1893 | continue; | |
1894 | ||
1895 | if (in_new) { | |
1896 | /* New entries must ensure no others are using it */ | |
1897 | if (rec->flags & FTRACE_FL_IPMODIFY) | |
1898 | goto rollback; | |
1899 | rec->flags |= FTRACE_FL_IPMODIFY; | |
1900 | } else /* Removed entry */ | |
1901 | rec->flags &= ~FTRACE_FL_IPMODIFY; | |
1902 | } while_for_each_ftrace_rec(); | |
1903 | ||
1904 | return 0; | |
1905 | ||
1906 | rollback: | |
1907 | end = rec; | |
1908 | ||
1909 | /* Roll back what we did above */ | |
1910 | do_for_each_ftrace_rec(pg, rec) { | |
546fece4 SRRH |
1911 | |
1912 | if (rec->flags & FTRACE_FL_DISABLED) | |
1913 | continue; | |
1914 | ||
f8b8be8a MH |
1915 | if (rec == end) |
1916 | goto err_out; | |
1917 | ||
1918 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); | |
1919 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); | |
1920 | if (in_old == in_new) | |
1921 | continue; | |
1922 | ||
1923 | if (in_new) | |
1924 | rec->flags &= ~FTRACE_FL_IPMODIFY; | |
1925 | else | |
1926 | rec->flags |= FTRACE_FL_IPMODIFY; | |
1927 | } while_for_each_ftrace_rec(); | |
1928 | ||
1929 | err_out: | |
1930 | return -EBUSY; | |
1931 | } | |
1932 | ||
1933 | static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops) | |
1934 | { | |
1935 | struct ftrace_hash *hash = ops->func_hash->filter_hash; | |
1936 | ||
1937 | if (ftrace_hash_empty(hash)) | |
1938 | hash = NULL; | |
1939 | ||
1940 | return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); | |
1941 | } | |
1942 | ||
1943 | /* Disabling always succeeds */ | |
1944 | static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops) | |
1945 | { | |
1946 | struct ftrace_hash *hash = ops->func_hash->filter_hash; | |
1947 | ||
1948 | if (ftrace_hash_empty(hash)) | |
1949 | hash = NULL; | |
1950 | ||
1951 | __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH); | |
1952 | } | |
1953 | ||
1954 | static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, | |
1955 | struct ftrace_hash *new_hash) | |
1956 | { | |
1957 | struct ftrace_hash *old_hash = ops->func_hash->filter_hash; | |
1958 | ||
1959 | if (ftrace_hash_empty(old_hash)) | |
1960 | old_hash = NULL; | |
1961 | ||
1962 | if (ftrace_hash_empty(new_hash)) | |
1963 | new_hash = NULL; | |
1964 | ||
1965 | return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); | |
1966 | } | |
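/*
 * Context sketch (illustrative; the ops shown is hypothetical): an
 * ftrace_ops that rewrites the ip register to redirect execution,
 * such as a live-patching handler, registers with
 * FTRACE_OPS_FL_IPMODIFY, and the checks above ensure at most one
 * such ops is attached to any given function:
 *
 *	static struct ftrace_ops patch_ops = {
 *		.func  = my_redirect_handler,
 *		.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
 *	};
 */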
1967 | ||
b05086c7 | 1968 | static void print_ip_ins(const char *fmt, const unsigned char *p) |
b17e8a37 SR |
1969 | { |
1970 | int i; | |
1971 | ||
1972 | printk(KERN_CONT "%s", fmt); | |
1973 | ||
1974 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) | |
1975 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | |
1976 | } | |
1977 | ||
4fd3279b SRRH |
1978 | static struct ftrace_ops * |
1979 | ftrace_find_tramp_ops_any(struct dyn_ftrace *rec); | |
39daa7b9 SRRH |
1980 | static struct ftrace_ops * |
1981 | ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops); | |
4fd3279b | 1982 | |
02a392a0 | 1983 | enum ftrace_bug_type ftrace_bug_type; |
b05086c7 | 1984 | const void *ftrace_expected; |
02a392a0 SRRH |
1985 | |
1986 | static void print_bug_type(void) | |
1987 | { | |
1988 | switch (ftrace_bug_type) { | |
1989 | case FTRACE_BUG_UNKNOWN: | |
1990 | break; | |
1991 | case FTRACE_BUG_INIT: | |
1992 | pr_info("Initializing ftrace call sites\n"); | |
1993 | break; | |
1994 | case FTRACE_BUG_NOP: | |
1995 | pr_info("Setting ftrace call site to NOP\n"); | |
1996 | break; | |
1997 | case FTRACE_BUG_CALL: | |
1998 | pr_info("Setting ftrace call site to call ftrace function\n"); | |
1999 | break; | |
2000 | case FTRACE_BUG_UPDATE: | |
2001 | pr_info("Updating ftrace call site to call a different ftrace function\n"); | |
2002 | break; | |
2003 | } | |
2004 | } | |
2005 | ||
c88fd863 SR |
2006 | /** |
2007 | * ftrace_bug - report and shutdown function tracer | |
2008 | * @failed: The failed type (EFAULT, EINVAL, EPERM) | |
4fd3279b | 2009 | * @rec: The record that failed |
c88fd863 SR |
2010 | * |
2011 | * The arch code that enables or disables the function tracing | |
2012 | * can call ftrace_bug() when it has detected a problem in | |
2013 | * modifying the code. @failed should be one of either: | |
2014 | * EFAULT - if the problem happens on reading the @ip address | |
2015 | * EINVAL - if what is read at @ip is not what was expected | |
2016 | * EPERM - if the problem happens on writing to the @ip address | |
2017 | */ | |
4fd3279b | 2018 | void ftrace_bug(int failed, struct dyn_ftrace *rec) |
b17e8a37 | 2019 | { |
4fd3279b SRRH |
2020 | unsigned long ip = rec ? rec->ip : 0; |
2021 | ||
b17e8a37 SR |
2022 | switch (failed) { |
2023 | case -EFAULT: | |
2024 | FTRACE_WARN_ON_ONCE(1); | |
2025 | pr_info("ftrace faulted on modifying "); | |
2026 | print_ip_sym(ip); | |
2027 | break; | |
2028 | case -EINVAL: | |
2029 | FTRACE_WARN_ON_ONCE(1); | |
2030 | pr_info("ftrace failed to modify "); | |
2031 | print_ip_sym(ip); | |
b05086c7 | 2032 | print_ip_ins(" actual: ", (unsigned char *)ip); |
4fd3279b | 2033 | pr_cont("\n"); |
b05086c7 SRRH |
2034 | if (ftrace_expected) { |
2035 | print_ip_ins(" expected: ", ftrace_expected); | |
2036 | pr_cont("\n"); | |
2037 | } | |
b17e8a37 SR |
2038 | break; |
2039 | case -EPERM: | |
2040 | FTRACE_WARN_ON_ONCE(1); | |
2041 | pr_info("ftrace faulted on writing "); | |
2042 | print_ip_sym(ip); | |
2043 | break; | |
2044 | default: | |
2045 | FTRACE_WARN_ON_ONCE(1); | |
2046 | pr_info("ftrace faulted on unknown error "); | |
2047 | print_ip_sym(ip); | |
2048 | } | |
02a392a0 | 2049 | print_bug_type(); |
4fd3279b SRRH |
2050 | if (rec) { |
2051 | struct ftrace_ops *ops = NULL; | |
2052 | ||
2053 | pr_info("ftrace record flags: %lx\n", rec->flags); | |
2054 | pr_cont(" (%ld)%s", ftrace_rec_count(rec), | |
2055 | rec->flags & FTRACE_FL_REGS ? " R" : " "); | |
2056 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | |
2057 | ops = ftrace_find_tramp_ops_any(rec); | |
39daa7b9 SRRH |
2058 | if (ops) { |
2059 | do { | |
2060 | pr_cont("\ttramp: %pS (%pS)", | |
2061 | (void *)ops->trampoline, | |
2062 | (void *)ops->func); | |
2063 | ops = ftrace_find_tramp_ops_next(rec, ops); | |
2064 | } while (ops); | |
2065 | } else | |
4fd3279b SRRH |
2066 | pr_cont("\ttramp: ERROR!"); |
2067 | ||
2068 | } | |
2069 | ip = ftrace_get_addr_curr(rec); | |
39daa7b9 | 2070 | pr_cont("\n expected tramp: %lx\n", ip); |
4fd3279b | 2071 | } |
b17e8a37 SR |
2072 | } |
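/*
 * Usage sketch (illustrative; arch_patch_site() is a hypothetical
 * helper): arch code that fails to modify a call site reports the
 * errno back through ftrace_bug() along with the record:
 *
 *	ret = arch_patch_site(rec);
 *	if (ret)
 *		ftrace_bug(ret, rec);	// reports details of the failure
 */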
2073 | ||
c88fd863 | 2074 | static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) |
5072c59f | 2075 | { |
64fbcd16 | 2076 | unsigned long flag = 0UL; |
e7d3737e | 2077 | |
02a392a0 SRRH |
2078 | ftrace_bug_type = FTRACE_BUG_UNKNOWN; |
2079 | ||
b7ffffbb SRRH |
2080 | if (rec->flags & FTRACE_FL_DISABLED) |
2081 | return FTRACE_UPDATE_IGNORE; | |
2082 | ||
982c350b | 2083 | /* |
30fb6aa7 | 2084 | * If we are updating calls: |
982c350b | 2085 | * |
ed926f9b SR |
2086 | * If the record has a ref count, then we need to enable it |
2087 | * because someone is using it. | |
982c350b | 2088 | * |
ed926f9b SR |
2089 | * Otherwise we make sure it's disabled. | |
2090 | * | |
30fb6aa7 | 2091 | * If we are disabling calls, then disable all records that |
ed926f9b | 2092 | * are enabled. |
982c350b | 2093 | */ |
0376bde1 | 2094 | if (enable && ftrace_rec_count(rec)) |
ed926f9b | 2095 | flag = FTRACE_FL_ENABLED; |
982c350b | 2096 | |
08f6fba5 | 2097 | /* |
79922b80 SRRH |
2098 | * If enabling and the REGS flag does not match the REGS_EN, or |
2099 | * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore | |
2100 | * this record. Set flags to fail the compare against ENABLED. | |
08f6fba5 | 2101 | */ |
79922b80 SRRH |
2102 | if (flag) { |
2103 | if (!(rec->flags & FTRACE_FL_REGS) != | |
2104 | !(rec->flags & FTRACE_FL_REGS_EN)) | |
2105 | flag |= FTRACE_FL_REGS; | |
2106 | ||
2107 | if (!(rec->flags & FTRACE_FL_TRAMP) != | |
2108 | !(rec->flags & FTRACE_FL_TRAMP_EN)) | |
2109 | flag |= FTRACE_FL_TRAMP; | |
2110 | } | |
08f6fba5 | 2111 | |
64fbcd16 XG |
2112 | /* If the state of this record hasn't changed, then do nothing */ |
2113 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) | |
c88fd863 | 2114 | return FTRACE_UPDATE_IGNORE; |
982c350b | 2115 | |
64fbcd16 | 2116 | if (flag) { |
08f6fba5 SR |
2117 | /* Save off if rec is being enabled (for return value) */ |
2118 | flag ^= rec->flags & FTRACE_FL_ENABLED; | |
2119 | ||
2120 | if (update) { | |
c88fd863 | 2121 | rec->flags |= FTRACE_FL_ENABLED; |
08f6fba5 SR |
2122 | if (flag & FTRACE_FL_REGS) { |
2123 | if (rec->flags & FTRACE_FL_REGS) | |
2124 | rec->flags |= FTRACE_FL_REGS_EN; | |
2125 | else | |
2126 | rec->flags &= ~FTRACE_FL_REGS_EN; | |
2127 | } | |
79922b80 SRRH |
2128 | if (flag & FTRACE_FL_TRAMP) { |
2129 | if (rec->flags & FTRACE_FL_TRAMP) | |
2130 | rec->flags |= FTRACE_FL_TRAMP_EN; | |
2131 | else | |
2132 | rec->flags &= ~FTRACE_FL_TRAMP_EN; | |
2133 | } | |
08f6fba5 SR |
2134 | } |
2135 | ||
2136 | /* | |
2137 | * If this record is being updated from a nop, then | |
2138 | * return UPDATE_MAKE_CALL. | |
08f6fba5 SR |
2139 | * Otherwise, |
2140 | * return UPDATE_MODIFY_CALL to tell the caller to convert | |
f1b2f2bd | 2141 | * from the save regs, to a non-save regs function or |
79922b80 | 2142 | * vice versa, or from a trampoline call. |
08f6fba5 | 2143 | */ |
02a392a0 SRRH |
2144 | if (flag & FTRACE_FL_ENABLED) { |
2145 | ftrace_bug_type = FTRACE_BUG_CALL; | |
08f6fba5 | 2146 | return FTRACE_UPDATE_MAKE_CALL; |
02a392a0 | 2147 | } |
f1b2f2bd | 2148 | |
02a392a0 | 2149 | ftrace_bug_type = FTRACE_BUG_UPDATE; |
f1b2f2bd | 2150 | return FTRACE_UPDATE_MODIFY_CALL; |
c88fd863 SR |
2151 | } |
2152 | ||
08f6fba5 SR |
2153 | if (update) { |
2154 | /* If there's no more users, clear all flags */ | |
0376bde1 | 2155 | if (!ftrace_rec_count(rec)) |
08f6fba5 SR |
2156 | rec->flags = 0; |
2157 | else | |
b24d443b SRRH |
2158 | /* |
2159 | * Just disable the record, but keep the ops TRAMP | |
2160 | * and REGS states. The _EN flags must be disabled though. | |
2161 | */ | |
2162 | rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | | |
2163 | FTRACE_FL_REGS_EN); | |
08f6fba5 | 2164 | } |
c88fd863 | 2165 | |
02a392a0 | 2166 | ftrace_bug_type = FTRACE_BUG_NOP; |
c88fd863 SR |
2167 | return FTRACE_UPDATE_MAKE_NOP; |
2168 | } | |
2169 | ||
2170 | /** | |
2171 | * ftrace_update_record, set a record that now is tracing or not | |
2172 | * @rec: the record to update | |
2173 | * @enable: set to 1 if the record is tracing, zero to force disable | |
2174 | * | |
2175 | * The records that represent all functions that can be traced need | |
2176 | * to be updated when tracing has been enabled. | |
2177 | */ | |
2178 | int ftrace_update_record(struct dyn_ftrace *rec, int enable) | |
2179 | { | |
2180 | return ftrace_check_record(rec, enable, 1); | |
2181 | } | |
2182 | ||
2183 | /** | |
2184 | * ftrace_test_record, check if the record has been enabled or not | |
2185 | * @rec: the record to test | |
2186 | * @enable: set to 1 to check if enabled, 0 if it is disabled | |
2187 | * | |
2188 | * The arch code may need to test if a record is already set to | |
2189 | * tracing to determine how to modify the function code that it | |
2190 | * represents. | |
2191 | */ | |
2192 | int ftrace_test_record(struct dyn_ftrace *rec, int enable) | |
2193 | { | |
2194 | return ftrace_check_record(rec, enable, 0); | |
2195 | } | |
2196 | ||
5fecaa04 SRRH |
2197 | static struct ftrace_ops * |
2198 | ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) | |
2199 | { | |
2200 | struct ftrace_ops *op; | |
fef5aeee | 2201 | unsigned long ip = rec->ip; |
5fecaa04 SRRH |
2202 | |
2203 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
2204 | ||
2205 | if (!op->trampoline) | |
2206 | continue; | |
2207 | ||
fef5aeee | 2208 | if (hash_contains_ip(ip, op->func_hash)) |
5fecaa04 SRRH |
2209 | return op; |
2210 | } while_for_each_ftrace_op(op); | |
2211 | ||
2212 | return NULL; | |
2213 | } | |
2214 | ||
39daa7b9 SRRH |
2215 | static struct ftrace_ops * |
2216 | ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, | |
2217 | struct ftrace_ops *op) | |
2218 | { | |
2219 | unsigned long ip = rec->ip; | |
2220 | ||
2221 | while_for_each_ftrace_op(op) { | |
2222 | ||
2223 | if (!op->trampoline) | |
2224 | continue; | |
2225 | ||
2226 | if (hash_contains_ip(ip, op->func_hash)) | |
2227 | return op; | |
2228 | } | |
2229 | ||
2230 | return NULL; | |
2231 | } | |
2232 | ||
79922b80 SRRH |
2233 | static struct ftrace_ops * |
2234 | ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) | |
2235 | { | |
2236 | struct ftrace_ops *op; | |
fef5aeee | 2237 | unsigned long ip = rec->ip; |
79922b80 | 2238 | |
fef5aeee SRRH |
2239 | /* |
2240 | * Need to check removed ops first. | |
2241 | * If they are being removed, and this rec has a tramp, | |
2242 | * and this rec is in the ops list, then it would be the | |
2243 | * one with the tramp. | |
2244 | */ | |
2245 | if (removed_ops) { | |
2246 | if (hash_contains_ip(ip, &removed_ops->old_hash)) | |
79922b80 SRRH |
2247 | return removed_ops; |
2248 | } | |
2249 | ||
fef5aeee SRRH |
2250 | /* |
2251 | * Need to find the current trampoline for a rec. | |
2252 | * Now, a trampoline is only attached to a rec if there | |
2253 | * was a single 'ops' attached to it. But this can be called | |
2254 | * when we are adding another op to the rec or removing the | |
2255 | * current one. Thus, if the op is being added, we can | |
2256 | * ignore it because it hasn't attached itself to the rec | |
4fc40904 SRRH |
2257 | * yet. |
2258 | * | |
2259 | * If an ops is being modified (hooking to different functions) | |
2260 | * then we don't care about the new functions that are being | |
2261 | * added, just the old ones (that are probably being removed). | |
2262 | * | |
2263 | * If we are adding an ops to a function that already is using | |
2264 | * a trampoline, it needs to be removed (trampolines are only | |
2265 | * for single ops connected), then an ops that is not being | |
2266 | * modified also needs to be checked. | |
fef5aeee | 2267 | */ |
79922b80 | 2268 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
fef5aeee SRRH |
2269 | |
2270 | if (!op->trampoline) | |
2271 | continue; | |
2272 | ||
2273 | /* | |
2274 | * If the ops is being added, it hasn't gotten to | |
2275 | * the point to be removed from this tree yet. | |
2276 | */ | |
2277 | if (op->flags & FTRACE_OPS_FL_ADDING) | |
79922b80 SRRH |
2278 | continue; |
2279 | ||
4fc40904 | 2280 | |
fef5aeee | 2281 | /* |
4fc40904 SRRH |
2282 | * If the ops is being modified and is in the old |
2283 | * hash, then it is probably being removed from this | |
2284 | * function. | |
fef5aeee | 2285 | */ |
fef5aeee SRRH |
2286 | if ((op->flags & FTRACE_OPS_FL_MODIFYING) && |
2287 | hash_contains_ip(ip, &op->old_hash)) | |
79922b80 | 2288 | return op; |
4fc40904 SRRH |
2289 | /* |
2290 | * If the ops is not being added or modified, and it's | |
2291 | * in its normal filter hash, then this must be the one | |
2292 | * we want! | |
2293 | */ | |
2294 | if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && | |
2295 | hash_contains_ip(ip, op->func_hash)) | |
2296 | return op; | |
79922b80 SRRH |
2297 | |
2298 | } while_for_each_ftrace_op(op); | |
2299 | ||
2300 | return NULL; | |
2301 | } | |
2302 | ||
2303 | static struct ftrace_ops * | |
2304 | ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) | |
2305 | { | |
2306 | struct ftrace_ops *op; | |
fef5aeee | 2307 | unsigned long ip = rec->ip; |
79922b80 SRRH |
2308 | |
2309 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
2310 | /* pass rec in as regs to have non-NULL val */ | |
fef5aeee | 2311 | if (hash_contains_ip(ip, op->func_hash)) |
79922b80 SRRH |
2312 | return op; |
2313 | } while_for_each_ftrace_op(op); | |
2314 | ||
2315 | return NULL; | |
2316 | } | |
2317 | ||
7413af1f SRRH |
2318 | /** |
2319 | * ftrace_get_addr_new - Get the call address to set to | |
2320 | * @rec: The ftrace record descriptor | |
2321 | * | |
2322 | * If the record has the FTRACE_FL_REGS set, that means that it | |
2323 | * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS | |
2324 | * is not set, then it wants to convert to the normal callback. | |
2325 | * | |
2326 | * Returns the address of the trampoline to set to | |
2327 | */ | |
2328 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) | |
2329 | { | |
79922b80 SRRH |
2330 | struct ftrace_ops *ops; |
2331 | ||
2332 | /* Trampolines take precedence over regs */ | |
2333 | if (rec->flags & FTRACE_FL_TRAMP) { | |
2334 | ops = ftrace_find_tramp_ops_new(rec); | |
2335 | if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { | |
bce0b6c5 SRRH |
2336 | pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", |
2337 | (void *)rec->ip, (void *)rec->ip, rec->flags); | |
79922b80 SRRH |
2338 | /* Ftrace is shutting down, return anything */ |
2339 | return (unsigned long)FTRACE_ADDR; | |
2340 | } | |
2341 | return ops->trampoline; | |
2342 | } | |
2343 | ||
7413af1f SRRH |
2344 | if (rec->flags & FTRACE_FL_REGS) |
2345 | return (unsigned long)FTRACE_REGS_ADDR; | |
2346 | else | |
2347 | return (unsigned long)FTRACE_ADDR; | |
2348 | } | |
2349 | ||
2350 | /** | |
2351 | * ftrace_get_addr_curr - Get the call address that is already there | |
2352 | * @rec: The ftrace record descriptor | |
2353 | * | |
2354 | * The FTRACE_FL_REGS_EN is set when the record already points to | |
2355 | * a function that saves all the regs. Basically the '_EN' version | |
2356 | * represents the current state of the function. | |
2357 | * | |
2358 | * Returns the address of the trampoline that is currently being called | |
2359 | */ | |
2360 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) | |
2361 | { | |
79922b80 SRRH |
2362 | struct ftrace_ops *ops; |
2363 | ||
2364 | /* Trampolines take precedence over regs */ | |
2365 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | |
2366 | ops = ftrace_find_tramp_ops_curr(rec); | |
2367 | if (FTRACE_WARN_ON(!ops)) { | |
a395d6a7 JP |
2368 | pr_warn("Bad trampoline accounting at: %p (%pS)\n", |
2369 | (void *)rec->ip, (void *)rec->ip); | |
79922b80 SRRH |
2370 | /* Ftrace is shutting down, return anything */ |
2371 | return (unsigned long)FTRACE_ADDR; | |
2372 | } | |
2373 | return ops->trampoline; | |
2374 | } | |
2375 | ||
7413af1f SRRH |
2376 | if (rec->flags & FTRACE_FL_REGS_EN) |
2377 | return (unsigned long)FTRACE_REGS_ADDR; | |
2378 | else | |
2379 | return (unsigned long)FTRACE_ADDR; | |
2380 | } | |
2381 | ||
c88fd863 SR |
2382 | static int |
2383 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |
2384 | { | |
08f6fba5 | 2385 | unsigned long ftrace_old_addr; |
c88fd863 SR |
2386 | unsigned long ftrace_addr; |
2387 | int ret; | |
2388 | ||
7c0868e0 | 2389 | ftrace_addr = ftrace_get_addr_new(rec); |
c88fd863 | 2390 | |
7c0868e0 SRRH |
2391 | /* This needs to be done before we call ftrace_update_record */ |
2392 | ftrace_old_addr = ftrace_get_addr_curr(rec); | |
2393 | ||
2394 | ret = ftrace_update_record(rec, enable); | |
08f6fba5 | 2395 | |
02a392a0 SRRH |
2396 | ftrace_bug_type = FTRACE_BUG_UNKNOWN; |
2397 | ||
c88fd863 SR |
2398 | switch (ret) { |
2399 | case FTRACE_UPDATE_IGNORE: | |
2400 | return 0; | |
2401 | ||
2402 | case FTRACE_UPDATE_MAKE_CALL: | |
02a392a0 | 2403 | ftrace_bug_type = FTRACE_BUG_CALL; |
64fbcd16 | 2404 | return ftrace_make_call(rec, ftrace_addr); |
c88fd863 SR |
2405 | |
2406 | case FTRACE_UPDATE_MAKE_NOP: | |
02a392a0 | 2407 | ftrace_bug_type = FTRACE_BUG_NOP; |
39b5552c | 2408 | return ftrace_make_nop(NULL, rec, ftrace_old_addr); |
08f6fba5 | 2409 | |
08f6fba5 | 2410 | case FTRACE_UPDATE_MODIFY_CALL: |
02a392a0 | 2411 | ftrace_bug_type = FTRACE_BUG_UPDATE; |
08f6fba5 | 2412 | return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); |
5072c59f SR |
2413 | } |
2414 | ||
c88fd863 | 2415 | return -1; /* unknown ftrace bug */
5072c59f SR |
2416 | } |
2417 | ||
e4f5d544 | 2418 | void __weak ftrace_replace_code(int enable) |
3c1720f0 | 2419 | { |
3c1720f0 SR |
2420 | struct dyn_ftrace *rec; |
2421 | struct ftrace_page *pg; | |
6a24a244 | 2422 | int failed; |
3c1720f0 | 2423 | |
45a4a237 SR |
2424 | if (unlikely(ftrace_disabled)) |
2425 | return; | |
2426 | ||
265c831c | 2427 | do_for_each_ftrace_rec(pg, rec) { |
546fece4 SRRH |
2428 | |
2429 | if (rec->flags & FTRACE_FL_DISABLED) | |
2430 | continue; | |
2431 | ||
e4f5d544 | 2432 | failed = __ftrace_replace_code(rec, enable); |
fa9d13cf | 2433 | if (failed) { |
4fd3279b | 2434 | ftrace_bug(failed, rec); |
3279ba37 SR |
2435 | /* Stop processing */ |
2436 | return; | |
3c1720f0 | 2437 | } |
265c831c | 2438 | } while_for_each_ftrace_rec(); |
3c1720f0 SR |
2439 | } |
2440 | ||
c88fd863 SR |
2441 | struct ftrace_rec_iter { |
2442 | struct ftrace_page *pg; | |
2443 | int index; | |
2444 | }; | |
2445 | ||
2446 | /** | |
2447 | * ftrace_rec_iter_start, start up iterating over traced functions | |
2448 | * | |
2449 | * Returns an iterator handle that is used to iterate over all | |
2450 | * the records that represent address locations where functions | |
2451 | * are traced. | |
2452 | * | |
2453 | * May return NULL if no records are available. | |
2454 | */ | |
2455 | struct ftrace_rec_iter *ftrace_rec_iter_start(void) | |
2456 | { | |
2457 | /* | |
2458 | * We only use a single iterator. | |
2459 | * Protected by the ftrace_lock mutex. | |
2460 | */ | |
2461 | static struct ftrace_rec_iter ftrace_rec_iter; | |
2462 | struct ftrace_rec_iter *iter = &ftrace_rec_iter; | |
2463 | ||
2464 | iter->pg = ftrace_pages_start; | |
2465 | iter->index = 0; | |
2466 | ||
2467 | /* Could have empty pages */ | |
2468 | while (iter->pg && !iter->pg->index) | |
2469 | iter->pg = iter->pg->next; | |
2470 | ||
2471 | if (!iter->pg) | |
2472 | return NULL; | |
2473 | ||
2474 | return iter; | |
2475 | } | |
2476 | ||
2477 | /** | |
2478 | * ftrace_rec_iter_next, get the next record to process. | |
2479 | * @iter: The handle to the iterator. | |
2480 | * | |
2481 | * Returns the next iterator after the given iterator @iter. | |
2482 | */ | |
2483 | struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) | |
2484 | { | |
2485 | iter->index++; | |
2486 | ||
2487 | if (iter->index >= iter->pg->index) { | |
2488 | iter->pg = iter->pg->next; | |
2489 | iter->index = 0; | |
2490 | ||
2491 | /* Could have empty pages */ | |
2492 | while (iter->pg && !iter->pg->index) | |
2493 | iter->pg = iter->pg->next; | |
2494 | } | |
2495 | ||
2496 | if (!iter->pg) | |
2497 | return NULL; | |
2498 | ||
2499 | return iter; | |
2500 | } | |
2501 | ||
2502 | /** | |
2503 | * ftrace_rec_iter_record, get the record at the iterator location | |
2504 | * @iter: The current iterator location | |
2505 | * | |
2506 | * Returns the record that the current @iter is at. | |
2507 | */ | |
2508 | struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) | |
2509 | { | |
2510 | return &iter->pg->records[iter->index]; | |
2511 | } | |
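/*
 * Typical arch-side loop (a sketch; the shape follows directly from
 * the three functions above):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// modify the call site at rec->ip here
 *	}
 */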
2512 | ||
492a7ea5 | 2513 | static int |
31e88909 | 2514 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) |
3c1720f0 | 2515 | { |
593eb8a2 | 2516 | int ret; |
3c1720f0 | 2517 | |
45a4a237 SR |
2518 | if (unlikely(ftrace_disabled)) |
2519 | return 0; | |
2520 | ||
25aac9dc | 2521 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
593eb8a2 | 2522 | if (ret) { |
02a392a0 | 2523 | ftrace_bug_type = FTRACE_BUG_INIT; |
4fd3279b | 2524 | ftrace_bug(ret, rec); |
492a7ea5 | 2525 | return 0; |
37ad5084 | 2526 | } |
492a7ea5 | 2527 | return 1; |
3c1720f0 SR |
2528 | } |
2529 | ||
000ab691 SR |
2530 | /* |
2531 | * archs can override this function if they must do something | |
2532 | * before the modifying code is performed. | |
2533 | */ | |
2534 | int __weak ftrace_arch_code_modify_prepare(void) | |
2535 | { | |
2536 | return 0; | |
2537 | } | |
2538 | ||
2539 | /* | |
2540 | * archs can override this function if they must do something | |
2541 | * after the modifying code is performed. | |
2542 | */ | |
2543 | int __weak ftrace_arch_code_modify_post_process(void) | |
2544 | { | |
2545 | return 0; | |
2546 | } | |
2547 | ||
8ed3e2cf | 2548 | void ftrace_modify_all_code(int command) |
3d083395 | 2549 | { |
59338f75 | 2550 | int update = command & FTRACE_UPDATE_TRACE_FUNC; |
cd21067f | 2551 | int err = 0; |
59338f75 SRRH |
2552 | |
2553 | /* | |
2554 | * If the ftrace_caller calls a ftrace_ops func directly, | |
2555 | * we need to make sure that it only traces functions it | |
2556 | * expects to trace. When doing the switch of functions, | |
2557 | * we need to update to the ftrace_ops_list_func first | |
2558 | * before the transition between old and new calls are set, | |
2559 | * as the ftrace_ops_list_func will check the ops hashes | |
2560 | * to make sure the ops are having the right functions | |
2561 | * traced. | |
2562 | */ | |
cd21067f PM |
2563 | if (update) { |
2564 | err = ftrace_update_ftrace_func(ftrace_ops_list_func); | |
2565 | if (FTRACE_WARN_ON(err)) | |
2566 | return; | |
2567 | } | |
59338f75 | 2568 | |
8ed3e2cf | 2569 | if (command & FTRACE_UPDATE_CALLS) |
d61f82d0 | 2570 | ftrace_replace_code(1); |
8ed3e2cf | 2571 | else if (command & FTRACE_DISABLE_CALLS) |
d61f82d0 SR |
2572 | ftrace_replace_code(0); |
2573 | ||
405e1d83 SRRH |
2574 | if (update && ftrace_trace_function != ftrace_ops_list_func) { |
2575 | function_trace_op = set_function_trace_op; | |
2576 | smp_wmb(); | |
2577 | /* If irqs are disabled, we are in stop machine */ | |
2578 | if (!irqs_disabled()) | |
2579 | smp_call_function(ftrace_sync_ipi, NULL, 1); | |
cd21067f PM |
2580 | err = ftrace_update_ftrace_func(ftrace_trace_function); |
2581 | if (FTRACE_WARN_ON(err)) | |
2582 | return; | |
405e1d83 | 2583 | } |
d61f82d0 | 2584 | |
8ed3e2cf | 2585 | if (command & FTRACE_START_FUNC_RET) |
cd21067f | 2586 | err = ftrace_enable_ftrace_graph_caller(); |
8ed3e2cf | 2587 | else if (command & FTRACE_STOP_FUNC_RET) |
cd21067f PM |
2588 | err = ftrace_disable_ftrace_graph_caller(); |
2589 | FTRACE_WARN_ON(err); | |
8ed3e2cf SR |
2590 | } |
2591 | ||
2592 | static int __ftrace_modify_code(void *data) | |
2593 | { | |
2594 | int *command = data; | |
2595 | ||
2596 | ftrace_modify_all_code(*command); | |
5a45cfe1 | 2597 | |
d61f82d0 | 2598 | return 0; |
3d083395 SR |
2599 | } |
2600 | ||
c88fd863 SR |
2601 | /** |
2602 | * ftrace_run_stop_machine, go back to the stop machine method | |
2603 | * @command: The command to tell ftrace what to do | |
2604 | * | |
2605 | * If an arch needs to fall back to the stop machine method, then | |
2606 | * it can call this function. | |
2607 | */ | |
2608 | void ftrace_run_stop_machine(int command) | |
2609 | { | |
2610 | stop_machine(__ftrace_modify_code, &command, NULL); | |
2611 | } | |
2612 | ||
2613 | /** | |
2614 | * arch_ftrace_update_code, modify the code to trace or not trace | |
2615 | * @command: The command that needs to be done | |
2616 | * | |
2617 | * Archs can override this function if they do not need to | |
2618 | * run stop_machine() to modify code. | |
2619 | */ | |
2620 | void __weak arch_ftrace_update_code(int command) | |
2621 | { | |
2622 | ftrace_run_stop_machine(command); | |
2623 | } | |
2624 | ||
e309b41d | 2625 | static void ftrace_run_update_code(int command) |
3d083395 | 2626 | { |
000ab691 SR |
2627 | int ret; |
2628 | ||
2629 | ret = ftrace_arch_code_modify_prepare(); | |
2630 | FTRACE_WARN_ON(ret); | |
2631 | if (ret) | |
2632 | return; | |
2633 | ||
c88fd863 SR |
2634 | /* |
2635 | * By default we use stop_machine() to modify the code. | |
2636 | * But archs can do whatever they want as long as it | |
2637 | * is safe. The stop_machine() is the safest, but also | |
2638 | * produces the most overhead. | |
2639 | */ | |
2640 | arch_ftrace_update_code(command); | |
2641 | ||
000ab691 SR |
2642 | ret = ftrace_arch_code_modify_post_process(); |
2643 | FTRACE_WARN_ON(ret); | |
3d083395 SR |
2644 | } |
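/*
 * Illustrative note (not from this file): callers build @command as a
 * bitmask of FTRACE_* actions before invoking this path, for example:
 *
 *	int command = FTRACE_UPDATE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
 *	ftrace_run_update_code(command);
 */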
2645 | ||
8252ecf3 | 2646 | static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, |
7485058e | 2647 | struct ftrace_ops_hash *old_hash) |
e1effa01 SRRH |
2648 | { |
2649 | ops->flags |= FTRACE_OPS_FL_MODIFYING; | |
7485058e SRRH |
2650 | ops->old_hash.filter_hash = old_hash->filter_hash; |
2651 | ops->old_hash.notrace_hash = old_hash->notrace_hash; | |
e1effa01 | 2652 | ftrace_run_update_code(command); |
8252ecf3 | 2653 | ops->old_hash.filter_hash = NULL; |
7485058e | 2654 | ops->old_hash.notrace_hash = NULL; |
e1effa01 SRRH |
2655 | ops->flags &= ~FTRACE_OPS_FL_MODIFYING; |
2656 | } | |
2657 | ||
d61f82d0 | 2658 | static ftrace_func_t saved_ftrace_func; |
60a7ecf4 | 2659 | static int ftrace_start_up; |
df4fc315 | 2660 | |
12cce594 SRRH |
2661 | void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) |
2662 | { | |
2663 | } | |
2664 | ||
df4fc315 SR |
2665 | static void ftrace_startup_enable(int command) |
2666 | { | |
2667 | if (saved_ftrace_func != ftrace_trace_function) { | |
2668 | saved_ftrace_func = ftrace_trace_function; | |
2669 | command |= FTRACE_UPDATE_TRACE_FUNC; | |
2670 | } | |
2671 | ||
2672 | if (!command || !ftrace_enabled) | |
2673 | return; | |
2674 | ||
2675 | ftrace_run_update_code(command); | |
2676 | } | |
d61f82d0 | 2677 | |
e1effa01 SRRH |
2678 | static void ftrace_startup_all(int command) |
2679 | { | |
2680 | update_all_ops = true; | |
2681 | ftrace_startup_enable(command); | |
2682 | update_all_ops = false; | |
2683 | } | |
2684 | ||
a1cd6173 | 2685 | static int ftrace_startup(struct ftrace_ops *ops, int command) |
3d083395 | 2686 | { |
8a56d776 | 2687 | int ret; |
b848914c | 2688 | |
4eebcc81 | 2689 | if (unlikely(ftrace_disabled)) |
a1cd6173 | 2690 | return -ENODEV; |
4eebcc81 | 2691 | |
8a56d776 SRRH |
2692 | ret = __register_ftrace_function(ops); |
2693 | if (ret) | |
2694 | return ret; | |
2695 | ||
60a7ecf4 | 2696 | ftrace_start_up++; |
d61f82d0 | 2697 | |
e1effa01 SRRH |
2698 | /* |
2699 | * Note that ftrace probes uses this to start up | |
2700 | * and modify functions it will probe. But we still | |
2701 | * set the ADDING flag for modification, as probes | |
2702 | * do not have trampolines. If they add them in the | |
2703 | * future, then the probes will need to distinguish | |
2704 | * between adding and updating probes. | |
2705 | */ | |
2706 | ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; | |
66209a5b | 2707 | |
f8b8be8a MH |
2708 | ret = ftrace_hash_ipmodify_enable(ops); |
2709 | if (ret < 0) { | |
2710 | /* Rollback registration process */ | |
2711 | __unregister_ftrace_function(ops); | |
2712 | ftrace_start_up--; | |
2713 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | |
2714 | return ret; | |
2715 | } | |
2716 | ||
7f50d06b JO |
2717 | if (ftrace_hash_rec_enable(ops, 1)) |
2718 | command |= FTRACE_UPDATE_CALLS; | |
ed926f9b | 2719 | |
df4fc315 | 2720 | ftrace_startup_enable(command); |
a1cd6173 | 2721 | |
e1effa01 SRRH |
2722 | ops->flags &= ~FTRACE_OPS_FL_ADDING; |
2723 | ||
a1cd6173 | 2724 | return 0; |
3d083395 SR |
2725 | } |
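/*
 * Pairing sketch (illustrative, based on how this file's public API
 * uses these helpers): register_ftrace_function() wraps
 * ftrace_startup() under ftrace_lock, and unregister_ftrace_function()
 * does the same with ftrace_shutdown(), roughly:
 *
 *	mutex_lock(&ftrace_lock);
 *	ret = ftrace_startup(ops, 0);
 *	mutex_unlock(&ftrace_lock);
 */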
2726 | ||
8a56d776 | 2727 | static int ftrace_shutdown(struct ftrace_ops *ops, int command) |
3d083395 | 2728 | { |
8a56d776 | 2729 | int ret; |
b848914c | 2730 | |
4eebcc81 | 2731 | if (unlikely(ftrace_disabled)) |
8a56d776 SRRH |
2732 | return -ENODEV; |
2733 | ||
2734 | ret = __unregister_ftrace_function(ops); | |
2735 | if (ret) | |
2736 | return ret; | |
4eebcc81 | 2737 | |
60a7ecf4 | 2738 | ftrace_start_up--; |
9ea1a153 FW |
2739 | /* |
2740 | * Just warn in case of unbalance, no need to kill ftrace, it's not | |
2741 | * critical but the ftrace_call callers may never be nopped again after | |
2742 | * further ftrace uses. | |
2743 | */ | |
2744 | WARN_ON_ONCE(ftrace_start_up < 0); | |
2745 | ||
f8b8be8a MH |
2746 | /* Disabling ipmodify never fails */ |
2747 | ftrace_hash_ipmodify_disable(ops); | |
ed926f9b | 2748 | |
7f50d06b JO |
2749 | if (ftrace_hash_rec_disable(ops, 1)) |
2750 | command |= FTRACE_UPDATE_CALLS; | |
b848914c | 2751 | |
7f50d06b | 2752 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
3d083395 | 2753 | |
d61f82d0 SR |
2754 | if (saved_ftrace_func != ftrace_trace_function) { |
2755 | saved_ftrace_func = ftrace_trace_function; | |
2756 | command |= FTRACE_UPDATE_TRACE_FUNC; | |
2757 | } | |
3d083395 | 2758 | |
a4c35ed2 SRRH |
2759 | if (!command || !ftrace_enabled) { |
2760 | /* | |
edb096e0 SRV |
2761 | * If these are dynamic or per_cpu ops, they still |
2762 | * need their data freed. Since, function tracing is | |
a4c35ed2 SRRH |
2763 | * not currently active, we can just free them |
2764 | * without synchronizing all CPUs. | |
2765 | */ | |
b3a88803 | 2766 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) |
edb096e0 SRV |
2767 | goto free_ops; |
2768 | ||
8a56d776 | 2769 | return 0; |
a4c35ed2 | 2770 | } |
d61f82d0 | 2771 | |
79922b80 SRRH |
2772 | /* |
2773 | * If the ops uses a trampoline, then it needs to be | |
2774 | * tested first on update. | |
2775 | */ | |
e1effa01 | 2776 | ops->flags |= FTRACE_OPS_FL_REMOVING; |
79922b80 SRRH |
2777 | removed_ops = ops; |
2778 | ||
fef5aeee SRRH |
2779 | /* The trampoline logic checks the old hashes */ |
2780 | ops->old_hash.filter_hash = ops->func_hash->filter_hash; | |
2781 | ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; | |
2782 | ||
d61f82d0 | 2783 | ftrace_run_update_code(command); |
a4c35ed2 | 2784 | |
84bde62c SRRH |
2785 | /* |
2786 | * If there's no more ops registered with ftrace, run a | |
2787 | * sanity check to make sure all rec flags are cleared. | |
2788 | */ | |
f86f4180 CZ |
2789 | if (rcu_dereference_protected(ftrace_ops_list, |
2790 | lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { | |
84bde62c SRRH |
2791 | struct ftrace_page *pg; |
2792 | struct dyn_ftrace *rec; | |
2793 | ||
2794 | do_for_each_ftrace_rec(pg, rec) { | |
977c1f9c | 2795 | if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) |
84bde62c SRRH |
2796 | pr_warn(" %pS flags:%lx\n", |
2797 | (void *)rec->ip, rec->flags); | |
2798 | } while_for_each_ftrace_rec(); | |
2799 | } | |
2800 | ||
fef5aeee SRRH |
2801 | ops->old_hash.filter_hash = NULL; |
2802 | ops->old_hash.notrace_hash = NULL; | |
2803 | ||
2804 | removed_ops = NULL; | |
e1effa01 | 2805 | ops->flags &= ~FTRACE_OPS_FL_REMOVING; |
79922b80 | 2806 | |
a4c35ed2 SRRH |
2807 | /* |
2808 | * Dynamic ops may be freed, we must make sure that all | |
2809 | * callers are done before leaving this function. | |
ba27f2bc | 2810 | * The same goes for freeing the per_cpu data of the per_cpu |
a4c35ed2 | 2811 | * ops. |
a4c35ed2 | 2812 | */ |
b3a88803 | 2813 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { |
0598e4f0 SRV |
2814 | /* |
2815 | * We need to do a hard force of sched synchronization. | |
2816 | * This is because we use preempt_disable() to do RCU, but | |
2817 | * the function tracers can be called where RCU is not watching | |
2818 | * (like before user_exit()). We cannot rely on the RCU | |
2819 | * infrastructure to do the synchronization, thus we must do it | |
2820 | * ourselves. | |
2821 | */ | |
a4c35ed2 SRRH |
2822 | schedule_on_each_cpu(ftrace_sync); |
2823 | ||
0598e4f0 SRV |
2824 | /* |
2825 | * When the kernel is preemptible, tasks can be preempted | |
2826 | * while on a ftrace trampoline. Just scheduling a task on | |
2827 | * a CPU is not good enough to flush them. Calling | |
2828 | * synchronize_rcu_tasks() will wait for those tasks to | |
2829 | * execute and either schedule voluntarily or enter user space. | |
2830 | */ | |
2831 | if (IS_ENABLED(CONFIG_PREEMPT)) | |
2832 | synchronize_rcu_tasks(); | |
2833 | ||
edb096e0 | 2834 | free_ops: |
12cce594 | 2835 | arch_ftrace_trampoline_free(ops); |
a4c35ed2 SRRH |
2836 | } |
2837 | ||
8a56d776 | 2838 | return 0; |
3d083395 SR |
2839 | } |
2840 | ||
e309b41d | 2841 | static void ftrace_startup_sysctl(void) |
b0fc494f | 2842 | { |
1619dc3f PA |
2843 | int command; |
2844 | ||
4eebcc81 SR |
2845 | if (unlikely(ftrace_disabled)) |
2846 | return; | |
2847 | ||
d61f82d0 SR |
2848 | /* Force update next time */ |
2849 | saved_ftrace_func = NULL; | |
60a7ecf4 | 2850 | /* ftrace_start_up is true if we want ftrace running */ |
1619dc3f PA |
2851 | if (ftrace_start_up) { |
2852 | command = FTRACE_UPDATE_CALLS; | |
2853 | if (ftrace_graph_active) | |
2854 | command |= FTRACE_START_FUNC_RET; | |
524a3868 | 2855 | ftrace_startup_enable(command); |
1619dc3f | 2856 | } |
b0fc494f SR |
2857 | } |
2858 | ||
e309b41d | 2859 | static void ftrace_shutdown_sysctl(void) |
b0fc494f | 2860 | { |
1619dc3f PA |
2861 | int command; |
2862 | ||
4eebcc81 SR |
2863 | if (unlikely(ftrace_disabled)) |
2864 | return; | |
2865 | ||
60a7ecf4 | 2866 | /* ftrace_start_up is true if ftrace is running */ |
1619dc3f PA |
2867 | if (ftrace_start_up) { |
2868 | command = FTRACE_DISABLE_CALLS; | |
2869 | if (ftrace_graph_active) | |
2870 | command |= FTRACE_STOP_FUNC_RET; | |
2871 | ftrace_run_update_code(command); | |
2872 | } | |
b0fc494f SR |
2873 | } |
2874 | ||
a5a1d1c2 | 2875 | static u64 ftrace_update_time; |
3d083395 SR |
2876 | unsigned long ftrace_update_tot_cnt; |
2877 | ||
8c4f3c3f | 2878 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
f7bc8b61 | 2879 | { |
8c4f3c3f SRRH |
2880 | /* |
2881 | * Filter_hash being empty will default to trace module. | |
2882 | * But notrace hash requires a test of individual module functions. | |
2883 | */ | |
33b7f99c SRRH |
2884 | return ftrace_hash_empty(ops->func_hash->filter_hash) && |
2885 | ftrace_hash_empty(ops->func_hash->notrace_hash); | |
8c4f3c3f SRRH |
2886 | } |
2887 | ||
2888 | /* | |
2889 | * Check if the current ops references the record. | |
2890 | * | |
2891 | * If the ops traces all functions, then it was already accounted for. | |
2892 | * If the ops does not trace the current record function, skip it. | |
2893 | * If the ops ignores the function via notrace filter, skip it. | |
2894 | */ | |
2895 | static inline bool | |
2896 | ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) | |
2897 | { | |
2898 | /* If ops isn't enabled, ignore it */ | |
2899 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | |
44ec3ec0 | 2900 | return false; |
8c4f3c3f | 2901 | |
b7ffffbb | 2902 | /* If ops traces all then it includes this function */ |
8c4f3c3f | 2903 | if (ops_traces_mod(ops)) |
44ec3ec0 | 2904 | return true; |
8c4f3c3f SRRH |
2905 | |
2906 | /* The function must be in the filter */ | |
33b7f99c | 2907 | if (!ftrace_hash_empty(ops->func_hash->filter_hash) && |
2b2c279c | 2908 | !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) |
44ec3ec0 | 2909 | return false; |
f7bc8b61 | 2910 | |
8c4f3c3f | 2911 | /* If in notrace hash, we ignore it too */ |
33b7f99c | 2912 | if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) |
44ec3ec0 | 2913 | return false; |
8c4f3c3f | 2914 | |
44ec3ec0 | 2915 | return true; |
8c4f3c3f SRRH |
2916 | } |
2917 | ||
1dc43cf0 | 2918 | static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) |
3d083395 | 2919 | { |
85ae32ae | 2920 | struct ftrace_page *pg; |
e94142a6 | 2921 | struct dyn_ftrace *p; |
a5a1d1c2 | 2922 | u64 start, stop; |
1dc43cf0 | 2923 | unsigned long update_cnt = 0; |
b7ffffbb | 2924 | unsigned long rec_flags = 0; |
85ae32ae | 2925 | int i; |
f7bc8b61 | 2926 | |
b7ffffbb SRRH |
2927 | start = ftrace_now(raw_smp_processor_id()); |
2928 | ||
f7bc8b61 | 2929 | /* |
b7ffffbb SRRH |
2930 | * When a module is loaded, this function is called to convert |
2931 | * the calls to mcount in its text to nops, and also to create | |
2932 | * an entry in the ftrace data. Now, if ftrace is activated | |
2933 | * after this call, but before the module sets its text to | |
2934 | * read-only, the modification of enabling ftrace can fail if | |
2935 | * the read-only is done while ftrace is converting the calls. | |
2936 | * To prevent this, the module's records are set as disabled | |
2937 | * and will be enabled after the call to set the module's text | |
2938 | * to read-only. | |
f7bc8b61 | 2939 | */ |
b7ffffbb SRRH |
2940 | if (mod) |
2941 | rec_flags |= FTRACE_FL_DISABLED; | |
3d083395 | 2942 | |
1dc43cf0 | 2943 | for (pg = new_pgs; pg; pg = pg->next) { |
3d083395 | 2944 | |
85ae32ae | 2945 | for (i = 0; i < pg->index; i++) { |
8c4f3c3f | 2946 | |
85ae32ae SR |
2947 | /* If something went wrong, bail without enabling anything */ |
2948 | if (unlikely(ftrace_disabled)) | |
2949 | return -1; | |
f22f9a89 | 2950 | |
85ae32ae | 2951 | p = &pg->records[i]; |
b7ffffbb | 2952 | p->flags = rec_flags; |
f22f9a89 | 2953 | |
2f4df001 | 2954 | #ifndef CC_USING_NOP_MCOUNT |
85ae32ae SR |
2955 | /* |
2956 | * Do the initial record conversion from mcount jump | |
2957 | * to the NOP instructions. | |
2958 | */ | |
2959 | if (!ftrace_code_disable(mod, p)) | |
2960 | break; | |
2f4df001 | 2961 | #endif |
5cb084bb | 2962 | |
1dc43cf0 | 2963 | update_cnt++; |
5cb084bb | 2964 | } |
3d083395 SR |
2965 | } |
2966 | ||
750ed1a4 | 2967 | stop = ftrace_now(raw_smp_processor_id()); |
3d083395 | 2968 | ftrace_update_time = stop - start; |
1dc43cf0 | 2969 | ftrace_update_tot_cnt += update_cnt; |
3d083395 | 2970 | |
16444a8a ACM |
2971 | return 0; |
2972 | } | |
2973 | ||
a7900875 | 2974 | static int ftrace_allocate_records(struct ftrace_page *pg, int count) |
3c1720f0 | 2975 | { |
a7900875 | 2976 | int order; |
3c1720f0 | 2977 | int cnt; |
3c1720f0 | 2978 | |
a7900875 SR |
2979 | if (WARN_ON(!count)) |
2980 | return -EINVAL; | |
2981 | ||
2982 | order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); | |
3c1720f0 SR |
2983 | |
2984 | /* | |
a7900875 SR |
2985 | * We want to fill as much as possible. No more than a page |
2986 | * may be empty. | |
3c1720f0 | 2987 | */ |
a7900875 SR |
2988 | while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) |
2989 | order--; | |
3c1720f0 | 2990 | |
a7900875 SR |
2991 | again: |
2992 | pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | |
3c1720f0 | 2993 | |
a7900875 SR |
2994 | if (!pg->records) { |
2995 | /* if we can't allocate this size, try something smaller */ | |
2996 | if (!order) | |
2997 | return -ENOMEM; | |
2998 | order >>= 1; | |
2999 | goto again; | |
3000 | } | |
3c1720f0 | 3001 | |
a7900875 SR |
3002 | cnt = (PAGE_SIZE << order) / ENTRY_SIZE; |
3003 | pg->size = cnt; | |
3c1720f0 | 3004 | |
a7900875 SR |
3005 | if (cnt > count) |
3006 | cnt = count; | |
3007 | ||
3008 | return cnt; | |
3009 | } | |
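/*
 * Worked example (illustrative; assumes 4096-byte pages and a 16-byte
 * struct dyn_ftrace, i.e. ENTRIES_PER_PAGE == 256): for count == 1000,
 * DIV_ROUND_UP(1000, 256) == 4 pages gives order == 2. Capacity is
 * (4096 << 2) / 16 == 1024 records, below count + ENTRIES_PER_PAGE
 * == 1256, so the order is not shrunk and all 1000 records fit with
 * less than a page of slack.
 */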
3010 | ||
3011 | static struct ftrace_page * | |
3012 | ftrace_allocate_pages(unsigned long num_to_init) | |
3013 | { | |
3014 | struct ftrace_page *start_pg; | |
3015 | struct ftrace_page *pg; | |
3016 | int order; | |
3017 | int cnt; | |
3018 | ||
3019 | if (!num_to_init) | |
3020 | return NULL; | |
3021 | ||
3022 | start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); | |
3023 | if (!pg) | |
3024 | return NULL; | |
3025 | ||
3026 | /* | |
3027 | * Try to allocate as much as possible in one contiguous | |
3028 | * location that fills in all of the space. We want to | |
3029 | * waste as little space as possible. | |
3030 | */ | |
3031 | for (;;) { | |
3032 | cnt = ftrace_allocate_records(pg, num_to_init); | |
3033 | if (cnt < 0) | |
3034 | goto free_pages; | |
3035 | ||
3036 | num_to_init -= cnt; | |
3037 | if (!num_to_init) | |
3c1720f0 SR |
3038 | break; |
3039 | ||
a7900875 SR |
3040 | pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); |
3041 | if (!pg->next) | |
3042 | goto free_pages; | |
3043 | ||
3c1720f0 SR |
3044 | pg = pg->next; |
3045 | } | |
3046 | ||
a7900875 SR |
3047 | return start_pg; |
3048 | ||
3049 | free_pages: | |
1f61be00 NK |
3050 | pg = start_pg; |
3051 | while (pg) { | |
a7900875 SR |
3052 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); |
3053 | free_pages((unsigned long)pg->records, order); | |
3054 | start_pg = pg->next; | |
3055 | kfree(pg); | |
3056 | pg = start_pg; | |
3057 | } | |
3058 | pr_info("ftrace: FAILED to allocate memory for functions\n"); | |
3059 | return NULL; | |
3060 | } | |
3061 | ||
5072c59f SR |
3062 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
3063 | ||
3064 | struct ftrace_iterator { | |
98c4fd04 | 3065 | loff_t pos; |
4aeb6967 | 3066 | loff_t func_pos; |
5985ea8b | 3067 | loff_t mod_pos; |
4aeb6967 SR |
3068 | struct ftrace_page *pg; |
3069 | struct dyn_ftrace *func; | |
3070 | struct ftrace_func_probe *probe; | |
eee8ded1 | 3071 | struct ftrace_func_entry *probe_entry; |
4aeb6967 | 3072 | struct trace_parser parser; |
1cf41dd7 | 3073 | struct ftrace_hash *hash; |
33dc9b12 | 3074 | struct ftrace_ops *ops; |
5985ea8b SRV |
3075 | struct trace_array *tr; |
3076 | struct list_head *mod_list; | |
eee8ded1 | 3077 | int pidx; |
4aeb6967 SR |
3078 | int idx; |
3079 | unsigned flags; | |
5072c59f SR |
3080 | }; |
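/*
 * The seq_file walk below visits entries in three stages: the plain
 * functions first (t_func_next), then any cached module filters
 * (t_mod_next), and finally the attached probes (t_probe_next).  The
 * iterator's pos runs across all three stages, with func_pos and
 * mod_pos recording where the earlier stages ended.
 */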
3081 | ||
8fc0c701 | 3082 | static void * |
eee8ded1 | 3083 | t_probe_next(struct seq_file *m, loff_t *pos) |
8fc0c701 SR |
3084 | { |
3085 | struct ftrace_iterator *iter = m->private; | |
d2afd57a | 3086 | struct trace_array *tr = iter->ops->private; |
04ec7bb6 | 3087 | struct list_head *func_probes; |
eee8ded1 SRV |
3088 | struct ftrace_hash *hash; |
3089 | struct list_head *next; | |
4aeb6967 | 3090 | struct hlist_node *hnd = NULL; |
8fc0c701 | 3091 | struct hlist_head *hhd; |
eee8ded1 | 3092 | int size; |
8fc0c701 | 3093 | |
8fc0c701 | 3094 | (*pos)++; |
98c4fd04 | 3095 | iter->pos = *pos; |
8fc0c701 | 3096 | |
04ec7bb6 | 3097 | if (!tr) |
8fc0c701 SR |
3098 | return NULL; |
3099 | ||
04ec7bb6 SRV |
3100 | func_probes = &tr->func_probes; |
3101 | if (list_empty(func_probes)) | |
8fc0c701 SR |
3102 | return NULL; |
3103 | ||
eee8ded1 | 3104 | if (!iter->probe) { |
04ec7bb6 | 3105 | next = func_probes->next; |
7b60f3d8 | 3106 | iter->probe = list_entry(next, struct ftrace_func_probe, list); |
eee8ded1 SRV |
3107 | } |
3108 | ||
3109 | if (iter->probe_entry) | |
3110 | hnd = &iter->probe_entry->hlist; | |
3111 | ||
3112 | hash = iter->probe->ops.func_hash->filter_hash; | |
3113 | size = 1 << hash->size_bits; | |
3114 | ||
3115 | retry: | |
3116 | if (iter->pidx >= size) { | |
04ec7bb6 | 3117 | if (iter->probe->list.next == func_probes) |
eee8ded1 SRV |
3118 | return NULL; |
3119 | next = iter->probe->list.next; | |
7b60f3d8 | 3120 | iter->probe = list_entry(next, struct ftrace_func_probe, list); |
eee8ded1 SRV |
3121 | hash = iter->probe->ops.func_hash->filter_hash; |
3122 | size = 1 << hash->size_bits; | |
3123 | iter->pidx = 0; | |
3124 | } | |
3125 | ||
3126 | hhd = &hash->buckets[iter->pidx]; | |
8fc0c701 SR |
3127 | |
3128 | if (hlist_empty(hhd)) { | |
eee8ded1 | 3129 | iter->pidx++; |
8fc0c701 SR |
3130 | hnd = NULL; |
3131 | goto retry; | |
3132 | } | |
3133 | ||
3134 | if (!hnd) | |
3135 | hnd = hhd->first; | |
3136 | else { | |
3137 | hnd = hnd->next; | |
3138 | if (!hnd) { | |
eee8ded1 | 3139 | iter->pidx++; |
8fc0c701 SR |
3140 | goto retry; |
3141 | } | |
3142 | } | |
3143 | ||
4aeb6967 SR |
3144 | if (WARN_ON_ONCE(!hnd)) |
3145 | return NULL; | |
3146 | ||
eee8ded1 | 3147 | iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); |
4aeb6967 SR |
3148 | |
3149 | return iter; | |
8fc0c701 SR |
3150 | } |
3151 | ||
eee8ded1 | 3152 | static void *t_probe_start(struct seq_file *m, loff_t *pos) |
8fc0c701 SR |
3153 | { |
3154 | struct ftrace_iterator *iter = m->private; | |
3155 | void *p = NULL; | |
d82d6244 LZ |
3156 | loff_t l; |
3157 | ||
eee8ded1 | 3158 | if (!(iter->flags & FTRACE_ITER_DO_PROBES)) |
69a3083c SR |
3159 | return NULL; |
3160 | ||
5985ea8b | 3161 | if (iter->mod_pos > *pos) |
2bccfffd | 3162 | return NULL; |
8fc0c701 | 3163 | |
eee8ded1 SRV |
3164 | iter->probe = NULL; |
3165 | iter->probe_entry = NULL; | |
3166 | iter->pidx = 0; | |
5985ea8b | 3167 | for (l = 0; l <= (*pos - iter->mod_pos); ) { |
eee8ded1 | 3168 | p = t_probe_next(m, &l); |
d82d6244 LZ |
3169 | if (!p) |
3170 | break; | |
3171 | } | |
4aeb6967 SR |
3172 | if (!p) |
3173 | return NULL; | |
3174 | ||
98c4fd04 | 3175 | /* Only set this if we have an item */ |
eee8ded1 | 3176 | iter->flags |= FTRACE_ITER_PROBE; |
98c4fd04 | 3177 | |
4aeb6967 | 3178 | return iter; |
8fc0c701 SR |
3179 | } |
3180 | ||
4aeb6967 | 3181 | static int |
eee8ded1 | 3182 | t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) |
8fc0c701 | 3183 | { |
eee8ded1 | 3184 | struct ftrace_func_entry *probe_entry; |
7b60f3d8 SRV |
3185 | struct ftrace_probe_ops *probe_ops; |
3186 | struct ftrace_func_probe *probe; | |
8fc0c701 | 3187 | |
eee8ded1 SRV |
3188 | probe = iter->probe; |
3189 | probe_entry = iter->probe_entry; | |
8fc0c701 | 3190 | |
eee8ded1 | 3191 | if (WARN_ON_ONCE(!probe || !probe_entry)) |
4aeb6967 | 3192 | return -EIO; |
8fc0c701 | 3193 | |
7b60f3d8 | 3194 | probe_ops = probe->probe_ops; |
809dcf29 | 3195 | |
7b60f3d8 | 3196 | if (probe_ops->print) |
6e444319 | 3197 | return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); |
8fc0c701 | 3198 | |
7b60f3d8 SRV |
3199 | seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, |
3200 | (void *)probe_ops->func); | |
8fc0c701 SR |
3201 | |
3202 | return 0; | |
3203 | } | |
3204 | ||
5985ea8b SRV |
3205 | static void * |
3206 | t_mod_next(struct seq_file *m, loff_t *pos) | |
3207 | { | |
3208 | struct ftrace_iterator *iter = m->private; | |
3209 | struct trace_array *tr = iter->tr; | |
3210 | ||
3211 | (*pos)++; | |
3212 | iter->pos = *pos; | |
3213 | ||
3214 | iter->mod_list = iter->mod_list->next; | |
3215 | ||
3216 | if (iter->mod_list == &tr->mod_trace || | |
3217 | iter->mod_list == &tr->mod_notrace) { | |
3218 | iter->flags &= ~FTRACE_ITER_MOD; | |
3219 | return NULL; | |
3220 | } | |
3221 | ||
3222 | iter->mod_pos = *pos; | |
3223 | ||
3224 | return iter; | |
3225 | } | |
3226 | ||
3227 | static void *t_mod_start(struct seq_file *m, loff_t *pos) | |
3228 | { | |
3229 | struct ftrace_iterator *iter = m->private; | |
3230 | void *p = NULL; | |
3231 | loff_t l; | |
3232 | ||
3233 | if (iter->func_pos > *pos) | |
3234 | return NULL; | |
3235 | ||
3236 | iter->mod_pos = iter->func_pos; | |
3237 | ||
3238 | /* probes are only available if tr is set */ | |
3239 | if (!iter->tr) | |
3240 | return NULL; | |
3241 | ||
3242 | for (l = 0; l <= (*pos - iter->func_pos); ) { | |
3243 | p = t_mod_next(m, &l); | |
3244 | if (!p) | |
3245 | break; | |
3246 | } | |
3247 | if (!p) { | |
3248 | iter->flags &= ~FTRACE_ITER_MOD; | |
3249 | return t_probe_start(m, pos); | |
3250 | } | |
3251 | ||
3252 | /* Only set this if we have an item */ | |
3253 | iter->flags |= FTRACE_ITER_MOD; | |
3254 | ||
3255 | return iter; | |
3256 | } | |
3257 | ||
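/*
 * Show one cached module filter in the same syntax used to set it,
 * e.g. "wakeup_*:mod:rasmod", or "*:mod:rasmod" for a blank func
 * (the function and module names here are hypothetical).
 */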
3258 | static int | |
3259 | t_mod_show(struct seq_file *m, struct ftrace_iterator *iter) | |
3260 | { | |
3261 | struct ftrace_mod_load *ftrace_mod; | |
3262 | struct trace_array *tr = iter->tr; | |
3263 | ||
3264 | if (WARN_ON_ONCE(!iter->mod_list) || | |
3265 | iter->mod_list == &tr->mod_trace || | |
3266 | iter->mod_list == &tr->mod_notrace) | |
3267 | return -EIO; | |
3268 | ||
3269 | ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); | |
3270 | ||
3271 | if (ftrace_mod->func) | |
3272 | seq_printf(m, "%s", ftrace_mod->func); | |
3273 | else | |
3274 | seq_putc(m, '*'); | |
3275 | ||
3276 | seq_printf(m, ":mod:%s\n", ftrace_mod->module); | |
3277 | ||
3278 | return 0; | |
3279 | } | |
3280 | ||
e309b41d | 3281 | static void * |
5bd84629 | 3282 | t_func_next(struct seq_file *m, loff_t *pos) |
5072c59f SR |
3283 | { |
3284 | struct ftrace_iterator *iter = m->private; | |
3285 | struct dyn_ftrace *rec = NULL; | |
3286 | ||
3287 | (*pos)++; | |
0c75a3ed | 3288 | |
5072c59f SR |
3289 | retry: |
3290 | if (iter->idx >= iter->pg->index) { | |
3291 | if (iter->pg->next) { | |
3292 | iter->pg = iter->pg->next; | |
3293 | iter->idx = 0; | |
3294 | goto retry; | |
3295 | } | |
3296 | } else { | |
3297 | rec = &iter->pg->records[iter->idx++]; | |
c20489da SRV |
3298 | if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && |
3299 | !ftrace_lookup_ip(iter->hash, rec->ip)) || | |
647bcd03 SR |
3300 | |
3301 | ((iter->flags & FTRACE_ITER_ENABLED) && | |
23ea9c4d | 3302 | !(rec->flags & FTRACE_FL_ENABLED))) { |
647bcd03 | 3303 | |
5072c59f SR |
3304 | rec = NULL; |
3305 | goto retry; | |
3306 | } | |
3307 | } | |
3308 | ||
4aeb6967 | 3309 | if (!rec) |
5bd84629 | 3310 | return NULL; |
4aeb6967 | 3311 | |
5bd84629 | 3312 | iter->pos = iter->func_pos = *pos; |
4aeb6967 SR |
3313 | iter->func = rec; |
3314 | ||
3315 | return iter; | |
5072c59f SR |
3316 | } |
3317 | ||
5bd84629 SRV |
3318 | static void * |
3319 | t_next(struct seq_file *m, void *v, loff_t *pos) | |
3320 | { | |
3321 | struct ftrace_iterator *iter = m->private; | |
5985ea8b | 3322 | loff_t l = *pos; /* t_mod_start() must use original pos */
5bd84629 SRV |
3323 | void *ret; |
3324 | ||
3325 | if (unlikely(ftrace_disabled)) | |
3326 | return NULL; | |
3327 | ||
eee8ded1 SRV |
3328 | if (iter->flags & FTRACE_ITER_PROBE) |
3329 | return t_probe_next(m, pos); | |
5bd84629 | 3330 | |
5985ea8b SRV |
3331 | if (iter->flags & FTRACE_ITER_MOD) |
3332 | return t_mod_next(m, pos); | |
3333 | ||
5bd84629 | 3334 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
eee8ded1 | 3335 | /* next must increment pos, and t_mod_start() does not */
5bd84629 | 3336 | (*pos)++; |
5985ea8b | 3337 | return t_mod_start(m, &l); |
5bd84629 SRV |
3338 | } |
3339 | ||
3340 | ret = t_func_next(m, pos); | |
3341 | ||
3342 | if (!ret) | |
5985ea8b | 3343 | return t_mod_start(m, &l); |
5bd84629 SRV |
3344 | |
3345 | return ret; | |
3346 | } | |
3347 | ||
98c4fd04 SR |
3348 | static void reset_iter_read(struct ftrace_iterator *iter) |
3349 | { | |
3350 | iter->pos = 0; | |
3351 | iter->func_pos = 0; | |
5985ea8b | 3352 | iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); |
5072c59f SR |
3353 | } |
3354 | ||
3355 | static void *t_start(struct seq_file *m, loff_t *pos) | |
3356 | { | |
3357 | struct ftrace_iterator *iter = m->private; | |
3358 | void *p = NULL; | |
694ce0a5 | 3359 | loff_t l; |
5072c59f | 3360 | |
8fc0c701 | 3361 | mutex_lock(&ftrace_lock); |
45a4a237 SR |
3362 | |
3363 | if (unlikely(ftrace_disabled)) | |
3364 | return NULL; | |
3365 | ||
98c4fd04 SR |
3366 | /* |
3367 | * If an lseek was done, then reset and start from beginning. | |
3368 | */ | |
3369 | if (*pos < iter->pos) | |
3370 | reset_iter_read(iter); | |
3371 | ||
0c75a3ed SR |
3372 | /* |
3373 | * For set_ftrace_filter reading, if we have the filter | |
3374 | * off, we can short cut and just print out that all | |
3375 | * functions are enabled. | |
3376 | */ | |
c20489da SRV |
3377 | if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && |
3378 | ftrace_hash_empty(iter->hash)) { | |
43ff926a | 3379 | iter->func_pos = 1; /* Account for the message */ |
0c75a3ed | 3380 | if (*pos > 0) |
5985ea8b | 3381 | return t_mod_start(m, pos); |
0c75a3ed | 3382 | iter->flags |= FTRACE_ITER_PRINTALL; |
df091625 | 3383 | /* reset in case of seek/pread */ |
eee8ded1 | 3384 | iter->flags &= ~FTRACE_ITER_PROBE; |
0c75a3ed SR |
3385 | return iter; |
3386 | } | |
3387 | ||
5985ea8b SRV |
3388 | if (iter->flags & FTRACE_ITER_MOD) |
3389 | return t_mod_start(m, pos); | |
8fc0c701 | 3390 | |
98c4fd04 SR |
3391 | /* |
3392 | * Unfortunately, we need to restart at ftrace_pages_start | |
3393 | * every time we let go of the ftrace_lock mutex. This is because
3394 | * those pointers can change without the lock. | |
3395 | */ | |
694ce0a5 LZ |
3396 | iter->pg = ftrace_pages_start; |
3397 | iter->idx = 0; | |
3398 | for (l = 0; l <= *pos; ) { | |
5bd84629 | 3399 | p = t_func_next(m, &l); |
694ce0a5 LZ |
3400 | if (!p) |
3401 | break; | |
50cdaf08 | 3402 | } |
5821e1b7 | 3403 | |
69a3083c | 3404 | if (!p) |
5985ea8b | 3405 | return t_mod_start(m, pos); |
4aeb6967 SR |
3406 | |
3407 | return iter; | |
5072c59f SR |
3408 | } |
3409 | ||
3410 | static void t_stop(struct seq_file *m, void *p) | |
3411 | { | |
8fc0c701 | 3412 | mutex_unlock(&ftrace_lock); |
5072c59f SR |
3413 | } |
3414 | ||
15d5b02c SRRH |
3415 | void * __weak |
3416 | arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) | |
3417 | { | |
3418 | return NULL; | |
3419 | } | |
3420 | ||
3421 | static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, | |
3422 | struct dyn_ftrace *rec) | |
3423 | { | |
3424 | void *ptr; | |
3425 | ||
3426 | ptr = arch_ftrace_trampoline_func(ops, rec); | |
3427 | if (ptr) | |
3428 | seq_printf(m, " ->%pS", ptr); | |
3429 | } | |
3430 | ||
5072c59f SR |
3431 | static int t_show(struct seq_file *m, void *v) |
3432 | { | |
0c75a3ed | 3433 | struct ftrace_iterator *iter = m->private; |
4aeb6967 | 3434 | struct dyn_ftrace *rec; |
5072c59f | 3435 | |
eee8ded1 SRV |
3436 | if (iter->flags & FTRACE_ITER_PROBE) |
3437 | return t_probe_show(m, iter); | |
8fc0c701 | 3438 | |
5985ea8b SRV |
3439 | if (iter->flags & FTRACE_ITER_MOD) |
3440 | return t_mod_show(m, iter); | |
3441 | ||
0c75a3ed | 3442 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
8c006cf7 | 3443 | if (iter->flags & FTRACE_ITER_NOTRACE) |
fa6f0cc7 | 3444 | seq_puts(m, "#### no functions disabled ####\n"); |
8c006cf7 | 3445 | else |
fa6f0cc7 | 3446 | seq_puts(m, "#### all functions enabled ####\n"); |
0c75a3ed SR |
3447 | return 0; |
3448 | } | |
3449 | ||
4aeb6967 SR |
3450 | rec = iter->func; |
3451 | ||
5072c59f SR |
3452 | if (!rec) |
3453 | return 0; | |
3454 | ||
647bcd03 | 3455 | seq_printf(m, "%ps", (void *)rec->ip); |
9674b2fa | 3456 | if (iter->flags & FTRACE_ITER_ENABLED) { |
030f4e1c | 3457 | struct ftrace_ops *ops; |
15d5b02c | 3458 | |
f8b8be8a | 3459 | seq_printf(m, " (%ld)%s%s", |
0376bde1 | 3460 | ftrace_rec_count(rec), |
f8b8be8a MH |
3461 | rec->flags & FTRACE_FL_REGS ? " R" : " ", |
3462 | rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); | |
9674b2fa | 3463 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
5fecaa04 | 3464 | ops = ftrace_find_tramp_ops_any(rec); |
39daa7b9 SRRH |
3465 | if (ops) { |
3466 | do { | |
3467 | seq_printf(m, "\ttramp: %pS (%pS)", | |
3468 | (void *)ops->trampoline, | |
3469 | (void *)ops->func); | |
030f4e1c | 3470 | add_trampoline_func(m, ops, rec); |
39daa7b9 SRRH |
3471 | ops = ftrace_find_tramp_ops_next(rec, ops); |
3472 | } while (ops); | |
3473 | } else | |
fa6f0cc7 | 3474 | seq_puts(m, "\ttramp: ERROR!"); |
030f4e1c SRRH |
3475 | } else { |
3476 | add_trampoline_func(m, NULL, rec); | |
9674b2fa SRRH |
3477 | } |
3478 | } | |
3479 | ||
fa6f0cc7 | 3480 | seq_putc(m, '\n'); |
5072c59f SR |
3481 | |
3482 | return 0; | |
3483 | } | |
3484 | ||
88e9d34c | 3485 | static const struct seq_operations show_ftrace_seq_ops = { |
5072c59f SR |
3486 | .start = t_start, |
3487 | .next = t_next, | |
3488 | .stop = t_stop, | |
3489 | .show = t_show, | |
3490 | }; | |
3491 | ||
e309b41d | 3492 | static int |
5072c59f SR |
3493 | ftrace_avail_open(struct inode *inode, struct file *file) |
3494 | { | |
3495 | struct ftrace_iterator *iter; | |
5072c59f | 3496 | |
4eebcc81 SR |
3497 | if (unlikely(ftrace_disabled)) |
3498 | return -ENODEV; | |
3499 | ||
50e18b94 | 3500 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
c1bc5919 SRV |
3501 | if (!iter) |
3502 | return -ENOMEM; | |
5072c59f | 3503 | |
c1bc5919 SRV |
3504 | iter->pg = ftrace_pages_start; |
3505 | iter->ops = &global_ops; | |
3506 | ||
3507 | return 0; | |
5072c59f SR |
3508 | } |
3509 | ||
647bcd03 SR |
3510 | static int |
3511 | ftrace_enabled_open(struct inode *inode, struct file *file) | |
3512 | { | |
3513 | struct ftrace_iterator *iter; | |
647bcd03 | 3514 | |
50e18b94 | 3515 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
c1bc5919 SRV |
3516 | if (!iter) |
3517 | return -ENOMEM; | |
647bcd03 | 3518 | |
c1bc5919 SRV |
3519 | iter->pg = ftrace_pages_start; |
3520 | iter->flags = FTRACE_ITER_ENABLED; | |
3521 | iter->ops = &global_ops; | |
3522 | ||
3523 | return 0; | |
647bcd03 SR |
3524 | } |
3525 | ||
fc13cb0c SR |
3526 | /** |
3527 | * ftrace_regex_open - initialize function tracer filter files | |
3528 | * @ops: The ftrace_ops that hold the hash filters | |
3529 | * @flag: The type of filter to process | |
3530 | * @inode: The inode, usually passed in to your open routine | |
3531 | * @file: The file, usually passed in to your open routine | |
3532 | * | |
3533 | * ftrace_regex_open() initializes the filter files for the | |
3534 | * @ops. Depending on @flag it may process the filter hash or | |
3535 | * the notrace hash of @ops. With this called from the open | |
3536 | * routine, you can use ftrace_filter_write() for the write | |
3537 | * routine if @flag has FTRACE_ITER_FILTER set, or | |
3538 | * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. | |
098c879e | 3539 | * tracing_lseek() should be used as the lseek routine, and |
fc13cb0c SR |
3540 | * release must call ftrace_regex_release(). |
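 *
 * A minimal sketch of a consumer (modeled on the filter files this
 * file itself creates; the struct name is hypothetical):
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};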
3541 | */ | |
3542 | int | |
f45948e8 | 3543 | ftrace_regex_open(struct ftrace_ops *ops, int flag, |
1cf41dd7 | 3544 | struct inode *inode, struct file *file) |
5072c59f SR |
3545 | { |
3546 | struct ftrace_iterator *iter; | |
f45948e8 | 3547 | struct ftrace_hash *hash; |
673feb9d SRV |
3548 | struct list_head *mod_head; |
3549 | struct trace_array *tr = ops->private; | |
5072c59f SR |
3550 | int ret = 0; |
3551 | ||
f04f24fb MH |
3552 | ftrace_ops_init(ops); |
3553 | ||
4eebcc81 SR |
3554 | if (unlikely(ftrace_disabled)) |
3555 | return -ENODEV; | |
3556 | ||
5072c59f SR |
3557 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
3558 | if (!iter) | |
3559 | return -ENOMEM; | |
3560 | ||
689fd8b6 | 3561 | if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) { |
3562 | kfree(iter); | |
3563 | return -ENOMEM; | |
3564 | } | |
3565 | ||
3f2367ba MH |
3566 | iter->ops = ops; |
3567 | iter->flags = flag; | |
5985ea8b | 3568 | iter->tr = tr; |
3f2367ba | 3569 | |
33b7f99c | 3570 | mutex_lock(&ops->func_hash->regex_lock); |
3f2367ba | 3571 | |
673feb9d | 3572 | if (flag & FTRACE_ITER_NOTRACE) { |
33b7f99c | 3573 | hash = ops->func_hash->notrace_hash; |
5985ea8b | 3574 | mod_head = tr ? &tr->mod_notrace : NULL; |
673feb9d | 3575 | } else { |
33b7f99c | 3576 | hash = ops->func_hash->filter_hash; |
5985ea8b | 3577 | mod_head = tr ? &tr->mod_trace : NULL; |
673feb9d | 3578 | } |
f45948e8 | 3579 | |
5985ea8b SRV |
3580 | iter->mod_list = mod_head; |
3581 | ||
33dc9b12 | 3582 | if (file->f_mode & FMODE_WRITE) { |
ef2fbe16 NK |
3583 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
3584 | ||
673feb9d | 3585 | if (file->f_flags & O_TRUNC) { |
ef2fbe16 | 3586 | iter->hash = alloc_ftrace_hash(size_bits); |
673feb9d SRV |
3587 | clear_ftrace_mod_list(mod_head); |
3588 | } else { | |
ef2fbe16 | 3589 | iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); |
673feb9d | 3590 | } |
ef2fbe16 | 3591 | |
33dc9b12 SR |
3592 | if (!iter->hash) { |
3593 | trace_parser_put(&iter->parser); | |
3594 | kfree(iter); | |
3f2367ba MH |
3595 | ret = -ENOMEM; |
3596 | goto out_unlock; | |
33dc9b12 | 3597 | } |
c20489da SRV |
3598 | } else |
3599 | iter->hash = hash; | |
1cf41dd7 | 3600 | |
5072c59f SR |
3601 | if (file->f_mode & FMODE_READ) { |
3602 | iter->pg = ftrace_pages_start; | |
5072c59f SR |
3603 | |
3604 | ret = seq_open(file, &show_ftrace_seq_ops); | |
3605 | if (!ret) { | |
3606 | struct seq_file *m = file->private_data; | |
3607 | m->private = iter; | |
79fe249c | 3608 | } else { |
33dc9b12 SR |
3609 | /* Failed */ |
3610 | free_ftrace_hash(iter->hash); | |
79fe249c | 3611 | trace_parser_put(&iter->parser); |
5072c59f | 3612 | kfree(iter); |
79fe249c | 3613 | } |
5072c59f SR |
3614 | } else |
3615 | file->private_data = iter; | |
3f2367ba MH |
3616 | |
3617 | out_unlock: | |
33b7f99c | 3618 | mutex_unlock(&ops->func_hash->regex_lock); |
5072c59f SR |
3619 | |
3620 | return ret; | |
3621 | } | |
3622 | ||
41c52c0d SR |
3623 | static int |
3624 | ftrace_filter_open(struct inode *inode, struct file *file) | |
3625 | { | |
e3b3e2e8 SRRH |
3626 | struct ftrace_ops *ops = inode->i_private; |
3627 | ||
3628 | return ftrace_regex_open(ops, | |
eee8ded1 | 3629 | FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, |
69a3083c | 3630 | inode, file); |
41c52c0d SR |
3631 | } |
3632 | ||
3633 | static int | |
3634 | ftrace_notrace_open(struct inode *inode, struct file *file) | |
3635 | { | |
e3b3e2e8 SRRH |
3636 | struct ftrace_ops *ops = inode->i_private; |
3637 | ||
3638 | return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, | |
1cf41dd7 | 3639 | inode, file); |
41c52c0d SR |
3640 | } |
3641 | ||
3ba00929 DS |
3642 | /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ |
3643 | struct ftrace_glob { | |
3644 | char *search; | |
3645 | unsigned len; | |
3646 | int type; | |
3647 | }; | |
3648 | ||
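/*
 * Illustrative mapping (a sketch) from user patterns to the types that
 * filter_parse_regex() produces for ftrace_match() below:
 *
 *	"foo"     -> MATCH_FULL,        search = "foo"
 *	"foo*"    -> MATCH_FRONT_ONLY,  search = "foo"
 *	"*foo"    -> MATCH_END_ONLY,    search = "foo"
 *	"*foo*"   -> MATCH_MIDDLE_ONLY, search = "foo"
 *	"foo*bar" -> MATCH_GLOB,        search = "foo*bar"
 */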
7132e2d6 TJB |
3649 | /* |
3650 | * If symbols in an architecture don't correspond exactly to the user-visible | |
3651 | * name of what they represent, it is possible to define this function to | |
3652 | * perform the necessary adjustments. | |
3653 | */ | |
3654 | char * __weak arch_ftrace_match_adjust(char *str, const char *search) | |
3655 | { | |
3656 | return str; | |
3657 | } | |
3658 | ||
3ba00929 | 3659 | static int ftrace_match(char *str, struct ftrace_glob *g) |
9f4801e3 | 3660 | { |
9f4801e3 | 3661 | int matched = 0; |
751e9983 | 3662 | int slen; |
9f4801e3 | 3663 | |
7132e2d6 TJB |
3664 | str = arch_ftrace_match_adjust(str, g->search); |
3665 | ||
3ba00929 | 3666 | switch (g->type) { |
9f4801e3 | 3667 | case MATCH_FULL: |
3ba00929 | 3668 | if (strcmp(str, g->search) == 0) |
9f4801e3 SR |
3669 | matched = 1; |
3670 | break; | |
3671 | case MATCH_FRONT_ONLY: | |
3ba00929 | 3672 | if (strncmp(str, g->search, g->len) == 0) |
9f4801e3 SR |
3673 | matched = 1; |
3674 | break; | |
3675 | case MATCH_MIDDLE_ONLY: | |
3ba00929 | 3676 | if (strstr(str, g->search)) |
9f4801e3 SR |
3677 | matched = 1; |
3678 | break; | |
3679 | case MATCH_END_ONLY: | |
751e9983 | 3680 | slen = strlen(str); |
3ba00929 DS |
3681 | if (slen >= g->len && |
3682 | memcmp(str + slen - g->len, g->search, g->len) == 0) | |
9f4801e3 SR |
3683 | matched = 1; |
3684 | break; | |
60f1d5e3 MH |
3685 | case MATCH_GLOB: |
3686 | if (glob_match(g->search, str)) | |
3687 | matched = 1; | |
3688 | break; | |
9f4801e3 SR |
3689 | } |
3690 | ||
3691 | return matched; | |
3692 | } | |
3693 | ||
b448c4e3 | 3694 | static int |
f0a3b154 | 3695 | enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) |
996e87be | 3696 | { |
b448c4e3 | 3697 | struct ftrace_func_entry *entry; |
b448c4e3 SR |
3698 | int ret = 0; |
3699 | ||
1cf41dd7 | 3700 | entry = ftrace_lookup_ip(hash, rec->ip); |
f0a3b154 | 3701 | if (clear_filter) { |
1cf41dd7 SR |
3702 | /* Do nothing if it doesn't exist */ |
3703 | if (!entry) | |
3704 | return 0; | |
b448c4e3 | 3705 | |
33dc9b12 | 3706 | free_hash_entry(hash, entry); |
1cf41dd7 SR |
3707 | } else { |
3708 | /* Do nothing if it exists */ | |
3709 | if (entry) | |
3710 | return 0; | |
b448c4e3 | 3711 | |
1cf41dd7 | 3712 | ret = add_hash_entry(hash, rec->ip); |
b448c4e3 SR |
3713 | } |
3714 | return ret; | |
996e87be SR |
3715 | } |
3716 | ||
64e7c440 | 3717 | static int |
0b507e1e DS |
3718 | ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, |
3719 | struct ftrace_glob *mod_g, int exclude_mod) | |
64e7c440 SR |
3720 | { |
3721 | char str[KSYM_SYMBOL_LEN]; | |
b9df92d2 SR |
3722 | char *modname; |
3723 | ||
3724 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); | |
3725 | ||
0b507e1e DS |
3726 | if (mod_g) { |
3727 | int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; | |
3728 | ||
3729 | /* blank module name to match all modules */ | |
3730 | if (!mod_g->len) { | |
3731 | /* blank module globbing: modname xor exclude_mod */ | |
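			/* e.g. "*:mod:*" keeps only module funcs; "*:mod:!*" only built-in ones */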
77c0edde | 3732 | if (!exclude_mod != !modname) |
0b507e1e DS |
3733 | goto func_match; |
3734 | return 0; | |
3735 | } | |
3736 | ||
77c0edde SRV |
3737 | /* |
3738 | * exclude_mod is set to trace everything but the given | |
3739 | * module. If it is set and the module matches, then | |
3740 | * return 0. If it is not set and the module doesn't match,
3741 | * also return 0. Otherwise, check the function to see if
3742 | * that matches. | |
3743 | */ | |
3744 | if (!mod_matches == !exclude_mod) | |
b9df92d2 | 3745 | return 0; |
0b507e1e | 3746 | func_match: |
b9df92d2 | 3747 | /* blank search means to match all funcs in the mod */ |
3ba00929 | 3748 | if (!func_g->len) |
b9df92d2 SR |
3749 | return 1; |
3750 | } | |
64e7c440 | 3751 | |
3ba00929 | 3752 | return ftrace_match(str, func_g); |
64e7c440 SR |
3753 | } |
3754 | ||
1cf41dd7 | 3755 | static int |
3ba00929 | 3756 | match_records(struct ftrace_hash *hash, char *func, int len, char *mod) |
9f4801e3 | 3757 | { |
9f4801e3 SR |
3758 | struct ftrace_page *pg; |
3759 | struct dyn_ftrace *rec; | |
3ba00929 | 3760 | struct ftrace_glob func_g = { .type = MATCH_FULL }; |
0b507e1e DS |
3761 | struct ftrace_glob mod_g = { .type = MATCH_FULL }; |
3762 | struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL; | |
3763 | int exclude_mod = 0; | |
311d16da | 3764 | int found = 0; |
b448c4e3 | 3765 | int ret; |
2e028c4f | 3766 | int clear_filter = 0; |
9f4801e3 | 3767 | |
0b507e1e | 3768 | if (func) { |
3ba00929 DS |
3769 | func_g.type = filter_parse_regex(func, len, &func_g.search, |
3770 | &clear_filter); | |
3771 | func_g.len = strlen(func_g.search); | |
b9df92d2 | 3772 | } |
9f4801e3 | 3773 | |
0b507e1e DS |
3774 | if (mod) { |
3775 | mod_g.type = filter_parse_regex(mod, strlen(mod), | |
3776 | &mod_g.search, &exclude_mod); | |
3777 | mod_g.len = strlen(mod_g.search); | |
b9df92d2 | 3778 | } |
9f4801e3 | 3779 | |
52baf119 | 3780 | mutex_lock(&ftrace_lock); |
265c831c | 3781 | |
b9df92d2 SR |
3782 | if (unlikely(ftrace_disabled)) |
3783 | goto out_unlock; | |
9f4801e3 | 3784 | |
265c831c | 3785 | do_for_each_ftrace_rec(pg, rec) { |
546fece4 SRRH |
3786 | |
3787 | if (rec->flags & FTRACE_FL_DISABLED) | |
3788 | continue; | |
3789 | ||
0b507e1e | 3790 | if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { |
f0a3b154 | 3791 | ret = enter_record(hash, rec, clear_filter); |
b448c4e3 SR |
3792 | if (ret < 0) { |
3793 | found = ret; | |
3794 | goto out_unlock; | |
3795 | } | |
311d16da | 3796 | found = 1; |
265c831c SR |
3797 | } |
3798 | } while_for_each_ftrace_rec(); | |
b9df92d2 | 3799 | out_unlock: |
52baf119 | 3800 | mutex_unlock(&ftrace_lock); |
311d16da LZ |
3801 | |
3802 | return found; | |
5072c59f SR |
3803 | } |
3804 | ||
64e7c440 | 3805 | static int |
1cf41dd7 | 3806 | ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) |
64e7c440 | 3807 | { |
f0a3b154 | 3808 | return match_records(hash, buff, len, NULL); |
64e7c440 SR |
3809 | } |
3810 | ||
e16b35dd SRV |
3811 | static void ftrace_ops_update_code(struct ftrace_ops *ops, |
3812 | struct ftrace_ops_hash *old_hash) | |
3813 | { | |
3814 | struct ftrace_ops *op; | |
3815 | ||
3816 | if (!ftrace_enabled) | |
3817 | return; | |
3818 | ||
3819 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { | |
3820 | ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); | |
3821 | return; | |
3822 | } | |
3823 | ||
3824 | /* | |
3825 | * If this is the shared global_ops filter, then we need to | |
3826 | * check if another ops that shares it is enabled.
3827 | * If so, we still need to run the modify code. | |
3828 | */ | |
3829 | if (ops->func_hash != &global_ops.local_hash) | |
3830 | return; | |
3831 | ||
3832 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
3833 | if (op->func_hash == &global_ops.local_hash && | |
3834 | op->flags & FTRACE_OPS_FL_ENABLED) { | |
3835 | ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); | |
3836 | /* Only need to do this once */ | |
3837 | return; | |
3838 | } | |
3839 | } while_for_each_ftrace_op(op); | |
3840 | } | |
3841 | ||
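/*
 * Common tail for every filter update below: swap in the new hash,
 * re-patch the affected call sites via ftrace_ops_update_code(), and
 * free the old hash only after an RCU grace period, so that anything
 * still iterating the old hash is not pulled out from under it.
 */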
3842 | static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, | |
3843 | struct ftrace_hash **orig_hash, | |
3844 | struct ftrace_hash *hash, | |
3845 | int enable) | |
3846 | { | |
3847 | struct ftrace_ops_hash old_hash_ops; | |
3848 | struct ftrace_hash *old_hash; | |
3849 | int ret; | |
3850 | ||
3851 | old_hash = *orig_hash; | |
3852 | old_hash_ops.filter_hash = ops->func_hash->filter_hash; | |
3853 | old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; | |
3854 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | |
3855 | if (!ret) { | |
3856 | ftrace_ops_update_code(ops, &old_hash_ops); | |
3857 | free_ftrace_hash_rcu(old_hash); | |
3858 | } | |
3859 | return ret; | |
3860 | } | |
64e7c440 | 3861 | |
673feb9d SRV |
3862 | static bool module_exists(const char *module) |
3863 | { | |
3864 | /* All modules have the symbol __this_module */ | |
3865 | const char this_mod[] = "__this_module"; | |
419e9fe5 | 3866 | char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; |
673feb9d SRV |
3867 | unsigned long val; |
3868 | int n; | |
3869 | ||
419e9fe5 | 3870 | n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); |
673feb9d | 3871 | |
419e9fe5 | 3872 | if (n > sizeof(modname) - 1) |
673feb9d SRV |
3873 | return false; |
3874 | ||
3875 | val = module_kallsyms_lookup_name(modname); | |
3876 | return val != 0; | |
3877 | } | |
3878 | ||
3879 | static int cache_mod(struct trace_array *tr, | |
3880 | const char *func, char *module, int enable) | |
3881 | { | |
3882 | struct ftrace_mod_load *ftrace_mod, *n; | |
3883 | struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; | |
3884 | int ret; | |
3885 | ||
3886 | mutex_lock(&ftrace_lock); | |
3887 | ||
3888 | /* We do not cache inverse filters */ | |
3889 | if (func[0] == '!') { | |
3890 | func++; | |
3891 | ret = -EINVAL; | |
3892 | ||
3893 | /* Look to remove this hash */ | |
3894 | list_for_each_entry_safe(ftrace_mod, n, head, list) { | |
3895 | if (strcmp(ftrace_mod->module, module) != 0) | |
3896 | continue; | |
3897 | ||
3898 | /* a func of "*" (i.e. no specific func) matches all */
44925dff | 3899 | if (strcmp(func, "*") == 0 || |
673feb9d SRV |
3900 | (ftrace_mod->func && |
3901 | strcmp(ftrace_mod->func, func) == 0)) { | |
3902 | ret = 0; | |
3903 | free_ftrace_mod(ftrace_mod); | |
3904 | continue; | |
3905 | } | |
3906 | } | |
3907 | goto out; | |
3908 | } | |
3909 | ||
3910 | ret = -EINVAL; | |
3911 | /* We only care about modules that have not been loaded yet */ | |
3912 | if (module_exists(module)) | |
3913 | goto out; | |
3914 | ||
3915 | /* Save this string off, and execute it when the module is loaded */ | |
3916 | ret = ftrace_add_mod(tr, func, module, enable); | |
3917 | out: | |
3918 | mutex_unlock(&ftrace_lock); | |
3919 | ||
3920 | return ret; | |
3921 | } | |
3922 | ||
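/*
 * Example of what ends up cached here (names hypothetical): writing
 * "starfish_*:mod:starmod" to set_ftrace_filter while "starmod" is not
 * yet loaded matches nothing, so ftrace_mod_callback() falls back to
 * cache_mod() and the string is replayed by process_mod_list() once
 * the module finally loads.
 */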
d7fbf8df SRV |
3923 | static int |
3924 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, | |
3925 | int reset, int enable); | |
3926 | ||
69449bbd | 3927 | #ifdef CONFIG_MODULES |
d7fbf8df SRV |
3928 | static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, |
3929 | char *mod, bool enable) | |
3930 | { | |
3931 | struct ftrace_mod_load *ftrace_mod, *n; | |
3932 | struct ftrace_hash **orig_hash, *new_hash; | |
3933 | LIST_HEAD(process_mods); | |
3934 | char *func; | |
3935 | int ret; | |
3936 | ||
3937 | mutex_lock(&ops->func_hash->regex_lock); | |
3938 | ||
3939 | if (enable) | |
3940 | orig_hash = &ops->func_hash->filter_hash; | |
3941 | else | |
3942 | orig_hash = &ops->func_hash->notrace_hash; | |
3943 | ||
3944 | new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, | |
3945 | *orig_hash); | |
3946 | if (!new_hash) | |
3b58a3c7 | 3947 | goto out; /* warn? */ |
d7fbf8df SRV |
3948 | |
3949 | mutex_lock(&ftrace_lock); | |
3950 | ||
3951 | list_for_each_entry_safe(ftrace_mod, n, head, list) { | |
3952 | ||
3953 | if (strcmp(ftrace_mod->module, mod) != 0) | |
3954 | continue; | |
3955 | ||
3956 | if (ftrace_mod->func) | |
3957 | func = kstrdup(ftrace_mod->func, GFP_KERNEL); | |
3958 | else | |
3959 | func = kstrdup("*", GFP_KERNEL); | |
3960 | ||
3961 | if (!func) /* warn? */ | |
3962 | continue; | |
3963 | ||
3964 | list_del(&ftrace_mod->list); | |
3965 | list_add(&ftrace_mod->list, &process_mods); | |
3966 | ||
3967 | /* Use the newly allocated func, as it may be "*" */ | |
3968 | kfree(ftrace_mod->func); | |
3969 | ftrace_mod->func = func; | |
3970 | } | |
3971 | ||
3972 | mutex_unlock(&ftrace_lock); | |
3973 | ||
3974 | list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { | |
3975 | ||
3976 | func = ftrace_mod->func; | |
3977 | ||
3978 | /* Grabs ftrace_lock, which is why we have this extra step */ | |
3979 | match_records(new_hash, func, strlen(func), mod); | |
3980 | free_ftrace_mod(ftrace_mod); | |
3981 | } | |
3982 | ||
8c08f0d5 SRV |
3983 | if (enable && list_empty(head)) |
3984 | new_hash->flags &= ~FTRACE_HASH_FL_MOD; | |
3985 | ||
d7fbf8df SRV |
3986 | mutex_lock(&ftrace_lock); |
3987 | ||
3988 | ret = ftrace_hash_move_and_update_ops(ops, orig_hash, | |
3989 | new_hash, enable); | |
3990 | mutex_unlock(&ftrace_lock); | |
3991 | ||
3b58a3c7 | 3992 | out: |
d7fbf8df SRV |
3993 | mutex_unlock(&ops->func_hash->regex_lock); |
3994 | ||
3995 | free_ftrace_hash(new_hash); | |
3996 | } | |
3997 | ||
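/*
 * Replay any cached ":mod:" filters for a module that has just been
 * loaded; called from the module-load path with the module's name.
 */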
3998 | static void process_cached_mods(const char *mod_name) | |
3999 | { | |
4000 | struct trace_array *tr; | |
4001 | char *mod; | |
4002 | ||
4003 | mod = kstrdup(mod_name, GFP_KERNEL); | |
4004 | if (!mod) | |
4005 | return; | |
4006 | ||
4007 | mutex_lock(&trace_types_lock); | |
4008 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | |
4009 | if (!list_empty(&tr->mod_trace)) | |
4010 | process_mod_list(&tr->mod_trace, tr->ops, mod, true); | |
4011 | if (!list_empty(&tr->mod_notrace)) | |
4012 | process_mod_list(&tr->mod_notrace, tr->ops, mod, false); | |
4013 | } | |
4014 | mutex_unlock(&trace_types_lock); | |
4015 | ||
4016 | kfree(mod); | |
4017 | } | |
69449bbd | 4018 | #endif |
d7fbf8df | 4019 | |
f6180773 SR |
4020 | /* |
4021 | * We register the module command as a template to show others how | |
4022 | * to register a command as well.
4023 | */ | |
4024 | ||
4025 | static int | |
04ec7bb6 | 4026 | ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, |
673feb9d | 4027 | char *func_orig, char *cmd, char *module, int enable) |
f6180773 | 4028 | { |
673feb9d | 4029 | char *func; |
5e3949f0 | 4030 | int ret; |
f6180773 | 4031 | |
673feb9d SRV |
4032 | /* match_records() modifies func, and we need the original */ |
4033 | func = kstrdup(func_orig, GFP_KERNEL); | |
4034 | if (!func) | |
4035 | return -ENOMEM; | |
4036 | ||
f6180773 SR |
4037 | /* |
4038 | * cmd == 'mod' because we only registered this func | |
4039 | * for the 'mod' ftrace_func_command. | |
4040 | * But if you register one func with multiple commands, | |
4041 | * you can tell which command was used by the cmd | |
4042 | * parameter. | |
4043 | */ | |
f0a3b154 | 4044 | ret = match_records(hash, func, strlen(func), module); |
673feb9d SRV |
4045 | kfree(func); |
4046 | ||
b448c4e3 | 4047 | if (!ret) |
673feb9d | 4048 | return cache_mod(tr, func_orig, module, enable); |
b448c4e3 SR |
4049 | if (ret < 0) |
4050 | return ret; | |
b448c4e3 | 4051 | return 0; |
f6180773 SR |
4052 | } |
4053 | ||
4054 | static struct ftrace_func_command ftrace_mod_cmd = { | |
4055 | .name = "mod", | |
4056 | .func = ftrace_mod_callback, | |
4057 | }; | |
4058 | ||
4059 | static int __init ftrace_mod_cmd_init(void) | |
4060 | { | |
4061 | return register_ftrace_command(&ftrace_mod_cmd); | |
4062 | } | |
6f415672 | 4063 | core_initcall(ftrace_mod_cmd_init); |
f6180773 | 4064 | |
2f5f6ad9 | 4065 | static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, |
a1e2e31d | 4066 | struct ftrace_ops *op, struct pt_regs *pt_regs) |
59df055f | 4067 | { |
eee8ded1 | 4068 | struct ftrace_probe_ops *probe_ops; |
7b60f3d8 | 4069 | struct ftrace_func_probe *probe; |
59df055f | 4070 | |
7b60f3d8 SRV |
4071 | probe = container_of(op, struct ftrace_func_probe, ops); |
4072 | probe_ops = probe->probe_ops; | |
59df055f SR |
4073 | |
4074 | /* | |
4075 | * Disable preemption for these calls to prevent an RCU grace
4076 | * period. This synchronizes the hash iteration with the freeing of items
4077 | * on the hash. rcu_read_lock is too dangerous here. | |
4078 | */ | |
5168ae50 | 4079 | preempt_disable_notrace(); |
6e444319 | 4080 | probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); |
5168ae50 | 4081 | preempt_enable_notrace(); |
59df055f SR |
4082 | } |
4083 | ||
41794f19 SRV |
4084 | struct ftrace_func_map { |
4085 | struct ftrace_func_entry entry; | |
4086 | void *data; | |
59df055f SR |
4087 | }; |
4088 | ||
41794f19 SRV |
4089 | struct ftrace_func_mapper { |
4090 | struct ftrace_hash hash; | |
4091 | }; | |
59df055f | 4092 | |
41794f19 SRV |
4093 | /** |
4094 | * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper | |
4095 | * | |
4096 | * Returns a ftrace_func_mapper descriptor that can be used to map ips to data. | |
4097 | */ | |
4098 | struct ftrace_func_mapper *allocate_ftrace_func_mapper(void) | |
59df055f | 4099 | { |
41794f19 | 4100 | struct ftrace_hash *hash; |
59df055f | 4101 | |
41794f19 SRV |
4102 | /* |
4103 | * The mapper is simply a ftrace_hash, but since the entries | |
4104 | * in the hash are not ftrace_func_entry type, we define it | |
4105 | * as a separate structure. | |
4106 | */ | |
4107 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); | |
4108 | return (struct ftrace_func_mapper *)hash; | |
4109 | } | |
59df055f | 4110 | |
41794f19 SRV |
4111 | /** |
4112 | * ftrace_func_mapper_find_ip - Find some data mapped to an ip | |
4113 | * @mapper: The mapper that has the ip maps | |
4114 | * @ip: the instruction pointer to find the data for | |
4115 | * | |
4116 | * Returns the data mapped to @ip if found, otherwise NULL. The return
4117 | * is actually the address of the mapper data pointer. The address is | |
4118 | * returned for use cases where the data is no bigger than a long, and | |
4119 | * the user can use the data pointer as its data instead of having to | |
4120 | * allocate more memory for the reference. | |
4121 | */ | |
4122 | void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, | |
4123 | unsigned long ip) | |
4124 | { | |
4125 | struct ftrace_func_entry *entry; | |
4126 | struct ftrace_func_map *map; | |
59df055f | 4127 | |
41794f19 SRV |
4128 | entry = ftrace_lookup_ip(&mapper->hash, ip); |
4129 | if (!entry) | |
4130 | return NULL; | |
b848914c | 4131 | |
41794f19 SRV |
4132 | map = (struct ftrace_func_map *)entry; |
4133 | return &map->data; | |
59df055f SR |
4134 | } |
4135 | ||
41794f19 SRV |
4136 | /** |
4137 | * ftrace_func_mapper_add_ip - Map some data to an ip | |
4138 | * @mapper: The mapper that has the ip maps | |
4139 | * @ip: The instruction pointer address to map @data to | |
4140 | * @data: The data to map to @ip | |
4141 | * | |
4142 | * Returns 0 on success, otherwise an error.
4143 | */ | |
4144 | int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, | |
4145 | unsigned long ip, void *data) | |
59df055f | 4146 | { |
41794f19 SRV |
4147 | struct ftrace_func_entry *entry; |
4148 | struct ftrace_func_map *map; | |
59df055f | 4149 | |
41794f19 SRV |
4150 | entry = ftrace_lookup_ip(&mapper->hash, ip); |
4151 | if (entry) | |
4152 | return -EBUSY; | |
59df055f | 4153 | |
41794f19 SRV |
4154 | map = kmalloc(sizeof(*map), GFP_KERNEL); |
4155 | if (!map) | |
4156 | return -ENOMEM; | |
59df055f | 4157 | |
41794f19 SRV |
4158 | map->entry.ip = ip; |
4159 | map->data = data; | |
b848914c | 4160 | |
41794f19 | 4161 | __add_hash_entry(&mapper->hash, &map->entry); |
59df055f | 4162 | |
41794f19 SRV |
4163 | return 0; |
4164 | } | |
59df055f | 4165 | |
41794f19 SRV |
4166 | /** |
4167 | * ftrace_func_mapper_remove_ip - Remove an ip from the mapping | |
4168 | * @mapper: The mapper that has the ip maps | |
4169 | * @ip: The instruction pointer address to remove the data from | |
4170 | * | |
4171 | * Returns the data if it is found, otherwise NULL. | |
4172 | * Note, if the data pointer is used as the data itself (see
4173 | * ftrace_func_mapper_find_ip()), then the return value may be meaningless
4174 | * if the data pointer was set to zero.
4175 | */ | |
4176 | void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, | |
4177 | unsigned long ip) | |
59df055f | 4178 | { |
41794f19 SRV |
4179 | struct ftrace_func_entry *entry; |
4180 | struct ftrace_func_map *map; | |
4181 | void *data; | |
4182 | ||
4183 | entry = ftrace_lookup_ip(&mapper->hash, ip); | |
4184 | if (!entry) | |
4185 | return NULL; | |
4186 | ||
4187 | map = (struct ftrace_func_map *)entry; | |
4188 | data = map->data; | |
4189 | ||
4190 | remove_hash_entry(&mapper->hash, entry); | |
59df055f | 4191 | kfree(entry); |
41794f19 SRV |
4192 | |
4193 | return data; | |
4194 | } | |
4195 | ||
4196 | /** | |
4197 | * free_ftrace_func_mapper - free a mapping of ips and data | |
4198 | * @mapper: The mapper that has the ip maps | |
4199 | * @free_func: A function to be called on each data item. | |
4200 | * | |
4201 | * This is used to free the function mapper. The @free_func is optional | |
4202 | * and can be used if the data needs to be freed as well. | |
4203 | */ | |
4204 | void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, | |
4205 | ftrace_mapper_func free_func) | |
4206 | { | |
4207 | struct ftrace_func_entry *entry; | |
4208 | struct ftrace_func_map *map; | |
4209 | struct hlist_head *hhd; | |
4210 | int size = 1 << mapper->hash.size_bits; | |
4211 | int i; | |
4212 | ||
4213 | if (free_func && mapper->hash.count) { | |
4214 | for (i = 0; i < size; i++) { | |
4215 | hhd = &mapper->hash.buckets[i]; | |
4216 | hlist_for_each_entry(entry, hhd, hlist) { | |
4217 | map = (struct ftrace_func_map *)entry; | |
4218 | free_func(map); | |
4219 | } | |
4220 | } | |
4221 | } | |
4222 | free_ftrace_hash(&mapper->hash); | |
4223 | } | |
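/*
 * Usage sketch for the mapper API above (hypothetical probe code that
 * keeps a per-ip counter in the data slot itself, so nothing extra
 * needs to be allocated):
 *
 *	void **data;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	ftrace_func_mapper_add_ip(mapper, ip, (void *)0);
 *	...
 *	data = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (data)
 *		(*(unsigned long *)data)++;
 */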
4224 | ||
7b60f3d8 SRV |
4225 | static void release_probe(struct ftrace_func_probe *probe) |
4226 | { | |
4227 | struct ftrace_probe_ops *probe_ops; | |
4228 | ||
4229 | mutex_lock(&ftrace_lock); | |
4230 | ||
4231 | WARN_ON(probe->ref <= 0); | |
4232 | ||
4233 | /* Subtract the ref that was used to protect this instance */ | |
4234 | probe->ref--; | |
4235 | ||
4236 | if (!probe->ref) { | |
4237 | probe_ops = probe->probe_ops; | |
6e444319 SRV |
4238 | /* |
4239 | * Sending zero as ip tells probe_ops to free | |
4240 | * the probe->data itself | |
4241 | */ | |
4242 | if (probe_ops->free) | |
4243 | probe_ops->free(probe_ops, probe->tr, 0, probe->data); | |
7b60f3d8 SRV |
4244 | list_del(&probe->list); |
4245 | kfree(probe); | |
4246 | } | |
4247 | mutex_unlock(&ftrace_lock); | |
4248 | } | |
4249 | ||
4250 | static void acquire_probe_locked(struct ftrace_func_probe *probe) | |
4251 | { | |
4252 | /* | |
4253 | * Add one ref to keep it from being freed when releasing the | |
4254 | * ftrace_lock mutex. | |
4255 | */ | |
4256 | probe->ref++; | |
59df055f SR |
4257 | } |
4258 | ||
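/*
 * Registration sketch (the command and ops names are hypothetical):
 * a write of "schedule:traceoff" to set_ftrace_filter ultimately does
 *
 *	register_ftrace_function_probe("schedule", tr, &traceoff_ops, NULL);
 *
 * attaching probe_ops->func to every function matching the glob, with
 * one refcounted ftrace_func_probe shared per probe_ops per instance.
 */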
59df055f | 4259 | int |
04ec7bb6 | 4260 | register_ftrace_function_probe(char *glob, struct trace_array *tr, |
7b60f3d8 SRV |
4261 | struct ftrace_probe_ops *probe_ops, |
4262 | void *data) | |
59df055f | 4263 | { |
1ec3a81a | 4264 | struct ftrace_func_entry *entry; |
7b60f3d8 | 4265 | struct ftrace_func_probe *probe; |
1ec3a81a SRV |
4266 | struct ftrace_hash **orig_hash; |
4267 | struct ftrace_hash *old_hash; | |
e1df4cb6 | 4268 | struct ftrace_hash *hash; |
59df055f | 4269 | int count = 0; |
1ec3a81a | 4270 | int size; |
e1df4cb6 | 4271 | int ret; |
1ec3a81a | 4272 | int i; |
59df055f | 4273 | |
04ec7bb6 | 4274 | if (WARN_ON(!tr)) |
59df055f SR |
4275 | return -EINVAL; |
4276 | ||
1ec3a81a SRV |
4277 | /* We do not support '!' for function probes */ |
4278 | if (WARN_ON(glob[0] == '!')) | |
59df055f | 4279 | return -EINVAL; |
59df055f | 4280 | |
7485058e | 4281 | |
7b60f3d8 SRV |
4282 | mutex_lock(&ftrace_lock); |
4283 | /* Check if the probe_ops is already registered */ | |
4284 | list_for_each_entry(probe, &tr->func_probes, list) { | |
4285 | if (probe->probe_ops == probe_ops) | |
4286 | break; | |
e1df4cb6 | 4287 | } |
7b60f3d8 SRV |
4288 | if (&probe->list == &tr->func_probes) { |
4289 | probe = kzalloc(sizeof(*probe), GFP_KERNEL); | |
4290 | if (!probe) { | |
4291 | mutex_unlock(&ftrace_lock); | |
4292 | return -ENOMEM; | |
4293 | } | |
4294 | probe->probe_ops = probe_ops; | |
4295 | probe->ops.func = function_trace_probe_call; | |
4296 | probe->tr = tr; | |
4297 | ftrace_ops_init(&probe->ops); | |
4298 | list_add(&probe->list, &tr->func_probes); | |
e1df4cb6 | 4299 | } |
59df055f | 4300 | |
7b60f3d8 | 4301 | acquire_probe_locked(probe); |
5ae0bf59 | 4302 | |
7b60f3d8 | 4303 | mutex_unlock(&ftrace_lock); |
59df055f | 4304 | |
7b60f3d8 | 4305 | mutex_lock(&probe->ops.func_hash->regex_lock); |
546fece4 | 4306 | |
7b60f3d8 | 4307 | orig_hash = &probe->ops.func_hash->filter_hash; |
1ec3a81a SRV |
4308 | old_hash = *orig_hash; |
4309 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); | |
59df055f | 4310 | |
1ec3a81a | 4311 | ret = ftrace_match_records(hash, glob, strlen(glob)); |
59df055f | 4312 | |
1ec3a81a SRV |
4313 | /* Nothing found? */ |
4314 | if (!ret) | |
4315 | ret = -EINVAL; | |
59df055f | 4316 | |
1ec3a81a SRV |
4317 | if (ret < 0) |
4318 | goto out; | |
59df055f | 4319 | |
1ec3a81a SRV |
4320 | size = 1 << hash->size_bits; |
4321 | for (i = 0; i < size; i++) { | |
4322 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | |
4323 | if (ftrace_lookup_ip(old_hash, entry->ip)) | |
59df055f | 4324 | continue; |
1ec3a81a SRV |
4325 | /* |
4326 | * The caller might want to do something special | |
4327 | * for each function we find. We call the callback | |
4328 | * to give the caller an opportunity to do so. | |
4329 | */ | |
7b60f3d8 SRV |
4330 | if (probe_ops->init) { |
4331 | ret = probe_ops->init(probe_ops, tr, | |
6e444319 SRV |
4332 | entry->ip, data, |
4333 | &probe->data); | |
4334 | if (ret < 0) { | |
4335 | if (probe_ops->free && count) | |
4336 | probe_ops->free(probe_ops, tr, | |
4337 | 0, probe->data); | |
4338 | probe->data = NULL; | |
eee8ded1 | 4339 | goto out; |
6e444319 | 4340 | } |
59df055f | 4341 | } |
1ec3a81a | 4342 | count++; |
59df055f | 4343 | } |
1ec3a81a | 4344 | } |
59df055f | 4345 | |
1ec3a81a | 4346 | mutex_lock(&ftrace_lock); |
59df055f | 4347 | |
7b60f3d8 SRV |
4348 | if (!count) { |
4349 | /* Nothing was added? */ | |
4350 | ret = -EINVAL; | |
4351 | goto out_unlock; | |
4352 | } | |
e1df4cb6 | 4353 | |
7b60f3d8 SRV |
4354 | ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, |
4355 | hash, 1); | |
1ec3a81a | 4356 | if (ret < 0) |
8d70725e | 4357 | goto err_unlock; |
8252ecf3 | 4358 | |
7b60f3d8 SRV |
4359 | /* One ref for each new function traced */ |
4360 | probe->ref += count; | |
8252ecf3 | 4361 | |
7b60f3d8 SRV |
4362 | if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) |
4363 | ret = ftrace_startup(&probe->ops, 0); | |
e1df4cb6 | 4364 | |
59df055f | 4365 | out_unlock: |
5ae0bf59 | 4366 | mutex_unlock(&ftrace_lock); |
8252ecf3 | 4367 | |
3296fc4e | 4368 | if (!ret) |
1ec3a81a | 4369 | ret = count; |
5ae0bf59 | 4370 | out: |
7b60f3d8 | 4371 | mutex_unlock(&probe->ops.func_hash->regex_lock); |
e1df4cb6 | 4372 | free_ftrace_hash(hash); |
59df055f | 4373 | |
7b60f3d8 | 4374 | release_probe(probe); |
59df055f | 4375 | |
1ec3a81a | 4376 | return ret; |
59df055f | 4377 | |
8d70725e | 4378 | err_unlock: |
7b60f3d8 | 4379 | if (!probe_ops->free || !count) |
8d70725e SRV |
4380 | goto out_unlock; |
4381 | ||
4382 | /* Failed to do the move, need to call the free functions */ | |
4383 | for (i = 0; i < size; i++) { | |
4384 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { | |
4385 | if (ftrace_lookup_ip(old_hash, entry->ip)) | |
4386 | continue; | |
6e444319 | 4387 | probe_ops->free(probe_ops, tr, entry->ip, probe->data); |
8d70725e SRV |
4388 | } |
4389 | } | |
4390 | goto out_unlock; | |
59df055f SR |
4391 | } |
4392 | ||
d3d532d7 | 4393 | int |
7b60f3d8 SRV |
4394 | unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, |
4395 | struct ftrace_probe_ops *probe_ops) | |
59df055f | 4396 | { |
82cc4fc2 | 4397 | struct ftrace_ops_hash old_hash_ops; |
eee8ded1 | 4398 | struct ftrace_func_entry *entry; |
7b60f3d8 | 4399 | struct ftrace_func_probe *probe; |
3ba00929 | 4400 | struct ftrace_glob func_g; |
1ec3a81a SRV |
4401 | struct ftrace_hash **orig_hash; |
4402 | struct ftrace_hash *old_hash; | |
1ec3a81a | 4403 | struct ftrace_hash *hash = NULL; |
b67bfe0d | 4404 | struct hlist_node *tmp; |
eee8ded1 | 4405 | struct hlist_head hhd; |
59df055f | 4406 | char str[KSYM_SYMBOL_LEN]; |
7b60f3d8 SRV |
4407 | int count = 0; |
4408 | int i, ret = -ENODEV; | |
eee8ded1 | 4409 | int size; |
59df055f | 4410 | |
cbab567c | 4411 | if (!glob || !strlen(glob) || !strcmp(glob, "*")) |
3ba00929 | 4412 | func_g.search = NULL; |
cbab567c | 4413 | else { |
59df055f SR |
4414 | int not; |
4415 | ||
3ba00929 DS |
4416 | func_g.type = filter_parse_regex(glob, strlen(glob), |
4417 | &func_g.search, ¬); | |
4418 | func_g.len = strlen(func_g.search); | |
59df055f | 4419 | |
b6887d79 | 4420 | /* we do not support '!' for function probes */ |
59df055f | 4421 | if (WARN_ON(not)) |
d3d532d7 | 4422 | return -EINVAL; |
59df055f SR |
4423 | } |
4424 | ||
7b60f3d8 SRV |
4425 | mutex_lock(&ftrace_lock); |
4426 | /* Check if the probe_ops is already registered */ | |
4427 | list_for_each_entry(probe, &tr->func_probes, list) { | |
4428 | if (probe->probe_ops == probe_ops) | |
4429 | break; | |
59df055f | 4430 | } |
7b60f3d8 SRV |
4431 | if (&probe->list == &tr->func_probes) |
4432 | goto err_unlock_ftrace; | |
4433 | ||
4434 | ret = -EINVAL; | |
4435 | if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) | |
4436 | goto err_unlock_ftrace; | |
4437 | ||
4438 | acquire_probe_locked(probe); | |
4439 | ||
4440 | mutex_unlock(&ftrace_lock); | |
59df055f | 4441 | |
7b60f3d8 | 4442 | mutex_lock(&probe->ops.func_hash->regex_lock); |
1ec3a81a | 4443 | |
7b60f3d8 | 4444 | orig_hash = &probe->ops.func_hash->filter_hash; |
1ec3a81a SRV |
4445 | old_hash = *orig_hash; |
4446 | ||
1ec3a81a SRV |
4447 | if (ftrace_hash_empty(old_hash)) |
4448 | goto out_unlock; | |
e1df4cb6 | 4449 | |
82cc4fc2 SRV |
4450 | old_hash_ops.filter_hash = old_hash; |
4451 | /* Probes only have filters */ | |
4452 | old_hash_ops.notrace_hash = NULL; | |
4453 | ||
d3d532d7 | 4454 | ret = -ENOMEM; |
1ec3a81a | 4455 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); |
e1df4cb6 | 4456 | if (!hash) |
e1df4cb6 SRRH |
4457 | goto out_unlock; |
4458 | ||
eee8ded1 | 4459 | INIT_HLIST_HEAD(&hhd); |
59df055f | 4460 | |
eee8ded1 SRV |
4461 | size = 1 << hash->size_bits; |
4462 | for (i = 0; i < size; i++) { | |
4463 | hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { | |
59df055f | 4464 | |
3ba00929 | 4465 | if (func_g.search) { |
59df055f SR |
4466 | kallsyms_lookup(entry->ip, NULL, NULL, |
4467 | NULL, str); | |
3ba00929 | 4468 | if (!ftrace_match(str, &func_g)) |
59df055f SR |
4469 | continue; |
4470 | } | |
7b60f3d8 | 4471 | count++; |
eee8ded1 SRV |
4472 | remove_hash_entry(hash, entry); |
4473 | hlist_add_head(&entry->hlist, &hhd); | |
59df055f SR |
4474 | } |
4475 | } | |
d3d532d7 SRV |
4476 | |
4477 | /* Nothing found? */ | |
7b60f3d8 | 4478 | if (!count) { |
d3d532d7 SRV |
4479 | ret = -EINVAL; |
4480 | goto out_unlock; | |
4481 | } | |
4482 | ||
3f2367ba | 4483 | mutex_lock(&ftrace_lock); |
1ec3a81a | 4484 | |
7b60f3d8 | 4485 | WARN_ON(probe->ref < count); |
eee8ded1 | 4486 | |
7b60f3d8 | 4487 | probe->ref -= count; |
1ec3a81a | 4488 | |
7b60f3d8 SRV |
4489 | if (ftrace_hash_empty(hash)) |
4490 | ftrace_shutdown(&probe->ops, 0); | |
4491 | ||
4492 | ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, | |
1ec3a81a | 4493 | hash, 1); |
82cc4fc2 SRV |
4494 | |
4495 | /* still need to update the function call sites */ | |
1ec3a81a | 4496 | if (ftrace_enabled && !ftrace_hash_empty(hash)) |
7b60f3d8 | 4497 | ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, |
82cc4fc2 | 4498 | &old_hash_ops); |
7818b388 | 4499 | synchronize_sched(); |
3296fc4e | 4500 | |
eee8ded1 SRV |
4501 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { |
4502 | hlist_del(&entry->hlist); | |
7b60f3d8 | 4503 | if (probe_ops->free) |
6e444319 | 4504 | probe_ops->free(probe_ops, tr, entry->ip, probe->data); |
eee8ded1 | 4505 | kfree(entry); |
7818b388 | 4506 | } |
3f2367ba | 4507 | mutex_unlock(&ftrace_lock); |
3ba00929 | 4508 | |
e1df4cb6 | 4509 | out_unlock: |
7b60f3d8 | 4510 | mutex_unlock(&probe->ops.func_hash->regex_lock); |
e1df4cb6 | 4511 | free_ftrace_hash(hash); |
59df055f | 4512 | |
7b60f3d8 | 4513 | release_probe(probe); |
59df055f | 4514 | |
7b60f3d8 | 4515 | return ret; |
59df055f | 4516 | |
7b60f3d8 SRV |
4517 | err_unlock_ftrace: |
4518 | mutex_unlock(&ftrace_lock); | |
d3d532d7 | 4519 | return ret; |
59df055f SR |
4520 | } |
4521 | ||
a0e6369e NR |
4522 | void clear_ftrace_function_probes(struct trace_array *tr) |
4523 | { | |
4524 | struct ftrace_func_probe *probe, *n; | |
4525 | ||
4526 | list_for_each_entry_safe(probe, n, &tr->func_probes, list) | |
4527 | unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); | |
4528 | } | |
4529 | ||
f6180773 SR |
4530 | static LIST_HEAD(ftrace_commands); |
4531 | static DEFINE_MUTEX(ftrace_cmd_mutex); | |
4532 | ||
38de93ab TZ |
4533 | /* |
4534 | * Currently we only register ftrace commands from __init, so mark this | |
4535 | * __init too. | |
4536 | */ | |
4537 | __init int register_ftrace_command(struct ftrace_func_command *cmd) | |
f6180773 SR |
4538 | { |
4539 | struct ftrace_func_command *p; | |
4540 | int ret = 0; | |
4541 | ||
4542 | mutex_lock(&ftrace_cmd_mutex); | |
4543 | list_for_each_entry(p, &ftrace_commands, list) { | |
4544 | if (strcmp(cmd->name, p->name) == 0) { | |
4545 | ret = -EBUSY; | |
4546 | goto out_unlock; | |
4547 | } | |
4548 | } | |
4549 | list_add(&cmd->list, &ftrace_commands); | |
4550 | out_unlock: | |
4551 | mutex_unlock(&ftrace_cmd_mutex); | |
4552 | ||
4553 | return ret; | |
4554 | } | |
4555 | ||
38de93ab TZ |
4556 | /* |
4557 | * Currently we only unregister ftrace commands from __init, so mark | |
4558 | * this __init too. | |
4559 | */ | |
4560 | __init int unregister_ftrace_command(struct ftrace_func_command *cmd) | |
f6180773 SR |
4561 | { |
4562 | struct ftrace_func_command *p, *n; | |
4563 | int ret = -ENODEV; | |
4564 | ||
4565 | mutex_lock(&ftrace_cmd_mutex); | |
4566 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { | |
4567 | if (strcmp(cmd->name, p->name) == 0) { | |
4568 | ret = 0; | |
4569 | list_del_init(&p->list); | |
4570 | goto out_unlock; | |
4571 | } | |
4572 | } | |
4573 | out_unlock: | |
4574 | mutex_unlock(&ftrace_cmd_mutex); | |
4575 | ||
4576 | return ret; | |
4577 | } | |
4578 | ||
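/*
 * Split one "func[:command[:arg]]" token from set_ftrace_filter (or
 * set_ftrace_notrace).  For example (names hypothetical), a write of
 * "ixgbe_*:mod:ixgbe" yields func = "ixgbe_*", command = "mod" and
 * next = "ixgbe", which is then dispatched to the matching registered
 * ftrace_func_command.
 */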
04ec7bb6 | 4579 | static int ftrace_process_regex(struct ftrace_iterator *iter, |
33dc9b12 | 4580 | char *buff, int len, int enable) |
64e7c440 | 4581 | { |
04ec7bb6 | 4582 | struct ftrace_hash *hash = iter->hash; |
d2afd57a | 4583 | struct trace_array *tr = iter->ops->private; |
f6180773 | 4584 | char *func, *command, *next = buff; |
6a24a244 | 4585 | struct ftrace_func_command *p; |
0aff1c0c | 4586 | int ret = -EINVAL; |
64e7c440 SR |
4587 | |
4588 | func = strsep(&next, ":"); | |
4589 | ||
4590 | if (!next) { | |
1cf41dd7 | 4591 | ret = ftrace_match_records(hash, func, len); |
b448c4e3 SR |
4592 | if (!ret) |
4593 | ret = -EINVAL; | |
4594 | if (ret < 0) | |
4595 | return ret; | |
4596 | return 0; | |
64e7c440 SR |
4597 | } |
4598 | ||
f6180773 | 4599 | /* command found */ |
64e7c440 SR |
4600 | |
4601 | command = strsep(&next, ":"); | |
4602 | ||
f6180773 SR |
4603 | mutex_lock(&ftrace_cmd_mutex); |
4604 | list_for_each_entry(p, &ftrace_commands, list) { | |
4605 | if (strcmp(p->name, command) == 0) { | |
04ec7bb6 | 4606 | ret = p->func(tr, hash, func, command, next, enable); |
f6180773 SR |
4607 | goto out_unlock; |
4608 | } | |
64e7c440 | 4609 | } |
f6180773 SR |
4610 | out_unlock: |
4611 | mutex_unlock(&ftrace_cmd_mutex); | |
64e7c440 | 4612 | |
f6180773 | 4613 | return ret; |
64e7c440 SR |
4614 | } |
4615 | ||
e309b41d | 4616 | static ssize_t |
41c52c0d SR |
4617 | ftrace_regex_write(struct file *file, const char __user *ubuf, |
4618 | size_t cnt, loff_t *ppos, int enable) | |
5072c59f SR |
4619 | { |
4620 | struct ftrace_iterator *iter; | |
689fd8b6 | 4621 | struct trace_parser *parser; |
4622 | ssize_t ret, read; | |
5072c59f | 4623 | |
4ba7978e | 4624 | if (!cnt) |
5072c59f SR |
4625 | return 0; |
4626 | ||
5072c59f SR |
4627 | if (file->f_mode & FMODE_READ) { |
4628 | struct seq_file *m = file->private_data; | |
4629 | iter = m->private; | |
4630 | } else | |
4631 | iter = file->private_data; | |
4632 | ||
f04f24fb | 4633 | if (unlikely(ftrace_disabled)) |
3f2367ba MH |
4634 | return -ENODEV; |
4635 | ||
4636 | /* iter->hash is a local copy, so we don't need regex_lock */ | |
f04f24fb | 4637 | |
689fd8b6 | 4638 | parser = &iter->parser; |
4639 | read = trace_get_user(parser, ubuf, cnt, ppos); | |
5072c59f | 4640 | |
4ba7978e | 4641 | if (read >= 0 && trace_parser_loaded(parser) && |
689fd8b6 | 4642 | !trace_parser_cont(parser)) { |
04ec7bb6 | 4643 | ret = ftrace_process_regex(iter, parser->buffer, |
689fd8b6 | 4644 | parser->idx, enable); |
313254a9 | 4645 | trace_parser_clear(parser); |
7c088b51 | 4646 | if (ret < 0) |
3f2367ba | 4647 | goto out; |
eda1e328 | 4648 | } |
5072c59f | 4649 | |
5072c59f | 4650 | ret = read; |
3f2367ba | 4651 | out: |
5072c59f SR |
4652 | return ret; |
4653 | } | |
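Note that the parser carries state across writes: if a write ends mid-token (no trailing space or newline), trace_parser_cont() stays true and nothing is processed until a later write completes the token, so "sched" followed by "ule\n" behaves like a single write of "schedule\n".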
4654 | ||
fc13cb0c | 4655 | ssize_t |
41c52c0d SR |
4656 | ftrace_filter_write(struct file *file, const char __user *ubuf, |
4657 | size_t cnt, loff_t *ppos) | |
4658 | { | |
4659 | return ftrace_regex_write(file, ubuf, cnt, ppos, 1); | |
4660 | } | |
4661 | ||
fc13cb0c | 4662 | ssize_t |
41c52c0d SR |
4663 | ftrace_notrace_write(struct file *file, const char __user *ubuf, |
4664 | size_t cnt, loff_t *ppos) | |
4665 | { | |
4666 | return ftrace_regex_write(file, ubuf, cnt, ppos, 0); | |
4667 | } | |
4668 | ||
33dc9b12 | 4669 | static int |
647664ea MH |
4670 | ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) |
4671 | { | |
4672 | struct ftrace_func_entry *entry; | |
4673 | ||
4674 | if (!ftrace_location(ip)) | |
4675 | return -EINVAL; | |
4676 | ||
4677 | if (remove) { | |
4678 | entry = ftrace_lookup_ip(hash, ip); | |
4679 | if (!entry) | |
4680 | return -ENOENT; | |
4681 | free_hash_entry(hash, entry); | |
4682 | return 0; | |
4683 | } | |
4684 | ||
4685 | return add_hash_entry(hash, ip); | |
4686 | } | |
4687 | ||
4688 | static int | |
4689 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |
4690 | unsigned long ip, int remove, int reset, int enable) | |
41c52c0d | 4691 | { |
33dc9b12 | 4692 | struct ftrace_hash **orig_hash; |
f45948e8 | 4693 | struct ftrace_hash *hash; |
33dc9b12 | 4694 | int ret; |
f45948e8 | 4695 | |
41c52c0d | 4696 | if (unlikely(ftrace_disabled)) |
33dc9b12 | 4697 | return -ENODEV; |
41c52c0d | 4698 | |
33b7f99c | 4699 | mutex_lock(&ops->func_hash->regex_lock); |
3f2367ba | 4700 | |
f45948e8 | 4701 | if (enable) |
33b7f99c | 4702 | orig_hash = &ops->func_hash->filter_hash; |
f45948e8 | 4703 | else |
33b7f99c | 4704 | orig_hash = &ops->func_hash->notrace_hash; |
33dc9b12 | 4705 | |
b972cc58 WN |
4706 | if (reset) |
4707 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); | |
4708 | else | |
4709 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | |
4710 | ||
3f2367ba MH |
4711 | if (!hash) { |
4712 | ret = -ENOMEM; | |
4713 | goto out_regex_unlock; | |
4714 | } | |
f45948e8 | 4715 | |
ac483c44 JO |
4716 | if (buf && !ftrace_match_records(hash, buf, len)) { |
4717 | ret = -EINVAL; | |
4718 | goto out_regex_unlock; | |
4719 | } | |
647664ea MH |
4720 | if (ip) { |
4721 | ret = ftrace_match_addr(hash, ip, remove); | |
4722 | if (ret < 0) | |
4723 | goto out_regex_unlock; | |
4724 | } | |
33dc9b12 SR |
4725 | |
4726 | mutex_lock(&ftrace_lock); | |
e16b35dd | 4727 | ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); |
33dc9b12 SR |
4728 | mutex_unlock(&ftrace_lock); |
4729 | ||
ac483c44 | 4730 | out_regex_unlock: |
33b7f99c | 4731 | mutex_unlock(&ops->func_hash->regex_lock); |
33dc9b12 SR |
4732 | |
4733 | free_ftrace_hash(hash); | |
4734 | return ret; | |
41c52c0d SR |
4735 | } |
4736 | ||
647664ea MH |
4737 | static int |
4738 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, | |
4739 | int reset, int enable) | |
4740 | { | |
4741 | return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable); |
4742 | } | |
4743 | ||
4744 | /** | |
4745 | * ftrace_set_filter_ip - set a function to filter on in ftrace by address | |
4746 | * @ops - the ops to set the filter with | |
4747 | * @ip - the address to add to or remove from the filter. | |
4748 | * @remove - non zero to remove the ip from the filter | |
4749 | * @reset - non zero to reset all filters before applying this filter. | |
4750 | * | |
4751 | * Filters denote which functions should be enabled when tracing is enabled.
4752 | * If @ip is zero, it fails to update the filter. |
4753 | */ | |
4754 | int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, | |
4755 | int remove, int reset) | |
4756 | { | |
f04f24fb | 4757 | ftrace_ops_init(ops); |
647664ea MH |
4758 | return ftrace_set_addr(ops, ip, remove, reset, 1); |
4759 | } | |
4760 | EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); | |
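A hedged sketch of a typical caller — the ops, callback, and helper name are illustrative assumptions, not code from this file:

	/* Illustrative: hook a single function by address */
	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* runs on entry to the one filtered function */
	}

	static struct ftrace_ops my_ops = {
		.func = my_callback,
	};

	static int my_attach(unsigned long addr)
	{
		int ret = ftrace_set_filter_ip(&my_ops, addr, 0, 0);

		return ret ? ret : register_ftrace_function(&my_ops);
	}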
4761 | ||
d032ae89 JF |
4762 | /** |
4763 | * ftrace_ops_set_global_filter - setup ops to use global filters | |
4764 | * @ops - the ops which will use the global filters | |
4765 | * | |
4766 | * ftrace users who need global function trace filtering should call this. | |
4767 | * It can set the global filter only if ops were not initialized before. | |
4768 | */ | |
4769 | void ftrace_ops_set_global_filter(struct ftrace_ops *ops) | |
4770 | { | |
4771 | if (ops->flags & FTRACE_OPS_FL_INITIALIZED) | |
4772 | return; | |
4773 | ||
4774 | ftrace_ops_init(ops); | |
4775 | ops->func_hash = &global_ops.local_hash; | |
4776 | } | |
4777 | EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter); | |
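Usage is a one-time call before registration, as in this illustrative fragment (my_ops is hypothetical; fs/pstore is an in-tree user of this pattern):

	ftrace_ops_set_global_filter(&my_ops);
	register_ftrace_function(&my_ops);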
4778 | ||
647664ea MH |
4779 | static int |
4780 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, | |
4781 | int reset, int enable) | |
4782 | { | |
4783 | return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); | |
4784 | } | |
4785 | ||
77a2b37d SR |
4786 | /** |
4787 | * ftrace_set_filter - set a function to filter on in ftrace | |
936e074b SR |
4788 | * @ops - the ops to set the filter with |
4789 | * @buf - the string that holds the function filter text. | |
4790 | * @len - the length of the string. | |
4791 | * @reset - non zero to reset all filters before applying this filter. | |
4792 | * | |
4793 | * Filters denote which functions should be enabled when tracing is enabled. | |
4794 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | |
4795 | */ | |
ac483c44 | 4796 | int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
936e074b SR |
4797 | int len, int reset) |
4798 | { | |
f04f24fb | 4799 | ftrace_ops_init(ops); |
ac483c44 | 4800 | return ftrace_set_regex(ops, buf, len, reset, 1); |
936e074b SR |
4801 | } |
4802 | EXPORT_SYMBOL_GPL(ftrace_set_filter); | |
4803 | ||
4804 | /** | |
4805 | * ftrace_set_notrace - set a function to not trace in ftrace | |
4806 | * @ops - the ops to set the notrace filter with | |
4807 | * @buf - the string that holds the function notrace text. | |
4808 | * @len - the length of the string. | |
4809 | * @reset - non zero to reset all filters before applying this filter. | |
4810 | * | |
4811 | * Notrace Filters denote which functions should not be enabled when tracing | |
4812 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | |
4813 | * for tracing. | |
4814 | */ | |
ac483c44 | 4815 | int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
936e074b SR |
4816 | int len, int reset) |
4817 | { | |
f04f24fb | 4818 | ftrace_ops_init(ops); |
ac483c44 | 4819 | return ftrace_set_regex(ops, buf, len, reset, 0); |
936e074b SR |
4820 | } |
4821 | EXPORT_SYMBOL_GPL(ftrace_set_notrace); | |
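A hedged sketch of the usual pairing (the function name and the globs are illustrative): reset and install a filter, then carve an exception out of it with a notrace pattern:

	static int my_setup(struct ftrace_ops *ops)
	{
		unsigned char filter[] = "sched_*";
		unsigned char skip[]   = "sched_clock*";
		int ret;

		/* reset == 1 drops any previously installed patterns first */
		ret = ftrace_set_filter(ops, filter, sizeof(filter) - 1, 1);
		if (ret)
			return ret;
		return ftrace_set_notrace(ops, skip, sizeof(skip) - 1, 1);
	}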
4822 | /** | |
8d1b065d | 4823 | * ftrace_set_global_filter - set a function to filter on with global tracers |
77a2b37d SR |
4824 | * @buf - the string that holds the function filter text. |
4825 | * @len - the length of the string. | |
4826 | * @reset - non zero to reset all filters before applying this filter. | |
4827 | * | |
4828 | * Filters denote which functions should be enabled when tracing is enabled. | |
4829 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. | |
4830 | */ | |
936e074b | 4831 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset) |
77a2b37d | 4832 | { |
f45948e8 | 4833 | ftrace_set_regex(&global_ops, buf, len, reset, 1); |
41c52c0d | 4834 | } |
936e074b | 4835 | EXPORT_SYMBOL_GPL(ftrace_set_global_filter); |
4eebcc81 | 4836 | |
41c52c0d | 4837 | /** |
8d1b065d | 4838 | * ftrace_set_global_notrace - set a function to not trace with global tracers |
41c52c0d SR |
4839 | * @buf - the string that holds the function notrace text. |
4840 | * @len - the length of the string. | |
4841 | * @reset - non zero to reset all filters before applying this filter. | |
4842 | * | |
4843 | * Notrace Filters denote which functions should not be enabled when tracing | |
4844 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled | |
4845 | * for tracing. | |
4846 | */ | |
936e074b | 4847 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) |
41c52c0d | 4848 | { |
f45948e8 | 4849 | ftrace_set_regex(&global_ops, buf, len, reset, 0); |
77a2b37d | 4850 | } |
936e074b | 4851 | EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); |
77a2b37d | 4852 | |
2af15d6a SR |
4853 | /* |
4854 | * command line interface to allow users to set filters on boot up. | |
4855 | */ | |
4856 | #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE | |
4857 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | |
4858 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; | |
4859 | ||
f1ed7c74 SRRH |
4860 | /* Used by function selftest to not test if filter is set */ |
4861 | bool ftrace_filter_param __initdata; | |
4862 | ||
2af15d6a SR |
4863 | static int __init set_ftrace_notrace(char *str) |
4864 | { | |
f1ed7c74 | 4865 | ftrace_filter_param = true; |
75761cc1 | 4866 | strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); |
2af15d6a SR |
4867 | return 1; |
4868 | } | |
4869 | __setup("ftrace_notrace=", set_ftrace_notrace); | |
4870 | ||
4871 | static int __init set_ftrace_filter(char *str) | |
4872 | { | |
f1ed7c74 | 4873 | ftrace_filter_param = true; |
75761cc1 | 4874 | strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); |
2af15d6a SR |
4875 | return 1; |
4876 | } | |
4877 | __setup("ftrace_filter=", set_ftrace_filter); | |
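So booting with, say (values purely illustrative):

	ftrace_filter=schedule,vfs_read ftrace_notrace=*spin_lock*

seeds these buffers with comma-separated globs; set_ftrace_early_filters() below applies them once ftrace_init() has built the records.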
4878 | ||
369bc18f | 4879 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
f6060f46 | 4880 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; |
0d7d9a16 | 4881 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
b9b0c831 | 4882 | static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); |
801c29fd | 4883 | |
369bc18f SA |
4884 | static int __init set_graph_function(char *str) |
4885 | { | |
06f43d66 | 4886 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); |
369bc18f SA |
4887 | return 1; |
4888 | } | |
4889 | __setup("ftrace_graph_filter=", set_graph_function); | |
4890 | ||
0d7d9a16 NK |
4891 | static int __init set_graph_notrace_function(char *str) |
4892 | { | |
4893 | strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); | |
4894 | return 1; | |
4895 | } | |
4896 | __setup("ftrace_graph_notrace=", set_graph_notrace_function); | |
4897 | ||
65a50c65 TB |
4898 | static int __init set_graph_max_depth_function(char *str) |
4899 | { | |
4900 | if (!str) | |
4901 | return 0; | |
4902 | fgraph_max_depth = simple_strtoul(str, NULL, 0); | |
4903 | return 1; | |
4904 | } | |
4905 | __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); | |
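The graph knobs follow the same boot-time pattern, e.g. (illustrative values):

	ftrace_graph_filter=kmem_cache_* ftrace_graph_notrace=rcu* ftrace_graph_max_depth=5

where the first two are applied through set_ftrace_early_graph() below and the last is parsed straight into fgraph_max_depth.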
0d7d9a16 NK |
4906 | |
4907 | static void __init set_ftrace_early_graph(char *buf, int enable) | |
369bc18f SA |
4908 | { |
4909 | int ret; | |
4910 | char *func; | |
b9b0c831 | 4911 | struct ftrace_hash *hash; |
0d7d9a16 | 4912 | |
92ad18ec SRV |
4913 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
4914 | if (WARN_ON(!hash)) | |
4915 | return; | |
369bc18f SA |
4916 | |
4917 | while (buf) { | |
4918 | func = strsep(&buf, ","); | |
4919 | /* we allow only one expression at a time */ | |
b9b0c831 | 4920 | ret = ftrace_graph_set_hash(hash, func); |
369bc18f SA |
4921 | if (ret) |
4922 | printk(KERN_DEBUG "ftrace: function %s not traceable\n", |
4923 | func); |
4924 | } | |
92ad18ec SRV |
4925 | |
4926 | if (enable) | |
4927 | ftrace_graph_hash = hash; | |
4928 | else | |
4929 | ftrace_graph_notrace_hash = hash; | |
369bc18f SA |
4930 | } |
4931 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | |
4932 | ||
2a85a37f SR |
4933 | void __init |
4934 | ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) | |
2af15d6a SR |
4935 | { |
4936 | char *func; | |
4937 | ||
f04f24fb MH |
4938 | ftrace_ops_init(ops); |
4939 | ||
2af15d6a SR |
4940 | while (buf) { |
4941 | func = strsep(&buf, ","); | |
f45948e8 | 4942 | ftrace_set_regex(ops, func, strlen(func), 0, enable); |
2af15d6a SR |
4943 | } |
4944 | } | |
4945 | ||
4946 | static void __init set_ftrace_early_filters(void) | |
4947 | { | |
4948 | if (ftrace_filter_buf[0]) | |
2a85a37f | 4949 | ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); |
2af15d6a | 4950 | if (ftrace_notrace_buf[0]) |
2a85a37f | 4951 | ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); |
369bc18f SA |
4952 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
4953 | if (ftrace_graph_buf[0]) | |
0d7d9a16 NK |
4954 | set_ftrace_early_graph(ftrace_graph_buf, 1); |
4955 | if (ftrace_graph_notrace_buf[0]) | |
4956 | set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); | |
369bc18f | 4957 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
2af15d6a SR |
4958 | } |
4959 | ||
fc13cb0c | 4960 | int ftrace_regex_release(struct inode *inode, struct file *file) |
5072c59f SR |
4961 | { |
4962 | struct seq_file *m = (struct seq_file *)file->private_data; | |
4963 | struct ftrace_iterator *iter; | |
33dc9b12 | 4964 | struct ftrace_hash **orig_hash; |
689fd8b6 | 4965 | struct trace_parser *parser; |
ed926f9b | 4966 | int filter_hash; |
33dc9b12 | 4967 | int ret; |
5072c59f | 4968 | |
5072c59f SR |
4969 | if (file->f_mode & FMODE_READ) { |
4970 | iter = m->private; | |
5072c59f SR |
4971 | seq_release(inode, file); |
4972 | } else | |
4973 | iter = file->private_data; | |
4974 | ||
689fd8b6 | 4975 | parser = &iter->parser; |
4976 | if (trace_parser_loaded(parser)) { | |
1cf41dd7 | 4977 | ftrace_match_records(iter->hash, parser->buffer, parser->idx); |
5072c59f SR |
4978 | } |
4979 | ||
689fd8b6 | 4980 | trace_parser_put(parser); |
689fd8b6 | 4981 | |
33b7f99c | 4982 | mutex_lock(&iter->ops->func_hash->regex_lock); |
3f2367ba | 4983 | |
058e297d | 4984 | if (file->f_mode & FMODE_WRITE) { |
ed926f9b SR |
4985 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); |
4986 | ||
8c08f0d5 | 4987 | if (filter_hash) { |
33b7f99c | 4988 | orig_hash = &iter->ops->func_hash->filter_hash; |
69d71879 | 4989 | if (iter->tr && !list_empty(&iter->tr->mod_trace)) |
8c08f0d5 SRV |
4990 | iter->hash->flags |= FTRACE_HASH_FL_MOD; |
4991 | } else | |
33b7f99c | 4992 | orig_hash = &iter->ops->func_hash->notrace_hash; |
33dc9b12 | 4993 | |
058e297d | 4994 | mutex_lock(&ftrace_lock); |
e16b35dd SRV |
4995 | ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash, |
4996 | iter->hash, filter_hash); | |
058e297d | 4997 | mutex_unlock(&ftrace_lock); |
c20489da SRV |
4998 | } else { |
4999 | /* For read only, the hash is the ops hash */ | |
5000 | iter->hash = NULL; | |
058e297d | 5001 | } |
3f2367ba | 5002 | |
33b7f99c | 5003 | mutex_unlock(&iter->ops->func_hash->regex_lock); |
33dc9b12 SR |
5004 | free_ftrace_hash(iter->hash); |
5005 | kfree(iter); | |
058e297d | 5006 | |
5072c59f SR |
5007 | return 0; |
5008 | } | |
5009 | ||
5e2336a0 | 5010 | static const struct file_operations ftrace_avail_fops = { |
5072c59f SR |
5011 | .open = ftrace_avail_open, |
5012 | .read = seq_read, | |
5013 | .llseek = seq_lseek, | |
3be04b47 | 5014 | .release = seq_release_private, |
5072c59f SR |
5015 | }; |
5016 | ||
647bcd03 SR |
5017 | static const struct file_operations ftrace_enabled_fops = { |
5018 | .open = ftrace_enabled_open, | |
5019 | .read = seq_read, | |
5020 | .llseek = seq_lseek, | |
5021 | .release = seq_release_private, | |
5022 | }; | |
5023 | ||
5e2336a0 | 5024 | static const struct file_operations ftrace_filter_fops = { |
5072c59f | 5025 | .open = ftrace_filter_open, |
850a80cf | 5026 | .read = seq_read, |
5072c59f | 5027 | .write = ftrace_filter_write, |
098c879e | 5028 | .llseek = tracing_lseek, |
1cf41dd7 | 5029 | .release = ftrace_regex_release, |
5072c59f SR |
5030 | }; |
5031 | ||
5e2336a0 | 5032 | static const struct file_operations ftrace_notrace_fops = { |
41c52c0d | 5033 | .open = ftrace_notrace_open, |
850a80cf | 5034 | .read = seq_read, |
41c52c0d | 5035 | .write = ftrace_notrace_write, |
098c879e | 5036 | .llseek = tracing_lseek, |
1cf41dd7 | 5037 | .release = ftrace_regex_release, |
41c52c0d SR |
5038 | }; |
5039 | ||
ea4e2bc4 SR |
5040 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
5041 | ||
5042 | static DEFINE_MUTEX(graph_lock); | |
5043 | ||
b9b0c831 NK |
5044 | struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH; |
5045 | struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH; | |
5046 | ||
5047 | enum graph_filter_type { | |
5048 | GRAPH_FILTER_NOTRACE = 0, | |
5049 | GRAPH_FILTER_FUNCTION, | |
5050 | }; | |
ea4e2bc4 | 5051 | |
555fc781 SRV |
5052 | #define FTRACE_GRAPH_EMPTY ((void *)1) |
5053 | ||
faf982a6 | 5054 | struct ftrace_graph_data { |
e704eff3 SRV |
5055 | struct ftrace_hash *hash; |
5056 | struct ftrace_func_entry *entry; | |
5057 | int idx; /* for hash table iteration */ | |
5058 | enum graph_filter_type type; | |
5059 | struct ftrace_hash *new_hash; | |
5060 | const struct seq_operations *seq_ops; | |
5061 | struct trace_parser parser; | |
faf982a6 NK |
5062 | }; |
5063 | ||
ea4e2bc4 | 5064 | static void * |
85951842 | 5065 | __g_next(struct seq_file *m, loff_t *pos) |
ea4e2bc4 | 5066 | { |
faf982a6 | 5067 | struct ftrace_graph_data *fgd = m->private; |
b9b0c831 NK |
5068 | struct ftrace_func_entry *entry = fgd->entry; |
5069 | struct hlist_head *head; | |
5070 | int i, idx = fgd->idx; | |
faf982a6 | 5071 | |
b9b0c831 | 5072 | if (*pos >= fgd->hash->count) |
ea4e2bc4 | 5073 | return NULL; |
b9b0c831 NK |
5074 | |
5075 | if (entry) { | |
5076 | hlist_for_each_entry_continue(entry, hlist) { | |
5077 | fgd->entry = entry; | |
5078 | return entry; | |
5079 | } | |
5080 | ||
5081 | idx++; | |
5082 | } | |
5083 | ||
5084 | for (i = idx; i < 1 << fgd->hash->size_bits; i++) { | |
5085 | head = &fgd->hash->buckets[i]; | |
5086 | hlist_for_each_entry(entry, head, hlist) { | |
5087 | fgd->entry = entry; | |
5088 | fgd->idx = i; | |
5089 | return entry; | |
5090 | } | |
5091 | } | |
5092 | return NULL; | |
85951842 | 5093 | } |
ea4e2bc4 | 5094 | |
85951842 LZ |
5095 | static void * |
5096 | g_next(struct seq_file *m, void *v, loff_t *pos) | |
5097 | { | |
5098 | (*pos)++; | |
5099 | return __g_next(m, pos); | |
ea4e2bc4 SR |
5100 | } |
5101 | ||
5102 | static void *g_start(struct seq_file *m, loff_t *pos) | |
5103 | { | |
faf982a6 NK |
5104 | struct ftrace_graph_data *fgd = m->private; |
5105 | ||
ea4e2bc4 SR |
5106 | mutex_lock(&graph_lock); |
5107 | ||
649b988b SRV |
5108 | if (fgd->type == GRAPH_FILTER_FUNCTION) |
5109 | fgd->hash = rcu_dereference_protected(ftrace_graph_hash, | |
5110 | lockdep_is_held(&graph_lock)); | |
5111 | else | |
5112 | fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, | |
5113 | lockdep_is_held(&graph_lock)); | |
5114 | ||
f9349a8f | 5115 | /* Nothing to show; tell g_show to print that all functions are enabled */
b9b0c831 | 5116 | if (ftrace_hash_empty(fgd->hash) && !*pos) |
555fc781 | 5117 | return FTRACE_GRAPH_EMPTY; |
f9349a8f | 5118 | |
b9b0c831 NK |
5119 | fgd->idx = 0; |
5120 | fgd->entry = NULL; | |
85951842 | 5121 | return __g_next(m, pos); |
ea4e2bc4 SR |
5122 | } |
5123 | ||
5124 | static void g_stop(struct seq_file *m, void *p) | |
5125 | { | |
5126 | mutex_unlock(&graph_lock); | |
5127 | } | |
5128 | ||
5129 | static int g_show(struct seq_file *m, void *v) | |
5130 | { | |
b9b0c831 | 5131 | struct ftrace_func_entry *entry = v; |
ea4e2bc4 | 5132 | |
b9b0c831 | 5133 | if (!entry) |
ea4e2bc4 SR |
5134 | return 0; |
5135 | ||
555fc781 | 5136 | if (entry == FTRACE_GRAPH_EMPTY) { |
280d1429 NK |
5137 | struct ftrace_graph_data *fgd = m->private; |
5138 | ||
b9b0c831 | 5139 | if (fgd->type == GRAPH_FILTER_FUNCTION) |
fa6f0cc7 | 5140 | seq_puts(m, "#### all functions enabled ####\n"); |
280d1429 | 5141 | else |
fa6f0cc7 | 5142 | seq_puts(m, "#### no functions disabled ####\n"); |
f9349a8f FW |
5143 | return 0; |
5144 | } | |
5145 | ||
b9b0c831 | 5146 | seq_printf(m, "%ps\n", (void *)entry->ip); |
ea4e2bc4 SR |
5147 | |
5148 | return 0; | |
5149 | } | |
5150 | ||
88e9d34c | 5151 | static const struct seq_operations ftrace_graph_seq_ops = { |
ea4e2bc4 SR |
5152 | .start = g_start, |
5153 | .next = g_next, | |
5154 | .stop = g_stop, | |
5155 | .show = g_show, | |
5156 | }; | |
5157 | ||
5158 | static int | |
faf982a6 NK |
5159 | __ftrace_graph_open(struct inode *inode, struct file *file, |
5160 | struct ftrace_graph_data *fgd) | |
ea4e2bc4 SR |
5161 | { |
5162 | int ret = 0; | |
b9b0c831 | 5163 | struct ftrace_hash *new_hash = NULL; |
ea4e2bc4 | 5164 | |
b9b0c831 NK |
5165 | if (file->f_mode & FMODE_WRITE) { |
5166 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; | |
5167 | ||
e704eff3 SRV |
5168 | if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) |
5169 | return -ENOMEM; | |
5170 | ||
b9b0c831 NK |
5171 | if (file->f_flags & O_TRUNC) |
5172 | new_hash = alloc_ftrace_hash(size_bits); | |
5173 | else | |
5174 | new_hash = alloc_and_copy_ftrace_hash(size_bits, | |
5175 | fgd->hash); | |
5176 | if (!new_hash) { | |
5177 | ret = -ENOMEM; | |
5178 | goto out; | |
5179 | } | |
ea4e2bc4 SR |
5180 | } |
5181 | ||
faf982a6 | 5182 | if (file->f_mode & FMODE_READ) { |
b9b0c831 | 5183 | ret = seq_open(file, &ftrace_graph_seq_ops); |
faf982a6 NK |
5184 | if (!ret) { |
5185 | struct seq_file *m = file->private_data; | |
5186 | m->private = fgd; | |
b9b0c831 NK |
5187 | } else { |
5188 | /* Failed */ | |
5189 | free_ftrace_hash(new_hash); | |
5190 | new_hash = NULL; | |
faf982a6 NK |
5191 | } |
5192 | } else | |
5193 | file->private_data = fgd; | |
ea4e2bc4 | 5194 | |
b9b0c831 | 5195 | out: |
e704eff3 SRV |
5196 | if (ret < 0 && file->f_mode & FMODE_WRITE) |
5197 | trace_parser_put(&fgd->parser); | |
5198 | ||
b9b0c831 | 5199 | fgd->new_hash = new_hash; |
649b988b SRV |
5200 | |
5201 | /* | |
5202 | * All uses of fgd->hash must be taken with the graph_lock | |
5203 | * held. The graph_lock is going to be released, so force | |
5204 | * fgd->hash to be reinitialized when it is taken again. | |
5205 | */ | |
5206 | fgd->hash = NULL; | |
5207 | ||
ea4e2bc4 SR |
5208 | return ret; |
5209 | } | |
5210 | ||
faf982a6 NK |
5211 | static int |
5212 | ftrace_graph_open(struct inode *inode, struct file *file) | |
5213 | { | |
5214 | struct ftrace_graph_data *fgd; | |
b9b0c831 | 5215 | int ret; |
faf982a6 NK |
5216 | |
5217 | if (unlikely(ftrace_disabled)) | |
5218 | return -ENODEV; | |
5219 | ||
5220 | fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); | |
5221 | if (fgd == NULL) | |
5222 | return -ENOMEM; | |
5223 | ||
b9b0c831 NK |
5224 | mutex_lock(&graph_lock); |
5225 | ||
649b988b SRV |
5226 | fgd->hash = rcu_dereference_protected(ftrace_graph_hash, |
5227 | lockdep_is_held(&graph_lock)); | |
b9b0c831 | 5228 | fgd->type = GRAPH_FILTER_FUNCTION; |
faf982a6 NK |
5229 | fgd->seq_ops = &ftrace_graph_seq_ops; |
5230 | ||
b9b0c831 NK |
5231 | ret = __ftrace_graph_open(inode, file, fgd); |
5232 | if (ret < 0) | |
5233 | kfree(fgd); | |
5234 | ||
5235 | mutex_unlock(&graph_lock); | |
5236 | return ret; | |
faf982a6 NK |
5237 | } |
5238 | ||
29ad23b0 NK |
5239 | static int |
5240 | ftrace_graph_notrace_open(struct inode *inode, struct file *file) | |
5241 | { | |
5242 | struct ftrace_graph_data *fgd; | |
b9b0c831 | 5243 | int ret; |
29ad23b0 NK |
5244 | |
5245 | if (unlikely(ftrace_disabled)) | |
5246 | return -ENODEV; | |
5247 | ||
5248 | fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); | |
5249 | if (fgd == NULL) | |
5250 | return -ENOMEM; | |
5251 | ||
b9b0c831 NK |
5252 | mutex_lock(&graph_lock); |
5253 | ||
649b988b SRV |
5254 | fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, |
5255 | lockdep_is_held(&graph_lock)); | |
b9b0c831 | 5256 | fgd->type = GRAPH_FILTER_NOTRACE; |
29ad23b0 NK |
5257 | fgd->seq_ops = &ftrace_graph_seq_ops; |
5258 | ||
b9b0c831 NK |
5259 | ret = __ftrace_graph_open(inode, file, fgd); |
5260 | if (ret < 0) | |
5261 | kfree(fgd); | |
5262 | ||
5263 | mutex_unlock(&graph_lock); | |
5264 | return ret; | |
29ad23b0 NK |
5265 | } |
5266 | ||
87827111 LZ |
5267 | static int |
5268 | ftrace_graph_release(struct inode *inode, struct file *file) | |
5269 | { | |
b9b0c831 | 5270 | struct ftrace_graph_data *fgd; |
e704eff3 SRV |
5271 | struct ftrace_hash *old_hash, *new_hash; |
5272 | struct trace_parser *parser; | |
5273 | int ret = 0; | |
b9b0c831 | 5274 | |
faf982a6 NK |
5275 | if (file->f_mode & FMODE_READ) { |
5276 | struct seq_file *m = file->private_data; | |
5277 | ||
b9b0c831 | 5278 | fgd = m->private; |
87827111 | 5279 | seq_release(inode, file); |
faf982a6 | 5280 | } else { |
b9b0c831 | 5281 | fgd = file->private_data; |
faf982a6 NK |
5282 | } |
5283 | ||
e704eff3 SRV |
5284 | |
5285 | if (file->f_mode & FMODE_WRITE) { | |
5286 | ||
5287 | parser = &fgd->parser; | |
5288 | ||
5289 | if (trace_parser_loaded(parser)) { |
e704eff3 SRV |
5290 | ret = ftrace_graph_set_hash(fgd->new_hash, |
5291 | parser->buffer); | |
5292 | } | |
5293 | ||
5294 | trace_parser_put(parser); | |
5295 | ||
5296 | new_hash = __ftrace_hash_move(fgd->new_hash); | |
5297 | if (!new_hash) { | |
5298 | ret = -ENOMEM; | |
5299 | goto out; | |
5300 | } | |
5301 | ||
5302 | mutex_lock(&graph_lock); | |
5303 | ||
5304 | if (fgd->type == GRAPH_FILTER_FUNCTION) { | |
5305 | old_hash = rcu_dereference_protected(ftrace_graph_hash, | |
5306 | lockdep_is_held(&graph_lock)); | |
5307 | rcu_assign_pointer(ftrace_graph_hash, new_hash); | |
5308 | } else { | |
5309 | old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, | |
5310 | lockdep_is_held(&graph_lock)); | |
5311 | rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); | |
5312 | } | |
5313 | ||
5314 | mutex_unlock(&graph_lock); | |
5315 | ||
5316 | /* Wait until no users still reference the old hash */ |
5317 | synchronize_sched(); | |
5318 | ||
5319 | free_ftrace_hash(old_hash); | |
5320 | } | |
5321 | ||
5322 | out: | |
f9797c2f | 5323 | free_ftrace_hash(fgd->new_hash); |
b9b0c831 NK |
5324 | kfree(fgd); |
5325 | ||
e704eff3 | 5326 | return ret; |
87827111 LZ |
5327 | } |
5328 | ||
ea4e2bc4 | 5329 | static int |
b9b0c831 | 5330 | ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) |
ea4e2bc4 | 5331 | { |
3ba00929 | 5332 | struct ftrace_glob func_g; |
ea4e2bc4 SR |
5333 | struct dyn_ftrace *rec; |
5334 | struct ftrace_page *pg; | |
b9b0c831 | 5335 | struct ftrace_func_entry *entry; |
c7c6b1fe | 5336 | int fail = 1; |
3ba00929 | 5337 | int not; |
ea4e2bc4 | 5338 | |
f9349a8f | 5339 | /* decode regex */ |
3ba00929 DS |
5340 | func_g.type = filter_parse_regex(buffer, strlen(buffer), |
5341 | &func_g.search, ¬); | |
f9349a8f | 5342 | |
3ba00929 | 5343 | func_g.len = strlen(func_g.search); |
f9349a8f | 5344 | |
52baf119 | 5345 | mutex_lock(&ftrace_lock); |
45a4a237 SR |
5346 | |
5347 | if (unlikely(ftrace_disabled)) { | |
5348 | mutex_unlock(&ftrace_lock); | |
5349 | return -ENODEV; | |
5350 | } | |
5351 | ||
265c831c SR |
5352 | do_for_each_ftrace_rec(pg, rec) { |
5353 | ||
546fece4 SRRH |
5354 | if (rec->flags & FTRACE_FL_DISABLED) |
5355 | continue; | |
5356 | ||
0b507e1e | 5357 | if (ftrace_match_record(rec, &func_g, NULL, 0)) { |
b9b0c831 | 5358 | entry = ftrace_lookup_ip(hash, rec->ip); |
c7c6b1fe LZ |
5359 | |
5360 | if (!not) { | |
5361 | fail = 0; | |
b9b0c831 NK |
5362 | |
5363 | if (entry) | |
5364 | continue; | |
5365 | if (add_hash_entry(hash, rec->ip) < 0) | |
5366 | goto out; | |
c7c6b1fe | 5367 | } else { |
b9b0c831 NK |
5368 | if (entry) { |
5369 | free_hash_entry(hash, entry); | |
c7c6b1fe LZ |
5370 | fail = 0; |
5371 | } | |
5372 | } | |
ea4e2bc4 | 5373 | } |
265c831c | 5374 | } while_for_each_ftrace_rec(); |
c7c6b1fe | 5375 | out: |
52baf119 | 5376 | mutex_unlock(&ftrace_lock); |
ea4e2bc4 | 5377 | |
c7c6b1fe LZ |
5378 | if (fail) |
5379 | return -EINVAL; | |
5380 | ||
c7c6b1fe | 5381 | return 0; |
ea4e2bc4 SR |
5382 | } |
5383 | ||
5384 | static ssize_t | |
5385 | ftrace_graph_write(struct file *file, const char __user *ubuf, | |
5386 | size_t cnt, loff_t *ppos) | |
5387 | { | |
6a10108b | 5388 | ssize_t read, ret = 0; |
faf982a6 | 5389 | struct ftrace_graph_data *fgd = file->private_data; |
e704eff3 | 5390 | struct trace_parser *parser; |
ea4e2bc4 | 5391 | |
c7c6b1fe | 5392 | if (!cnt) |
ea4e2bc4 SR |
5393 | return 0; |
5394 | ||
ae98d27a SRV |
5395 | /* Read mode uses seq functions */ |
5396 | if (file->f_mode & FMODE_READ) { | |
5397 | struct seq_file *m = file->private_data; | |
5398 | fgd = m->private; | |
5399 | } | |
5400 | ||
e704eff3 | 5401 | parser = &fgd->parser; |
ea4e2bc4 | 5402 | |
e704eff3 | 5403 | read = trace_get_user(parser, ubuf, cnt, ppos); |
689fd8b6 | 5404 | |
e704eff3 SRV |
5405 | if (read >= 0 && trace_parser_loaded(parser) && |
5406 | !trace_parser_cont(parser)) { | |
6a10108b | 5407 | |
b9b0c831 | 5408 | ret = ftrace_graph_set_hash(fgd->new_hash, |
e704eff3 SRV |
5409 | parser->buffer); |
5410 | trace_parser_clear(parser); | |
ea4e2bc4 | 5411 | } |
ea4e2bc4 | 5412 | |
6a10108b NK |
5413 | if (!ret) |
5414 | ret = read; | |
1eb90f13 | 5415 | |
ea4e2bc4 SR |
5416 | return ret; |
5417 | } | |
5418 | ||
5419 | static const struct file_operations ftrace_graph_fops = { | |
87827111 LZ |
5420 | .open = ftrace_graph_open, |
5421 | .read = seq_read, | |
5422 | .write = ftrace_graph_write, | |
098c879e | 5423 | .llseek = tracing_lseek, |
87827111 | 5424 | .release = ftrace_graph_release, |
ea4e2bc4 | 5425 | }; |
29ad23b0 NK |
5426 | |
5427 | static const struct file_operations ftrace_graph_notrace_fops = { | |
5428 | .open = ftrace_graph_notrace_open, | |
5429 | .read = seq_read, | |
5430 | .write = ftrace_graph_write, | |
098c879e | 5431 | .llseek = tracing_lseek, |
29ad23b0 NK |
5432 | .release = ftrace_graph_release, |
5433 | }; | |
ea4e2bc4 SR |
5434 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
5435 | ||
591dffda SRRH |
5436 | void ftrace_create_filter_files(struct ftrace_ops *ops, |
5437 | struct dentry *parent) | |
5438 | { | |
5439 | ||
5440 | trace_create_file("set_ftrace_filter", 0644, parent, | |
5441 | ops, &ftrace_filter_fops); | |
5442 | ||
5443 | trace_create_file("set_ftrace_notrace", 0644, parent, | |
5444 | ops, &ftrace_notrace_fops); | |
5445 | } | |
5446 | ||
5447 | /* | |
5448 | * The name "destroy_filter_files" is really a misnomer. Although |
5449 | * in the future it may actually delete the files, for now it is |
5450 | * really intended to make sure the ops passed in are disabled |
5451 | * and that when this function returns, the caller is free to | |
5452 | * free the ops. | |
5453 | * | |
5454 | * The "destroy" name is only to match the "create" name that this | |
5455 | * should be paired with. | |
5456 | */ | |
5457 | void ftrace_destroy_filter_files(struct ftrace_ops *ops) | |
5458 | { | |
5459 | mutex_lock(&ftrace_lock); | |
5460 | if (ops->flags & FTRACE_OPS_FL_ENABLED) | |
5461 | ftrace_shutdown(ops, 0); | |
5462 | ops->flags |= FTRACE_OPS_FL_DELETED; | |
5463 | mutex_unlock(&ftrace_lock); | |
5464 | } | |
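A sketch of the intended create/destroy pairing for a dynamically allocated ops — the instance helpers and the kfree() are illustrative assumptions:

	static void my_instance_setup(struct ftrace_ops *ops, struct dentry *dir)
	{
		ftrace_create_filter_files(ops, dir);
	}

	static void my_instance_teardown(struct ftrace_ops *ops)
	{
		/* after this returns, ops is shut down and safe to free */
		ftrace_destroy_filter_files(ops);
		kfree(ops);
	}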
5465 | ||
8434dc93 | 5466 | static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) |
5072c59f | 5467 | { |
5072c59f | 5468 | |
5452af66 FW |
5469 | trace_create_file("available_filter_functions", 0444, |
5470 | d_tracer, NULL, &ftrace_avail_fops); | |
5072c59f | 5471 | |
647bcd03 SR |
5472 | trace_create_file("enabled_functions", 0444, |
5473 | d_tracer, NULL, &ftrace_enabled_fops); | |
5474 | ||
591dffda | 5475 | ftrace_create_filter_files(&global_ops, d_tracer); |
ad90c0e3 | 5476 | |
ea4e2bc4 | 5477 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1ce0500d | 5478 | trace_create_file("set_graph_function", 0644, d_tracer, |
ea4e2bc4 SR |
5479 | NULL, |
5480 | &ftrace_graph_fops); | |
1ce0500d | 5481 | trace_create_file("set_graph_notrace", 0644, d_tracer, |
29ad23b0 NK |
5482 | NULL, |
5483 | &ftrace_graph_notrace_fops); | |
ea4e2bc4 SR |
5484 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
5485 | ||
5072c59f SR |
5486 | return 0; |
5487 | } | |
5488 | ||
9fd49328 | 5489 | static int ftrace_cmp_ips(const void *a, const void *b) |
68950619 | 5490 | { |
9fd49328 SR |
5491 | const unsigned long *ipa = a; |
5492 | const unsigned long *ipb = b; | |
68950619 | 5493 | |
9fd49328 SR |
5494 | if (*ipa > *ipb) |
5495 | return 1; | |
5496 | if (*ipa < *ipb) | |
5497 | return -1; | |
5498 | return 0; | |
5499 | } | |
5500 | ||
5cb084bb | 5501 | static int ftrace_process_locs(struct module *mod, |
31e88909 | 5502 | unsigned long *start, |
68bf21aa SR |
5503 | unsigned long *end) |
5504 | { | |
706c81f8 | 5505 | struct ftrace_page *start_pg; |
a7900875 | 5506 | struct ftrace_page *pg; |
706c81f8 | 5507 | struct dyn_ftrace *rec; |
a7900875 | 5508 | unsigned long count; |
68bf21aa SR |
5509 | unsigned long *p; |
5510 | unsigned long addr; | |
4376cac6 | 5511 | unsigned long flags = 0; /* Shut up gcc */ |
a7900875 SR |
5512 | int ret = -ENOMEM; |
5513 | ||
5514 | count = end - start; | |
5515 | ||
5516 | if (!count) | |
5517 | return 0; | |
5518 | ||
9fd49328 | 5519 | sort(start, count, sizeof(*start), |
6db02903 | 5520 | ftrace_cmp_ips, NULL); |
9fd49328 | 5521 | |
706c81f8 SR |
5522 | start_pg = ftrace_allocate_pages(count); |
5523 | if (!start_pg) | |
a7900875 | 5524 | return -ENOMEM; |
68bf21aa | 5525 | |
e6ea44e9 | 5526 | mutex_lock(&ftrace_lock); |
a7900875 | 5527 | |
32082309 SR |
5528 | /* |
5529 | * The core kernel and each module need their own pages, as |
5530 | * modules will free them when they are removed. | |
5531 | * Force a new page to be allocated for modules. | |
5532 | */ | |
a7900875 SR |
5533 | if (!mod) { |
5534 | WARN_ON(ftrace_pages || ftrace_pages_start); | |
5535 | /* First initialization */ | |
706c81f8 | 5536 | ftrace_pages = ftrace_pages_start = start_pg; |
a7900875 | 5537 | } else { |
32082309 | 5538 | if (!ftrace_pages) |
a7900875 | 5539 | goto out; |
32082309 | 5540 | |
a7900875 SR |
5541 | if (WARN_ON(ftrace_pages->next)) { |
5542 | /* Hmm, we have free pages? */ | |
5543 | while (ftrace_pages->next) | |
5544 | ftrace_pages = ftrace_pages->next; | |
32082309 | 5545 | } |
a7900875 | 5546 | |
706c81f8 | 5547 | ftrace_pages->next = start_pg; |
32082309 SR |
5548 | } |
5549 | ||
68bf21aa | 5550 | p = start; |
706c81f8 | 5551 | pg = start_pg; |
68bf21aa SR |
5552 | while (p < end) { |
5553 | addr = ftrace_call_adjust(*p++); | |
20e5227e SR |
5554 | /* |
5555 | * Some architecture linkers will pad between | |
5556 | * the different mcount_loc sections of different | |
5557 | * object files to satisfy alignments. | |
5558 | * Skip any NULL pointers. | |
5559 | */ | |
5560 | if (!addr) | |
5561 | continue; | |
706c81f8 SR |
5562 | |
5563 | if (pg->index == pg->size) { | |
5564 | /* We should have allocated enough */ | |
5565 | if (WARN_ON(!pg->next)) | |
5566 | break; | |
5567 | pg = pg->next; | |
5568 | } | |
5569 | ||
5570 | rec = &pg->records[pg->index++]; | |
5571 | rec->ip = addr; | |
68bf21aa SR |
5572 | } |
5573 | ||
706c81f8 SR |
5574 | /* We should have used all pages */ |
5575 | WARN_ON(pg->next); | |
5576 | ||
5577 | /* Assign the last page to ftrace_pages */ | |
5578 | ftrace_pages = pg; | |
5579 | ||
a4f18ed1 | 5580 | /* |
4376cac6 SR |
5581 | * We only need to disable interrupts on start up |
5582 | * because we are modifying code that an interrupt | |
5583 | * may execute, and the modification is not atomic. | |
5584 | * But for modules, nothing runs the code we modify | |
5585 | * until we are finished with it, and there's no | |
5586 | * reason to cause large interrupt latencies while we do it. | |
a4f18ed1 | 5587 | */ |
4376cac6 SR |
5588 | if (!mod) |
5589 | local_irq_save(flags); | |
1dc43cf0 | 5590 | ftrace_update_code(mod, start_pg); |
4376cac6 SR |
5591 | if (!mod) |
5592 | local_irq_restore(flags); | |
a7900875 SR |
5593 | ret = 0; |
5594 | out: | |
e6ea44e9 | 5595 | mutex_unlock(&ftrace_lock); |
68bf21aa | 5596 | |
a7900875 | 5597 | return ret; |
68bf21aa SR |
5598 | } |
5599 | ||
aba4b5c2 SRV |
5600 | struct ftrace_mod_func { |
5601 | struct list_head list; | |
5602 | char *name; | |
5603 | unsigned long ip; | |
5604 | unsigned int size; | |
5605 | }; | |
5606 | ||
5607 | struct ftrace_mod_map { | |
6aa69784 | 5608 | struct rcu_head rcu; |
aba4b5c2 SRV |
5609 | struct list_head list; |
5610 | struct module *mod; | |
5611 | unsigned long start_addr; | |
5612 | unsigned long end_addr; | |
5613 | struct list_head funcs; | |
6171a031 | 5614 | unsigned int num_funcs; |
aba4b5c2 SRV |
5615 | }; |
5616 | ||
93eb677d | 5617 | #ifdef CONFIG_MODULES |
32082309 SR |
5618 | |
5619 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) | |
5620 | ||
6aa69784 SRV |
5621 | static LIST_HEAD(ftrace_mod_maps); |
5622 | ||
b7ffffbb SRRH |
5623 | static int referenced_filters(struct dyn_ftrace *rec) |
5624 | { | |
5625 | struct ftrace_ops *ops; | |
5626 | int cnt = 0; | |
5627 | ||
5628 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { | |
5629 | if (ops_references_rec(ops, rec)) | |
5630 | cnt++; | |
5631 | } | |
5632 | ||
5633 | return cnt; | |
5634 | } | |
5635 | ||
2a5bfe47 SRV |
5636 | static void |
5637 | clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) | |
5638 | { | |
5639 | struct ftrace_func_entry *entry; | |
5640 | struct dyn_ftrace *rec; | |
5641 | int i; | |
5642 | ||
5643 | if (ftrace_hash_empty(hash)) | |
5644 | return; | |
5645 | ||
5646 | for (i = 0; i < pg->index; i++) { | |
5647 | rec = &pg->records[i]; | |
5648 | entry = __ftrace_lookup_ip(hash, rec->ip); | |
5649 | /* | |
5650 | * Do not allow this rec to match again. | |
5651 | * Yeah, it may waste some memory, but will be removed | |
5652 | * if/when the hash is modified again. | |
5653 | */ | |
5654 | if (entry) | |
5655 | entry->ip = 0; | |
5656 | } | |
5657 | } | |
5658 | ||
5659 | /* Clear any records from hashes */
5660 | static void clear_mod_from_hashes(struct ftrace_page *pg) | |
5661 | { | |
5662 | struct trace_array *tr; | |
5663 | ||
5664 | mutex_lock(&trace_types_lock); | |
5665 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | |
5666 | if (!tr->ops || !tr->ops->func_hash) | |
5667 | continue; | |
5668 | mutex_lock(&tr->ops->func_hash->regex_lock); | |
5669 | clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); | |
5670 | clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); | |
5671 | mutex_unlock(&tr->ops->func_hash->regex_lock); | |
5672 | } | |
5673 | mutex_unlock(&trace_types_lock); | |
5674 | } | |
5675 | ||
6aa69784 SRV |
5676 | static void ftrace_free_mod_map(struct rcu_head *rcu) |
5677 | { | |
5678 | struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); | |
5679 | struct ftrace_mod_func *mod_func; | |
5680 | struct ftrace_mod_func *n; | |
5681 | ||
5682 | /* All the contents of mod_map are no longer visible to readers */ |
5683 | list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { | |
5684 | kfree(mod_func->name); | |
5685 | list_del(&mod_func->list); | |
5686 | kfree(mod_func); | |
5687 | } | |
5688 | ||
5689 | kfree(mod_map); | |
5690 | } | |
5691 | ||
e7247a15 | 5692 | void ftrace_release_mod(struct module *mod) |
93eb677d | 5693 | { |
6aa69784 SRV |
5694 | struct ftrace_mod_map *mod_map; |
5695 | struct ftrace_mod_map *n; | |
93eb677d | 5696 | struct dyn_ftrace *rec; |
32082309 | 5697 | struct ftrace_page **last_pg; |
2a5bfe47 | 5698 | struct ftrace_page *tmp_page = NULL; |
93eb677d | 5699 | struct ftrace_page *pg; |
a7900875 | 5700 | int order; |
93eb677d | 5701 | |
45a4a237 SR |
5702 | mutex_lock(&ftrace_lock); |
5703 | ||
e7247a15 | 5704 | if (ftrace_disabled) |
45a4a237 | 5705 | goto out_unlock; |
93eb677d | 5706 | |
6aa69784 SRV |
5707 | list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { |
5708 | if (mod_map->mod == mod) { | |
5709 | list_del_rcu(&mod_map->list); | |
5710 | call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map); | |
5711 | break; | |
5712 | } | |
5713 | } | |
5714 | ||
32082309 SR |
5715 | /* |
5716 | * Each module has its own ftrace_pages; remove |
5717 | * them from the list. | |
5718 | */ | |
5719 | last_pg = &ftrace_pages_start; | |
5720 | for (pg = ftrace_pages_start; pg; pg = *last_pg) { | |
5721 | rec = &pg->records[0]; | |
3e234289 SRV |
5722 | if (within_module_core(rec->ip, mod) || |
5723 | within_module_init(rec->ip, mod)) { | |
93eb677d | 5724 | /* |
32082309 SR |
5725 | * As core pages are first, the first |
5726 | * page should never be a module page. | |
93eb677d | 5727 | */ |
32082309 SR |
5728 | if (WARN_ON(pg == ftrace_pages_start)) |
5729 | goto out_unlock; | |
5730 | ||
5731 | /* Check if we are deleting the last page */ | |
5732 | if (pg == ftrace_pages) | |
5733 | ftrace_pages = next_to_ftrace_page(last_pg); | |
5734 | ||
83dd1493 | 5735 | ftrace_update_tot_cnt -= pg->index; |
32082309 | 5736 | *last_pg = pg->next; |
2a5bfe47 SRV |
5737 | |
5738 | pg->next = tmp_page; | |
5739 | tmp_page = pg; | |
32082309 SR |
5740 | } else |
5741 | last_pg = &pg->next; | |
5742 | } | |
45a4a237 | 5743 | out_unlock: |
93eb677d | 5744 | mutex_unlock(&ftrace_lock); |
2a5bfe47 SRV |
5745 | |
5746 | for (pg = tmp_page; pg; pg = tmp_page) { | |
5747 | ||
5748 | /* Needs to be called outside of ftrace_lock */ | |
5749 | clear_mod_from_hashes(pg); | |
5750 | ||
5751 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); | |
5752 | free_pages((unsigned long)pg->records, order); | |
5753 | tmp_page = pg->next; | |
5754 | kfree(pg); | |
5755 | } | |
93eb677d SR |
5756 | } |
5757 | ||
7dcd182b | 5758 | void ftrace_module_enable(struct module *mod) |
b7ffffbb SRRH |
5759 | { |
5760 | struct dyn_ftrace *rec; | |
5761 | struct ftrace_page *pg; | |
5762 | ||
5763 | mutex_lock(&ftrace_lock); | |
5764 | ||
5765 | if (ftrace_disabled) | |
5766 | goto out_unlock; | |
5767 | ||
5768 | /* | |
5769 | * If the tracing is enabled, go ahead and enable the record. | |
5770 | * | |
5771 | * The reason not to enable the record immediately is the |
5772 | * inherent check of ftrace_make_nop/ftrace_make_call for |
5773 | * correct previous instructions. Doing the NOP |
5774 | * conversion first puts the module into the correct state, thus |
5775 | * passing the ftrace_make_call check. | |
5776 | * | |
5777 | * We also delay this until after the module code has set the |
5778 | * text to read-only, as we now need to set it back to read-write |
5779 | * so that we can modify the text. | |
5780 | */ | |
5781 | if (ftrace_start_up) | |
5782 | ftrace_arch_code_modify_prepare(); | |
5783 | ||
5784 | do_for_each_ftrace_rec(pg, rec) { | |
5785 | int cnt; | |
5786 | /* | |
5787 | * do_for_each_ftrace_rec() is a double loop. | |
5788 | * module text shares the pg. If a record is | |
5789 | * not part of this module, then skip this pg, | |
5790 | * which the "break" will do. | |
5791 | */ | |
3e234289 SRV |
5792 | if (!within_module_core(rec->ip, mod) && |
5793 | !within_module_init(rec->ip, mod)) | |
b7ffffbb SRRH |
5794 | break; |
5795 | ||
5796 | cnt = 0; | |
5797 | ||
5798 | /* | |
5799 | * When adding a module, we need to check if tracers are | |
5800 | * currently enabled and if they are, and can trace this record, | |
5801 | * we need to enable the module functions as well as update the | |
5802 | * reference counts for those function records. | |
5803 | */ | |
5804 | if (ftrace_start_up) | |
5805 | cnt += referenced_filters(rec); | |
5806 | ||
5807 | /* This clears FTRACE_FL_DISABLED */ | |
5808 | rec->flags = cnt; | |
5809 | ||
5810 | if (ftrace_start_up && cnt) { | |
5811 | int failed = __ftrace_replace_code(rec, 1); | |
5812 | if (failed) { | |
5813 | ftrace_bug(failed, rec); | |
5814 | goto out_loop; | |
5815 | } | |
5816 | } | |
5817 | ||
5818 | } while_for_each_ftrace_rec(); | |
5819 | ||
5820 | out_loop: | |
5821 | if (ftrace_start_up) | |
5822 | ftrace_arch_code_modify_post_process(); | |
5823 | ||
5824 | out_unlock: | |
5825 | mutex_unlock(&ftrace_lock); | |
d7fbf8df SRV |
5826 | |
5827 | process_cached_mods(mod->name); | |
b7ffffbb SRRH |
5828 | } |
5829 | ||
b6b71f66 | 5830 | void ftrace_module_init(struct module *mod) |
90d595fe | 5831 | { |
97e9b4fc | 5832 | if (ftrace_disabled || !mod->num_ftrace_callsites) |
fed1939c | 5833 | return; |
90d595fe | 5834 | |
97e9b4fc SRRH |
5835 | ftrace_process_locs(mod, mod->ftrace_callsites, |
5836 | mod->ftrace_callsites + mod->num_ftrace_callsites); | |
8c189ea6 | 5837 | } |
aba4b5c2 SRV |
5838 | |
5839 | static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, | |
5840 | struct dyn_ftrace *rec) | |
5841 | { | |
5842 | struct ftrace_mod_func *mod_func; | |
5843 | unsigned long symsize; | |
5844 | unsigned long offset; | |
5845 | char str[KSYM_SYMBOL_LEN]; | |
5846 | char *modname; | |
5847 | const char *ret; | |
5848 | ||
5849 | ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); | |
5850 | if (!ret) | |
5851 | return; | |
5852 | ||
5853 | mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL); | |
5854 | if (!mod_func) | |
5855 | return; | |
5856 | ||
5857 | mod_func->name = kstrdup(str, GFP_KERNEL); | |
5858 | if (!mod_func->name) { | |
5859 | kfree(mod_func); | |
5860 | return; | |
5861 | } | |
5862 | ||
5863 | mod_func->ip = rec->ip - offset; | |
5864 | mod_func->size = symsize; | |
5865 | ||
6171a031 SRV |
5866 | mod_map->num_funcs++; |
5867 | ||
aba4b5c2 SRV |
5868 | list_add_rcu(&mod_func->list, &mod_map->funcs); |
5869 | } | |
5870 | ||
aba4b5c2 SRV |
5871 | static struct ftrace_mod_map * |
5872 | allocate_ftrace_mod_map(struct module *mod, | |
5873 | unsigned long start, unsigned long end) | |
5874 | { | |
5875 | struct ftrace_mod_map *mod_map; | |
5876 | ||
5877 | mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL); | |
5878 | if (!mod_map) | |
5879 | return NULL; | |
5880 | ||
5881 | mod_map->mod = mod; | |
5882 | mod_map->start_addr = start; | |
5883 | mod_map->end_addr = end; | |
6171a031 | 5884 | mod_map->num_funcs = 0; |
aba4b5c2 SRV |
5885 | |
5886 | INIT_LIST_HEAD_RCU(&mod_map->funcs); | |
5887 | ||
5888 | list_add_rcu(&mod_map->list, &ftrace_mod_maps); | |
5889 | ||
5890 | return mod_map; | |
5891 | } | |
5892 | ||
5893 | static const char * | |
5894 | ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, | |
5895 | unsigned long addr, unsigned long *size, | |
5896 | unsigned long *off, char *sym) | |
5897 | { | |
5898 | struct ftrace_mod_func *found_func = NULL; | |
5899 | struct ftrace_mod_func *mod_func; | |
5900 | ||
5901 | list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { | |
5902 | if (addr >= mod_func->ip && | |
5903 | addr < mod_func->ip + mod_func->size) { | |
5904 | found_func = mod_func; | |
5905 | break; | |
5906 | } | |
5907 | } | |
5908 | ||
5909 | if (found_func) { | |
5910 | if (size) | |
5911 | *size = found_func->size; | |
5912 | if (off) | |
5913 | *off = addr - found_func->ip; | |
5914 | if (sym) | |
5915 | strlcpy(sym, found_func->name, KSYM_NAME_LEN); | |
5916 | ||
5917 | return found_func->name; | |
5918 | } | |
5919 | ||
5920 | return NULL; | |
5921 | } | |
5922 | ||
5923 | const char * | |
5924 | ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, | |
5925 | unsigned long *off, char **modname, char *sym) | |
5926 | { | |
5927 | struct ftrace_mod_map *mod_map; | |
5928 | const char *ret = NULL; | |
5929 | ||
6aa69784 | 5930 | /* mod_map is freed via call_rcu_sched() */ |
aba4b5c2 SRV |
5931 | preempt_disable(); |
5932 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { | |
5933 | ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); | |
5934 | if (ret) { | |
5935 | if (modname) | |
5936 | *modname = mod_map->mod->name; | |
5937 | break; | |
5938 | } | |
5939 | } | |
5940 | preempt_enable(); | |
5941 | ||
5942 | return ret; | |
5943 | } | |
5944 | ||
6171a031 SRV |
5945 | int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, |
5946 | char *type, char *name, | |
5947 | char *module_name, int *exported) | |
5948 | { | |
5949 | struct ftrace_mod_map *mod_map; | |
5950 | struct ftrace_mod_func *mod_func; | |
5951 | ||
5952 | preempt_disable(); | |
5953 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { | |
5954 | ||
5955 | if (symnum >= mod_map->num_funcs) { | |
5956 | symnum -= mod_map->num_funcs; | |
5957 | continue; | |
5958 | } | |
5959 | ||
5960 | list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { | |
5961 | if (symnum > 1) { | |
5962 | symnum--; | |
5963 | continue; | |
5964 | } | |
5965 | ||
5966 | *value = mod_func->ip; | |
5967 | *type = 'T'; | |
5968 | strlcpy(name, mod_func->name, KSYM_NAME_LEN); | |
5969 | strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); | |
5970 | *exported = 1; | |
5971 | preempt_enable(); | |
5972 | return 0; | |
5973 | } | |
5974 | WARN_ON(1); | |
5975 | break; | |
5976 | } | |
5977 | preempt_enable(); | |
5978 | return -ERANGE; | |
5979 | } | |
5980 | ||
aba4b5c2 SRV |
5981 | #else |
5982 | static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, | |
5983 | struct dyn_ftrace *rec) { } | |
5984 | static inline struct ftrace_mod_map * | |
5985 | allocate_ftrace_mod_map(struct module *mod, | |
5986 | unsigned long start, unsigned long end) | |
5987 | { | |
5988 | return NULL; | |
5989 | } | |
93eb677d SR |
5990 | #endif /* CONFIG_MODULES */ |
5991 | ||
8715b108 JF |
5992 | struct ftrace_init_func { |
5993 | struct list_head list; | |
5994 | unsigned long ip; | |
5995 | }; | |
5996 | ||
5997 | /* Clear any init ips from hashes */ | |
5998 | static void | |
5999 | clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) | |
42c269c8 | 6000 | { |
8715b108 JF |
6001 | struct ftrace_func_entry *entry; |
6002 | ||
6003 | if (ftrace_hash_empty(hash)) | |
6004 | return; | |
6005 | ||
6006 | entry = __ftrace_lookup_ip(hash, func->ip); | |
6007 | ||
6008 | /* | |
6009 | * Do not allow this rec to match again. | |
6010 | * Yeah, it may waste some memory, but will be removed | |
6011 | * if/when the hash is modified again. | |
6012 | */ | |
6013 | if (entry) | |
6014 | entry->ip = 0; | |
6015 | } | |
6016 | ||
6017 | static void | |
6018 | clear_func_from_hashes(struct ftrace_init_func *func) | |
6019 | { | |
6020 | struct trace_array *tr; | |
6021 | ||
6022 | mutex_lock(&trace_types_lock); | |
6023 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | |
6024 | if (!tr->ops || !tr->ops->func_hash) | |
6025 | continue; | |
6026 | mutex_lock(&tr->ops->func_hash->regex_lock); | |
6027 | clear_func_from_hash(func, tr->ops->func_hash->filter_hash); | |
6028 | clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); | |
6029 | mutex_unlock(&tr->ops->func_hash->regex_lock); | |
6030 | } | |
6031 | mutex_unlock(&trace_types_lock); | |
6032 | } | |
6033 | ||
6034 | static void add_to_clear_hash_list(struct list_head *clear_list, | |
6035 | struct dyn_ftrace *rec) | |
6036 | { | |
6037 | struct ftrace_init_func *func; | |
6038 | ||
6039 | func = kmalloc(sizeof(*func), GFP_KERNEL); | |
6040 | if (!func) { | |
6041 | WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n"); | |
6042 | return; | |
6043 | } | |
6044 | ||
6045 | func->ip = rec->ip; | |
6046 | list_add(&func->list, clear_list); | |
6047 | } | |
6048 | ||
aba4b5c2 | 6049 | void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) |
42c269c8 | 6050 | { |
6cafbe15 SRV |
6051 | unsigned long start = (unsigned long)(start_ptr); |
6052 | unsigned long end = (unsigned long)(end_ptr); | |
42c269c8 SRV |
6053 | struct ftrace_page **last_pg = &ftrace_pages_start; |
6054 | struct ftrace_page *pg; | |
6055 | struct dyn_ftrace *rec; | |
6056 | struct dyn_ftrace key; | |
aba4b5c2 | 6057 | struct ftrace_mod_map *mod_map = NULL; |
8715b108 JF |
6058 | struct ftrace_init_func *func, *func_next; |
6059 | struct list_head clear_hash; | |
42c269c8 SRV |
6060 | int order; |
6061 | ||
8715b108 JF |
6062 | INIT_LIST_HEAD(&clear_hash); |
6063 | ||
42c269c8 SRV |
6064 | key.ip = start; |
6065 | key.flags = end; /* overload flags, as it is unsigned long */ | |
6066 | ||
6067 | mutex_lock(&ftrace_lock); | |
6068 | ||
aba4b5c2 SRV |
6069 | /* |
6070 | * If we are freeing module init memory, then check if | |
6071 | * any tracer is active. If so, we need to save a mapping from |
6072 | * the addresses being freed to the module functions they contained. |
6073 | */ | |
6074 | if (mod && ftrace_ops_list != &ftrace_list_end) | |
6075 | mod_map = allocate_ftrace_mod_map(mod, start, end); | |
6076 | ||
42c269c8 SRV |
6077 | for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { |
6078 | if (end < pg->records[0].ip || | |
6079 | start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) | |
6080 | continue; | |
6081 | again: | |
6082 | rec = bsearch(&key, pg->records, pg->index, | |
6083 | sizeof(struct dyn_ftrace), | |
6084 | ftrace_cmp_recs); | |
6085 | if (!rec) | |
6086 | continue; | |
aba4b5c2 | 6087 | |
8715b108 JF |
6088 | /* rec will be cleared from hashes after ftrace_lock unlock */ |
6089 | add_to_clear_hash_list(&clear_hash, rec); | |
6090 | ||
aba4b5c2 SRV |
6091 | if (mod_map) |
6092 | save_ftrace_mod_rec(mod_map, rec); | |
6093 | ||
42c269c8 | 6094 | pg->index--; |
4ec78467 | 6095 | ftrace_update_tot_cnt--; |
42c269c8 SRV |
6096 | if (!pg->index) { |
6097 | *last_pg = pg->next; | |
6098 | order = get_count_order(pg->size / ENTRIES_PER_PAGE); | |
6099 | free_pages((unsigned long)pg->records, order); | |
6100 | kfree(pg); | |
6101 | pg = container_of(last_pg, struct ftrace_page, next); | |
6102 | if (!(*last_pg)) | |
6103 | ftrace_pages = pg; | |
6104 | continue; | |
6105 | } | |
6106 | memmove(rec, rec + 1, | |
6107 | (pg->index - (rec - pg->records)) * sizeof(*rec)); | |
6108 | /* More than one function may be in this block */ | |
6109 | goto again; | |
6110 | } | |
6111 | mutex_unlock(&ftrace_lock); | |
8715b108 JF |
6112 | |
6113 | list_for_each_entry_safe(func, func_next, &clear_hash, list) { | |
6114 | clear_func_from_hashes(func); | |
6115 | kfree(func); | |
6116 | } | |
42c269c8 SRV |
6117 | } |
6118 | ||
6cafbe15 SRV |
6119 | void __init ftrace_free_init_mem(void) |
6120 | { | |
6121 | void *start = (void *)(&__init_begin); | |
6122 | void *end = (void *)(&__init_end); | |
6123 | ||
aba4b5c2 | 6124 | ftrace_free_mem(NULL, start, end); |
42c269c8 SRV |
6125 | } |
6126 | ||
68bf21aa SR |
6127 | void __init ftrace_init(void) |
6128 | { | |
1dc43cf0 JS |
6129 | extern unsigned long __start_mcount_loc[]; |
6130 | extern unsigned long __stop_mcount_loc[]; | |
3a36cb11 | 6131 | unsigned long count, flags; |
68bf21aa SR |
6132 | int ret; |
6133 | ||
68bf21aa | 6134 | local_irq_save(flags); |
3a36cb11 | 6135 | ret = ftrace_dyn_arch_init(); |
68bf21aa | 6136 | local_irq_restore(flags); |
af64a7cb | 6137 | if (ret) |
68bf21aa SR |
6138 | goto failed; |
6139 | ||
6140 | count = __stop_mcount_loc - __start_mcount_loc; | |
c867ccd8 JS |
6141 | if (!count) { |
6142 | pr_info("ftrace: No functions to be traced?\n"); | |
68bf21aa | 6143 | goto failed; |
c867ccd8 JS |
6144 | } |
6145 | ||
6146 | pr_info("ftrace: allocating %ld entries in %ld pages\n", | |
6147 | count, count / ENTRIES_PER_PAGE + 1); | |
68bf21aa SR |
6148 | |
6149 | last_ftrace_enabled = ftrace_enabled = 1; | |
6150 | ||
5cb084bb | 6151 | ret = ftrace_process_locs(NULL, |
31e88909 | 6152 | __start_mcount_loc, |
68bf21aa SR |
6153 | __stop_mcount_loc); |
6154 | ||
2af15d6a SR |
6155 | set_ftrace_early_filters(); |
6156 | ||
68bf21aa SR |
6157 | return; |
6158 | failed: | |
6159 | ftrace_disabled = 1; | |
6160 | } | |
68bf21aa | 6161 | |
f3bea491 SRRH |
6162 | /* Do nothing if arch does not support this */ |
6163 | void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) | |
6164 | { | |
6165 | } | |
6166 | ||
6167 | static void ftrace_update_trampoline(struct ftrace_ops *ops) | |
6168 | { | |
f3bea491 SRRH |
6169 | arch_ftrace_update_trampoline(ops); |
6170 | } | |
6171 | ||
04ec7bb6 SRV |
6172 | void ftrace_init_trace_array(struct trace_array *tr) |
6173 | { | |
6174 | INIT_LIST_HEAD(&tr->func_probes); | |
673feb9d SRV |
6175 | INIT_LIST_HEAD(&tr->mod_trace); |
6176 | INIT_LIST_HEAD(&tr->mod_notrace); | |
04ec7bb6 | 6177 | } |
3d083395 | 6178 | #else |
0b6e4d56 | 6179 | |
2b499381 | 6180 | static struct ftrace_ops global_ops = { |
bd69c30b | 6181 | .func = ftrace_stub, |
e3eea140 SRRH |
6182 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
6183 | FTRACE_OPS_FL_INITIALIZED | | |
6184 | FTRACE_OPS_FL_PID, | |
bd69c30b SR |
6185 | }; |
6186 | ||
0b6e4d56 FW |
6187 | static int __init ftrace_nodyn_init(void) |
6188 | { | |
6189 | ftrace_enabled = 1; | |
6190 | return 0; | |
6191 | } | |
6f415672 | 6192 | core_initcall(ftrace_nodyn_init); |
0b6e4d56 | 6193 | |
8434dc93 | 6194 | static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } |
df4fc315 | 6195 | static inline void ftrace_startup_enable(int command) { } |
e1effa01 | 6196 | static inline void ftrace_startup_all(int command) { } |
5a45cfe1 | 6197 | /* Keep as macros so we do not need to define the commands */ |
8a56d776 SRRH |
6198 | # define ftrace_startup(ops, command) \ |
6199 | ({ \ | |
6200 | int ___ret = __register_ftrace_function(ops); \ | |
6201 | if (!___ret) \ | |
6202 | (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ | |
6203 | ___ret; \ | |
3b6cfdb1 | 6204 | }) |
1fcc1553 SRRH |
6205 | # define ftrace_shutdown(ops, command) \ |
6206 | ({ \ | |
6207 | int ___ret = __unregister_ftrace_function(ops); \ | |
6208 | if (!___ret) \ | |
6209 | (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ | |
6210 | ___ret; \ | |
6211 | }) | |
8a56d776 | 6212 | |
c7aafc54 IM |
6213 | # define ftrace_startup_sysctl() do { } while (0) |
6214 | # define ftrace_shutdown_sysctl() do { } while (0) | |
b848914c SR |
6215 | |
6216 | static inline int | |
195a8afc | 6217 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
b848914c SR |
6218 | { |
6219 | return 1; | |
6220 | } | |
6221 | ||
f3bea491 SRRH |
6222 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
6223 | { | |
6224 | } | |
6225 | ||
3d083395 SR |
6226 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
6227 | ||
4104d326 SRRH |
6228 | __init void ftrace_init_global_array_ops(struct trace_array *tr) |
6229 | { | |
6230 | tr->ops = &global_ops; | |
6231 | tr->ops->private = tr; | |
04ec7bb6 | 6232 | ftrace_init_trace_array(tr); |
4104d326 SRRH |
6233 | } |
6234 | ||
6235 | void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) | |
6236 | { | |
6237 | /* If we filter on pids, update to use the pid function */ | |
6238 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { | |
6239 | if (WARN_ON(tr->ops->func != ftrace_stub)) | |
6240 | printk("ftrace ops had %pS for function\n", | |
6241 | tr->ops->func); | |
4104d326 SRRH |
6242 | } |
6243 | tr->ops->func = func; | |
6244 | tr->ops->private = tr; | |
6245 | } | |
6246 | ||
6247 | void ftrace_reset_array_ops(struct trace_array *tr) | |
6248 | { | |
6249 | tr->ops->func = ftrace_stub; | |
6250 | } | |
6251 | ||
2f5f6ad9 SR |
6252 | static inline void |
6253 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |
a1e2e31d | 6254 | struct ftrace_ops *ignored, struct pt_regs *regs) |
b848914c | 6255 | { |
cdbe61bf | 6256 | struct ftrace_ops *op; |
edc15caf | 6257 | int bit; |
b848914c | 6258 | |
edc15caf SR |
6259 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); |
6260 | if (bit < 0) | |
6261 | return; | |
b1cff0ad | 6262 | |
cdbe61bf SR |
6263 | /* |
6264 | * Some of the ops may be dynamically allocated; | |
6265 | * they must be freed after a synchronize_sched(). | |
6266 | */ | |
6267 | preempt_disable_notrace(); | |
ba27f2bc | 6268 | |
0a016409 | 6269 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
ba27f2bc SRRH |
6270 | /* |
6271 | * Check the following for each ops before calling their func: | |
6272 | * if the RCU flag is set, then rcu_is_watching() must be true, | |
6273 | * otherwise test if the ip matches the ops filter. | |
6276 | * | |
6277 | * If any of the above fails then the op->func() is not executed. | |
6278 | */ | |
6279 | if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && | |
ba27f2bc | 6280 | ftrace_ops_test(op, ip, regs)) { |
1d48d596 SRRH |
6281 | if (FTRACE_WARN_ON(!op->func)) { |
6282 | pr_warn("op=%p %pS\n", op, op); | |
4104d326 SRRH |
6283 | goto out; |
6284 | } | |
a1e2e31d | 6285 | op->func(ip, parent_ip, op, regs); |
4104d326 | 6286 | } |
0a016409 | 6287 | } while_for_each_ftrace_op(op); |
4104d326 | 6288 | out: |
cdbe61bf | 6289 | preempt_enable_notrace(); |
edc15caf | 6290 | trace_clear_recursion(bit); |
b848914c SR |
6291 | } |
6292 | ||
2f5f6ad9 SR |
6293 | /* |
6294 | * Some archs only support passing ip and parent_ip. Even though | |
6295 | * the list function ignores the op parameter, we do not want any | |
6296 | * C side effects, where a function is called without the caller | |
6297 | * sending a third parameter. | |
a1e2e31d SR |
6298 | * Archs must add support for regs and ftrace_ops at the same time: | |
6299 | * if they support ftrace_ops, it is assumed they support regs. | |
6300 | * If callbacks want to use regs, they must either check for regs | |
06aeaaea MH |
6301 | * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. |
6302 | * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. | |
a1e2e31d | 6303 | * An architecture can pass partial regs with ftrace_ops and still |
b8ec330a | 6304 | * set the ARCH_SUPPORTS_FTRACE_OPS. |
2f5f6ad9 SR |
6305 | */ |
6306 | #if ARCH_SUPPORTS_FTRACE_OPS | |
6307 | static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |
a1e2e31d | 6308 | struct ftrace_ops *op, struct pt_regs *regs) |
2f5f6ad9 | 6309 | { |
a1e2e31d | 6310 | __ftrace_ops_list_func(ip, parent_ip, NULL, regs); |
2f5f6ad9 SR |
6311 | } |
6312 | #else | |
6313 | static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) | |
6314 | { | |
a1e2e31d | 6315 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); |
2f5f6ad9 SR |
6316 | } |
6317 | #endif | |
6318 | ||
f1ff6348 SRRH |
6319 | /* |
6320 | * If there's only one function registered but it does not supply | |
c68c0fa2 SRRH |
6321 | * its own recursion protection or needs RCU protection, then | |
6322 | * this function will be called by the mcount trampoline. | |
f1ff6348 | 6323 | */ |
c68c0fa2 | 6324 | static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, |
f1ff6348 SRRH |
6325 | struct ftrace_ops *op, struct pt_regs *regs) |
6326 | { | |
6327 | int bit; | |
6328 | ||
c68c0fa2 SRRH |
6329 | if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) |
6330 | return; | |
6331 | ||
f1ff6348 SRRH |
6332 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); |
6333 | if (bit < 0) | |
6334 | return; | |
6335 | ||
c68c0fa2 | 6336 | preempt_disable_notrace(); |
f1ff6348 | 6337 | |
b3a88803 | 6338 | op->func(ip, parent_ip, op, regs); |
c68c0fa2 SRRH |
6339 | |
6340 | preempt_enable_notrace(); | |
f1ff6348 SRRH |
6341 | trace_clear_recursion(bit); |
6342 | } | |
6343 | ||
87354059 SRRH |
6344 | /** |
6345 | * ftrace_ops_get_func - get the function a trampoline should call | |
6346 | * @ops: the ops to get the function for | |
6347 | * | |
6348 | * Normally the mcount trampoline will call the ops->func, but there | |
6349 | * are times that it should not. For example, if the ops does not | |
6350 | * have its own recursion protection, then it should call the | |
3a150df9 | 6351 | * ftrace_ops_assist_func() instead. |
87354059 SRRH |
6352 | * |
6353 | * Returns the function that the trampoline should call for @ops. | |
6354 | */ | |
6355 | ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) | |
6356 | { | |
87354059 | 6357 | /* |
c68c0fa2 SRRH |
6358 | * If the function does not handle recursion or needs to be | |
6359 | * RCU safe, then we need to call the assist handler. | |
87354059 | 6360 | */ |
c68c0fa2 | 6361 | if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) || |
b3a88803 | 6362 | ops->flags & FTRACE_OPS_FL_RCU) |
c68c0fa2 | 6363 | return ftrace_ops_assist_func; |
87354059 SRRH |
6364 | |
6365 | return ops->func; | |
6366 | } | |
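/*
 * update_ftrace_function() uses the helper above to decide whether
 * the trampoline may jump straight to ops->func or must go through
 * ftrace_ops_assist_func().
 */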
6367 | ||
345ddcc8 SRRH |
6368 | static void |
6369 | ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, | |
6370 | struct task_struct *prev, struct task_struct *next) | |
978f3a45 | 6371 | { |
345ddcc8 SRRH |
6372 | struct trace_array *tr = data; |
6373 | struct trace_pid_list *pid_list; | |
978f3a45 | 6374 | |
345ddcc8 | 6375 | pid_list = rcu_dereference_sched(tr->function_pids); |
e32d8956 | 6376 | |
345ddcc8 SRRH |
6377 | this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, |
6378 | trace_ignore_this_task(pid_list, next)); | |
978f3a45 SR |
6379 | } |
6380 | ||
1e10486f NK |
6381 | static void |
6382 | ftrace_pid_follow_sched_process_fork(void *data, | |
6383 | struct task_struct *self, | |
6384 | struct task_struct *task) | |
6385 | { | |
6386 | struct trace_pid_list *pid_list; | |
6387 | struct trace_array *tr = data; | |
6388 | ||
6389 | pid_list = rcu_dereference_sched(tr->function_pids); | |
6390 | trace_filter_add_remove_task(pid_list, self, task); | |
6391 | } | |
6392 | ||
6393 | static void | |
6394 | ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) | |
6395 | { | |
6396 | struct trace_pid_list *pid_list; | |
6397 | struct trace_array *tr = data; | |
6398 | ||
6399 | pid_list = rcu_dereference_sched(tr->function_pids); | |
6400 | trace_filter_add_remove_task(pid_list, NULL, task); | |
6401 | } | |
6402 | ||
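/*
 * Toggled by the "function-fork" trace option: when enabled, children
 * of filtered tasks are added to the pid list on fork, and exiting
 * tasks are removed, so the filter follows the task tree.
 */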
6403 | void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) | |
6404 | { | |
6405 | if (enable) { | |
6406 | register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, | |
6407 | tr); | |
6408 | register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, | |
6409 | tr); | |
6410 | } else { | |
6411 | unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, | |
6412 | tr); | |
6413 | unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, | |
6414 | tr); | |
6415 | } | |
6416 | } | |
6417 | ||
345ddcc8 | 6418 | static void clear_ftrace_pids(struct trace_array *tr) |
e32d8956 | 6419 | { |
345ddcc8 SRRH |
6420 | struct trace_pid_list *pid_list; |
6421 | int cpu; | |
e32d8956 | 6422 | |
345ddcc8 SRRH |
6423 | pid_list = rcu_dereference_protected(tr->function_pids, |
6424 | lockdep_is_held(&ftrace_lock)); | |
6425 | if (!pid_list) | |
6426 | return; | |
229c4ef8 | 6427 | |
345ddcc8 | 6428 | unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); |
e32d8956 | 6429 | |
345ddcc8 SRRH |
6430 | for_each_possible_cpu(cpu) |
6431 | per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false; | |
978f3a45 | 6432 | |
345ddcc8 | 6433 | rcu_assign_pointer(tr->function_pids, NULL); |
978f3a45 | 6434 | |
345ddcc8 SRRH |
6435 | /* Wait until no user is still using pid filtering */ | |
6436 | synchronize_sched(); | |
e32d8956 | 6437 | |
345ddcc8 | 6438 | trace_free_pid_list(pid_list); |
e32d8956 SR |
6439 | } |
6440 | ||
d879d0b8 NK |
6441 | void ftrace_clear_pids(struct trace_array *tr) |
6442 | { | |
6443 | mutex_lock(&ftrace_lock); | |
6444 | ||
6445 | clear_ftrace_pids(tr); | |
6446 | ||
6447 | mutex_unlock(&ftrace_lock); | |
6448 | } | |
6449 | ||
345ddcc8 | 6450 | static void ftrace_pid_reset(struct trace_array *tr) |
df4fc315 | 6451 | { |
756d17ee | 6452 | mutex_lock(&ftrace_lock); |
345ddcc8 | 6453 | clear_ftrace_pids(tr); |
978f3a45 | 6454 | |
756d17ee | 6455 | ftrace_update_pid_func(); |
e1effa01 | 6456 | ftrace_startup_all(0); |
756d17ee | 6457 | |
6458 | mutex_unlock(&ftrace_lock); | |
756d17ee | 6459 | } |
6460 | ||
345ddcc8 SRRH |
6461 | /* Greater than any max PID */ |
6462 | #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1) | |
df4fc315 | 6463 | |
756d17ee | 6464 | static void *fpid_start(struct seq_file *m, loff_t *pos) |
345ddcc8 | 6465 | __acquires(RCU) |
756d17ee | 6466 | { |
345ddcc8 SRRH |
6467 | struct trace_pid_list *pid_list; |
6468 | struct trace_array *tr = m->private; | |
6469 | ||
756d17ee | 6470 | mutex_lock(&ftrace_lock); |
345ddcc8 SRRH |
6471 | rcu_read_lock_sched(); |
6472 | ||
6473 | pid_list = rcu_dereference_sched(tr->function_pids); | |
756d17ee | 6474 | |
345ddcc8 SRRH |
6475 | if (!pid_list) |
6476 | return !(*pos) ? FTRACE_NO_PIDS : NULL; | |
756d17ee | 6477 | |
345ddcc8 | 6478 | return trace_pid_start(pid_list, pos); |
756d17ee | 6479 | } |
6480 | ||
6481 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) | |
6482 | { | |
345ddcc8 SRRH |
6483 | struct trace_array *tr = m->private; |
6484 | struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); | |
6485 | ||
6486 | if (v == FTRACE_NO_PIDS) | |
756d17ee | 6487 | return NULL; |
6488 | ||
345ddcc8 | 6489 | return trace_pid_next(pid_list, v, pos); |
756d17ee | 6490 | } |
6491 | ||
6492 | static void fpid_stop(struct seq_file *m, void *p) | |
345ddcc8 | 6493 | __releases(RCU) |
756d17ee | 6494 | { |
345ddcc8 | 6495 | rcu_read_unlock_sched(); |
756d17ee | 6496 | mutex_unlock(&ftrace_lock); |
6497 | } | |
6498 | ||
6499 | static int fpid_show(struct seq_file *m, void *v) | |
6500 | { | |
345ddcc8 | 6501 | if (v == FTRACE_NO_PIDS) { |
fa6f0cc7 | 6502 | seq_puts(m, "no pid\n"); |
756d17ee | 6503 | return 0; |
6504 | } | |
6505 | ||
345ddcc8 | 6506 | return trace_pid_show(m, v); |
756d17ee | 6507 | } |
6508 | ||
6509 | static const struct seq_operations ftrace_pid_sops = { | |
6510 | .start = fpid_start, | |
6511 | .next = fpid_next, | |
6512 | .stop = fpid_stop, | |
6513 | .show = fpid_show, | |
6514 | }; | |
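/*
 * Standard seq_file iteration for "set_ftrace_pid": fpid_start()
 * takes ftrace_lock plus the RCU-sched read lock, fpid_next() and
 * fpid_show() walk the pid list, and fpid_stop() releases both in
 * reverse order once the read is done.
 */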
6515 | ||
6516 | static int | |
6517 | ftrace_pid_open(struct inode *inode, struct file *file) | |
6518 | { | |
345ddcc8 SRRH |
6519 | struct trace_array *tr = inode->i_private; |
6520 | struct seq_file *m; | |
756d17ee | 6521 | int ret = 0; |
6522 | ||
345ddcc8 SRRH |
6523 | if (trace_array_get(tr) < 0) |
6524 | return -ENODEV; | |
6525 | ||
756d17ee | 6526 | if ((file->f_mode & FMODE_WRITE) && |
6527 | (file->f_flags & O_TRUNC)) | |
345ddcc8 | 6528 | ftrace_pid_reset(tr); |
756d17ee | 6529 | |
345ddcc8 SRRH |
6530 | ret = seq_open(file, &ftrace_pid_sops); |
6531 | if (ret < 0) { | |
6532 | trace_array_put(tr); | |
6533 | } else { | |
6534 | m = file->private_data; | |
6535 | /* copy tr over to seq ops */ | |
6536 | m->private = tr; | |
6537 | } | |
756d17ee | 6538 | |
6539 | return ret; | |
6540 | } | |
6541 | ||
345ddcc8 SRRH |
6542 | static void ignore_task_cpu(void *data) |
6543 | { | |
6544 | struct trace_array *tr = data; | |
6545 | struct trace_pid_list *pid_list; | |
6546 | ||
6547 | /* | |
6548 | * This function is called by on_each_cpu() while the | |
6549 | * ftrace_lock is held. | |
6550 | */ | |
6551 | pid_list = rcu_dereference_protected(tr->function_pids, | |
6552 | mutex_is_locked(&ftrace_lock)); | |
6553 | ||
6554 | this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, | |
6555 | trace_ignore_this_task(pid_list, current)); | |
6556 | } | |
6557 | ||
df4fc315 SR |
6558 | static ssize_t |
6559 | ftrace_pid_write(struct file *filp, const char __user *ubuf, | |
6560 | size_t cnt, loff_t *ppos) | |
6561 | { | |
345ddcc8 SRRH |
6562 | struct seq_file *m = filp->private_data; |
6563 | struct trace_array *tr = m->private; | |
6564 | struct trace_pid_list *filtered_pids = NULL; | |
6565 | struct trace_pid_list *pid_list; | |
6566 | ssize_t ret; | |
df4fc315 | 6567 | |
345ddcc8 SRRH |
6568 | if (!cnt) |
6569 | return 0; | |
6570 | ||
6571 | mutex_lock(&ftrace_lock); | |
6572 | ||
6573 | filtered_pids = rcu_dereference_protected(tr->function_pids, | |
6574 | lockdep_is_held(&ftrace_lock)); | |
6575 | ||
6576 | ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); | |
6577 | if (ret < 0) | |
6578 | goto out; | |
df4fc315 | 6579 | |
345ddcc8 | 6580 | rcu_assign_pointer(tr->function_pids, pid_list); |
df4fc315 | 6581 | |
345ddcc8 SRRH |
6582 | if (filtered_pids) { |
6583 | synchronize_sched(); | |
6584 | trace_free_pid_list(filtered_pids); | |
6585 | } else if (pid_list) { | |
6586 | /* Register a probe to set whether to ignore the tracing of a task */ | |
6587 | register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); | |
6588 | } | |
df4fc315 | 6589 | |
756d17ee | 6590 | /* |
345ddcc8 SRRH |
6591 | * Pid filtering is applied at task switch, but we also have to | |
6592 | * update the per-cpu flag for tasks that are already running. | |
6593 | * Always do this in case a pid was appended or removed. | |
756d17ee | 6594 | */ |
345ddcc8 | 6595 | on_each_cpu(ignore_task_cpu, tr, 1); |
756d17ee | 6596 | |
345ddcc8 SRRH |
6597 | ftrace_update_pid_func(); |
6598 | ftrace_startup_all(0); | |
6599 | out: | |
6600 | mutex_unlock(&ftrace_lock); | |
df4fc315 | 6601 | |
345ddcc8 SRRH |
6602 | if (ret > 0) |
6603 | *ppos += ret; | |
df4fc315 | 6604 | |
345ddcc8 | 6605 | return ret; |
756d17ee | 6606 | } |
df4fc315 | 6607 | |
756d17ee | 6608 | static int |
6609 | ftrace_pid_release(struct inode *inode, struct file *file) | |
6610 | { | |
345ddcc8 | 6611 | struct trace_array *tr = inode->i_private; |
df4fc315 | 6612 | |
345ddcc8 SRRH |
6613 | trace_array_put(tr); |
6614 | ||
6615 | return seq_release(inode, file); | |
df4fc315 SR |
6616 | } |
6617 | ||
5e2336a0 | 6618 | static const struct file_operations ftrace_pid_fops = { |
756d17ee | 6619 | .open = ftrace_pid_open, |
6620 | .write = ftrace_pid_write, | |
6621 | .read = seq_read, | |
098c879e | 6622 | .llseek = tracing_lseek, |
756d17ee | 6623 | .release = ftrace_pid_release, |
df4fc315 SR |
6624 | }; |
6625 | ||
345ddcc8 | 6626 | void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) |
df4fc315 | 6627 | { |
5452af66 | 6628 | trace_create_file("set_ftrace_pid", 0644, d_tracer, |
345ddcc8 | 6629 | tr, &ftrace_pid_fops); |
df4fc315 | 6630 | } |
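/*
 * The file created above appears as <tracefs>/set_ftrace_pid (e.g.
 * /sys/kernel/debug/tracing/set_ftrace_pid). Writing PIDs into it
 * limits function tracing to those tasks, while opening it with
 * O_TRUNC ("echo > set_ftrace_pid") clears the list via
 * ftrace_pid_reset().
 */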
df4fc315 | 6631 | |
501c2375 SRRH |
6632 | void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, |
6633 | struct dentry *d_tracer) | |
6634 | { | |
6635 | /* Only the top level directory has the dyn_tracefs and profile */ | |
6636 | WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); | |
6637 | ||
6638 | ftrace_init_dyn_tracefs(d_tracer); | |
6639 | ftrace_profile_tracefs(d_tracer); | |
6640 | } | |
6641 | ||
a2bb6a3d | 6642 | /** |
81adbdc0 | 6643 | * ftrace_kill - kill ftrace |
a2bb6a3d SR |
6644 | * |
6645 | * This function should be used by panic code. It stops ftrace | |
6646 | * but in a not so nice way: ftrace_disabled is set for good, so | |
6647 | * tracing cannot be brought back without a reboot. | |
6648 | */ | |
81adbdc0 | 6649 | void ftrace_kill(void) |
a2bb6a3d SR |
6650 | { |
6651 | ftrace_disabled = 1; | |
6652 | ftrace_enabled = 0; | |
5ccba64a | 6653 | ftrace_trace_function = ftrace_stub; |
a2bb6a3d SR |
6654 | } |
6655 | ||
e0a413f6 SR |
6656 | /** |
6657 | * ftrace_is_dead - Test if ftrace is dead or not. | |
6658 | */ | |
6659 | int ftrace_is_dead(void) | |
6660 | { | |
6661 | return ftrace_disabled; | |
6662 | } | |
6663 | ||
16444a8a | 6664 | /** |
3d083395 SR |
6665 | * register_ftrace_function - register a function for profiling |
6666 | * @ops: ops structure that holds the function for profiling. |
16444a8a | 6667 | * |
3d083395 SR |
6668 | * Register a function to be called by all traced functions in |
6669 | * the kernel. | |
6670 | * | |
6671 | * Note: @ops->func and all the functions it calls must be labeled | |
6672 | * with "notrace", otherwise it will go into a | |
6673 | * recursive loop. | |
16444a8a | 6674 | */ |
3d083395 | 6675 | int register_ftrace_function(struct ftrace_ops *ops) |
16444a8a | 6676 | { |
45a4a237 | 6677 | int ret = -1; |
4eebcc81 | 6678 | |
f04f24fb MH |
6679 | ftrace_ops_init(ops); |
6680 | ||
e6ea44e9 | 6681 | mutex_lock(&ftrace_lock); |
e7d3737e | 6682 | |
8a56d776 | 6683 | ret = ftrace_startup(ops, 0); |
b848914c | 6684 | |
e6ea44e9 | 6685 | mutex_unlock(&ftrace_lock); |
8d240dd8 | 6686 | |
b0fc494f | 6687 | return ret; |
3d083395 | 6688 | } |
cdbe61bf | 6689 | EXPORT_SYMBOL_GPL(register_ftrace_function); |
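/*
 * A minimal sketch of using the API above from a module (the
 * my_callback/my_ops names are illustrative, not part of this file).
 * The callback must be notrace, per the kernel-doc note; leaving
 * FTRACE_OPS_FL_RECURSION_SAFE unset means ftrace_ops_get_func()
 * above routes the call through ftrace_ops_assist_func() for
 * recursion protection:
 *
 *	static notrace void my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		// runs for every traced function; keep this path short
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	// register_ftrace_function(&my_ops) starts the callback,
 *	// unregister_ftrace_function(&my_ops) stops it.
 */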
3d083395 SR |
6690 | |
6691 | /** | |
32632920 | 6692 | * unregister_ftrace_function - unregister a function for profiling. |
3d083395 SR |
6693 | * @ops: ops structure that holds the function to unregister | |
6694 | * | |
6695 | * Unregister a function that was added to be called by ftrace profiling. | |
6696 | */ | |
6697 | int unregister_ftrace_function(struct ftrace_ops *ops) | |
6698 | { | |
6699 | int ret; | |
6700 | ||
e6ea44e9 | 6701 | mutex_lock(&ftrace_lock); |
8a56d776 | 6702 | ret = ftrace_shutdown(ops, 0); |
e6ea44e9 | 6703 | mutex_unlock(&ftrace_lock); |
b0fc494f SR |
6704 | |
6705 | return ret; | |
6706 | } | |
cdbe61bf | 6707 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); |
b0fc494f | 6708 | |
e309b41d | 6709 | int |
b0fc494f | 6710 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
8d65af78 | 6711 | void __user *buffer, size_t *lenp, |
b0fc494f SR |
6712 | loff_t *ppos) |
6713 | { | |
45a4a237 | 6714 | int ret = -ENODEV; |
4eebcc81 | 6715 | |
e6ea44e9 | 6716 | mutex_lock(&ftrace_lock); |
b0fc494f | 6717 | |
45a4a237 SR |
6718 | if (unlikely(ftrace_disabled)) |
6719 | goto out; | |
6720 | ||
6721 | ret = proc_dointvec(table, write, buffer, lenp, ppos); | |
b0fc494f | 6722 | |
a32c7765 | 6723 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) |
b0fc494f SR |
6724 | goto out; |
6725 | ||
a32c7765 | 6726 | last_ftrace_enabled = !!ftrace_enabled; |
b0fc494f SR |
6727 | |
6728 | if (ftrace_enabled) { | |
6729 | ||
b0fc494f | 6730 | /* we are starting ftrace again */ |
f86f4180 CZ |
6731 | if (rcu_dereference_protected(ftrace_ops_list, |
6732 | lockdep_is_held(&ftrace_lock)) != &ftrace_list_end) | |
5000c418 | 6733 | update_ftrace_function(); |
b0fc494f | 6734 | |
524a3868 SRRH |
6735 | ftrace_startup_sysctl(); |
6736 | ||
b0fc494f SR |
6737 | } else { |
6738 | /* stopping ftrace calls (just send to ftrace_stub) */ | |
6739 | ftrace_trace_function = ftrace_stub; | |
6740 | ||
6741 | ftrace_shutdown_sysctl(); | |
6742 | } | |
6743 | ||
6744 | out: | |
e6ea44e9 | 6745 | mutex_unlock(&ftrace_lock); |
3d083395 | 6746 | return ret; |
16444a8a | 6747 | } |
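/*
 * The handler above backs /proc/sys/kernel/ftrace_enabled; e.g.
 * "echo 0 > /proc/sys/kernel/ftrace_enabled" sends all calls to
 * ftrace_stub until the knob is written back to 1.
 */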
f17845e5 | 6748 | |
fb52607a | 6749 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
e7d3737e | 6750 | |
5f151b24 SRRH |
6751 | static struct ftrace_ops graph_ops = { |
6752 | .func = ftrace_stub, | |
6753 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | | |
6754 | FTRACE_OPS_FL_INITIALIZED | | |
e3eea140 | 6755 | FTRACE_OPS_FL_PID | |
5f151b24 SRRH |
6756 | FTRACE_OPS_FL_STUB, |
6757 | #ifdef FTRACE_GRAPH_TRAMP_ADDR | |
6758 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, | |
aec0be2d | 6759 | /* trampoline_size is only needed for dynamically allocated tramps */ |
5f151b24 SRRH |
6760 | #endif |
6761 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | |
6762 | }; | |
6763 | ||
55577204 SRRH |
6764 | void ftrace_graph_sleep_time_control(bool enable) |
6765 | { | |
6766 | fgraph_sleep_time = enable; | |
6767 | } | |
6768 | ||
6769 | void ftrace_graph_graph_time_control(bool enable) | |
6770 | { | |
6771 | fgraph_graph_time = enable; | |
6772 | } | |
6773 | ||
e49dc19c SR |
6774 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
6775 | { | |
6776 | return 0; | |
6777 | } | |
6778 | ||
287b6e68 FW |
6779 | /* The callbacks that hook a function */ |
6780 | trace_func_graph_ret_t ftrace_graph_return = | |
6781 | (trace_func_graph_ret_t)ftrace_stub; | |
e49dc19c | 6782 | trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; |
23a8e844 | 6783 | static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub; |
f201ae23 FW |
6784 | |
6785 | /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */ | |
6786 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) | |
6787 | { | |
6788 | int i; | |
6789 | int ret = 0; | |
f201ae23 FW |
6790 | int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE; |
6791 | struct task_struct *g, *t; | |
6792 | ||
6793 | for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) { | |
6da2ec56 KC |
6794 | ret_stack_list[i] = |
6795 | kmalloc_array(FTRACE_RETFUNC_DEPTH, | |
6796 | sizeof(struct ftrace_ret_stack), | |
6797 | GFP_KERNEL); | |
f201ae23 FW |
6798 | if (!ret_stack_list[i]) { |
6799 | start = 0; | |
6800 | end = i; | |
6801 | ret = -ENOMEM; | |
6802 | goto free; | |
6803 | } | |
6804 | } | |
6805 | ||
6112a300 | 6806 | read_lock(&tasklist_lock); |
f201ae23 FW |
6807 | do_each_thread(g, t) { |
6808 | if (start == end) { | |
6809 | ret = -EAGAIN; | |
6810 | goto unlock; | |
6811 | } | |
6812 | ||
6813 | if (t->ret_stack == NULL) { | |
380c4b14 | 6814 | atomic_set(&t->tracing_graph_pause, 0); |
f201ae23 | 6815 | atomic_set(&t->trace_overrun, 0); |
26c01624 | 6816 | t->curr_ret_stack = -1; |
39eb456d | 6817 | t->curr_ret_depth = -1; |
26c01624 SR |
6818 | /* Make sure the tasks see the -1 first: */ |
6819 | smp_wmb(); | |
6820 | t->ret_stack = ret_stack_list[start++]; | |
f201ae23 FW |
6821 | } |
6822 | } while_each_thread(g, t); | |
6823 | ||
6824 | unlock: | |
6112a300 | 6825 | read_unlock(&tasklist_lock); |
f201ae23 FW |
6826 | free: |
6827 | for (i = start; i < end; i++) | |
6828 | kfree(ret_stack_list[i]); | |
6829 | return ret; | |
6830 | } | |
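/*
 * -EAGAIN above means more tasks still lacked a ret_stack than one
 * FTRACE_RETSTACK_ALLOC_SIZE batch could cover; start_graph_tracing()
 * below simply calls this again until a whole pass succeeds.
 */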
6831 | ||
8aef2d28 | 6832 | static void |
c73464b1 | 6833 | ftrace_graph_probe_sched_switch(void *ignore, bool preempt, |
38516ab5 | 6834 | struct task_struct *prev, struct task_struct *next) |
8aef2d28 SR |
6835 | { |
6836 | unsigned long long timestamp; | |
6837 | int index; | |
6838 | ||
be6f164a SR |
6839 | /* |
6840 | * Does the user want to count the time a function was asleep? | |
6841 | * If so, do not update the time stamps. | |
6842 | */ | |
55577204 | 6843 | if (fgraph_sleep_time) |
be6f164a SR |
6844 | return; |
6845 | ||
8aef2d28 SR |
6846 | timestamp = trace_clock_local(); |
6847 | ||
6848 | prev->ftrace_timestamp = timestamp; | |
6849 | ||
6850 | /* only process tasks that we timestamped */ | |
6851 | if (!next->ftrace_timestamp) | |
6852 | return; | |
6853 | ||
6854 | /* | |
6855 | * Update all the counters in next to make up for the | |
6856 | * time next was sleeping. | |
6857 | */ | |
6858 | timestamp -= next->ftrace_timestamp; | |
6859 | ||
6860 | for (index = next->curr_ret_stack; index >= 0; index--) | |
6861 | next->ret_stack[index].calltime += timestamp; | |
6862 | } | |
6863 | ||
f201ae23 | 6864 | /* Allocate a return stack for each task */ |
fb52607a | 6865 | static int start_graph_tracing(void) |
f201ae23 FW |
6866 | { |
6867 | struct ftrace_ret_stack **ret_stack_list; | |
5b058bcd | 6868 | int ret, cpu; |
f201ae23 | 6869 | |
6da2ec56 KC |
6870 | ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE, |
6871 | sizeof(struct ftrace_ret_stack *), | |
6872 | GFP_KERNEL); | |
f201ae23 FW |
6873 | |
6874 | if (!ret_stack_list) | |
6875 | return -ENOMEM; | |
6876 | ||
5b058bcd | 6877 | /* The boot CPU idle task's ret_stack will never be freed */ | |
179c498a SR |
6878 | for_each_online_cpu(cpu) { |
6879 | if (!idle_task(cpu)->ret_stack) | |
868baf07 | 6880 | ftrace_graph_init_idle_task(idle_task(cpu), cpu); |
179c498a | 6881 | } |
5b058bcd | 6882 | |
f201ae23 FW |
6883 | do { |
6884 | ret = alloc_retstack_tasklist(ret_stack_list); | |
6885 | } while (ret == -EAGAIN); | |
6886 | ||
8aef2d28 | 6887 | if (!ret) { |
38516ab5 | 6888 | ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
8aef2d28 SR |
6889 | if (ret) |
6890 | pr_info("ftrace_graph: Couldn't activate tracepoint" | |
6891 | " probe to kernel_sched_switch\n"); | |
6892 | } | |
6893 | ||
f201ae23 FW |
6894 | kfree(ret_stack_list); |
6895 | return ret; | |
6896 | } | |
6897 | ||
4a2b8dda FW |
6898 | /* |
6899 | * Hibernation protection. | |
6900 | * The state of the current task is too unstable during | |
6901 | * suspend/restore to disk. We want to protect against that. | |
6902 | */ | |
6903 | static int | |
6904 | ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, | |
6905 | void *unused) | |
6906 | { | |
6907 | switch (state) { | |
6908 | case PM_HIBERNATION_PREPARE: | |
6909 | pause_graph_tracing(); | |
6910 | break; | |
6911 | ||
6912 | case PM_POST_HIBERNATION: | |
6913 | unpause_graph_tracing(); | |
6914 | break; | |
6915 | } | |
6916 | return NOTIFY_DONE; | |
6917 | } | |
6918 | ||
23a8e844 SRRH |
6919 | static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) |
6920 | { | |
6921 | if (!ftrace_ops_test(&global_ops, trace->func, NULL)) | |
6922 | return 0; | |
6923 | return __ftrace_graph_entry(trace); | |
6924 | } | |
6925 | ||
6926 | /* | |
6927 | * The function graph tracer should only trace the functions defined | |
6928 | * by set_ftrace_filter and set_ftrace_notrace. If another function | |
6929 | * tracer ops is registered, the graph tracer must test each | |
6930 | * function against the global ops, and not just trace any | |
6931 | * function that some other ftrace_ops has registered. | |
6932 | */ | |
6933 | static void update_function_graph_func(void) | |
6934 | { | |
5f151b24 SRRH |
6935 | struct ftrace_ops *op; |
6936 | bool do_test = false; | |
6937 | ||
6938 | /* | |
6939 | * The graph and global ops share the same set of functions | |
6940 | * to test. If any other ops is on the list, then | |
6941 | * the graph tracer needs to test whether a function | |
6942 | * is one it should call for. | |
6943 | */ | |
6944 | do_for_each_ftrace_op(op, ftrace_ops_list) { | |
6945 | if (op != &global_ops && op != &graph_ops && | |
6946 | op != &ftrace_list_end) { | |
6947 | do_test = true; | |
6948 | /* in double loop, break out with goto */ | |
6949 | goto out; | |
6950 | } | |
6951 | } while_for_each_ftrace_op(op); | |
6952 | out: | |
6953 | if (do_test) | |
23a8e844 | 6954 | ftrace_graph_entry = ftrace_graph_entry_test; |
5f151b24 SRRH |
6955 | else |
6956 | ftrace_graph_entry = __ftrace_graph_entry; | |
23a8e844 SRRH |
6957 | } |
6958 | ||
8275f69f MK |
6959 | static struct notifier_block ftrace_suspend_notifier = { |
6960 | .notifier_call = ftrace_suspend_notifier_call, | |
6961 | }; | |
6962 | ||
287b6e68 FW |
6963 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, |
6964 | trace_func_graph_ent_t entryfunc) | |
15e6cb36 | 6965 | { |
e7d3737e FW |
6966 | int ret = 0; |
6967 | ||
e6ea44e9 | 6968 | mutex_lock(&ftrace_lock); |
e7d3737e | 6969 | |
05ce5818 | 6970 | /* we currently allow only one tracer registered at a time */ |
597af815 | 6971 | if (ftrace_graph_active) { |
05ce5818 SR |
6972 | ret = -EBUSY; |
6973 | goto out; | |
6974 | } | |
6975 | ||
4a2b8dda FW |
6976 | register_pm_notifier(&ftrace_suspend_notifier); |
6977 | ||
597af815 | 6978 | ftrace_graph_active++; |
fb52607a | 6979 | ret = start_graph_tracing(); |
f201ae23 | 6980 | if (ret) { |
597af815 | 6981 | ftrace_graph_active--; |
f201ae23 FW |
6982 | goto out; |
6983 | } | |
e53a6319 | 6984 | |
287b6e68 | 6985 | ftrace_graph_return = retfunc; |
23a8e844 SRRH |
6986 | |
6987 | /* | |
6988 | * Point the indirect function at the entryfunc, but have the | |
6989 | * function that actually gets called be the entry test first. | |
6990 | * Then call update_function_graph_func() to determine if | |
6991 | * the entryfunc should be called directly or not. | |
6992 | */ | |
6993 | __ftrace_graph_entry = entryfunc; | |
6994 | ftrace_graph_entry = ftrace_graph_entry_test; | |
6995 | update_function_graph_func(); | |
e53a6319 | 6996 | |
5f151b24 | 6997 | ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); |
e7d3737e | 6998 | out: |
e6ea44e9 | 6999 | mutex_unlock(&ftrace_lock); |
e7d3737e | 7000 | return ret; |
15e6cb36 FW |
7001 | } |
7002 | ||
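/*
 * A minimal sketch of hooking the graph tracer with the API above
 * (the my_entry/my_return names are illustrative). The entry handler
 * returns nonzero to trace a function and hook its return, 0 to skip
 * it; the return handler runs as the traced function exits:
 *
 *	static int notrace my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void notrace my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->func, trace->calltime and trace->rettime
 *		// describe the function that just returned
 *	}
 *
 *	// register_ftrace_graph(my_return, my_entry) starts tracing;
 *	// unregister_ftrace_graph() undoes it all.
 */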
fb52607a | 7003 | void unregister_ftrace_graph(void) |
15e6cb36 | 7004 | { |
e6ea44e9 | 7005 | mutex_lock(&ftrace_lock); |
e7d3737e | 7006 | |
597af815 | 7007 | if (unlikely(!ftrace_graph_active)) |
2aad1b76 SR |
7008 | goto out; |
7009 | ||
597af815 | 7010 | ftrace_graph_active--; |
287b6e68 | 7011 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
e49dc19c | 7012 | ftrace_graph_entry = ftrace_graph_entry_stub; |
23a8e844 | 7013 | __ftrace_graph_entry = ftrace_graph_entry_stub; |
5f151b24 | 7014 | ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET); |
4a2b8dda | 7015 | unregister_pm_notifier(&ftrace_suspend_notifier); |
38516ab5 | 7016 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
e7d3737e | 7017 | |
2aad1b76 | 7018 | out: |
e6ea44e9 | 7019 | mutex_unlock(&ftrace_lock); |
15e6cb36 | 7020 | } |
f201ae23 | 7021 | |
868baf07 SR |
7022 | static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); |
7023 | ||
7024 | static void | |
7025 | graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) | |
7026 | { | |
7027 | atomic_set(&t->tracing_graph_pause, 0); | |
7028 | atomic_set(&t->trace_overrun, 0); | |
7029 | t->ftrace_timestamp = 0; | |
25985edc | 7030 | /* make curr_ret_stack visible before we add the ret_stack */ |
868baf07 SR |
7031 | smp_wmb(); |
7032 | t->ret_stack = ret_stack; | |
7033 | } | |
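/*
 * The smp_wmb() above makes sure the field initialization is ordered
 * before the store that publishes t->ret_stack, pairing with the
 * read side that checks ret_stack before using those fields.
 */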
7034 | ||
7035 | /* | |
7036 | * Allocate a return stack for the idle task. May be the first | |
7037 | * time through, or it may be done by CPU hotplug online. | |
7038 | */ | |
7039 | void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) | |
7040 | { | |
7041 | t->curr_ret_stack = -1; | |
39eb456d | 7042 | t->curr_ret_depth = -1; |
868baf07 SR |
7043 | /* |
7044 | * The idle task has no parent; it either has its own | |
7045 | * stack or no stack at all. | |
7046 | */ | |
7047 | if (t->ret_stack) | |
7048 | WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); | |
7049 | ||
7050 | if (ftrace_graph_active) { | |
7051 | struct ftrace_ret_stack *ret_stack; | |
7052 | ||
7053 | ret_stack = per_cpu(idle_ret_stack, cpu); | |
7054 | if (!ret_stack) { | |
6da2ec56 KC |
7055 | ret_stack = |
7056 | kmalloc_array(FTRACE_RETFUNC_DEPTH, | |
7057 | sizeof(struct ftrace_ret_stack), | |
7058 | GFP_KERNEL); | |
868baf07 SR |
7059 | if (!ret_stack) |
7060 | return; | |
7061 | per_cpu(idle_ret_stack, cpu) = ret_stack; | |
7062 | } | |
7063 | graph_init_task(t, ret_stack); | |
7064 | } | |
7065 | } | |
7066 | ||
f201ae23 | 7067 | /* Allocate a return stack for newly created task */ |
fb52607a | 7068 | void ftrace_graph_init_task(struct task_struct *t) |
f201ae23 | 7069 | { |
84047e36 SR |
7070 | /* Make sure we do not use the parent ret_stack */ |
7071 | t->ret_stack = NULL; | |
ea14eb71 | 7072 | t->curr_ret_stack = -1; |
39eb456d | 7073 | t->curr_ret_depth = -1; |
84047e36 | 7074 | |
597af815 | 7075 | if (ftrace_graph_active) { |
82310a32 SR |
7076 | struct ftrace_ret_stack *ret_stack; |
7077 | ||
6da2ec56 KC |
7078 | ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH, |
7079 | sizeof(struct ftrace_ret_stack), | |
7080 | GFP_KERNEL); | |
82310a32 | 7081 | if (!ret_stack) |
f201ae23 | 7082 | return; |
868baf07 | 7083 | graph_init_task(t, ret_stack); |
84047e36 | 7084 | } |
f201ae23 FW |
7085 | } |
7086 | ||
fb52607a | 7087 | void ftrace_graph_exit_task(struct task_struct *t) |
f201ae23 | 7088 | { |
eae849ca FW |
7089 | struct ftrace_ret_stack *ret_stack = t->ret_stack; |
7090 | ||
f201ae23 | 7091 | t->ret_stack = NULL; |
eae849ca FW |
7092 | /* NULL must become visible to IRQs before we free it: */ |
7093 | barrier(); | |
7094 | ||
7095 | kfree(ret_stack); | |
f201ae23 | 7096 | } |
15e6cb36 | 7097 | #endif |