/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

enum {
	FTRACE_OPS_FL_ENABLED		= 1 << 0,
	FTRACE_OPS_FL_GLOBAL		= 1 << 1,
	FTRACE_OPS_FL_DYNAMIC		= 1 << 2,
};

struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops		*next;
	unsigned long			flags;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_hash		*notrace_hash;
	struct ftrace_hash		*filter_hash;
#endif
};

extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0,		/* Hook the call of the function */
	FTRACE_TYPE_RETURN,		/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer.  Note that this is an on/off
 * switch; it is not recursive like preempt_disable().
 * This does not disable the calling of mcount, it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}

/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop.  It does not enable
 * function tracing if the function tracer is disabled.  It only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}
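
/*
 * Usage sketch (illustrative only, the traced region is hypothetical):
 * ftrace_stop()/ftrace_start() simply bracket a region in which no
 * function tracing callbacks may run:
 *
 *	ftrace_stop();
 *	... code that must not trigger tracing callbacks ...
 *	ftrace_start();
 *
 * Remember that this is a plain on/off flag, not a nesting counter.
 */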

/*
 * The ftrace_ops must be static and should also be read_mostly.
 * These functions modify read_mostly variables, so use them sparingly.
 * Never free an ftrace_ops or modify its next pointer after it has been
 * registered.  Even after unregistering it, the next pointer may still
 * be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
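
/*
 * Minimal usage sketch (illustrative only; "my_callback" and "my_ops" are
 * hypothetical names, not part of this header):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * As noted above, my_ops must stay allocated, and its next pointer left
 * alone, even after it has been unregistered.
 */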

extern void ftrace_stub(unsigned long a0, unsigned long a1);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct seq_file;

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*callback)(unsigned long ip, void **data);
	void			(*free)(void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);
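
/*
 * Probe usage sketch (illustrative only; "my_probe" and "my_probe_ops"
 * are hypothetical names):
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe,
 *	};
 *
 *	register_ftrace_function_probe("sched_*", &my_probe_ops, NULL);
 *	...
 *	unregister_ftrace_function_probe_func("sched_*", &my_probe_ops);
 *
 * The glob selects which functions the probe is attached to.
 */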

extern int ftrace_text_reserved(void *start, void *end);

enum {
	FTRACE_FL_ENABLED	= (1 << 30),
	FTRACE_FL_FREE		= (1 << 31),
};

#define FTRACE_FL_MASK		(0x3UL << 30)
#define FTRACE_REF_MAX		((1 << 30) - 1)

struct dyn_ftrace {
	union {
		unsigned long		ip; /* address of mcount call-site */
		struct dyn_ftrace	*freelist;
	};
	union {
		unsigned long		flags;
		struct dyn_ftrace	*newlist;
	};
	struct dyn_arch_ftrace		arch;
};

int ftrace_force_update(void);
void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
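
/*
 * Filter sketch (illustrative only; "my_ops" is the hypothetical ops from
 * the registration example above):
 *
 *	ftrace_set_filter(&my_ops, "do_fork", strlen("do_fork"), 1);
 *	ftrace_set_filter(&my_ops, "kthread_create", strlen("kthread_create"), 0);
 *
 * A non-zero reset clears any previous filter before the new entry is
 * added; ftrace_set_notrace() follows the same pattern but excludes the
 * matching functions instead of selecting them.
 */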

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
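
/*
 * Command sketch (illustrative only; "foo" and foo_cmd_func() are
 * hypothetical, the callback signature follows struct ftrace_func_command
 * above):
 *
 *	static int foo_cmd_func(struct ftrace_hash *hash, char *func,
 *				char *cmd, char *params, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command foo_cmd = {
 *		.name	= "foo",
 *		.func	= foo_cmd_func,
 *	};
 *
 *	register_ftrace_command(&foo_cmd);
 *
 * Once registered, the command can be used from the set_ftrace_filter
 * file with the usual "<function>:<command>:<params>" syntax.
 */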

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
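
/*
 * Rough sketch of the read/compare/write pattern the arch code is expected
 * to follow (pseudo-code only; the instruction buffers and the final write
 * step are arch specific and hypothetical here):
 *
 *	unsigned char expect[MCOUNT_INSN_SIZE], cur[MCOUNT_INSN_SIZE];
 *
 *	build the expected call instruction into expect[];
 *	if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;
 *	if (memcmp(cur, expect, MCOUNT_INSN_SIZE))
 *		return -EINVAL;
 *	if (the arch specific write of the nop to rec->ip fails)
 *		return -EPERM;
 *	return 0;
 */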

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
}
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(void *start, void *end)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock.  Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
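
/*
 * Typical pattern (illustrative only; the caller must provide its own
 * serialization against other writers of ftrace_enabled):
 *
 *	int saved;
 *
 *	saved = __ftrace_enabled_save();
 *	... run with the function tracer disabled ...
 *	__ftrace_enabled_restore(saved);
 */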

#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
#  define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
#  define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
#  define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
#  define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
#  define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 0UL
#  define CALLER_ADDR2 0UL
#  define CALLER_ADDR3 0UL
#  define CALLER_ADDR4 0UL
#  define CALLER_ADDR5 0UL
#  define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
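
/*
 * Example (illustrative only; my_record() and do_record() are hypothetical
 * helpers): CALLER_ADDRn expands to the return address n levels up the
 * call stack, so a helper can report its caller and its caller's caller:
 *
 *	void my_record(void)
 *	{
 *		do_record(CALLER_ADDR0, CALLER_ADDR1);
 *	}
 *
 * Without frame pointers only CALLER_ADDR0 is reliable; the deeper levels
 * are defined as 0UL.
 */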

#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
  static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
  static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it to be traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace
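
/*
 * Example (illustrative only; foo() is a hypothetical function):
 *
 *	static int __notrace_funcgraph foo(void)
 *	{
 *		...
 *	}
 *
 * With the graph tracer configured, foo() is not traced at all; without
 * it, __notrace_funcgraph expands to nothing and foo() remains visible
 * to the plain function tracer.
 */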

/*
 * We want to know which function is an entrypoint of a hardirq.
 * That will help us to put a signal on output.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);
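
/*
 * Registration sketch (illustrative only; the two callbacks are
 * hypothetical and follow the typedefs above):
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	(non-zero means: trace this call)
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		...
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 *
 * Note the argument order: the return handler comes first.
 */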

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
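
/*
 * Usage sketch (illustrative only): keep the graph tracer from recording
 * entries for the current task, e.g. around code that would otherwise
 * recurse into the tracer:
 *
 *	pause_graph_tracing();
 *	... work that must not be graph-traced ...
 *	unpause_graph_tracing();
 *
 * The pause count is per task and nests, unlike the global ftrace_stop()
 * switch above.
 */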
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif

#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */