#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool

config HAVE_FUNCTION_TRACER
	bool

config HAVE_FUNCTION_GRAPH_TRACER
	bool

config HAVE_FUNCTION_TRACE_MCOUNT_TEST
	bool
	help
	  This gets selected when the arch tests the function_trace_stop
	  variable at the mcount call site. Otherwise, this variable
	  is tested by the called function.

config HAVE_DYNAMIC_FTRACE
	bool

config HAVE_FTRACE_MCOUNT_RECORD
	bool

config HAVE_HW_BRANCH_TRACER
	bool

config HAVE_FTRACE_SYSCALLS
	bool

config TRACER_MAX_TRACE
	bool

config RING_BUFFER
	bool

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	bool

config CONTEXT_SWITCH_TRACER
	select MARKERS
	bool

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies while accomplishing
# the hiding of the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
#  be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway, as they were tested to build and work. Note that new
	# exceptions to this list aren't welcome; it is better to implement
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	  Enable the kernel tracing infrastructure.

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select FRAME_POINTER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.

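# A minimal runtime usage sketch for the function tracer, assuming debugfs is
# mounted at /sys/kernel/debug (other help texts in this file also use the
# older /debugfs/tracing path):
#	echo function > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace
#	echo nop > /sys/kernel/debug/tracing/current_tracer
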
config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	default y
	help
	  Enable the kernel to trace a function at both its return
	  and its entry.
	  Its first purpose is to trace the duration of functions and
	  draw a call graph for each thread with some information like
	  the return value. This is done by setting the current return
	  address on the current task structure into a stack of calls.

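# A usage sketch for the graph tracer (same debugfs mount point assumed as in
# the function tracer example above):
#	echo function_graph > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace
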
config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on GENERIC_TIME
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /debugfs/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on GENERIC_TIME
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /debugfs/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SYSPROF_TRACER
	bool "Sysprof Tracer"
	depends on X86
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer provides the trace needed by the 'Sysprof' userspace
	  tool.

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.

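# A usage sketch, assuming this option provides the "wakeup" tracer and that
# the maximum-latency reset file matches the one mentioned above:
#	echo wakeup > /sys/kernel/debug/tracing/current_tracer
#	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
#	cat /sys/kernel/debug/tracing/trace
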
config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various tracepoints in the kernel,
	  allowing the user to pick and choose which tracepoints they
	  want to trace. It also includes the sched_switch tracer plugin.

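# A sketch of picking individual events at runtime, assuming the set_event
# file and the events/ directory are available under the tracing debugfs
# directory:
#	echo sched_switch > /sys/kernel/debug/tracing/set_event
#	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_wakeup/enable
#	cat /sys/kernel/debug/tracing/trace
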
config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_FTRACE_SYSCALLS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.

config BOOT_TRACER
	bool "Trace boot initcalls"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer helps developers to optimize boot times: it records
	  the timings of the initcalls and traces key events and the identity
	  of tasks that can cause boot delays, such as context-switches.

	  Its aim is to be parsed by the /scripts/bootgraph.pl tool to
	  produce pretty graphics about boot inefficiencies, giving a visual
	  representation of the delays during initcalls - but the raw
	  /debug/tracing/trace text output is readable too.

	  You must pass ftrace=initcall on the kernel command line
	  to enable this on bootup.

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	  Branch profiling is a software profiler. It will add hooks
	  into the C conditionals to test which path a branch takes.

	  The likely/unlikely profiler only looks at the conditions that
	  are annotated with a likely or unlikely macro.

	  The "all branch" profiler will profile every if statement in the
	  kernel. This profiler will also enable the likely/unlikely
	  profiler.

	  Either of the above profilers adds a bit of overhead to the system.
	  If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	  No branch profiling. Branch profiling adds a bit of overhead.
	  Only enable it if you want to analyse the branching behavior.
	  Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all the likely and unlikely macros
	  in the kernel. It will display the results in:

	  /debugfs/tracing/profile_annotated_branch

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  taken in the kernel is recorded, whether it hit or missed.
	  The results will be displayed in:

	  /debugfs/tracing/profile_branch

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose significant overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed.
endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel. The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config POWER_TRACER
	bool "Trace power consumption behavior"
	depends on X86
	select GENERIC_TRACER
	help
	  This tracer helps developers to analyze and optimize the kernel's
	  power management decisions, specifically the C-state and P-state
	  behavior.

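# A usage sketch, assuming this option registers a tracer named "power":
#	echo power > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace
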
config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in debugfs/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved. If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.

	  Say N if unsure.

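# A usage sketch for the stack tracer, based on the sysctl and debugfs file
# named in the help text above:
#	echo 1 > /proc/sys/kernel/stack_tracer_enabled
#	cat /sys/kernel/debug/tracing/stack_trace
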
config HW_BRANCH_TRACER
	depends on HAVE_HW_BRANCH_TRACER
	bool "Trace hw branches"
	select GENERIC_TRACER
	help
	  This tracer records all branches on the system in a circular
	  buffer, giving access to the last N branches for each CPU.

config KMEMTRACE
	bool "Trace SLAB allocations"
	select GENERIC_TRACER
	help
	  kmemtrace provides tracing for slab allocator functions, such as
	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
	  data is then fed to the userspace application in order to analyse
	  allocation hotspots, internal fragmentation and so on, making it
	  possible to see how well an allocator performs, as well as debug
	  and profile kernel code.

	  This requires a userspace application to use. See
	  Documentation/trace/kmemtrace.txt for more information.

	  Saying Y will make the kernel somewhat larger and slower. However,
	  if you disable kmemtrace at run-time or boot-time, the performance
	  impact is minimal (depending on the arch the kernel is built for).

	  If unsure, say N.

config WORKQUEUE_TRACER
	bool "Trace workqueues"
	select GENERIC_TRACER
	help
	  The workqueue tracer provides some statistical information
	  about each cpu workqueue thread, such as the number of works
	  inserted and executed since its creation. It can help
	  to evaluate the amount of work each of them has to perform.
	  For example, it can help a developer decide whether to
	  choose a per-cpu workqueue instead of a singlethreaded one.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block io actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config DYNAMIC_FTRACE
	bool "enable/disable ftrace tracepoints dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	  This option will modify all the calls to ftrace dynamically
	  (will patch them out of the binary image and replace them
	  with a No-Op instruction) as they are called. A table is
	  created to dynamically enable them again.

	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	  otherwise has native performance as long as no tracing is active.

	  The changes to the code are done by a kernel thread that
	  wakes up once a second and checks to see if any ftrace calls
	  were made. If so, it runs stop_machine (stops all CPUs)
	  and modifies the code to jump over the call to ftrace.

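# With DYNAMIC_FTRACE the set of traced functions can also be narrowed at
# runtime. A sketch, assuming the set_ftrace_filter file is present in the
# tracing debugfs directory:
#	echo 'schedule*' > /sys/kernel/debug/tracing/set_ftrace_filter
#	echo function > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace
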
config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	  This option enables the kernel function profiler. A file is created
	  in debugfs called function_profile_enabled which defaults to zero.
	  When a 1 is echoed into this file, profiling begins, and when a
	  zero is entered, profiling stops. A file in the trace_stats
	  directory called functions shows the list of functions that
	  have been hit and their counters.

	  If in doubt, say N.

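# A usage sketch based on the files described in the help text above (the
# exact name and layout of the statistics directory are an assumption here):
#	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
#	cat /sys/kernel/debug/tracing/trace_stat/function*
#	echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
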
config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup,
	  a series of tests is run to verify that the tracer is functioning
	  properly. It will do tests on all the configured tracers of ftrace.

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

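# A usage sketch, assuming mmiotrace is exposed as a tracer named "mmiotrace"
# (see Documentation/trace/mmiotrace.txt for the authoritative procedure):
#	echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
#	cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &
#	echo nop > /sys/kernel/debug/tracing/current_tracer
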
config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on, e.g., an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.

	  If unsure, say N.

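# A usage sketch, assuming the module built from this option is named
# ring_buffer_benchmark; results are printed to the kernel log:
#	modprobe ring_buffer_benchmark
#	dmesg | tail
#	rmmod ring_buffer_benchmark
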
endif # FTRACE

endif # TRACING_SUPPORT