// SPDX-License-Identifier: GPL-2.0
/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>

#include "trace.h"
#include "trace_stat.h"
#include "trace_output.h"

#ifdef CONFIG_BRANCH_TRACER

static struct tracer branch_trace;

static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);

static struct trace_array *branch_tracer;

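/*
 * Record a single likely()/unlikely() result as a TRACE_BRANCH event in
 * the ring buffer. Recursion is blocked with TRACE_BRANCH_BIT, the
 * per-cpu "disabled" counter is honoured, and interrupts are kept off
 * for the duration of the probe.
 */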
static void
probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
        struct trace_event_call *call = &event_branch;
        struct trace_array *tr = branch_tracer;
        struct trace_array_cpu *data;
        struct ring_buffer_event *event;
        struct trace_branch *entry;
        struct ring_buffer *buffer;
        unsigned long flags;
        int pc;
        const char *p;

        if (current->trace_recursion & TRACE_BRANCH_BIT)
                return;

        /*
         * I would love to save just the ftrace_likely_data pointer, but
         * this code can also be used by modules. Ugly things can happen
         * if the module is unloaded, and then we go and read the
         * pointer. This is slower, but much safer.
         */

        if (unlikely(!tr))
                return;

        raw_local_irq_save(flags);
        current->trace_recursion |= TRACE_BRANCH_BIT;
        data = this_cpu_ptr(tr->trace_buffer.data);
        if (atomic_read(&data->disabled))
                goto out;

        pc = preempt_count();
        buffer = tr->trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
                                          sizeof(*entry), flags, pc);
        if (!event)
                goto out;

        entry = ring_buffer_event_data(event);

        /* Strip off the path, only save the file */
        p = f->data.file + strlen(f->data.file);
        while (p >= f->data.file && *p != '/')
                p--;
        p++;

        strncpy(entry->func, f->data.func, TRACE_FUNC_SIZE);
        strncpy(entry->file, p, TRACE_FILE_SIZE);
        entry->func[TRACE_FUNC_SIZE] = 0;
        entry->file[TRACE_FILE_SIZE] = 0;
        entry->constant = f->constant;
        entry->line = f->data.line;
        entry->correct = val == expect;

        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);

 out:
        current->trace_recursion &= ~TRACE_BRANCH_BIT;
        raw_local_irq_restore(flags);
}

static inline
void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
        if (!branch_tracing_enabled)
                return;

        probe_likely_condition(f, val, expect);
}

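/*
 * enable_branch_tracing()/disable_branch_tracing() are driven by the
 * "branch" tracer registered below (branch_trace_init/branch_trace_reset).
 * A typical way to exercise it, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *      # echo branch > /sys/kernel/tracing/current_tracer
 *      # cat /sys/kernel/tracing/trace
 */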
int enable_branch_tracing(struct trace_array *tr)
{
        mutex_lock(&branch_tracing_mutex);
        branch_tracer = tr;
        /*
         * Must be seen before enabling. The reader is a condition
         * where we do not need a matching rmb()
         */
        smp_wmb();
        branch_tracing_enabled++;
        mutex_unlock(&branch_tracing_mutex);

        return 0;
}

void disable_branch_tracing(void)
{
        mutex_lock(&branch_tracing_mutex);

        if (!branch_tracing_enabled)
                goto out_unlock;

        branch_tracing_enabled--;

 out_unlock:
        mutex_unlock(&branch_tracing_mutex);
}

static int branch_trace_init(struct trace_array *tr)
{
        return enable_branch_tracing(tr);
}

static void branch_trace_reset(struct trace_array *tr)
{
        disable_branch_tracing();
}

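/*
 * Text output for a TRACE_BRANCH event, one line per entry. With
 * hypothetical values the output looks like:
 *
 *      [ MISS ] do_some_work:foo.c:42
 *      [  ok  ] do_some_work:foo.c:57
 */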
static enum print_line_t trace_branch_print(struct trace_iterator *iter,
                                            int flags, struct trace_event *event)
{
        struct trace_branch *field;

        trace_assign_type(field, iter->ent);

        trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
                         field->correct ? "  ok  " : " MISS ",
                         field->func,
                         field->file,
                         field->line);

        return trace_handle_return(&iter->seq);
}

static void branch_print_header(struct seq_file *s)
{
        seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
                    "  FUNC:FILE:LINE\n"
                    "#              | |       |          |         |   "
                    "    |\n");
}

static struct trace_event_functions trace_branch_funcs = {
        .trace          = trace_branch_print,
};

static struct trace_event trace_branch_event = {
        .type           = TRACE_BRANCH,
        .funcs          = &trace_branch_funcs,
};

static struct tracer branch_trace __read_mostly =
{
        .name           = "branch",
        .init           = branch_trace_init,
        .reset          = branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_branch,
#endif /* CONFIG_FTRACE_SELFTEST */
        .print_header   = branch_print_header,
};

__init static int init_branch_tracer(void)
{
        int ret;

        ret = register_trace_event(&trace_branch_event);
        if (!ret) {
                printk(KERN_WARNING "Warning: could not register "
                                    "branch events\n");
                return 1;
        }
        return register_tracer(&branch_trace);
}
core_initcall(init_branch_tracer);

#else
static inline
void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */

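/*
 * ftrace_likely_update() is the hook behind the branch profiler: with
 * CONFIG_TRACE_BRANCH_PROFILING the likely()/unlikely() wrappers in
 * <linux/compiler.h> call it with the branch result (val), the annotated
 * expectation (expect) and whether the condition was a compile-time
 * constant.
 */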
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                          int expect, int is_constant)
{
        /* A constant is always correct */
        if (is_constant) {
                f->constant++;
                val = expect;
        }
        /*
         * I would love to have a trace point here instead, but the
         * trace point code is so inundated with unlikely and likely
         * conditions that the recursive nightmare that exists is too
         * much to try to get working. At least for now.
         */
        trace_likely_condition(f, val, expect);

        /* FIXME: Make this atomic! */
        if (val == expect)
                f->data.correct++;
        else
                f->data.incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);

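/*
 * Each instrumented annotation contributes a struct ftrace_likely_data
 * to the _ftrace_annotated_branch section; the linker-provided start/stop
 * symbols below delimit that array for the stat tracer. The resulting
 * statistics are typically read from trace_stat/branch_annotated under
 * the tracefs mount point.
 */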
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

static int annotated_branch_stat_headers(struct seq_file *m)
{
        seq_puts(m, " correct incorrect        %        "
                    "       Function                "
                    "  File              Line\n"
                    " ------- ---------        -        "
                    "       --------                "
                    "  ----              ----\n");
        return 0;
}

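/*
 * get_incorrect_percent() returns the incorrect hits as an integer
 * percentage of all recorded hits, or -1 when nothing was recorded at
 * all. For example, correct = 3 and incorrect = 1 gives
 * 1 * 100 / (3 + 1) = 25, while correct = 0 and incorrect = 0 gives -1,
 * which branch_stat_show() prints as 'X'.
 */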
static inline long get_incorrect_percent(struct ftrace_branch_data *p)
{
        long percent;

        if (p->correct) {
                percent = p->incorrect * 100;
                percent /= p->correct + p->incorrect;
        } else
                percent = p->incorrect ? 100 : -1;

        return percent;
}

static const char *branch_stat_process_file(struct ftrace_branch_data *p)
{
        const char *f;

        /* Only print the file, not the path */
        f = p->file + strlen(p->file);
        while (f >= p->file && *f != '/')
                f--;
        return ++f;
}

static void branch_stat_show(struct seq_file *m,
                             struct ftrace_branch_data *p, const char *f)
{
        long percent;

        /*
         * The miss is overlayed on correct, and hit on incorrect.
         */
        percent = get_incorrect_percent(p);

        if (percent < 0)
                seq_puts(m, "  X ");
        else
                seq_printf(m, "%3ld ", percent);

        seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
}

static int branch_stat_show_normal(struct seq_file *m,
                                   struct ftrace_branch_data *p, const char *f)
{
        seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
        branch_stat_show(m, p, f);
        return 0;
}

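/*
 * annotate_branch_stat_show() prints "correct/constant" instead of the
 * plain correct count when constant-folded hits were seen. The
 * snprintf(NULL, 0, ...) call measures how wide "/constant" will be so
 * that the incorrect column can shrink by the same amount and stay
 * aligned with the "%8lu %8lu " layout of branch_stat_show_normal().
 * With made-up numbers: constant = 42 makes "/42" three characters wide,
 * so incorrect is printed as "%5lu" and the row still spans 18
 * characters ahead of the percentage column.
 */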
static int annotate_branch_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_likely_data *p = v;
        const char *f;
        int l;

        f = branch_stat_process_file(&p->data);

        if (!p->constant)
                return branch_stat_show_normal(m, &p->data, f);

        l = snprintf(NULL, 0, "/%lu", p->constant);
        l = l > 8 ? 0 : 8 - l;

        seq_printf(m, "%8lu/%lu %*lu ",
                   p->data.correct, p->constant, l, p->data.incorrect);
        branch_stat_show(m, &p->data, f);
        return 0;
}

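/*
 * The stat_start/stat_next callbacks simply walk the array of
 * ftrace_likely_data entries between the section start/stop symbols;
 * the array itself is laid out at link time.
 */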
static void *annotated_branch_stat_start(struct tracer_stat *trace)
{
        return __start_annotated_branch_profile;
}

static void *
annotated_branch_stat_next(void *v, int idx)
{
        struct ftrace_likely_data *p = v;

        ++p;

        if ((void *)p >= (void *)__stop_annotated_branch_profile)
                return NULL;

        return p;
}

static int annotated_branch_stat_cmp(void *p1, void *p2)
{
        struct ftrace_branch_data *a = p1;
        struct ftrace_branch_data *b = p2;

        long percent_a, percent_b;

        percent_a = get_incorrect_percent(a);
        percent_b = get_incorrect_percent(b);

        if (percent_a < percent_b)
                return -1;
        if (percent_a > percent_b)
                return 1;

        if (a->incorrect < b->incorrect)
                return -1;
        if (a->incorrect > b->incorrect)
                return 1;

        /*
         * Since the above shows worse (incorrect) cases
         * first, we continue that by showing best (correct)
         * cases last.
         */
        if (a->correct > b->correct)
                return -1;
        if (a->correct < b->correct)
                return 1;

        return 0;
}

static struct tracer_stat annotated_branch_stats = {
        .name = "branch_annotated",
        .stat_start = annotated_branch_stat_start,
        .stat_next = annotated_branch_stat_next,
        .stat_cmp = annotated_branch_stat_cmp,
        .stat_headers = annotated_branch_stat_headers,
        .stat_show = annotate_branch_stat_show
};

__init static int init_annotated_branch_stats(void)
{
        int ret;

        ret = register_stat_tracer(&annotated_branch_stats);
        if (!ret) {
                printk(KERN_WARNING "Warning: could not register "
                                    "annotated branches stats\n");
                return 1;
        }
        return 0;
}
fs_initcall(init_annotated_branch_stats);

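/*
 * With CONFIG_PROFILE_ALL_BRANCHES every if() is instrumented as well
 * (again via <linux/compiler.h>) and lands in the separate _ftrace_branch
 * section walked below; those results show up as trace_stat/branch_all.
 * No stat_cmp is provided here, so the entries are not sorted by miss
 * percentage the way branch_annotated is.
 */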
#ifdef CONFIG_PROFILE_ALL_BRANCHES

extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

static int all_branch_stat_headers(struct seq_file *m)
{
        seq_puts(m, "   miss      hit    %        "
                    "       Function                "
                    "  File              Line\n"
                    " ------- ---------  -        "
                    "       --------                "
                    "  ----              ----\n");
        return 0;
}

static void *all_branch_stat_start(struct tracer_stat *trace)
{
        return __start_branch_profile;
}

static void *
all_branch_stat_next(void *v, int idx)
{
        struct ftrace_branch_data *p = v;

        ++p;

        if ((void *)p >= (void *)__stop_branch_profile)
                return NULL;

        return p;
}

static int all_branch_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_branch_data *p = v;
        const char *f;

        f = branch_stat_process_file(p);
        return branch_stat_show_normal(m, p, f);
}

static struct tracer_stat all_branch_stats = {
        .name = "branch_all",
        .stat_start = all_branch_stat_start,
        .stat_next = all_branch_stat_next,
        .stat_headers = all_branch_stat_headers,
        .stat_show = all_branch_stat_show
};

__init static int all_annotated_branch_stats(void)
{
        int ret;

        ret = register_stat_tracer(&all_branch_stats);
        if (!ret) {
                printk(KERN_WARNING "Warning: could not register "
                                    "all branches stats\n");
                return 1;
        }
        return 0;
}
fs_initcall(all_annotated_branch_stats);
#endif /* CONFIG_PROFILE_ALL_BRANCHES */