// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2012 The Chromium OS Authors.
 */

#include <common.h>
#include <mapmem.h>
#include <time.h>
#include <trace.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/sections.h>

DECLARE_GLOBAL_DATA_PTR;

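/*
 * Overview: when built for tracing, the compiler inserts calls to the
 * __cyg_profile_func_enter()/__cyg_profile_func_exit() hooks below into
 * every function. These record each call (with a timestamp) and keep
 * per-function call counts in a memory buffer set up by trace_init() or
 * trace_early_init(). The recorded data can be read back with
 * trace_list_functions() and trace_list_calls(), typically from the
 * 'trace' command when tracing support is enabled.
 */
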
static char trace_enabled __section(".data");
static char trace_inited __section(".data");

/* The header block at the start of the trace memory area */
struct trace_hdr {
	int func_count;		/* Total number of function call sites */
	u64 call_count;		/* Total number of tracked function calls */
	u64 untracked_count;	/* Total number of untracked function calls */
	int funcs_used;		/* Total number of functions used */

	/*
	 * Call count for each function. This is indexed by the word offset
	 * of the function from gd->relocaddr
	 */
	uintptr_t *call_accum;

	/* Function trace list */
	struct trace_call *ftrace;	/* The function call records */
	ulong ftrace_size;	/* Num. of ftrace records we have space for */
	ulong ftrace_count;	/* Num. of ftrace records written */
	ulong ftrace_too_deep_count;	/* Functions that were too deep */

	int depth;		/* Current nesting depth of traced calls */
	int depth_limit;	/* Calls deeper than this are not recorded */
	int max_depth;		/* Maximum depth observed so far */
};

static struct trace_hdr *hdr;	/* Pointer to start of trace buffer */

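/**
 * func_ptr_to_num() - convert a function address into a call-site index
 *
 * The index is the offset of the function from the base of the U-Boot
 * image (or from _init on sandbox), divided by FUNC_SITE_SIZE. It is used
 * to index call_accum[] and to keep trace records small. Illustrative
 * example only: with FUNC_SITE_SIZE of 4, a function 0x1230 bytes past
 * gd->relocaddr maps to index 0x48c.
 *
 * @func_ptr: pointer to the function of interest
 * Return: call-site index for @func_ptr
 */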
static inline uintptr_t __attribute__((no_instrument_function))
		func_ptr_to_num(void *func_ptr)
{
	uintptr_t offset = (uintptr_t)func_ptr;

#ifdef CONFIG_SANDBOX
	offset -= (uintptr_t)&_init;
#else
	if (gd->flags & GD_FLG_RELOC)
		offset -= gd->relocaddr;
	else
		offset -= CONFIG_TEXT_BASE;
#endif
	return offset / FUNC_SITE_SIZE;
}

#if defined(CONFIG_EFI_LOADER) && (defined(CONFIG_ARM) || defined(CONFIG_RISCV))

/**
 * trace_gd - the value of the gd register
 */
static volatile gd_t *trace_gd;

/**
 * trace_save_gd() - save the value of the gd register
 */
static void __attribute__((no_instrument_function)) trace_save_gd(void)
{
	trace_gd = gd;
}

/**
 * trace_swap_gd() - swap between U-Boot and application gd register value
 *
 * A UEFI application may change the value of the register that gd lives in.
 * But some of our functions like get_ticks() access this register. So we
 * have to set the gd register to the U-Boot value when entering a trace
 * point and set it back to the application value when exiting the trace point.
 */
static void __attribute__((no_instrument_function)) trace_swap_gd(void)
{
	volatile gd_t *temp_gd = trace_gd;

	trace_gd = gd;
	set_gd(temp_gd);
}

#else

static void __attribute__((no_instrument_function)) trace_save_gd(void)
{
}

static void __attribute__((no_instrument_function)) trace_swap_gd(void)
{
}

#endif

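/**
 * add_ftrace() - append a function-call record to the trace buffer
 *
 * The record holds the function, its caller and a microsecond timestamp
 * packed into the low bits of @flags via FUNCF_TIMESTAMP_MASK. Calls
 * nested deeper than the depth limit are only counted, not recorded.
 * Note that ftrace_count is incremented even when the buffer is full, so
 * the number of dropped records can be reported by trace_print_stats().
 *
 * @func_ptr: pointer to the function being entered or exited
 * @caller: pointer to the calling function
 * @flags: FUNCF_ENTRY or FUNCF_EXIT
 */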
static void __attribute__((no_instrument_function)) add_ftrace(void *func_ptr,
				void *caller, ulong flags)
{
	if (hdr->depth > hdr->depth_limit) {
		hdr->ftrace_too_deep_count++;
		return;
	}
	if (hdr->ftrace_count < hdr->ftrace_size) {
		struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];

		rec->func = func_ptr_to_num(func_ptr);
		rec->caller = func_ptr_to_num(caller);
		rec->flags = flags | (timer_get_us() & FUNCF_TIMESTAMP_MASK);
	}
	hdr->ftrace_count++;
}

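/**
 * add_textbase() - record the text base in the trace buffer
 *
 * Writes a special FUNCF_TEXTBASE record holding CONFIG_TEXT_BASE,
 * presumably so that a consumer of the trace data can relate the
 * recorded call-site offsets back to absolute addresses.
 */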
static void __attribute__((no_instrument_function)) add_textbase(void)
{
	if (hdr->ftrace_count < hdr->ftrace_size) {
		struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];

		rec->func = CONFIG_TEXT_BASE;
		rec->caller = 0;
		rec->flags = FUNCF_TEXTBASE;
	}
	hdr->ftrace_count++;
}

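/*
 * The two hooks below are called by compiler-generated instrumentation
 * at every function entry and exit when U-Boot is built with function
 * tracing (gcc's -finstrument-functions; see also the FTRACE check in
 * trace_print_stats()). They must not be instrumented themselves, hence
 * the no_instrument_function attribute.
 */
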
/**
 * __cyg_profile_func_enter() - record function entry
 *
 * We add to our tally for this function and add to the list of called
 * functions.
 *
 * @func_ptr: pointer to function being entered
 * @caller: pointer to function which called this function
 */
void __attribute__((no_instrument_function)) __cyg_profile_func_enter(
		void *func_ptr, void *caller)
{
	if (trace_enabled) {
		int func;

		trace_swap_gd();
		add_ftrace(func_ptr, caller, FUNCF_ENTRY);
		func = func_ptr_to_num(func_ptr);
		if (func < hdr->func_count) {
			hdr->call_accum[func]++;
			hdr->call_count++;
		} else {
			hdr->untracked_count++;
		}
		hdr->depth++;
		if (hdr->depth > hdr->max_depth)
			hdr->max_depth = hdr->depth;
		trace_swap_gd();
	}
}

/**
 * __cyg_profile_func_exit() - record function exit
 *
 * @func_ptr: pointer to function being exited
 * @caller: pointer to function which called this function
 */
void __attribute__((no_instrument_function)) __cyg_profile_func_exit(
		void *func_ptr, void *caller)
{
	if (trace_enabled) {
		trace_swap_gd();
		add_ftrace(func_ptr, caller, FUNCF_EXIT);
		hdr->depth--;
		trace_swap_gd();
	}
}

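/*
 * The two listing functions below each emit a chunk into the caller's
 * buffer: a struct trace_output_hdr giving the chunk type and record
 * count, followed by the records themselves (struct trace_output_func
 * entries for the function list, struct trace_call entries for the
 * call list).
 */
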
/**
 * trace_list_functions() - produce a list of called functions
 *
 * The information is written into the supplied buffer - a header followed
 * by a list of function records.
 *
 * @buff: buffer to place list into
 * @buff_size: size of buffer
 * @needed: returns size of buffer needed, which may be
 *	greater than buff_size if we ran out of space.
 * Return: 0 if ok, -ENOSPC if space was exhausted
 */
int trace_list_functions(void *buff, size_t buff_size, size_t *needed)
{
	struct trace_output_hdr *output_hdr = NULL;
	void *end, *ptr = buff;
	size_t func;
	size_t upto;

	end = buff ? buff + buff_size : NULL;

	/* Place some header information */
	if (ptr + sizeof(struct trace_output_hdr) < end)
		output_hdr = ptr;
	ptr += sizeof(struct trace_output_hdr);

	/* Add information about each function */
	for (func = upto = 0; func < hdr->func_count; func++) {
		size_t calls = hdr->call_accum[func];

		if (!calls)
			continue;

		if (ptr + sizeof(struct trace_output_func) < end) {
			struct trace_output_func *stats = ptr;

			stats->offset = func * FUNC_SITE_SIZE;
			stats->call_count = calls;
			upto++;
		}
		ptr += sizeof(struct trace_output_func);
	}

	/* Update the header */
	if (output_hdr) {
		output_hdr->rec_count = upto;
		output_hdr->type = TRACE_CHUNK_FUNCS;
	}

	/* Work out how much of the buffer we used */
	*needed = ptr - buff;
	if (ptr > end)
		return -ENOSPC;

	return 0;
}

/**
 * trace_list_calls() - produce a list of function calls
 *
 * The information is written into the supplied buffer - a header followed
 * by a list of function records.
 *
 * @buff: buffer to place list into
 * @buff_size: size of buffer
 * @needed: returns size of buffer needed, which may be
 *	greater than buff_size if we ran out of space.
 * Return: 0 if ok, -ENOSPC if space was exhausted
 */
int trace_list_calls(void *buff, size_t buff_size, size_t *needed)
{
	struct trace_output_hdr *output_hdr = NULL;
	void *end, *ptr = buff;
	size_t rec, upto;
	size_t count;

	end = buff ? buff + buff_size : NULL;

	/* Place some header information */
	if (ptr + sizeof(struct trace_output_hdr) < end)
		output_hdr = ptr;
	ptr += sizeof(struct trace_output_hdr);

	/* Add information about each call */
	count = hdr->ftrace_count;
	if (count > hdr->ftrace_size)
		count = hdr->ftrace_size;
	for (rec = upto = 0; rec < count; rec++) {
		if (ptr + sizeof(struct trace_call) < end) {
			struct trace_call *call = &hdr->ftrace[rec];
			struct trace_call *out = ptr;

			out->func = call->func * FUNC_SITE_SIZE;
			out->caller = call->caller * FUNC_SITE_SIZE;
			out->flags = call->flags;
			upto++;
		}
		ptr += sizeof(struct trace_call);
	}

	/* Update the header */
	if (output_hdr) {
		output_hdr->rec_count = upto;
		output_hdr->type = TRACE_CHUNK_CALLS;
	}

	/* Work out how much of the buffer we used */
	*needed = ptr - buff;
	if (ptr > end)
		return -ENOSPC;

	return 0;
}

/**
 * trace_print_stats() - print basic information about tracing
 */
void trace_print_stats(void)
{
	ulong count;

#ifndef FTRACE
	puts("Warning: make U-Boot with FTRACE to enable function instrumenting.\n");
	puts("You will likely get zeroed data here\n");
#endif
	if (!trace_inited) {
		printf("Trace is disabled\n");
		return;
	}
	print_grouped_ull(hdr->func_count, 10);
	puts(" function sites\n");
	print_grouped_ull(hdr->call_count, 10);
	puts(" function calls\n");
	print_grouped_ull(hdr->untracked_count, 10);
	puts(" untracked function calls\n");
	count = min(hdr->ftrace_count, hdr->ftrace_size);
	print_grouped_ull(count, 10);
	puts(" traced function calls");
	if (hdr->ftrace_count > hdr->ftrace_size) {
		printf(" (%lu dropped due to overflow)",
		       hdr->ftrace_count - hdr->ftrace_size);
	}
	puts("\n");
	printf("%15d maximum observed call depth\n", hdr->max_depth);
	printf("%15d call depth limit\n", hdr->depth_limit);
	print_grouped_ull(hdr->ftrace_too_deep_count, 10);
	puts(" calls not traced due to depth\n");
}

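/**
 * trace_set_enabled() - enable or disable function tracing
 *
 * @enabled: non-zero to enable tracing, 0 to disable it
 */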
void __attribute__((no_instrument_function)) trace_set_enabled(int enabled)
{
	trace_enabled = enabled != 0;
}

/**
 * trace_init() - initialize the tracing system and enable it
 *
 * @buff: Pointer to trace buffer
 * @buff_size: Size of trace buffer
 * Return: 0 if ok, -EALREADY if tracing was already enabled and
 *	CONFIG_TRACE_EARLY is not set, -ENOSPC if the buffer is too small
 */
int __attribute__((no_instrument_function)) trace_init(void *buff,
		size_t buff_size)
{
	ulong func_count = gd->mon_len / FUNC_SITE_SIZE;
	size_t needed;
	int was_disabled = !trace_enabled;

	trace_save_gd();

	if (!was_disabled) {
#ifdef CONFIG_TRACE_EARLY
		char *end;
		ulong used;

		/*
		 * Copy over the early trace data if we have it. Disable
		 * tracing while we are doing this.
		 */
		trace_enabled = 0;
		hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR,
				 CONFIG_TRACE_EARLY_SIZE);
		end = (char *)&hdr->ftrace[min(hdr->ftrace_count,
					       hdr->ftrace_size)];
		used = end - (char *)hdr;
		printf("trace: copying %08lx bytes of early data from %x to %08lx\n",
		       used, CONFIG_TRACE_EARLY_ADDR,
		       (ulong)map_to_sysmem(buff));
		memcpy(buff, hdr, used);
#else
		puts("trace: already enabled\n");
		return -EALREADY;
#endif
	}
	hdr = (struct trace_hdr *)buff;
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size %zd bytes: at least %zd needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	if (was_disabled)
		memset(hdr, '\0', needed);
	hdr->func_count = func_count;
	hdr->call_accum = (uintptr_t *)(hdr + 1);

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)(buff + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
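	/*
	 * Buffer layout set up above (illustrative):
	 *
	 *   buff                      struct trace_hdr
	 *   buff + sizeof(*hdr)       call_accum[func_count]
	 *   buff + needed             ftrace[ftrace_size] call records
	 *   buff + buff_size          end of buffer
	 */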
	add_textbase();

	puts("trace: enabled\n");
	hdr->depth_limit = CONFIG_TRACE_CALL_DEPTH_LIMIT;
	trace_enabled = 1;
	trace_inited = 1;

	return 0;
}

#ifdef CONFIG_TRACE_EARLY
/**
 * trace_early_init() - initialize the tracing system for early tracing
 *
 * Return: 0 if ok, -ENOSPC if not enough memory is available
 */
int __attribute__((no_instrument_function)) trace_early_init(void)
{
	ulong func_count = gd->mon_len / FUNC_SITE_SIZE;
	size_t buff_size = CONFIG_TRACE_EARLY_SIZE;
	size_t needed;

	/* We can ignore additional calls to this function */
	if (trace_enabled)
		return 0;

	hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR, CONFIG_TRACE_EARLY_SIZE);
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size is %zd bytes, at least %zd needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	memset(hdr, '\0', needed);
	hdr->call_accum = (uintptr_t *)(hdr + 1);
	hdr->func_count = func_count;

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)((char *)hdr + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	add_textbase();
	hdr->depth_limit = CONFIG_TRACE_EARLY_CALL_DEPTH_LIMIT;
	printf("trace: early enable at %08x\n", CONFIG_TRACE_EARLY_ADDR);

	trace_enabled = 1;

	return 0;
}
#endif