// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2012 The Chromium OS Authors.
 */

#include <mapmem.h>
#include <time.h>
#include <trace.h>
#include <linux/errno.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/sections.h>

DECLARE_GLOBAL_DATA_PTR;

static char trace_enabled __section(".data");
static char trace_inited __section(".data");

/* The header block at the start of the trace memory area */
struct trace_hdr {
	int func_count;			/* Total number of function call sites */
	u64 call_count;			/* Total number of tracked function calls */
	u64 untracked_count;		/* Total number of untracked function calls */
	int funcs_used;			/* Total number of functions used */

	/*
	 * Call count for each function. This is indexed by the word offset
	 * of the function from gd->relocaddr
	 */
	uintptr_t *call_accum;

	/* Function trace list */
	struct trace_call *ftrace;	/* The function call records */
	ulong ftrace_size;		/* Num. of ftrace records we have space for */
	ulong ftrace_count;		/* Num. of ftrace records written */
	ulong ftrace_too_deep_count;	/* Functions that were too deep */

	int depth;			/* Depth of function calls */
	int depth_limit;		/* Depth limit to trace to */
	int max_depth;			/* Maximum depth seen so far */
	int min_depth;			/* Minimum depth seen so far */
	bool trace_locked;		/* Used to detect recursive tracing */
};
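
/*
 * For orientation, the trace buffer handed to trace_init() or
 * trace_early_init() below is carved up roughly like this:
 *
 *	+-------------------+  <- buff (struct trace_hdr *hdr)
 *	| struct trace_hdr  |
 *	+-------------------+  <- hdr->call_accum (hdr + 1)
 *	| call_accum[]      |  one uintptr_t counter per function site
 *	+-------------------+  <- hdr->ftrace (buff + needed)
 *	| ftrace[]          |  struct trace_call records fill the rest
 *	+-------------------+  <- buff + buff_size
 */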

/* Pointer to start of trace buffer */
static struct trace_hdr *hdr __section(".data");

static inline uintptr_t __attribute__((no_instrument_function))
		func_ptr_to_num(void *func_ptr)
{
	uintptr_t offset = (uintptr_t)func_ptr;

#ifdef CONFIG_SANDBOX
	offset -= (uintptr_t)_init;
#else
	if (gd->flags & GD_FLG_RELOC)
		offset -= gd->relocaddr;
	else
		offset -= CONFIG_TEXT_BASE;
#endif
	return offset / FUNC_SITE_SIZE;
}
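
/*
 * A worked example of the mapping above: if FUNC_SITE_SIZE were 4 and a
 * function started 0x120 bytes past the relocation base (gd->relocaddr, or
 * _init on sandbox, or CONFIG_TEXT_BASE before relocation), then
 * func_ptr_to_num() returns 0x120 / 4 = 0x48 and hdr->call_accum[0x48]
 * accumulates its call count. The real FUNC_SITE_SIZE comes from the trace
 * header and may differ between architectures.
 */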

#if defined(CONFIG_EFI_LOADER) && (defined(CONFIG_ARM) || defined(CONFIG_RISCV))

/**
 * trace_gd - the value of the gd register
 */
static volatile gd_t *trace_gd;

/**
 * trace_save_gd() - save the value of the gd register
 */
static void notrace trace_save_gd(void)
{
	trace_gd = gd;
}

/**
 * trace_swap_gd() - swap between U-Boot and application gd register value
 *
 * A UEFI application may change the value of the register that gd lives in.
 * But some of our functions like get_ticks() access this register. So we
 * have to set the gd register to the U-Boot value when entering a trace
 * point and set it back to the application value when exiting the trace point.
 */
static void notrace trace_swap_gd(void)
{
	volatile gd_t *temp_gd = trace_gd;

	trace_gd = gd;
	set_gd(temp_gd);
}

#else

static void notrace trace_save_gd(void)
{
}

static void notrace trace_swap_gd(void)
{
}

#endif

static void notrace add_ftrace(void *func_ptr, void *caller, ulong flags)
{
	if (hdr->depth > hdr->depth_limit) {
		hdr->ftrace_too_deep_count++;
		return;
	}
	if (hdr->ftrace_count < hdr->ftrace_size) {
		struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];

		rec->func = func_ptr_to_num(func_ptr);
		rec->caller = func_ptr_to_num(caller);
		rec->flags = flags | (timer_get_us() & FUNCF_TIMESTAMP_MASK);
	}
	hdr->ftrace_count++;
}

/**
 * __cyg_profile_func_enter() - record function entry
 *
 * We add to our tally for this function and add to the list of called
 * functions.
 *
 * @func_ptr: pointer to function being entered
 * @caller: pointer to function which called this function
 */
void notrace __cyg_profile_func_enter(void *func_ptr, void *caller)
{
	if (trace_enabled) {
		int func;

		if (hdr->trace_locked) {
			trace_enabled = 0;
			puts("trace: recursion detected, disabling\n");
			hdr->trace_locked = false;
			return;
		}

		hdr->trace_locked = true;
		trace_swap_gd();
		add_ftrace(func_ptr, caller, FUNCF_ENTRY);
		func = func_ptr_to_num(func_ptr);
		if (func < hdr->func_count) {
			hdr->call_accum[func]++;
			hdr->call_count++;
		} else {
			hdr->untracked_count++;
		}
		hdr->depth++;
		if (hdr->depth > hdr->max_depth)
			hdr->max_depth = hdr->depth;
		trace_swap_gd();
		hdr->trace_locked = false;
	}
}

/**
 * __cyg_profile_func_exit() - record function exit
 *
 * @func_ptr: pointer to function being exited
 * @caller: pointer to function which called this function
 */
void notrace __cyg_profile_func_exit(void *func_ptr, void *caller)
{
	if (trace_enabled) {
		trace_swap_gd();
		hdr->depth--;
		add_ftrace(func_ptr, caller, FUNCF_EXIT);
		if (hdr->depth < hdr->min_depth)
			hdr->min_depth = hdr->depth;
		trace_swap_gd();
	}
}
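
/*
 * How these hooks get called: when U-Boot is built with function
 * instrumentation (the compiler's -finstrument-functions option), every
 * instrumented function receives a call to __cyg_profile_func_enter() on
 * entry and __cyg_profile_func_exit() on each return, passing the
 * function's own address and the call site. The notrace attribute
 * (no_instrument_function) keeps the tracer's own helpers out of that set
 * so the hooks cannot instrument themselves. Roughly, the compiler turns
 *
 *	void foo(void) { do_stuff(); }
 *
 * into something like
 *
 *	void foo(void)
 *	{
 *		__cyg_profile_func_enter(foo, __builtin_return_address(0));
 *		do_stuff();
 *		__cyg_profile_func_exit(foo, __builtin_return_address(0));
 *	}
 */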

/**
 * trace_list_functions() - produce a list of called functions
 *
 * The information is written into the supplied buffer - a header followed
 * by a list of function records.
 *
 * @buff: buffer to place list into
 * @buff_size: size of buffer
 * @needed: returns size of buffer needed, which may be
 *	greater than buff_size if we ran out of space.
 * Return: 0 if ok, -ENOSPC if space was exhausted
 */
int trace_list_functions(void *buff, size_t buff_size, size_t *needed)
{
	struct trace_output_hdr *output_hdr = NULL;
	void *end, *ptr = buff;
	size_t func;
	size_t upto;

	end = buff ? buff + buff_size : NULL;

	/* Place some header information */
	if (ptr + sizeof(struct trace_output_hdr) < end)
		output_hdr = ptr;
	ptr += sizeof(struct trace_output_hdr);

	/* Add information about each function */
	for (func = upto = 0; func < hdr->func_count; func++) {
		size_t calls = hdr->call_accum[func];

		if (!calls)
			continue;

		if (ptr + sizeof(struct trace_output_func) < end) {
			struct trace_output_func *stats = ptr;

			stats->offset = func * FUNC_SITE_SIZE;
			stats->call_count = calls;
			upto++;
		}
		ptr += sizeof(struct trace_output_func);
	}

	/* Update the header */
	if (output_hdr) {
		output_hdr->rec_count = upto;
		output_hdr->type = TRACE_CHUNK_FUNCS;
	}

	/* Work out how much of the buffer we used */
	*needed = ptr - buff;
	if (ptr > end)
		return -ENOSPC;

	return 0;
}
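
/*
 * A minimal usage sketch (not part of this file; write_out() and the local
 * names are illustrative only): since the function reports the required
 * size through *needed and returns -ENOSPC when the buffer is too small,
 * a caller can probe with an empty buffer first and then retry:
 *
 *	size_t needed;
 *	void *buf;
 *
 *	if (trace_list_functions(NULL, 0, &needed) == -ENOSPC) {
 *		buf = malloc(needed);
 *		if (buf && !trace_list_functions(buf, needed, &needed))
 *			write_out(buf, needed);
 *		free(buf);
 *	}
 */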

/**
 * trace_list_calls() - produce a list of function calls
 *
 * The information is written into the supplied buffer - a header followed
 * by a list of call records.
 *
 * @buff: buffer to place list into
 * @buff_size: size of buffer
 * @needed: returns size of buffer needed, which may be
 *	greater than buff_size if we ran out of space.
 * Return: 0 if ok, -ENOSPC if space was exhausted
 */
int trace_list_calls(void *buff, size_t buff_size, size_t *needed)
{
	struct trace_output_hdr *output_hdr = NULL;
	void *end, *ptr = buff;
	size_t rec, upto;
	size_t count;

	end = buff ? buff + buff_size : NULL;

	/* Place some header information */
	if (ptr + sizeof(struct trace_output_hdr) < end)
		output_hdr = ptr;
	ptr += sizeof(struct trace_output_hdr);

	/* Add information about each call */
	count = hdr->ftrace_count;
	if (count > hdr->ftrace_size)
		count = hdr->ftrace_size;
	for (rec = upto = 0; rec < count; rec++) {
		if (ptr + sizeof(struct trace_call) < end) {
			struct trace_call *call = &hdr->ftrace[rec];
			struct trace_call *out = ptr;

			out->func = call->func * FUNC_SITE_SIZE;
			out->caller = call->caller * FUNC_SITE_SIZE;
			out->flags = call->flags;
			upto++;
		}
		ptr += sizeof(struct trace_call);
	}

	/* Update the header */
	if (output_hdr) {
		memset(output_hdr, '\0', sizeof(*output_hdr));
		output_hdr->rec_count = upto;
		output_hdr->type = TRACE_CHUNK_CALLS;
		output_hdr->version = TRACE_VERSION;
		output_hdr->text_base = CONFIG_TEXT_BASE;
	}

	/* Work out how much of the buffer we used */
	*needed = ptr - buff;
	if (ptr > end)
		return -ENOSPC;

	return 0;
}
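
/*
 * Interpreting the output (a sketch based on the record layout written
 * above): each emitted trace_call carries byte offsets rather than absolute
 * addresses, so the link-time address of a traced function can be recovered
 * by adding the text base stored in the header, and the timestamp sits in
 * the low bits of the flags:
 *
 *	struct trace_output_hdr *out_hdr = buff;
 *	struct trace_call *calls = (struct trace_call *)(out_hdr + 1);
 *	ulong addr = out_hdr->text_base + calls[0].func;
 *	ulong when_us = calls[0].flags & FUNCF_TIMESTAMP_MASK;
 *
 * In the U-Boot tree, tools/proftool is the usual consumer of this format.
 */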

/**
 * trace_print_stats() - print basic information about tracing
 */
void trace_print_stats(void)
{
	ulong count;

#ifndef FTRACE
	puts("Warning: make U-Boot with FTRACE to enable function instrumenting.\n");
	puts("You will likely get zeroed data here\n");
#endif
	if (!trace_inited) {
		printf("Trace is disabled\n");
		return;
	}
	print_grouped_ull(hdr->func_count, 10);
	puts(" function sites\n");
	print_grouped_ull(hdr->call_count, 10);
	puts(" function calls\n");
	print_grouped_ull(hdr->untracked_count, 10);
	puts(" untracked function calls\n");
	count = min(hdr->ftrace_count, hdr->ftrace_size);
	print_grouped_ull(count, 10);
	puts(" traced function calls");
	if (hdr->ftrace_count > hdr->ftrace_size) {
		printf(" (%lu dropped due to overflow)",
		       hdr->ftrace_count - hdr->ftrace_size);
	}

	/* Add in minimum depth since the trace did not start at top level */
	printf("\n%15d maximum observed call depth\n",
	       hdr->max_depth - hdr->min_depth);
	printf("%15d call depth limit\n", hdr->depth_limit);
	print_grouped_ull(hdr->ftrace_too_deep_count, 10);
	puts(" calls not traced due to depth\n");
	print_grouped_ull(hdr->ftrace_size, 10);
	puts(" max function calls\n");
	printf("\ntrace buffer %lx call records %lx\n",
	       (ulong)map_to_sysmem(hdr), (ulong)map_to_sysmem(hdr->ftrace));
}

void notrace trace_set_enabled(int enabled)
{
	trace_enabled = enabled != 0;
}

static int get_func_count(void)
{
	/* Detect no support for mon_len since this means tracing cannot work */
	if (IS_ENABLED(CONFIG_SANDBOX) && !gd->mon_len) {
		puts("Tracing is not supported on this board\n");
		return -ENOTSUPP;
	}

	return gd->mon_len / FUNC_SITE_SIZE;
}

/**
 * trace_init() - initialize the tracing system and enable it
 *
 * @buff: Pointer to trace buffer
 * @buff_size: Size of trace buffer
 * Return: 0 if ok
 */
int notrace trace_init(void *buff, size_t buff_size)
{
	int func_count = get_func_count();
	size_t needed;
	int was_disabled = !trace_enabled;

	if (func_count < 0)
		return func_count;
	trace_save_gd();

	if (!was_disabled) {
#ifdef CONFIG_TRACE_EARLY
		ulong used, count;
		char *end;

		/*
		 * Copy over the early trace data if we have it. Disable
		 * tracing while we are doing this.
		 */
		trace_enabled = 0;
		hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR,
				 CONFIG_TRACE_EARLY_SIZE);
		count = min(hdr->ftrace_count, hdr->ftrace_size);
		end = (char *)&hdr->ftrace[count];
		used = end - (char *)hdr;
		printf("trace: copying %08lx bytes of early data from %x to %08lx\n",
		       used, CONFIG_TRACE_EARLY_ADDR,
		       (ulong)map_to_sysmem(buff));
		printf("%lu traced function calls", count);
		if (hdr->ftrace_count > hdr->ftrace_size) {
			printf(" (%lu dropped due to overflow)",
			       hdr->ftrace_count - hdr->ftrace_size);
			hdr->ftrace_count = hdr->ftrace_size;
		}
		puts("\n");
		memcpy(buff, hdr, used);
#else
		puts("trace: already enabled\n");
		return -EALREADY;
#endif
	}
	hdr = (struct trace_hdr *)buff;
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size %zx bytes: at least %zx needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	if (was_disabled) {
		memset(hdr, '\0', needed);
		hdr->min_depth = INT_MAX;
	}
	hdr->func_count = func_count;
	hdr->call_accum = (uintptr_t *)(hdr + 1);

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)(buff + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	hdr->depth_limit = CONFIG_TRACE_CALL_DEPTH_LIMIT;

	puts("trace: enabled\n");
	trace_enabled = 1;
	trace_inited = 1;

	return 0;
}
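
/*
 * A usage sketch (the buffer name and size below are illustrative, not
 * taken from this file): the caller reserves a block of memory and hands
 * it to trace_init(), which carves the header, call_accum[] array and
 * trace_call records out of it as described near the top of this file:
 *
 *	#define TRACE_BUF_SIZE	(16 << 20)	// hypothetical 16 MiB
 *
 *	void *buf = malloc(TRACE_BUF_SIZE);
 *
 *	if (buf && !trace_init(buf, TRACE_BUF_SIZE)) {
 *		// ... run the code to be profiled ...
 *		trace_set_enabled(0);	// pause before listing the results
 *	}
 */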

#ifdef CONFIG_TRACE_EARLY
/**
 * trace_early_init() - initialize the tracing system for early tracing
 *
 * Return: 0 if ok, -ENOSPC if not enough memory is available
 */
int notrace trace_early_init(void)
{
	int func_count = get_func_count();
	size_t buff_size = CONFIG_TRACE_EARLY_SIZE;
	size_t needed;

	if (func_count < 0)
		return func_count;
	/* We can ignore additional calls to this function */
	if (trace_enabled)
		return 0;

	hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR, CONFIG_TRACE_EARLY_SIZE);
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size is %zx bytes, at least %zx needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	memset(hdr, '\0', needed);
	hdr->call_accum = (uintptr_t *)(hdr + 1);
	hdr->func_count = func_count;
	hdr->min_depth = INT_MAX;

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)((char *)hdr + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	hdr->depth_limit = CONFIG_TRACE_EARLY_CALL_DEPTH_LIMIT;
	printf("trace: early enable at %08x\n", CONFIG_TRACE_EARLY_ADDR);

	trace_enabled = 1;

	return 0;
}
#endif
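
/*
 * Early-trace flow, summarising the code above (the numeric values are
 * purely illustrative): with CONFIG_TRACE_EARLY enabled, trace_early_init()
 * records into a fixed buffer at CONFIG_TRACE_EARLY_ADDR of size
 * CONFIG_TRACE_EARLY_SIZE before the final buffer exists; a later
 * trace_init() call then copies the used part of that early buffer into the
 * full-size buffer and carries on tracing there. A board configuration
 * might look like:
 *
 *	CONFIG_TRACE=y
 *	CONFIG_TRACE_EARLY=y
 *	CONFIG_TRACE_EARLY_ADDR=0x01000000
 *	CONFIG_TRACE_EARLY_SIZE=0x00100000
 *	CONFIG_TRACE_EARLY_CALL_DEPTH_LIMIT=200
 */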