]>
Commit | Line | Data |
---|---|---|
02d27625 MM |
1 | /* Branch trace support for GDB, the GNU debugger. |
2 | ||
e2882c85 | 3 | Copyright (C) 2013-2018 Free Software Foundation, Inc. |
02d27625 MM |
4 | |
5 | Contributed by Intel Corp. <[email protected]> | |
6 | ||
7 | This file is part of GDB. | |
8 | ||
9 | This program is free software; you can redistribute it and/or modify | |
10 | it under the terms of the GNU General Public License as published by | |
11 | the Free Software Foundation; either version 3 of the License, or | |
12 | (at your option) any later version. | |
13 | ||
14 | This program is distributed in the hope that it will be useful, | |
15 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
17 | GNU General Public License for more details. | |
18 | ||
19 | You should have received a copy of the GNU General Public License | |
20 | along with this program. If not, see <http://www.gnu.org/licenses/>. */ | |
21 | ||
d41f6d8e | 22 | #include "defs.h" |
02d27625 MM |
23 | #include "btrace.h" |
24 | #include "gdbthread.h" | |
02d27625 MM |
25 | #include "inferior.h" |
26 | #include "target.h" | |
27 | #include "record.h" | |
28 | #include "symtab.h" | |
29 | #include "disasm.h" | |
30 | #include "source.h" | |
31 | #include "filenames.h" | |
c12a2917 | 32 | #include "xml-support.h" |
6e07b1d2 | 33 | #include "regcache.h" |
b20a6524 | 34 | #include "rsp-low.h" |
b0627500 MM |
35 | #include "gdbcmd.h" |
36 | #include "cli/cli-utils.h" | |
b20a6524 MM |
37 | |
38 | #include <inttypes.h> | |
b0627500 | 39 | #include <ctype.h> |
325fac50 | 40 | #include <algorithm> |
b0627500 MM |
41 | |
42 | /* Command lists for btrace maintenance commands. */ | |
43 | static struct cmd_list_element *maint_btrace_cmdlist; | |
44 | static struct cmd_list_element *maint_btrace_set_cmdlist; | |
45 | static struct cmd_list_element *maint_btrace_show_cmdlist; | |
46 | static struct cmd_list_element *maint_btrace_pt_set_cmdlist; | |
47 | static struct cmd_list_element *maint_btrace_pt_show_cmdlist; | |
48 | ||
49 | /* Control whether to skip PAD packets when computing the packet history. */ | |
50 | static int maint_btrace_pt_skip_pad = 1; | |
b20a6524 MM |
51 | |
52 | static void btrace_add_pc (struct thread_info *tp); | |
02d27625 MM |
53 | |
54 | /* Print a record debug message. Use do ... while (0) to avoid ambiguities | |
55 | when used in if statements. */ | |
56 | ||
57 | #define DEBUG(msg, args...) \ | |
58 | do \ | |
59 | { \ | |
60 | if (record_debug != 0) \ | |
61 | fprintf_unfiltered (gdb_stdlog, \ | |
62 | "[btrace] " msg "\n", ##args); \ | |
63 | } \ | |
64 | while (0) | |
65 | ||
66 | #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args) | |
67 | ||
02d27625 MM |
68 | /* Return the function name of a recorded function segment for printing. |
69 | This function never returns NULL. */ | |
70 | ||
71 | static const char * | |
23a7fe75 | 72 | ftrace_print_function_name (const struct btrace_function *bfun) |
02d27625 MM |
73 | { |
74 | struct minimal_symbol *msym; | |
75 | struct symbol *sym; | |
76 | ||
77 | msym = bfun->msym; | |
78 | sym = bfun->sym; | |
79 | ||
80 | if (sym != NULL) | |
81 | return SYMBOL_PRINT_NAME (sym); | |
82 | ||
83 | if (msym != NULL) | |
efd66ac6 | 84 | return MSYMBOL_PRINT_NAME (msym); |
02d27625 MM |
85 | |
86 | return "<unknown>"; | |
87 | } | |
88 | ||
89 | /* Return the file name of a recorded function segment for printing. | |
90 | This function never returns NULL. */ | |
91 | ||
92 | static const char * | |
23a7fe75 | 93 | ftrace_print_filename (const struct btrace_function *bfun) |
02d27625 MM |
94 | { |
95 | struct symbol *sym; | |
96 | const char *filename; | |
97 | ||
98 | sym = bfun->sym; | |
99 | ||
100 | if (sym != NULL) | |
08be3fe3 | 101 | filename = symtab_to_filename_for_display (symbol_symtab (sym)); |
02d27625 MM |
102 | else |
103 | filename = "<unknown>"; | |
104 | ||
105 | return filename; | |
106 | } | |
107 | ||
23a7fe75 MM |
108 | /* Return a string representation of the address of an instruction. |
109 | This function never returns NULL. */ | |
02d27625 | 110 | |
23a7fe75 MM |
111 | static const char * |
112 | ftrace_print_insn_addr (const struct btrace_insn *insn) | |
02d27625 | 113 | { |
23a7fe75 MM |
114 | if (insn == NULL) |
115 | return "<nil>"; | |
116 | ||
117 | return core_addr_to_string_nz (insn->pc); | |
02d27625 MM |
118 | } |
119 | ||
23a7fe75 | 120 | /* Print an ftrace debug status message. */ |
02d27625 MM |
121 | |
122 | static void | |
23a7fe75 | 123 | ftrace_debug (const struct btrace_function *bfun, const char *prefix) |
02d27625 | 124 | { |
23a7fe75 MM |
125 | const char *fun, *file; |
126 | unsigned int ibegin, iend; | |
ce0dfbea | 127 | int level; |
23a7fe75 MM |
128 | |
129 | fun = ftrace_print_function_name (bfun); | |
130 | file = ftrace_print_filename (bfun); | |
131 | level = bfun->level; | |
132 | ||
23a7fe75 | 133 | ibegin = bfun->insn_offset; |
0860c437 | 134 | iend = ibegin + bfun->insn.size (); |
23a7fe75 | 135 | |
ce0dfbea MM |
136 | DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)", |
137 | prefix, fun, file, level, ibegin, iend); | |
02d27625 MM |
138 | } |
139 | ||
69090cee TW |
140 | /* Return the number of instructions in a given function call segment. */ |
141 | ||
142 | static unsigned int | |
143 | ftrace_call_num_insn (const struct btrace_function* bfun) | |
144 | { | |
145 | if (bfun == NULL) | |
146 | return 0; | |
147 | ||
148 | /* A gap is always counted as one instruction. */ | |
149 | if (bfun->errcode != 0) | |
150 | return 1; | |
151 | ||
0860c437 | 152 | return bfun->insn.size (); |
69090cee TW |
153 | } |
154 | ||
42bfe59e TW |
155 | /* Return the function segment with the given NUMBER or NULL if no such segment |
156 | exists. BTINFO is the branch trace information for the current thread. */ | |
157 | ||
158 | static struct btrace_function * | |
08c3f6d2 TW |
159 | ftrace_find_call_by_number (struct btrace_thread_info *btinfo, |
160 | unsigned int number) | |
161 | { | |
162 | if (number == 0 || number > btinfo->functions.size ()) | |
163 | return NULL; | |
164 | ||
165 | return &btinfo->functions[number - 1]; | |
166 | } | |
167 | ||
168 | /* A const version of the function above. */ | |
169 | ||
170 | static const struct btrace_function * | |
42bfe59e TW |
171 | ftrace_find_call_by_number (const struct btrace_thread_info *btinfo, |
172 | unsigned int number) | |
173 | { | |
174 | if (number == 0 || number > btinfo->functions.size ()) | |
175 | return NULL; | |
176 | ||
08c3f6d2 | 177 | return &btinfo->functions[number - 1]; |
42bfe59e TW |
178 | } |
179 | ||
23a7fe75 MM |
180 | /* Return non-zero if BFUN does not match MFUN and FUN, |
181 | return zero otherwise. */ | |
02d27625 MM |
182 | |
183 | static int | |
23a7fe75 MM |
184 | ftrace_function_switched (const struct btrace_function *bfun, |
185 | const struct minimal_symbol *mfun, | |
186 | const struct symbol *fun) | |
02d27625 MM |
187 | { |
188 | struct minimal_symbol *msym; | |
189 | struct symbol *sym; | |
190 | ||
02d27625 MM |
191 | msym = bfun->msym; |
192 | sym = bfun->sym; | |
193 | ||
194 | /* If the minimal symbol changed, we certainly switched functions. */ | |
195 | if (mfun != NULL && msym != NULL | |
efd66ac6 | 196 | && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0) |
02d27625 MM |
197 | return 1; |
198 | ||
199 | /* If the symbol changed, we certainly switched functions. */ | |
200 | if (fun != NULL && sym != NULL) | |
201 | { | |
202 | const char *bfname, *fname; | |
203 | ||
204 | /* Check the function name. */ | |
205 | if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0) | |
206 | return 1; | |
207 | ||
208 | /* Check the location of those functions, as well. */ | |
08be3fe3 DE |
209 | bfname = symtab_to_fullname (symbol_symtab (sym)); |
210 | fname = symtab_to_fullname (symbol_symtab (fun)); | |
02d27625 MM |
211 | if (filename_cmp (fname, bfname) != 0) |
212 | return 1; | |
213 | } | |
214 | ||
23a7fe75 MM |
215 | /* If we lost symbol information, we switched functions. */ |
216 | if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL) | |
217 | return 1; | |
218 | ||
219 | /* If we gained symbol information, we switched functions. */ | |
220 | if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL)) | |
221 | return 1; | |
222 | ||
02d27625 MM |
223 | return 0; |
224 | } | |
225 | ||
8286623c TW |
226 | /* Allocate and initialize a new branch trace function segment at the end of |
227 | the trace. | |
17b89b34 | 228 | BTINFO is the branch trace information for the current thread. |
08c3f6d2 TW |
229 | MFUN and FUN are the symbol information we have for this function. |
230 | This invalidates all struct btrace_function pointer currently held. */ | |
23a7fe75 MM |
231 | |
232 | static struct btrace_function * | |
17b89b34 | 233 | ftrace_new_function (struct btrace_thread_info *btinfo, |
23a7fe75 MM |
234 | struct minimal_symbol *mfun, |
235 | struct symbol *fun) | |
236 | { | |
08c3f6d2 TW |
237 | int level; |
238 | unsigned int number, insn_offset; | |
23a7fe75 | 239 | |
b54b03bd | 240 | if (btinfo->functions.empty ()) |
5de9129b | 241 | { |
08c3f6d2 TW |
242 | /* Start counting NUMBER and INSN_OFFSET at one. */ |
243 | level = 0; | |
244 | number = 1; | |
245 | insn_offset = 1; | |
5de9129b MM |
246 | } |
247 | else | |
23a7fe75 | 248 | { |
08c3f6d2 TW |
249 | const struct btrace_function *prev = &btinfo->functions.back (); |
250 | level = prev->level; | |
251 | number = prev->number + 1; | |
252 | insn_offset = prev->insn_offset + ftrace_call_num_insn (prev); | |
23a7fe75 MM |
253 | } |
254 | ||
08c3f6d2 TW |
255 | btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level); |
256 | return &btinfo->functions.back (); | |
02d27625 MM |
257 | } |
258 | ||
23a7fe75 | 259 | /* Update the UP field of a function segment. */ |
02d27625 | 260 | |
23a7fe75 MM |
261 | static void |
262 | ftrace_update_caller (struct btrace_function *bfun, | |
263 | struct btrace_function *caller, | |
264 | enum btrace_function_flag flags) | |
02d27625 | 265 | { |
42bfe59e | 266 | if (bfun->up != 0) |
23a7fe75 | 267 | ftrace_debug (bfun, "updating caller"); |
02d27625 | 268 | |
42bfe59e | 269 | bfun->up = caller->number; |
23a7fe75 MM |
270 | bfun->flags = flags; |
271 | ||
272 | ftrace_debug (bfun, "set caller"); | |
d87fdac3 | 273 | ftrace_debug (caller, "..to"); |
23a7fe75 MM |
274 | } |
275 | ||
276 | /* Fix up the caller for all segments of a function. */ | |
277 | ||
278 | static void | |
4aeb0dfc TW |
279 | ftrace_fixup_caller (struct btrace_thread_info *btinfo, |
280 | struct btrace_function *bfun, | |
23a7fe75 MM |
281 | struct btrace_function *caller, |
282 | enum btrace_function_flag flags) | |
283 | { | |
4aeb0dfc | 284 | unsigned int prev, next; |
23a7fe75 | 285 | |
4aeb0dfc TW |
286 | prev = bfun->prev; |
287 | next = bfun->next; | |
23a7fe75 MM |
288 | ftrace_update_caller (bfun, caller, flags); |
289 | ||
290 | /* Update all function segments belonging to the same function. */ | |
4aeb0dfc TW |
291 | for (; prev != 0; prev = bfun->prev) |
292 | { | |
293 | bfun = ftrace_find_call_by_number (btinfo, prev); | |
294 | ftrace_update_caller (bfun, caller, flags); | |
295 | } | |
23a7fe75 | 296 | |
4aeb0dfc TW |
297 | for (; next != 0; next = bfun->next) |
298 | { | |
299 | bfun = ftrace_find_call_by_number (btinfo, next); | |
300 | ftrace_update_caller (bfun, caller, flags); | |
301 | } | |
23a7fe75 MM |
302 | } |
303 | ||
8286623c | 304 | /* Add a new function segment for a call at the end of the trace. |
17b89b34 | 305 | BTINFO is the branch trace information for the current thread. |
23a7fe75 MM |
306 | MFUN and FUN are the symbol information we have for this function. */ |
307 | ||
308 | static struct btrace_function * | |
17b89b34 | 309 | ftrace_new_call (struct btrace_thread_info *btinfo, |
23a7fe75 MM |
310 | struct minimal_symbol *mfun, |
311 | struct symbol *fun) | |
312 | { | |
b54b03bd | 313 | const unsigned int length = btinfo->functions.size (); |
8286623c | 314 | struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun); |
23a7fe75 | 315 | |
42bfe59e | 316 | bfun->up = length; |
31fd9caa | 317 | bfun->level += 1; |
23a7fe75 MM |
318 | |
319 | ftrace_debug (bfun, "new call"); | |
320 | ||
321 | return bfun; | |
322 | } | |
323 | ||
8286623c | 324 | /* Add a new function segment for a tail call at the end of the trace. |
17b89b34 | 325 | BTINFO is the branch trace information for the current thread. |
23a7fe75 MM |
326 | MFUN and FUN are the symbol information we have for this function. */ |
327 | ||
328 | static struct btrace_function * | |
17b89b34 | 329 | ftrace_new_tailcall (struct btrace_thread_info *btinfo, |
23a7fe75 MM |
330 | struct minimal_symbol *mfun, |
331 | struct symbol *fun) | |
332 | { | |
b54b03bd | 333 | const unsigned int length = btinfo->functions.size (); |
8286623c | 334 | struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun); |
02d27625 | 335 | |
42bfe59e | 336 | bfun->up = length; |
31fd9caa | 337 | bfun->level += 1; |
23a7fe75 | 338 | bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL; |
02d27625 | 339 | |
23a7fe75 MM |
340 | ftrace_debug (bfun, "new tail call"); |
341 | ||
342 | return bfun; | |
343 | } | |
344 | ||
d87fdac3 | 345 | /* Return the caller of BFUN or NULL if there is none. This function skips |
42bfe59e TW |
346 | tail calls in the call chain. BTINFO is the branch trace information for |
347 | the current thread. */ | |
d87fdac3 | 348 | static struct btrace_function * |
42bfe59e TW |
349 | ftrace_get_caller (struct btrace_thread_info *btinfo, |
350 | struct btrace_function *bfun) | |
d87fdac3 | 351 | { |
42bfe59e | 352 | for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up)) |
d87fdac3 | 353 | if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0) |
42bfe59e | 354 | return ftrace_find_call_by_number (btinfo, bfun->up); |
d87fdac3 MM |
355 | |
356 | return NULL; | |
357 | } | |
358 | ||
23a7fe75 | 359 | /* Find the innermost caller in the back trace of BFUN with MFUN/FUN |
42bfe59e TW |
360 | symbol information. BTINFO is the branch trace information for the current |
361 | thread. */ | |
23a7fe75 MM |
362 | |
363 | static struct btrace_function * | |
42bfe59e TW |
364 | ftrace_find_caller (struct btrace_thread_info *btinfo, |
365 | struct btrace_function *bfun, | |
23a7fe75 MM |
366 | struct minimal_symbol *mfun, |
367 | struct symbol *fun) | |
368 | { | |
42bfe59e | 369 | for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up)) |
23a7fe75 MM |
370 | { |
371 | /* Skip functions with incompatible symbol information. */ | |
372 | if (ftrace_function_switched (bfun, mfun, fun)) | |
373 | continue; | |
374 | ||
375 | /* This is the function segment we're looking for. */ | |
376 | break; | |
377 | } | |
378 | ||
379 | return bfun; | |
380 | } | |
381 | ||
382 | /* Find the innermost caller in the back trace of BFUN, skipping all | |
383 | function segments that do not end with a call instruction (e.g. | |
42bfe59e TW |
384 | tail calls ending with a jump). BTINFO is the branch trace information for |
385 | the current thread. */ | |
23a7fe75 MM |
386 | |
387 | static struct btrace_function * | |
42bfe59e TW |
388 | ftrace_find_call (struct btrace_thread_info *btinfo, |
389 | struct btrace_function *bfun) | |
23a7fe75 | 390 | { |
42bfe59e | 391 | for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up)) |
02d27625 | 392 | { |
31fd9caa MM |
393 | /* Skip gaps. */ |
394 | if (bfun->errcode != 0) | |
395 | continue; | |
23a7fe75 | 396 | |
0860c437 | 397 | btrace_insn &last = bfun->insn.back (); |
02d27625 | 398 | |
0860c437 | 399 | if (last.iclass == BTRACE_INSN_CALL) |
23a7fe75 MM |
400 | break; |
401 | } | |
402 | ||
403 | return bfun; | |
404 | } | |
405 | ||
8286623c TW |
406 | /* Add a continuation segment for a function into which we return at the end of |
407 | the trace. | |
17b89b34 | 408 | BTINFO is the branch trace information for the current thread. |
23a7fe75 MM |
409 | MFUN and FUN are the symbol information we have for this function. */ |
410 | ||
411 | static struct btrace_function * | |
17b89b34 | 412 | ftrace_new_return (struct btrace_thread_info *btinfo, |
23a7fe75 MM |
413 | struct minimal_symbol *mfun, |
414 | struct symbol *fun) | |
415 | { | |
08c3f6d2 | 416 | struct btrace_function *prev, *bfun, *caller; |
23a7fe75 | 417 | |
8286623c | 418 | bfun = ftrace_new_function (btinfo, mfun, fun); |
08c3f6d2 | 419 | prev = ftrace_find_call_by_number (btinfo, bfun->number - 1); |
23a7fe75 MM |
420 | |
421 | /* It is important to start at PREV's caller. Otherwise, we might find | |
422 | PREV itself, if PREV is a recursive function. */ | |
42bfe59e TW |
423 | caller = ftrace_find_call_by_number (btinfo, prev->up); |
424 | caller = ftrace_find_caller (btinfo, caller, mfun, fun); | |
23a7fe75 MM |
425 | if (caller != NULL) |
426 | { | |
427 | /* The caller of PREV is the preceding btrace function segment in this | |
428 | function instance. */ | |
4aeb0dfc | 429 | gdb_assert (caller->next == 0); |
23a7fe75 | 430 | |
4aeb0dfc TW |
431 | caller->next = bfun->number; |
432 | bfun->prev = caller->number; | |
23a7fe75 MM |
433 | |
434 | /* Maintain the function level. */ | |
435 | bfun->level = caller->level; | |
436 | ||
437 | /* Maintain the call stack. */ | |
438 | bfun->up = caller->up; | |
439 | bfun->flags = caller->flags; | |
440 | ||
441 | ftrace_debug (bfun, "new return"); | |
442 | } | |
443 | else | |
444 | { | |
445 | /* We did not find a caller. This could mean that something went | |
446 | wrong or that the call is simply not included in the trace. */ | |
02d27625 | 447 | |
23a7fe75 | 448 | /* Let's search for some actual call. */ |
42bfe59e TW |
449 | caller = ftrace_find_call_by_number (btinfo, prev->up); |
450 | caller = ftrace_find_call (btinfo, caller); | |
23a7fe75 | 451 | if (caller == NULL) |
02d27625 | 452 | { |
23a7fe75 MM |
453 | /* There is no call in PREV's back trace. We assume that the |
454 | branch trace did not include it. */ | |
455 | ||
259ba1e8 MM |
456 | /* Let's find the topmost function and add a new caller for it. |
457 | This should handle a series of initial tail calls. */ | |
42bfe59e TW |
458 | while (prev->up != 0) |
459 | prev = ftrace_find_call_by_number (btinfo, prev->up); | |
02d27625 | 460 | |
259ba1e8 | 461 | bfun->level = prev->level - 1; |
23a7fe75 MM |
462 | |
463 | /* Fix up the call stack for PREV. */ | |
4aeb0dfc | 464 | ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET); |
23a7fe75 MM |
465 | |
466 | ftrace_debug (bfun, "new return - no caller"); | |
467 | } | |
468 | else | |
02d27625 | 469 | { |
23a7fe75 | 470 | /* There is a call in PREV's back trace to which we should have |
259ba1e8 MM |
471 | returned but didn't. Let's start a new, separate back trace |
472 | from PREV's level. */ | |
473 | bfun->level = prev->level - 1; | |
474 | ||
475 | /* We fix up the back trace for PREV but leave other function segments | |
476 | on the same level as they are. | |
477 | This should handle things like schedule () correctly where we're | |
478 | switching contexts. */ | |
42bfe59e | 479 | prev->up = bfun->number; |
259ba1e8 | 480 | prev->flags = BFUN_UP_LINKS_TO_RET; |
02d27625 | 481 | |
23a7fe75 | 482 | ftrace_debug (bfun, "new return - unknown caller"); |
02d27625 | 483 | } |
23a7fe75 MM |
484 | } |
485 | ||
486 | return bfun; | |
487 | } | |
488 | ||
8286623c | 489 | /* Add a new function segment for a function switch at the end of the trace. |
17b89b34 | 490 | BTINFO is the branch trace information for the current thread. |
23a7fe75 MM |
491 | MFUN and FUN are the symbol information we have for this function. */ |
492 | ||
493 | static struct btrace_function * | |
17b89b34 | 494 | ftrace_new_switch (struct btrace_thread_info *btinfo, |
23a7fe75 MM |
495 | struct minimal_symbol *mfun, |
496 | struct symbol *fun) | |
497 | { | |
08c3f6d2 | 498 | struct btrace_function *prev, *bfun; |
23a7fe75 | 499 | |
4c2c7ac6 MM |
500 | /* This is an unexplained function switch. We can't really be sure about the |
501 | call stack, yet the best I can think of right now is to preserve it. */ | |
8286623c | 502 | bfun = ftrace_new_function (btinfo, mfun, fun); |
08c3f6d2 | 503 | prev = ftrace_find_call_by_number (btinfo, bfun->number - 1); |
4c2c7ac6 MM |
504 | bfun->up = prev->up; |
505 | bfun->flags = prev->flags; | |
02d27625 | 506 | |
23a7fe75 MM |
507 | ftrace_debug (bfun, "new switch"); |
508 | ||
509 | return bfun; | |
510 | } | |
511 | ||
8286623c TW |
512 | /* Add a new function segment for a gap in the trace due to a decode error at |
513 | the end of the trace. | |
17b89b34 | 514 | BTINFO is the branch trace information for the current thread. |
31fd9caa MM |
515 | ERRCODE is the format-specific error code. */ |
516 | ||
517 | static struct btrace_function * | |
8ffd39f2 TW |
518 | ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode, |
519 | std::vector<unsigned int> &gaps) | |
31fd9caa MM |
520 | { |
521 | struct btrace_function *bfun; | |
522 | ||
b54b03bd | 523 | if (btinfo->functions.empty ()) |
8286623c | 524 | bfun = ftrace_new_function (btinfo, NULL, NULL); |
b54b03bd TW |
525 | else |
526 | { | |
527 | /* We hijack the previous function segment if it was empty. */ | |
08c3f6d2 | 528 | bfun = &btinfo->functions.back (); |
0860c437 | 529 | if (bfun->errcode != 0 || !bfun->insn.empty ()) |
b54b03bd TW |
530 | bfun = ftrace_new_function (btinfo, NULL, NULL); |
531 | } | |
31fd9caa MM |
532 | |
533 | bfun->errcode = errcode; | |
8ffd39f2 | 534 | gaps.push_back (bfun->number); |
31fd9caa MM |
535 | |
536 | ftrace_debug (bfun, "new gap"); | |
537 | ||
538 | return bfun; | |
539 | } | |
540 | ||
8286623c TW |
541 | /* Update the current function segment at the end of the trace in BTINFO with |
542 | respect to the instruction at PC. This may create new function segments. | |
23a7fe75 MM |
543 | Return the chronologically latest function segment, never NULL. */ |
544 | ||
545 | static struct btrace_function * | |
8286623c | 546 | ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc) |
23a7fe75 MM |
547 | { |
548 | struct bound_minimal_symbol bmfun; | |
549 | struct minimal_symbol *mfun; | |
550 | struct symbol *fun; | |
b54b03bd | 551 | struct btrace_function *bfun; |
23a7fe75 MM |
552 | |
553 | /* Try to determine the function we're in. We use both types of symbols | |
554 | to avoid surprises when we sometimes get a full symbol and sometimes | |
555 | only a minimal symbol. */ | |
556 | fun = find_pc_function (pc); | |
557 | bmfun = lookup_minimal_symbol_by_pc (pc); | |
558 | mfun = bmfun.minsym; | |
559 | ||
560 | if (fun == NULL && mfun == NULL) | |
561 | DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc)); | |
562 | ||
b54b03bd TW |
563 | /* If we didn't have a function, we create one. */ |
564 | if (btinfo->functions.empty ()) | |
565 | return ftrace_new_function (btinfo, mfun, fun); | |
566 | ||
567 | /* If we had a gap before, we create a function. */ | |
08c3f6d2 | 568 | bfun = &btinfo->functions.back (); |
b54b03bd | 569 | if (bfun->errcode != 0) |
8286623c | 570 | return ftrace_new_function (btinfo, mfun, fun); |
23a7fe75 MM |
571 | |
572 | /* Check the last instruction, if we have one. | |
573 | We do this check first, since it allows us to fill in the call stack | |
574 | links in addition to the normal flow links. */ | |
0860c437 SM |
575 | btrace_insn *last = NULL; |
576 | if (!bfun->insn.empty ()) | |
577 | last = &bfun->insn.back (); | |
23a7fe75 MM |
578 | |
579 | if (last != NULL) | |
580 | { | |
7d5c24b3 MM |
581 | switch (last->iclass) |
582 | { | |
583 | case BTRACE_INSN_RETURN: | |
986b6601 MM |
584 | { |
585 | const char *fname; | |
586 | ||
587 | /* On some systems, _dl_runtime_resolve returns to the resolved | |
588 | function instead of jumping to it. From our perspective, | |
589 | however, this is a tailcall. | |
590 | If we treated it as return, we wouldn't be able to find the | |
591 | resolved function in our stack back trace. Hence, we would | |
592 | lose the current stack back trace and start anew with an empty | |
593 | back trace. When the resolved function returns, we would then | |
594 | create a stack back trace with the same function names but | |
595 | different frame id's. This will confuse stepping. */ | |
596 | fname = ftrace_print_function_name (bfun); | |
597 | if (strcmp (fname, "_dl_runtime_resolve") == 0) | |
8286623c | 598 | return ftrace_new_tailcall (btinfo, mfun, fun); |
986b6601 | 599 | |
8286623c | 600 | return ftrace_new_return (btinfo, mfun, fun); |
986b6601 | 601 | } |
23a7fe75 | 602 | |
7d5c24b3 MM |
603 | case BTRACE_INSN_CALL: |
604 | /* Ignore calls to the next instruction. They are used for PIC. */ | |
605 | if (last->pc + last->size == pc) | |
606 | break; | |
23a7fe75 | 607 | |
8286623c | 608 | return ftrace_new_call (btinfo, mfun, fun); |
23a7fe75 | 609 | |
7d5c24b3 MM |
610 | case BTRACE_INSN_JUMP: |
611 | { | |
612 | CORE_ADDR start; | |
23a7fe75 | 613 | |
7d5c24b3 | 614 | start = get_pc_function_start (pc); |
23a7fe75 | 615 | |
2dfdb47a MM |
616 | /* A jump to the start of a function is (typically) a tail call. */ |
617 | if (start == pc) | |
8286623c | 618 | return ftrace_new_tailcall (btinfo, mfun, fun); |
2dfdb47a | 619 | |
7d5c24b3 | 620 | /* If we can't determine the function for PC, we treat a jump at |
2dfdb47a MM |
621 | the end of the block as tail call if we're switching functions |
622 | and as an intra-function branch if we don't. */ | |
623 | if (start == 0 && ftrace_function_switched (bfun, mfun, fun)) | |
8286623c | 624 | return ftrace_new_tailcall (btinfo, mfun, fun); |
2dfdb47a MM |
625 | |
626 | break; | |
7d5c24b3 | 627 | } |
02d27625 | 628 | } |
23a7fe75 MM |
629 | } |
630 | ||
631 | /* Check if we're switching functions for some other reason. */ | |
632 | if (ftrace_function_switched (bfun, mfun, fun)) | |
633 | { | |
634 | DEBUG_FTRACE ("switching from %s in %s at %s", | |
635 | ftrace_print_insn_addr (last), | |
636 | ftrace_print_function_name (bfun), | |
637 | ftrace_print_filename (bfun)); | |
02d27625 | 638 | |
8286623c | 639 | return ftrace_new_switch (btinfo, mfun, fun); |
23a7fe75 MM |
640 | } |
641 | ||
642 | return bfun; | |
643 | } | |
644 | ||
23a7fe75 MM |
645 | /* Add the instruction at PC to BFUN's instructions. */ |
646 | ||
647 | static void | |
0860c437 | 648 | ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn) |
23a7fe75 | 649 | { |
0860c437 | 650 | bfun->insn.push_back (insn); |
23a7fe75 MM |
651 | |
652 | if (record_debug > 1) | |
653 | ftrace_debug (bfun, "update insn"); | |
654 | } | |
655 | ||
7d5c24b3 MM |
656 | /* Classify the instruction at PC. */ |
657 | ||
658 | static enum btrace_insn_class | |
659 | ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc) | |
660 | { | |
7d5c24b3 MM |
661 | enum btrace_insn_class iclass; |
662 | ||
663 | iclass = BTRACE_INSN_OTHER; | |
492d29ea | 664 | TRY |
7d5c24b3 MM |
665 | { |
666 | if (gdbarch_insn_is_call (gdbarch, pc)) | |
667 | iclass = BTRACE_INSN_CALL; | |
668 | else if (gdbarch_insn_is_ret (gdbarch, pc)) | |
669 | iclass = BTRACE_INSN_RETURN; | |
670 | else if (gdbarch_insn_is_jump (gdbarch, pc)) | |
671 | iclass = BTRACE_INSN_JUMP; | |
672 | } | |
492d29ea PA |
673 | CATCH (error, RETURN_MASK_ERROR) |
674 | { | |
675 | } | |
676 | END_CATCH | |
7d5c24b3 MM |
677 | |
678 | return iclass; | |
679 | } | |
680 | ||
d87fdac3 MM |
681 | /* Try to match the back trace at LHS to the back trace at RHS. Returns the |
682 | number of matching function segments or zero if the back traces do not | |
42bfe59e | 683 | match. BTINFO is the branch trace information for the current thread. */ |
d87fdac3 MM |
684 | |
685 | static int | |
42bfe59e TW |
686 | ftrace_match_backtrace (struct btrace_thread_info *btinfo, |
687 | struct btrace_function *lhs, | |
d87fdac3 MM |
688 | struct btrace_function *rhs) |
689 | { | |
690 | int matches; | |
691 | ||
692 | for (matches = 0; lhs != NULL && rhs != NULL; ++matches) | |
693 | { | |
694 | if (ftrace_function_switched (lhs, rhs->msym, rhs->sym)) | |
695 | return 0; | |
696 | ||
42bfe59e TW |
697 | lhs = ftrace_get_caller (btinfo, lhs); |
698 | rhs = ftrace_get_caller (btinfo, rhs); | |
d87fdac3 MM |
699 | } |
700 | ||
701 | return matches; | |
702 | } | |
703 | ||
eb8f2b9c TW |
704 | /* Add ADJUSTMENT to the level of BFUN and succeeding function segments. |
705 | BTINFO is the branch trace information for the current thread. */ | |
d87fdac3 MM |
706 | |
707 | static void | |
eb8f2b9c TW |
708 | ftrace_fixup_level (struct btrace_thread_info *btinfo, |
709 | struct btrace_function *bfun, int adjustment) | |
d87fdac3 MM |
710 | { |
711 | if (adjustment == 0) | |
712 | return; | |
713 | ||
714 | DEBUG_FTRACE ("fixup level (%+d)", adjustment); | |
715 | ftrace_debug (bfun, "..bfun"); | |
716 | ||
eb8f2b9c TW |
717 | while (bfun != NULL) |
718 | { | |
719 | bfun->level += adjustment; | |
720 | bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1); | |
721 | } | |
d87fdac3 MM |
722 | } |
723 | ||
724 | /* Recompute the global level offset. Traverse the function trace and compute | |
725 | the global level offset as the negative of the minimal function level. */ | |
726 | ||
727 | static void | |
728 | ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo) | |
729 | { | |
b54b03bd | 730 | int level = INT_MAX; |
d87fdac3 MM |
731 | |
732 | if (btinfo == NULL) | |
733 | return; | |
734 | ||
b54b03bd | 735 | if (btinfo->functions.empty ()) |
d87fdac3 MM |
736 | return; |
737 | ||
b54b03bd TW |
738 | unsigned int length = btinfo->functions.size() - 1; |
739 | for (unsigned int i = 0; i < length; ++i) | |
08c3f6d2 | 740 | level = std::min (level, btinfo->functions[i].level); |
b54b03bd | 741 | |
d87fdac3 MM |
742 | /* The last function segment contains the current instruction, which is not |
743 | really part of the trace. If it contains just this one instruction, we | |
b54b03bd | 744 | ignore the segment. */ |
08c3f6d2 | 745 | struct btrace_function *last = &btinfo->functions.back(); |
0860c437 | 746 | if (last->insn.size () != 1) |
b54b03bd | 747 | level = std::min (level, last->level); |
d87fdac3 MM |
748 | |
749 | DEBUG_FTRACE ("setting global level offset: %d", -level); | |
750 | btinfo->level = -level; | |
751 | } | |
752 | ||
753 | /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in | |
42bfe59e TW |
754 | ftrace_connect_backtrace. BTINFO is the branch trace information for the |
755 | current thread. */ | |
d87fdac3 MM |
756 | |
757 | static void | |
42bfe59e TW |
758 | ftrace_connect_bfun (struct btrace_thread_info *btinfo, |
759 | struct btrace_function *prev, | |
d87fdac3 MM |
760 | struct btrace_function *next) |
761 | { | |
762 | DEBUG_FTRACE ("connecting..."); | |
763 | ftrace_debug (prev, "..prev"); | |
764 | ftrace_debug (next, "..next"); | |
765 | ||
766 | /* The function segments are not yet connected. */ | |
4aeb0dfc TW |
767 | gdb_assert (prev->next == 0); |
768 | gdb_assert (next->prev == 0); | |
d87fdac3 | 769 | |
4aeb0dfc TW |
770 | prev->next = next->number; |
771 | next->prev = prev->number; | |
d87fdac3 MM |
772 | |
773 | /* We may have moved NEXT to a different function level. */ | |
eb8f2b9c | 774 | ftrace_fixup_level (btinfo, next, prev->level - next->level); |
d87fdac3 MM |
775 | |
776 | /* If we run out of back trace for one, let's use the other's. */ | |
42bfe59e | 777 | if (prev->up == 0) |
d87fdac3 | 778 | { |
42bfe59e TW |
779 | const btrace_function_flags flags = next->flags; |
780 | ||
781 | next = ftrace_find_call_by_number (btinfo, next->up); | |
782 | if (next != NULL) | |
d87fdac3 MM |
783 | { |
784 | DEBUG_FTRACE ("using next's callers"); | |
4aeb0dfc | 785 | ftrace_fixup_caller (btinfo, prev, next, flags); |
d87fdac3 MM |
786 | } |
787 | } | |
42bfe59e | 788 | else if (next->up == 0) |
d87fdac3 | 789 | { |
42bfe59e TW |
790 | const btrace_function_flags flags = prev->flags; |
791 | ||
792 | prev = ftrace_find_call_by_number (btinfo, prev->up); | |
793 | if (prev != NULL) | |
d87fdac3 MM |
794 | { |
795 | DEBUG_FTRACE ("using prev's callers"); | |
4aeb0dfc | 796 | ftrace_fixup_caller (btinfo, next, prev, flags); |
d87fdac3 MM |
797 | } |
798 | } | |
799 | else | |
800 | { | |
801 | /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up | |
802 | link to add the tail callers to NEXT's back trace. | |
803 | ||
804 | This removes NEXT->UP from NEXT's back trace. It will be added back | |
805 | when connecting NEXT and PREV's callers - provided they exist. | |
806 | ||
807 | If PREV's back trace consists of a series of tail calls without an | |
808 | actual call, there will be no further connection and NEXT's caller will | |
809 | be removed for good. To catch this case, we handle it here and connect | |
810 | the top of PREV's back trace to NEXT's caller. */ | |
811 | if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0) | |
812 | { | |
813 | struct btrace_function *caller; | |
42bfe59e | 814 | btrace_function_flags next_flags, prev_flags; |
d87fdac3 MM |
815 | |
816 | /* We checked NEXT->UP above so CALLER can't be NULL. */ | |
42bfe59e TW |
817 | caller = ftrace_find_call_by_number (btinfo, next->up); |
818 | next_flags = next->flags; | |
819 | prev_flags = prev->flags; | |
d87fdac3 MM |
820 | |
821 | DEBUG_FTRACE ("adding prev's tail calls to next"); | |
822 | ||
42bfe59e | 823 | prev = ftrace_find_call_by_number (btinfo, prev->up); |
4aeb0dfc | 824 | ftrace_fixup_caller (btinfo, next, prev, prev_flags); |
d87fdac3 | 825 | |
42bfe59e TW |
826 | for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo, |
827 | prev->up)) | |
d87fdac3 MM |
828 | { |
829 | /* At the end of PREV's back trace, continue with CALLER. */ | |
42bfe59e | 830 | if (prev->up == 0) |
d87fdac3 MM |
831 | { |
832 | DEBUG_FTRACE ("fixing up link for tailcall chain"); | |
833 | ftrace_debug (prev, "..top"); | |
834 | ftrace_debug (caller, "..up"); | |
835 | ||
4aeb0dfc | 836 | ftrace_fixup_caller (btinfo, prev, caller, next_flags); |
d87fdac3 MM |
837 | |
838 | /* If we skipped any tail calls, this may move CALLER to a | |
839 | different function level. | |
840 | ||
841 | Note that changing CALLER's level is only OK because we | |
842 | know that this is the last iteration of the bottom-to-top | |
843 | walk in ftrace_connect_backtrace. | |
844 | ||
845 | Otherwise we will fix up CALLER's level when we connect it | |
846 | to PREV's caller in the next iteration. */ | |
eb8f2b9c TW |
847 | ftrace_fixup_level (btinfo, caller, |
848 | prev->level - caller->level - 1); | |
d87fdac3 MM |
849 | break; |
850 | } | |
851 | ||
852 | /* There's nothing to do if we find a real call. */ | |
853 | if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0) | |
854 | { | |
855 | DEBUG_FTRACE ("will fix up link in next iteration"); | |
856 | break; | |
857 | } | |
858 | } | |
859 | } | |
860 | } | |
861 | } | |
862 | ||
863 | /* Connect function segments on the same level in the back trace at LHS and RHS. | |
864 | The back traces at LHS and RHS are expected to match according to | |
42bfe59e TW |
865 | ftrace_match_backtrace. BTINFO is the branch trace information for the |
866 | current thread. */ | |
d87fdac3 MM |
867 | |
868 | static void | |
42bfe59e TW |
869 | ftrace_connect_backtrace (struct btrace_thread_info *btinfo, |
870 | struct btrace_function *lhs, | |
d87fdac3 MM |
871 | struct btrace_function *rhs) |
872 | { | |
873 | while (lhs != NULL && rhs != NULL) | |
874 | { | |
875 | struct btrace_function *prev, *next; | |
876 | ||
877 | gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym)); | |
878 | ||
879 | /* Connecting LHS and RHS may change the up link. */ | |
880 | prev = lhs; | |
881 | next = rhs; | |
882 | ||
42bfe59e TW |
883 | lhs = ftrace_get_caller (btinfo, lhs); |
884 | rhs = ftrace_get_caller (btinfo, rhs); | |
d87fdac3 | 885 | |
42bfe59e | 886 | ftrace_connect_bfun (btinfo, prev, next); |
d87fdac3 MM |
887 | } |
888 | } | |
889 | ||
890 | /* Bridge the gap between two function segments left and right of a gap if their | |
42bfe59e TW |
891 | respective back traces match in at least MIN_MATCHES functions. BTINFO is |
892 | the branch trace information for the current thread. | |
d87fdac3 MM |
893 | |
894 | Returns non-zero if the gap could be bridged, zero otherwise. */ | |
895 | ||
896 | static int | |
42bfe59e TW |
897 | ftrace_bridge_gap (struct btrace_thread_info *btinfo, |
898 | struct btrace_function *lhs, struct btrace_function *rhs, | |
d87fdac3 MM |
899 | int min_matches) |
900 | { | |
901 | struct btrace_function *best_l, *best_r, *cand_l, *cand_r; | |
902 | int best_matches; | |
903 | ||
904 | DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)", | |
905 | rhs->insn_offset - 1, min_matches); | |
906 | ||
907 | best_matches = 0; | |
908 | best_l = NULL; | |
909 | best_r = NULL; | |
910 | ||
911 | /* We search the back traces of LHS and RHS for valid connections and connect | |
912 | the two functon segments that give the longest combined back trace. */ | |
913 | ||
42bfe59e TW |
914 | for (cand_l = lhs; cand_l != NULL; |
915 | cand_l = ftrace_get_caller (btinfo, cand_l)) | |
916 | for (cand_r = rhs; cand_r != NULL; | |
917 | cand_r = ftrace_get_caller (btinfo, cand_r)) | |
d87fdac3 MM |
918 | { |
919 | int matches; | |
920 | ||
42bfe59e | 921 | matches = ftrace_match_backtrace (btinfo, cand_l, cand_r); |
d87fdac3 MM |
922 | if (best_matches < matches) |
923 | { | |
924 | best_matches = matches; | |
925 | best_l = cand_l; | |
926 | best_r = cand_r; | |
927 | } | |
928 | } | |
929 | ||
930 | /* We need at least MIN_MATCHES matches. */ | |
931 | gdb_assert (min_matches > 0); | |
932 | if (best_matches < min_matches) | |
933 | return 0; | |
934 | ||
935 | DEBUG_FTRACE ("..matches: %d", best_matches); | |
936 | ||
937 | /* We will fix up the level of BEST_R and succeeding function segments such | |
938 | that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R. | |
939 | ||
940 | This will ignore the level of RHS and following if BEST_R != RHS. I.e. if | |
941 | BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3). | |
942 | ||
943 | To catch this, we already fix up the level here where we can start at RHS | |
944 | instead of at BEST_R. We will ignore the level fixup when connecting | |
945 | BEST_L to BEST_R as they will already be on the same level. */ | |
eb8f2b9c | 946 | ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level); |
d87fdac3 | 947 | |
42bfe59e | 948 | ftrace_connect_backtrace (btinfo, best_l, best_r); |
d87fdac3 MM |
949 | |
950 | return best_matches; | |
951 | } | |
952 | ||
953 | /* Try to bridge gaps due to overflow or decode errors by connecting the | |
954 | function segments that are separated by the gap. */ | |
955 | ||
956 | static void | |
8ffd39f2 | 957 | btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps) |
d87fdac3 | 958 | { |
4aeb0dfc | 959 | struct btrace_thread_info *btinfo = &tp->btrace; |
8ffd39f2 | 960 | std::vector<unsigned int> remaining; |
d87fdac3 MM |
961 | int min_matches; |
962 | ||
963 | DEBUG ("bridge gaps"); | |
964 | ||
d87fdac3 MM |
965 | /* We require a minimum amount of matches for bridging a gap. The number of |
966 | required matches will be lowered with each iteration. | |
967 | ||
968 | The more matches the higher our confidence that the bridging is correct. | |
969 | For big gaps or small traces, however, it may not be feasible to require a | |
970 | high number of matches. */ | |
971 | for (min_matches = 5; min_matches > 0; --min_matches) | |
972 | { | |
973 | /* Let's try to bridge as many gaps as we can. In some cases, we need to | |
974 | skip a gap and revisit it again after we closed later gaps. */ | |
8ffd39f2 | 975 | while (!gaps.empty ()) |
d87fdac3 | 976 | { |
8ffd39f2 | 977 | for (const unsigned int number : gaps) |
d87fdac3 | 978 | { |
8ffd39f2 | 979 | struct btrace_function *gap, *lhs, *rhs; |
d87fdac3 MM |
980 | int bridged; |
981 | ||
8ffd39f2 TW |
982 | gap = ftrace_find_call_by_number (btinfo, number); |
983 | ||
d87fdac3 MM |
984 | /* We may have a sequence of gaps if we run from one error into |
985 | the next as we try to re-sync onto the trace stream. Ignore | |
986 | all but the leftmost gap in such a sequence. | |
987 | ||
988 | Also ignore gaps at the beginning of the trace. */ | |
eb8f2b9c | 989 | lhs = ftrace_find_call_by_number (btinfo, gap->number - 1); |
d87fdac3 MM |
990 | if (lhs == NULL || lhs->errcode != 0) |
991 | continue; | |
992 | ||
993 | /* Skip gaps to the right. */ | |
eb8f2b9c TW |
994 | rhs = ftrace_find_call_by_number (btinfo, gap->number + 1); |
995 | while (rhs != NULL && rhs->errcode != 0) | |
996 | rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1); | |
d87fdac3 MM |
997 | |
998 | /* Ignore gaps at the end of the trace. */ | |
999 | if (rhs == NULL) | |
1000 | continue; | |
1001 | ||
eb8f2b9c | 1002 | bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches); |
d87fdac3 MM |
1003 | |
1004 | /* Keep track of gaps we were not able to bridge and try again. | |
1005 | If we just pushed them to the end of GAPS we would risk an | |
1006 | infinite loop in case we simply cannot bridge a gap. */ | |
1007 | if (bridged == 0) | |
8ffd39f2 | 1008 | remaining.push_back (number); |
d87fdac3 MM |
1009 | } |
1010 | ||
1011 | /* Let's see if we made any progress. */ | |
8ffd39f2 | 1012 | if (remaining.size () == gaps.size ()) |
d87fdac3 MM |
1013 | break; |
1014 | ||
8ffd39f2 TW |
1015 | gaps.clear (); |
1016 | gaps.swap (remaining); | |
d87fdac3 MM |
1017 | } |
1018 | ||
1019 | /* We get here if either GAPS is empty or if GAPS equals REMAINING. */ | |
8ffd39f2 | 1020 | if (gaps.empty ()) |
d87fdac3 MM |
1021 | break; |
1022 | ||
8ffd39f2 | 1023 | remaining.clear (); |
d87fdac3 MM |
1024 | } |
1025 | ||
d87fdac3 MM |
1026 | /* We may omit this in some cases. Not sure it is worth the extra |
1027 | complication, though. */ | |
eb8f2b9c | 1028 | ftrace_compute_global_level_offset (btinfo); |
d87fdac3 MM |
1029 | } |
1030 | ||
734b0e4b | 1031 | /* Compute the function branch trace from BTS trace. */ |
23a7fe75 MM |
1032 | |
1033 | static void | |
76235df1 | 1034 | btrace_compute_ftrace_bts (struct thread_info *tp, |
d87fdac3 | 1035 | const struct btrace_data_bts *btrace, |
8ffd39f2 | 1036 | std::vector<unsigned int> &gaps) |
23a7fe75 | 1037 | { |
76235df1 | 1038 | struct btrace_thread_info *btinfo; |
23a7fe75 | 1039 | struct gdbarch *gdbarch; |
d87fdac3 | 1040 | unsigned int blk; |
23a7fe75 MM |
1041 | int level; |
1042 | ||
23a7fe75 | 1043 | gdbarch = target_gdbarch (); |
76235df1 | 1044 | btinfo = &tp->btrace; |
734b0e4b | 1045 | blk = VEC_length (btrace_block_s, btrace->blocks); |
23a7fe75 | 1046 | |
b54b03bd TW |
1047 | if (btinfo->functions.empty ()) |
1048 | level = INT_MAX; | |
1049 | else | |
1050 | level = -btinfo->level; | |
1051 | ||
23a7fe75 MM |
1052 | while (blk != 0) |
1053 | { | |
1054 | btrace_block_s *block; | |
1055 | CORE_ADDR pc; | |
1056 | ||
1057 | blk -= 1; | |
1058 | ||
734b0e4b | 1059 | block = VEC_index (btrace_block_s, btrace->blocks, blk); |
23a7fe75 MM |
1060 | pc = block->begin; |
1061 | ||
1062 | for (;;) | |
1063 | { | |
b54b03bd | 1064 | struct btrace_function *bfun; |
7d5c24b3 | 1065 | struct btrace_insn insn; |
23a7fe75 MM |
1066 | int size; |
1067 | ||
1068 | /* We should hit the end of the block. Warn if we went too far. */ | |
1069 | if (block->end < pc) | |
1070 | { | |
b61ce85c | 1071 | /* Indicate the gap in the trace. */ |
8ffd39f2 | 1072 | bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps); |
b61ce85c MM |
1073 | |
1074 | warning (_("Recorded trace may be corrupted at instruction " | |
b54b03bd | 1075 | "%u (pc = %s)."), bfun->insn_offset - 1, |
b61ce85c | 1076 | core_addr_to_string_nz (pc)); |
63ab433e | 1077 | |
23a7fe75 MM |
1078 | break; |
1079 | } | |
1080 | ||
b54b03bd | 1081 | bfun = ftrace_update_function (btinfo, pc); |
23a7fe75 | 1082 | |
8710b709 MM |
1083 | /* Maintain the function level offset. |
1084 | For all but the last block, we do it here. */ | |
1085 | if (blk != 0) | |
b54b03bd | 1086 | level = std::min (level, bfun->level); |
23a7fe75 | 1087 | |
7d5c24b3 | 1088 | size = 0; |
492d29ea PA |
1089 | TRY |
1090 | { | |
1091 | size = gdb_insn_length (gdbarch, pc); | |
1092 | } | |
1093 | CATCH (error, RETURN_MASK_ERROR) | |
1094 | { | |
1095 | } | |
1096 | END_CATCH | |
7d5c24b3 MM |
1097 | |
1098 | insn.pc = pc; | |
1099 | insn.size = size; | |
1100 | insn.iclass = ftrace_classify_insn (gdbarch, pc); | |
da8c46d2 | 1101 | insn.flags = 0; |
7d5c24b3 | 1102 | |
0860c437 | 1103 | ftrace_update_insns (bfun, insn); |
23a7fe75 MM |
1104 | |
1105 | /* We're done once we pushed the instruction at the end. */ | |
1106 | if (block->end == pc) | |
1107 | break; | |
1108 | ||
7d5c24b3 | 1109 | /* We can't continue if we fail to compute the size. */ |
23a7fe75 MM |
1110 | if (size <= 0) |
1111 | { | |
31fd9caa MM |
1112 | /* Indicate the gap in the trace. We just added INSN so we're |
1113 | not at the beginning. */ | |
8ffd39f2 | 1114 | bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps); |
31fd9caa | 1115 | |
63ab433e | 1116 | warning (_("Recorded trace may be incomplete at instruction %u " |
b54b03bd | 1117 | "(pc = %s)."), bfun->insn_offset - 1, |
63ab433e MM |
1118 | core_addr_to_string_nz (pc)); |
1119 | ||
23a7fe75 MM |
1120 | break; |
1121 | } | |
1122 | ||
1123 | pc += size; | |
8710b709 MM |
1124 | |
1125 | /* Maintain the function level offset. | |
1126 | For the last block, we do it here to not consider the last | |
1127 | instruction. | |
1128 | Since the last instruction corresponds to the current instruction | |
1129 | and is not really part of the execution history, it shouldn't | |
1130 | affect the level. */ | |
1131 | if (blk == 0) | |
b54b03bd | 1132 | level = std::min (level, bfun->level); |
23a7fe75 | 1133 | } |
02d27625 MM |
1134 | } |
1135 | ||
23a7fe75 MM |
1136 | /* LEVEL is the minimal function level of all btrace function segments. |
1137 | Define the global level offset to -LEVEL so all function levels are | |
1138 | normalized to start at zero. */ | |
1139 | btinfo->level = -level; | |
02d27625 MM |
1140 | } |
1141 | ||
b20a6524 MM |
1142 | #if defined (HAVE_LIBIPT) |
1143 | ||
1144 | static enum btrace_insn_class | |
1145 | pt_reclassify_insn (enum pt_insn_class iclass) | |
1146 | { | |
1147 | switch (iclass) | |
1148 | { | |
1149 | case ptic_call: | |
1150 | return BTRACE_INSN_CALL; | |
1151 | ||
1152 | case ptic_return: | |
1153 | return BTRACE_INSN_RETURN; | |
1154 | ||
1155 | case ptic_jump: | |
1156 | return BTRACE_INSN_JUMP; | |
1157 | ||
1158 | default: | |
1159 | return BTRACE_INSN_OTHER; | |
1160 | } | |
1161 | } | |
1162 | ||
da8c46d2 MM |
1163 | /* Return the btrace instruction flags for INSN. */ |
1164 | ||
d7abe101 | 1165 | static btrace_insn_flags |
b5c36682 | 1166 | pt_btrace_insn_flags (const struct pt_insn &insn) |
da8c46d2 | 1167 | { |
d7abe101 | 1168 | btrace_insn_flags flags = 0; |
da8c46d2 | 1169 | |
b5c36682 | 1170 | if (insn.speculative) |
da8c46d2 MM |
1171 | flags |= BTRACE_INSN_FLAG_SPECULATIVE; |
1172 | ||
1173 | return flags; | |
1174 | } | |
1175 | ||
b5c36682 PA |
1176 | /* Return the btrace instruction for INSN. */ |
1177 | ||
1178 | static btrace_insn | |
1179 | pt_btrace_insn (const struct pt_insn &insn) | |
1180 | { | |
1181 | return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size, | |
1182 | pt_reclassify_insn (insn.iclass), | |
1183 | pt_btrace_insn_flags (insn)}; | |
1184 | } | |
1185 | ||
13ace077 MM |
1186 | /* Handle instruction decode events (libipt-v2). */ |
1187 | ||
1188 | static int | |
1189 | handle_pt_insn_events (struct btrace_thread_info *btinfo, | |
1190 | struct pt_insn_decoder *decoder, | |
1191 | std::vector<unsigned int> &gaps, int status) | |
1192 | { | |
1193 | #if defined (HAVE_PT_INSN_EVENT) | |
1194 | while (status & pts_event_pending) | |
1195 | { | |
1196 | struct btrace_function *bfun; | |
1197 | struct pt_event event; | |
1198 | uint64_t offset; | |
1199 | ||
1200 | status = pt_insn_event (decoder, &event, sizeof (event)); | |
1201 | if (status < 0) | |
1202 | break; | |
1203 | ||
1204 | switch (event.type) | |
1205 | { | |
1206 | default: | |
1207 | break; | |
1208 | ||
1209 | case ptev_enabled: | |
1210 | if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ()) | |
1211 | { | |
1212 | bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps); | |
1213 | ||
1214 | pt_insn_get_offset (decoder, &offset); | |
1215 | ||
1216 | warning (_("Non-contiguous trace at instruction %u (offset = 0x%" | |
1217 | PRIx64 ")."), bfun->insn_offset - 1, offset); | |
1218 | } | |
1219 | ||
1220 | break; | |
1221 | ||
1222 | case ptev_overflow: | |
1223 | bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps); | |
1224 | ||
1225 | pt_insn_get_offset (decoder, &offset); | |
1226 | ||
1227 | warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."), | |
1228 | bfun->insn_offset - 1, offset); | |
1229 | ||
1230 | break; | |
1231 | } | |
1232 | } | |
1233 | #endif /* defined (HAVE_PT_INSN_EVENT) */ | |
1234 | ||
1235 | return status; | |
1236 | } | |
1237 | ||
1238 | /* Handle events indicated by flags in INSN (libipt-v1). */ | |
1239 | ||
1240 | static void | |
1241 | handle_pt_insn_event_flags (struct btrace_thread_info *btinfo, | |
1242 | struct pt_insn_decoder *decoder, | |
1243 | const struct pt_insn &insn, | |
1244 | std::vector<unsigned int> &gaps) | |
1245 | { | |
1246 | #if defined (HAVE_STRUCT_PT_INSN_ENABLED) | |
1247 | /* Tracing is disabled and re-enabled each time we enter the kernel. Most | |
1248 | times, we continue from the same instruction we stopped before. This is | |
1249 | indicated via the RESUMED instruction flag. The ENABLED instruction flag | |
1250 | means that we continued from some other instruction. Indicate this as a | |
1251 | trace gap except when tracing just started. */ | |
1252 | if (insn.enabled && !btinfo->functions.empty ()) | |
1253 | { | |
1254 | struct btrace_function *bfun; | |
1255 | uint64_t offset; | |
1256 | ||
1257 | bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps); | |
1258 | ||
1259 | pt_insn_get_offset (decoder, &offset); | |
1260 | ||
1261 | warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64 | |
1262 | ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset, | |
1263 | insn.ip); | |
1264 | } | |
1265 | #endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */ | |
1266 | ||
1267 | #if defined (HAVE_STRUCT_PT_INSN_RESYNCED) | |
1268 | /* Indicate trace overflows. */ | |
1269 | if (insn.resynced) | |
1270 | { | |
1271 | struct btrace_function *bfun; | |
1272 | uint64_t offset; | |
1273 | ||
1274 | bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps); | |
1275 | ||
1276 | pt_insn_get_offset (decoder, &offset); | |
1277 | ||
1278 | warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%" | |
1279 | PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip); | |
1280 | } | |
1281 | #endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */ | |
1282 | } | |
b5c36682 | 1283 | |
17b89b34 | 1284 | /* Add function branch trace to BTINFO using DECODER. */ |
b20a6524 MM |
1285 | |
1286 | static void | |
17b89b34 TW |
1287 | ftrace_add_pt (struct btrace_thread_info *btinfo, |
1288 | struct pt_insn_decoder *decoder, | |
b54b03bd | 1289 | int *plevel, |
8ffd39f2 | 1290 | std::vector<unsigned int> &gaps) |
b20a6524 | 1291 | { |
b54b03bd | 1292 | struct btrace_function *bfun; |
b20a6524 | 1293 | uint64_t offset; |
13ace077 | 1294 | int status; |
b20a6524 | 1295 | |
b20a6524 MM |
1296 | for (;;) |
1297 | { | |
b20a6524 MM |
1298 | struct pt_insn insn; |
1299 | ||
13ace077 MM |
1300 | status = pt_insn_sync_forward (decoder); |
1301 | if (status < 0) | |
b20a6524 | 1302 | { |
13ace077 | 1303 | if (status != -pte_eos) |
bc504a31 | 1304 | warning (_("Failed to synchronize onto the Intel Processor " |
13ace077 | 1305 | "Trace stream: %s."), pt_errstr (pt_errcode (status))); |
b20a6524 MM |
1306 | break; |
1307 | } | |
1308 | ||
b20a6524 MM |
1309 | for (;;) |
1310 | { | |
13ace077 MM |
1311 | /* Handle events from the previous iteration or synchronization. */ |
1312 | status = handle_pt_insn_events (btinfo, decoder, gaps, status); | |
1313 | if (status < 0) | |
b20a6524 MM |
1314 | break; |
1315 | ||
13ace077 MM |
1316 | status = pt_insn_next (decoder, &insn, sizeof(insn)); |
1317 | if (status < 0) | |
1318 | break; | |
b61ce85c | 1319 | |
13ace077 MM |
1320 | /* Handle events indicated by flags in INSN. */ |
1321 | handle_pt_insn_event_flags (btinfo, decoder, insn, gaps); | |
b20a6524 | 1322 | |
b54b03bd | 1323 | bfun = ftrace_update_function (btinfo, insn.ip); |
b20a6524 MM |
1324 | |
1325 | /* Maintain the function level offset. */ | |
b54b03bd | 1326 | *plevel = std::min (*plevel, bfun->level); |
b20a6524 | 1327 | |
7525b645 | 1328 | ftrace_update_insns (bfun, pt_btrace_insn (insn)); |
b20a6524 MM |
1329 | } |
1330 | ||
13ace077 | 1331 | if (status == -pte_eos) |
b20a6524 MM |
1332 | break; |
1333 | ||
b20a6524 | 1334 | /* Indicate the gap in the trace. */ |
13ace077 | 1335 | bfun = ftrace_new_gap (btinfo, status, gaps); |
b20a6524 | 1336 | |
63ab433e MM |
1337 | pt_insn_get_offset (decoder, &offset); |
1338 | ||
1339 | warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64 | |
13ace077 MM |
1340 | ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1, |
1341 | offset, insn.ip, pt_errstr (pt_errcode (status))); | |
63ab433e | 1342 | } |
b20a6524 MM |
1343 | } |
1344 | ||
1345 | /* A callback function to allow the trace decoder to read the inferior's | |
1346 | memory. */ | |
1347 | ||
1348 | static int | |
1349 | btrace_pt_readmem_callback (gdb_byte *buffer, size_t size, | |
80a2b330 | 1350 | const struct pt_asid *asid, uint64_t pc, |
b20a6524 MM |
1351 | void *context) |
1352 | { | |
43368e1d | 1353 | int result, errcode; |
b20a6524 | 1354 | |
43368e1d | 1355 | result = (int) size; |
b20a6524 MM |
1356 | TRY |
1357 | { | |
80a2b330 | 1358 | errcode = target_read_code ((CORE_ADDR) pc, buffer, size); |
b20a6524 | 1359 | if (errcode != 0) |
43368e1d | 1360 | result = -pte_nomap; |
b20a6524 MM |
1361 | } |
1362 | CATCH (error, RETURN_MASK_ERROR) | |
1363 | { | |
43368e1d | 1364 | result = -pte_nomap; |
b20a6524 MM |
1365 | } |
1366 | END_CATCH | |
1367 | ||
43368e1d | 1368 | return result; |
b20a6524 MM |
1369 | } |
1370 | ||
1371 | /* Translate the vendor from one enum to another. */ | |
1372 | ||
1373 | static enum pt_cpu_vendor | |
1374 | pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor) | |
1375 | { | |
1376 | switch (vendor) | |
1377 | { | |
1378 | default: | |
1379 | return pcv_unknown; | |
1380 | ||
1381 | case CV_INTEL: | |
1382 | return pcv_intel; | |
1383 | } | |
1384 | } | |
1385 | ||
1386 | /* Finalize the function branch trace after decode. */ | |
1387 | ||
1388 | static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder, | |
1389 | struct thread_info *tp, int level) | |
1390 | { | |
1391 | pt_insn_free_decoder (decoder); | |
1392 | ||
1393 | /* LEVEL is the minimal function level of all btrace function segments. | |
1394 | Define the global level offset to -LEVEL so all function levels are | |
1395 | normalized to start at zero. */ | |
1396 | tp->btrace.level = -level; | |
1397 | ||
1398 | /* Add a single last instruction entry for the current PC. | |
1399 | This allows us to compute the backtrace at the current PC using both | |
1400 | standard unwind and btrace unwind. | |
1401 | This extra entry is ignored by all record commands. */ | |
1402 | btrace_add_pc (tp); | |
1403 | } | |
1404 | ||
bc504a31 PA |
1405 | /* Compute the function branch trace from Intel Processor Trace |
1406 | format. */ | |
b20a6524 MM |
1407 | |
1408 | static void | |
1409 | btrace_compute_ftrace_pt (struct thread_info *tp, | |
d87fdac3 | 1410 | const struct btrace_data_pt *btrace, |
8ffd39f2 | 1411 | std::vector<unsigned int> &gaps) |
b20a6524 MM |
1412 | { |
1413 | struct btrace_thread_info *btinfo; | |
1414 | struct pt_insn_decoder *decoder; | |
1415 | struct pt_config config; | |
1416 | int level, errcode; | |
1417 | ||
1418 | if (btrace->size == 0) | |
1419 | return; | |
1420 | ||
1421 | btinfo = &tp->btrace; | |
b54b03bd TW |
1422 | if (btinfo->functions.empty ()) |
1423 | level = INT_MAX; | |
1424 | else | |
1425 | level = -btinfo->level; | |
b20a6524 MM |
1426 | |
1427 | pt_config_init(&config); | |
1428 | config.begin = btrace->data; | |
1429 | config.end = btrace->data + btrace->size; | |
1430 | ||
1431 | config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor); | |
1432 | config.cpu.family = btrace->config.cpu.family; | |
1433 | config.cpu.model = btrace->config.cpu.model; | |
1434 | config.cpu.stepping = btrace->config.cpu.stepping; | |
1435 | ||
1436 | errcode = pt_cpu_errata (&config.errata, &config.cpu); | |
1437 | if (errcode < 0) | |
bc504a31 | 1438 | error (_("Failed to configure the Intel Processor Trace decoder: %s."), |
b20a6524 MM |
1439 | pt_errstr (pt_errcode (errcode))); |
1440 | ||
1441 | decoder = pt_insn_alloc_decoder (&config); | |
1442 | if (decoder == NULL) | |
bc504a31 | 1443 | error (_("Failed to allocate the Intel Processor Trace decoder.")); |
b20a6524 MM |
1444 | |
1445 | TRY | |
1446 | { | |
1447 | struct pt_image *image; | |
1448 | ||
1449 | image = pt_insn_get_image(decoder); | |
1450 | if (image == NULL) | |
bc504a31 | 1451 | error (_("Failed to configure the Intel Processor Trace decoder.")); |
b20a6524 MM |
1452 | |
1453 | errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL); | |
1454 | if (errcode < 0) | |
bc504a31 | 1455 | error (_("Failed to configure the Intel Processor Trace decoder: " |
b20a6524 MM |
1456 | "%s."), pt_errstr (pt_errcode (errcode))); |
1457 | ||
b54b03bd | 1458 | ftrace_add_pt (btinfo, decoder, &level, gaps); |
b20a6524 MM |
1459 | } |
1460 | CATCH (error, RETURN_MASK_ALL) | |
1461 | { | |
1462 | /* Indicate a gap in the trace if we quit trace processing. */ | |
b54b03bd | 1463 | if (error.reason == RETURN_QUIT && !btinfo->functions.empty ()) |
8ffd39f2 | 1464 | ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps); |
b20a6524 MM |
1465 | |
1466 | btrace_finalize_ftrace_pt (decoder, tp, level); | |
1467 | ||
1468 | throw_exception (error); | |
1469 | } | |
1470 | END_CATCH | |
1471 | ||
1472 | btrace_finalize_ftrace_pt (decoder, tp, level); | |
1473 | } | |
1474 | ||
1475 | #else /* defined (HAVE_LIBIPT) */ | |
1476 | ||
1477 | static void | |
1478 | btrace_compute_ftrace_pt (struct thread_info *tp, | |
d87fdac3 | 1479 | const struct btrace_data_pt *btrace, |
8ffd39f2 | 1480 | std::vector<unsigned int> &gaps) |
b20a6524 MM |
1481 | { |
1482 | internal_error (__FILE__, __LINE__, _("Unexpected branch trace format.")); | |
1483 | } | |
1484 | ||
1485 | #endif /* defined (HAVE_LIBIPT) */ | |
1486 | ||
734b0e4b MM |
1487 | /* Compute the function branch trace from a block branch trace BTRACE for |
1488 | a thread given by BTINFO. */ | |
1489 | ||
1490 | static void | |
d87fdac3 | 1491 | btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace, |
8ffd39f2 | 1492 | std::vector<unsigned int> &gaps) |
734b0e4b MM |
1493 | { |
1494 | DEBUG ("compute ftrace"); | |
1495 | ||
1496 | switch (btrace->format) | |
1497 | { | |
1498 | case BTRACE_FORMAT_NONE: | |
1499 | return; | |
1500 | ||
1501 | case BTRACE_FORMAT_BTS: | |
d87fdac3 | 1502 | btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps); |
734b0e4b | 1503 | return; |
b20a6524 MM |
1504 | |
1505 | case BTRACE_FORMAT_PT: | |
d87fdac3 | 1506 | btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps); |
b20a6524 | 1507 | return; |
734b0e4b MM |
1508 | } |
1509 | ||
1510 | internal_error (__FILE__, __LINE__, _("Unkown branch trace format.")); | |
1511 | } | |
1512 | ||
d87fdac3 | 1513 | static void |
8ffd39f2 | 1514 | btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps) |
d87fdac3 | 1515 | { |
8ffd39f2 | 1516 | if (!gaps.empty ()) |
d87fdac3 | 1517 | { |
8ffd39f2 | 1518 | tp->btrace.ngaps += gaps.size (); |
d87fdac3 MM |
1519 | btrace_bridge_gaps (tp, gaps); |
1520 | } | |
1521 | } | |
1522 | ||
1523 | static void | |
1524 | btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace) | |
1525 | { | |
8ffd39f2 | 1526 | std::vector<unsigned int> gaps; |
d87fdac3 MM |
1527 | |
1528 | TRY | |
1529 | { | |
8ffd39f2 | 1530 | btrace_compute_ftrace_1 (tp, btrace, gaps); |
d87fdac3 MM |
1531 | } |
1532 | CATCH (error, RETURN_MASK_ALL) | |
1533 | { | |
8ffd39f2 | 1534 | btrace_finalize_ftrace (tp, gaps); |
d87fdac3 MM |
1535 | |
1536 | throw_exception (error); | |
1537 | } | |
1538 | END_CATCH | |
1539 | ||
8ffd39f2 | 1540 | btrace_finalize_ftrace (tp, gaps); |
d87fdac3 MM |
1541 | } |
1542 | ||
6e07b1d2 MM |
1543 | /* Add an entry for the current PC. */ |
1544 | ||
1545 | static void | |
1546 | btrace_add_pc (struct thread_info *tp) | |
1547 | { | |
734b0e4b | 1548 | struct btrace_data btrace; |
6e07b1d2 MM |
1549 | struct btrace_block *block; |
1550 | struct regcache *regcache; | |
1551 | struct cleanup *cleanup; | |
1552 | CORE_ADDR pc; | |
1553 | ||
1554 | regcache = get_thread_regcache (tp->ptid); | |
1555 | pc = regcache_read_pc (regcache); | |
1556 | ||
734b0e4b MM |
1557 | btrace_data_init (&btrace); |
1558 | btrace.format = BTRACE_FORMAT_BTS; | |
1559 | btrace.variant.bts.blocks = NULL; | |
6e07b1d2 | 1560 | |
734b0e4b MM |
1561 | cleanup = make_cleanup_btrace_data (&btrace); |
1562 | ||
1563 | block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL); | |
6e07b1d2 MM |
1564 | block->begin = pc; |
1565 | block->end = pc; | |
1566 | ||
76235df1 | 1567 | btrace_compute_ftrace (tp, &btrace); |
6e07b1d2 MM |
1568 | |
1569 | do_cleanups (cleanup); | |
1570 | } | |
1571 | ||
02d27625 MM |
1572 | /* See btrace.h. */ |
1573 | ||
1574 | void | |
f4abbc16 | 1575 | btrace_enable (struct thread_info *tp, const struct btrace_config *conf) |
02d27625 MM |
1576 | { |
1577 | if (tp->btrace.target != NULL) | |
1578 | return; | |
1579 | ||
46a3515b MM |
1580 | #if !defined (HAVE_LIBIPT) |
1581 | if (conf->format == BTRACE_FORMAT_PT) | |
bc504a31 | 1582 | error (_("GDB does not support Intel Processor Trace.")); |
46a3515b MM |
1583 | #endif /* !defined (HAVE_LIBIPT) */ |
1584 | ||
f4abbc16 | 1585 | if (!target_supports_btrace (conf->format)) |
02d27625 MM |
1586 | error (_("Target does not support branch tracing.")); |
1587 | ||
43792cf0 PA |
1588 | DEBUG ("enable thread %s (%s)", print_thread_id (tp), |
1589 | target_pid_to_str (tp->ptid)); | |
02d27625 | 1590 | |
f4abbc16 | 1591 | tp->btrace.target = target_enable_btrace (tp->ptid, conf); |
6e07b1d2 | 1592 | |
cd4007e4 MM |
1593 | /* We're done if we failed to enable tracing. */ |
1594 | if (tp->btrace.target == NULL) | |
1595 | return; | |
1596 | ||
1597 | /* We need to undo the enable in case of errors. */ | |
1598 | TRY | |
1599 | { | |
1600 | /* Add an entry for the current PC so we start tracing from where we | |
1601 | enabled it. | |
1602 | ||
1603 | If we can't access TP's registers, TP is most likely running. In this | |
1604 | case, we can't really say where tracing was enabled so it should be | |
1605 | safe to simply skip this step. | |
1606 | ||
1607 | This is not relevant for BTRACE_FORMAT_PT since the trace will already | |
1608 | start at the PC at which tracing was enabled. */ | |
1609 | if (conf->format != BTRACE_FORMAT_PT | |
1610 | && can_access_registers_ptid (tp->ptid)) | |
1611 | btrace_add_pc (tp); | |
1612 | } | |
1613 | CATCH (exception, RETURN_MASK_ALL) | |
1614 | { | |
1615 | btrace_disable (tp); | |
1616 | ||
1617 | throw_exception (exception); | |
1618 | } | |
1619 | END_CATCH | |
02d27625 MM |
1620 | } |
1621 | ||
1622 | /* See btrace.h. */ | |
1623 | ||
f4abbc16 MM |
1624 | const struct btrace_config * |
1625 | btrace_conf (const struct btrace_thread_info *btinfo) | |
1626 | { | |
1627 | if (btinfo->target == NULL) | |
1628 | return NULL; | |
1629 | ||
1630 | return target_btrace_conf (btinfo->target); | |
1631 | } | |
1632 | ||
1633 | /* See btrace.h. */ | |
1634 | ||
02d27625 MM |
1635 | void |
1636 | btrace_disable (struct thread_info *tp) | |
1637 | { | |
1638 | struct btrace_thread_info *btp = &tp->btrace; | |
02d27625 MM |
1639 | |
1640 | if (btp->target == NULL) | |
1641 | return; | |
1642 | ||
43792cf0 PA |
1643 | DEBUG ("disable thread %s (%s)", print_thread_id (tp), |
1644 | target_pid_to_str (tp->ptid)); | |
02d27625 MM |
1645 | |
1646 | target_disable_btrace (btp->target); | |
1647 | btp->target = NULL; | |
1648 | ||
1649 | btrace_clear (tp); | |
1650 | } | |
1651 | ||
1652 | /* See btrace.h. */ | |
1653 | ||
1654 | void | |
1655 | btrace_teardown (struct thread_info *tp) | |
1656 | { | |
1657 | struct btrace_thread_info *btp = &tp->btrace; | |
02d27625 MM |
1658 | |
1659 | if (btp->target == NULL) | |
1660 | return; | |
1661 | ||
43792cf0 PA |
1662 | DEBUG ("teardown thread %s (%s)", print_thread_id (tp), |
1663 | target_pid_to_str (tp->ptid)); | |
02d27625 MM |
1664 | |
1665 | target_teardown_btrace (btp->target); | |
1666 | btp->target = NULL; | |
1667 | ||
1668 | btrace_clear (tp); | |
1669 | } | |
1670 | ||
734b0e4b | 1671 | /* Stitch branch trace in BTS format. */ |
969c39fb MM |
1672 | |
1673 | static int | |
31fd9caa | 1674 | btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp) |
969c39fb | 1675 | { |
31fd9caa | 1676 | struct btrace_thread_info *btinfo; |
969c39fb | 1677 | struct btrace_function *last_bfun; |
969c39fb MM |
1678 | btrace_block_s *first_new_block; |
1679 | ||
31fd9caa | 1680 | btinfo = &tp->btrace; |
b54b03bd | 1681 | gdb_assert (!btinfo->functions.empty ()); |
31fd9caa MM |
1682 | gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks)); |
1683 | ||
08c3f6d2 | 1684 | last_bfun = &btinfo->functions.back (); |
b54b03bd | 1685 | |
31fd9caa MM |
1686 | /* If the existing trace ends with a gap, we just glue the traces |
1687 | together. We need to drop the last (i.e. chronologically first) block | |
1688 | of the new trace, though, since we can't fill in the start address.*/ | |
0860c437 | 1689 | if (last_bfun->insn.empty ()) |
31fd9caa MM |
1690 | { |
1691 | VEC_pop (btrace_block_s, btrace->blocks); | |
1692 | return 0; | |
1693 | } | |
969c39fb MM |
1694 | |
1695 | /* Beware that block trace starts with the most recent block, so the | |
1696 | chronologically first block in the new trace is the last block in | |
1697 | the new trace's block vector. */ | |
734b0e4b | 1698 | first_new_block = VEC_last (btrace_block_s, btrace->blocks); |
0860c437 | 1699 | const btrace_insn &last_insn = last_bfun->insn.back (); |
969c39fb MM |
1700 | |
1701 | /* If the current PC at the end of the block is the same as in our current | |
1702 | trace, there are two explanations: | |
1703 | 1. we executed the instruction and some branch brought us back. | |
1704 | 2. we have not made any progress. | |
1705 | In the first case, the delta trace vector should contain at least two | |
1706 | entries. | |
1707 | In the second case, the delta trace vector should contain exactly one | |
1708 | entry for the partial block containing the current PC. Remove it. */ | |
0860c437 | 1709 | if (first_new_block->end == last_insn.pc |
734b0e4b | 1710 | && VEC_length (btrace_block_s, btrace->blocks) == 1) |
969c39fb | 1711 | { |
734b0e4b | 1712 | VEC_pop (btrace_block_s, btrace->blocks); |
969c39fb MM |
1713 | return 0; |
1714 | } | |
1715 | ||
0860c437 | 1716 | DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn), |
969c39fb MM |
1717 | core_addr_to_string_nz (first_new_block->end)); |
1718 | ||
1719 | /* Do a simple sanity check to make sure we don't accidentally end up | |
1720 | with a bad block. This should not occur in practice. */ | |
0860c437 | 1721 | if (first_new_block->end < last_insn.pc) |
969c39fb MM |
1722 | { |
1723 | warning (_("Error while trying to read delta trace. Falling back to " | |
1724 | "a full read.")); | |
1725 | return -1; | |
1726 | } | |
1727 | ||
1728 | /* We adjust the last block to start at the end of our current trace. */ | |
1729 | gdb_assert (first_new_block->begin == 0); | |
0860c437 | 1730 | first_new_block->begin = last_insn.pc; |
969c39fb MM |
1731 | |
1732 | /* We simply pop the last insn so we can insert it again as part of | |
1733 | the normal branch trace computation. | |
1734 | Since instruction iterators are based on indices in the instructions | |
1735 | vector, we don't leave any pointers dangling. */ | |
1736 | DEBUG ("pruning insn at %s for stitching", | |
0860c437 | 1737 | ftrace_print_insn_addr (&last_insn)); |
969c39fb | 1738 | |
0860c437 | 1739 | last_bfun->insn.pop_back (); |
969c39fb MM |
1740 | |
1741 | /* The instructions vector may become empty temporarily if this has | |
1742 | been the only instruction in this function segment. | |
1743 | This violates the invariant but will be remedied shortly by | |
1744 | btrace_compute_ftrace when we add the new trace. */ | |
31fd9caa MM |
1745 | |
1746 | /* The only case where this would hurt is if the entire trace consisted | |
1747 | of just that one instruction. If we remove it, we might turn the now | |
1748 | empty btrace function segment into a gap. But we don't want gaps at | |
1749 | the beginning. To avoid this, we remove the entire old trace. */ | |
0860c437 | 1750 | if (last_bfun->number == 1 && last_bfun->insn.empty ()) |
31fd9caa MM |
1751 | btrace_clear (tp); |
1752 | ||
969c39fb MM |
1753 | return 0; |
1754 | } | |
1755 | ||
734b0e4b MM |
1756 | /* Adjust the block trace in order to stitch old and new trace together. |
1757 | BTRACE is the new delta trace between the last and the current stop. | |
31fd9caa MM |
1758 | TP is the traced thread. |
1759 | May modifx BTRACE as well as the existing trace in TP. | |
734b0e4b MM |
1760 | Return 0 on success, -1 otherwise. */ |
1761 | ||
1762 | static int | |
31fd9caa | 1763 | btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp) |
734b0e4b MM |
1764 | { |
1765 | /* If we don't have trace, there's nothing to do. */ | |
1766 | if (btrace_data_empty (btrace)) | |
1767 | return 0; | |
1768 | ||
1769 | switch (btrace->format) | |
1770 | { | |
1771 | case BTRACE_FORMAT_NONE: | |
1772 | return 0; | |
1773 | ||
1774 | case BTRACE_FORMAT_BTS: | |
31fd9caa | 1775 | return btrace_stitch_bts (&btrace->variant.bts, tp); |
b20a6524 MM |
1776 | |
1777 | case BTRACE_FORMAT_PT: | |
1778 | /* Delta reads are not supported. */ | |
1779 | return -1; | |
734b0e4b MM |
1780 | } |
1781 | ||
1782 | internal_error (__FILE__, __LINE__, _("Unkown branch trace format.")); | |
1783 | } | |
1784 | ||
969c39fb MM |
1785 | /* Clear the branch trace histories in BTINFO. */ |
1786 | ||
1787 | static void | |
1788 | btrace_clear_history (struct btrace_thread_info *btinfo) | |
1789 | { | |
1790 | xfree (btinfo->insn_history); | |
1791 | xfree (btinfo->call_history); | |
1792 | xfree (btinfo->replay); | |
1793 | ||
1794 | btinfo->insn_history = NULL; | |
1795 | btinfo->call_history = NULL; | |
1796 | btinfo->replay = NULL; | |
1797 | } | |
1798 | ||
b0627500 MM |
1799 | /* Clear the branch trace maintenance histories in BTINFO. */ |
1800 | ||
1801 | static void | |
1802 | btrace_maint_clear (struct btrace_thread_info *btinfo) | |
1803 | { | |
1804 | switch (btinfo->data.format) | |
1805 | { | |
1806 | default: | |
1807 | break; | |
1808 | ||
1809 | case BTRACE_FORMAT_BTS: | |
1810 | btinfo->maint.variant.bts.packet_history.begin = 0; | |
1811 | btinfo->maint.variant.bts.packet_history.end = 0; | |
1812 | break; | |
1813 | ||
1814 | #if defined (HAVE_LIBIPT) | |
1815 | case BTRACE_FORMAT_PT: | |
1816 | xfree (btinfo->maint.variant.pt.packets); | |
1817 | ||
1818 | btinfo->maint.variant.pt.packets = NULL; | |
1819 | btinfo->maint.variant.pt.packet_history.begin = 0; | |
1820 | btinfo->maint.variant.pt.packet_history.end = 0; | |
1821 | break; | |
1822 | #endif /* defined (HAVE_LIBIPT) */ | |
1823 | } | |
1824 | } | |
1825 | ||
02d27625 MM |
1826 | /* See btrace.h. */ |
1827 | ||
508352a9 TW |
1828 | const char * |
1829 | btrace_decode_error (enum btrace_format format, int errcode) | |
1830 | { | |
1831 | switch (format) | |
1832 | { | |
1833 | case BTRACE_FORMAT_BTS: | |
1834 | switch (errcode) | |
1835 | { | |
1836 | case BDE_BTS_OVERFLOW: | |
1837 | return _("instruction overflow"); | |
1838 | ||
1839 | case BDE_BTS_INSN_SIZE: | |
1840 | return _("unknown instruction"); | |
1841 | ||
1842 | default: | |
1843 | break; | |
1844 | } | |
1845 | break; | |
1846 | ||
1847 | #if defined (HAVE_LIBIPT) | |
1848 | case BTRACE_FORMAT_PT: | |
1849 | switch (errcode) | |
1850 | { | |
1851 | case BDE_PT_USER_QUIT: | |
1852 | return _("trace decode cancelled"); | |
1853 | ||
1854 | case BDE_PT_DISABLED: | |
1855 | return _("disabled"); | |
1856 | ||
1857 | case BDE_PT_OVERFLOW: | |
1858 | return _("overflow"); | |
1859 | ||
1860 | default: | |
1861 | if (errcode < 0) | |
1862 | return pt_errstr (pt_errcode (errcode)); | |
1863 | break; | |
1864 | } | |
1865 | break; | |
1866 | #endif /* defined (HAVE_LIBIPT) */ | |
1867 | ||
1868 | default: | |
1869 | break; | |
1870 | } | |
1871 | ||
1872 | return _("unknown"); | |
1873 | } | |
1874 | ||
1875 | /* See btrace.h. */ | |
1876 | ||
02d27625 MM |
1877 | void |
1878 | btrace_fetch (struct thread_info *tp) | |
1879 | { | |
1880 | struct btrace_thread_info *btinfo; | |
969c39fb | 1881 | struct btrace_target_info *tinfo; |
734b0e4b | 1882 | struct btrace_data btrace; |
23a7fe75 | 1883 | struct cleanup *cleanup; |
969c39fb | 1884 | int errcode; |
02d27625 | 1885 | |
43792cf0 PA |
1886 | DEBUG ("fetch thread %s (%s)", print_thread_id (tp), |
1887 | target_pid_to_str (tp->ptid)); | |
02d27625 MM |
1888 | |
1889 | btinfo = &tp->btrace; | |
969c39fb MM |
1890 | tinfo = btinfo->target; |
1891 | if (tinfo == NULL) | |
1892 | return; | |
1893 | ||
1894 | /* There's no way we could get new trace while replaying. | |
1895 | On the other hand, delta trace would return a partial record with the | |
1896 | current PC, which is the replay PC, not the last PC, as expected. */ | |
1897 | if (btinfo->replay != NULL) | |
02d27625 MM |
1898 | return; |
1899 | ||
ae20e79a TW |
1900 | /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we |
1901 | can store a gdb.Record object in Python referring to a different thread | |
1902 | than the current one, temporarily set INFERIOR_PTID. */ | |
2989a365 | 1903 | scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid); |
ae20e79a TW |
1904 | inferior_ptid = tp->ptid; |
1905 | ||
cd4007e4 MM |
1906 | /* We should not be called on running or exited threads. */ |
1907 | gdb_assert (can_access_registers_ptid (tp->ptid)); | |
1908 | ||
734b0e4b | 1909 | btrace_data_init (&btrace); |
2989a365 | 1910 | cleanup = make_cleanup_btrace_data (&btrace); |
02d27625 | 1911 | |
969c39fb | 1912 | /* Let's first try to extend the trace we already have. */ |
b54b03bd | 1913 | if (!btinfo->functions.empty ()) |
969c39fb MM |
1914 | { |
1915 | errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA); | |
1916 | if (errcode == 0) | |
1917 | { | |
1918 | /* Success. Let's try to stitch the traces together. */ | |
31fd9caa | 1919 | errcode = btrace_stitch_trace (&btrace, tp); |
969c39fb MM |
1920 | } |
1921 | else | |
1922 | { | |
1923 | /* We failed to read delta trace. Let's try to read new trace. */ | |
1924 | errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW); | |
1925 | ||
1926 | /* If we got any new trace, discard what we have. */ | |
734b0e4b | 1927 | if (errcode == 0 && !btrace_data_empty (&btrace)) |
969c39fb MM |
1928 | btrace_clear (tp); |
1929 | } | |
1930 | ||
1931 | /* If we were not able to read the trace, we start over. */ | |
1932 | if (errcode != 0) | |
1933 | { | |
1934 | btrace_clear (tp); | |
1935 | errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL); | |
1936 | } | |
1937 | } | |
1938 | else | |
1939 | errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL); | |
1940 | ||
1941 | /* If we were not able to read the branch trace, signal an error. */ | |
1942 | if (errcode != 0) | |
1943 | error (_("Failed to read branch trace.")); | |
1944 | ||
1945 | /* Compute the trace, provided we have any. */ | |
734b0e4b | 1946 | if (!btrace_data_empty (&btrace)) |
23a7fe75 | 1947 | { |
9be54cae MM |
1948 | /* Store the raw trace data. The stored data will be cleared in |
1949 | btrace_clear, so we always append the new trace. */ | |
1950 | btrace_data_append (&btinfo->data, &btrace); | |
b0627500 | 1951 | btrace_maint_clear (btinfo); |
9be54cae | 1952 | |
969c39fb | 1953 | btrace_clear_history (btinfo); |
76235df1 | 1954 | btrace_compute_ftrace (tp, &btrace); |
23a7fe75 | 1955 | } |
02d27625 | 1956 | |
23a7fe75 | 1957 | do_cleanups (cleanup); |
02d27625 MM |
1958 | } |
1959 | ||
1960 | /* See btrace.h. */ | |
1961 | ||
1962 | void | |
1963 | btrace_clear (struct thread_info *tp) | |
1964 | { | |
1965 | struct btrace_thread_info *btinfo; | |
1966 | ||
43792cf0 PA |
1967 | DEBUG ("clear thread %s (%s)", print_thread_id (tp), |
1968 | target_pid_to_str (tp->ptid)); | |
02d27625 | 1969 | |
0b722aec MM |
1970 | /* Make sure btrace frames that may hold a pointer into the branch |
1971 | trace data are destroyed. */ | |
1972 | reinit_frame_cache (); | |
1973 | ||
02d27625 | 1974 | btinfo = &tp->btrace; |
23a7fe75 | 1975 | |
17b89b34 | 1976 | btinfo->functions.clear (); |
31fd9caa | 1977 | btinfo->ngaps = 0; |
23a7fe75 | 1978 | |
b0627500 MM |
1979 | /* Must clear the maint data before - it depends on BTINFO->DATA. */ |
1980 | btrace_maint_clear (btinfo); | |
9be54cae | 1981 | btrace_data_clear (&btinfo->data); |
969c39fb | 1982 | btrace_clear_history (btinfo); |
02d27625 MM |
1983 | } |
1984 | ||
1985 | /* See btrace.h. */ | |
1986 | ||
1987 | void | |
1988 | btrace_free_objfile (struct objfile *objfile) | |
1989 | { | |
1990 | struct thread_info *tp; | |
1991 | ||
1992 | DEBUG ("free objfile"); | |
1993 | ||
034f788c | 1994 | ALL_NON_EXITED_THREADS (tp) |
02d27625 MM |
1995 | btrace_clear (tp); |
1996 | } | |
c12a2917 MM |
1997 | |
1998 | #if defined (HAVE_LIBEXPAT) | |
1999 | ||
2000 | /* Check the btrace document version. */ | |
2001 | ||
2002 | static void | |
2003 | check_xml_btrace_version (struct gdb_xml_parser *parser, | |
2004 | const struct gdb_xml_element *element, | |
4d0fdd9b SM |
2005 | void *user_data, |
2006 | std::vector<gdb_xml_value> &attributes) | |
c12a2917 | 2007 | { |
9a3c8263 | 2008 | const char *version |
4d0fdd9b | 2009 | = (const char *) xml_find_attribute (attributes, "version")->value.get (); |
c12a2917 MM |
2010 | |
2011 | if (strcmp (version, "1.0") != 0) | |
2012 | gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version); | |
2013 | } | |
2014 | ||
2015 | /* Parse a btrace "block" xml record. */ | |
2016 | ||
2017 | static void | |
2018 | parse_xml_btrace_block (struct gdb_xml_parser *parser, | |
2019 | const struct gdb_xml_element *element, | |
4d0fdd9b SM |
2020 | void *user_data, |
2021 | std::vector<gdb_xml_value> &attributes) | |
c12a2917 | 2022 | { |
734b0e4b | 2023 | struct btrace_data *btrace; |
c12a2917 MM |
2024 | struct btrace_block *block; |
2025 | ULONGEST *begin, *end; | |
2026 | ||
9a3c8263 | 2027 | btrace = (struct btrace_data *) user_data; |
734b0e4b MM |
2028 | |
2029 | switch (btrace->format) | |
2030 | { | |
2031 | case BTRACE_FORMAT_BTS: | |
2032 | break; | |
2033 | ||
2034 | case BTRACE_FORMAT_NONE: | |
2035 | btrace->format = BTRACE_FORMAT_BTS; | |
2036 | btrace->variant.bts.blocks = NULL; | |
2037 | break; | |
2038 | ||
2039 | default: | |
2040 | gdb_xml_error (parser, _("Btrace format error.")); | |
2041 | } | |
c12a2917 | 2042 | |
4d0fdd9b SM |
2043 | begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value.get (); |
2044 | end = (ULONGEST *) xml_find_attribute (attributes, "end")->value.get (); | |
c12a2917 | 2045 | |
734b0e4b | 2046 | block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL); |
c12a2917 MM |
2047 | block->begin = *begin; |
2048 | block->end = *end; | |
2049 | } | |
2050 | ||
b20a6524 MM |
2051 | /* Parse a "raw" xml record. */ |
2052 | ||
2053 | static void | |
2054 | parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text, | |
e7b01ce0 | 2055 | gdb_byte **pdata, size_t *psize) |
b20a6524 MM |
2056 | { |
2057 | struct cleanup *cleanup; | |
2058 | gdb_byte *data, *bin; | |
e7b01ce0 | 2059 | size_t len, size; |
b20a6524 MM |
2060 | |
2061 | len = strlen (body_text); | |
e7b01ce0 | 2062 | if (len % 2 != 0) |
b20a6524 MM |
2063 | gdb_xml_error (parser, _("Bad raw data size.")); |
2064 | ||
e7b01ce0 MM |
2065 | size = len / 2; |
2066 | ||
224c3ddb | 2067 | bin = data = (gdb_byte *) xmalloc (size); |
b20a6524 MM |
2068 | cleanup = make_cleanup (xfree, data); |
2069 | ||
2070 | /* We use hex encoding - see common/rsp-low.h. */ | |
2071 | while (len > 0) | |
2072 | { | |
2073 | char hi, lo; | |
2074 | ||
2075 | hi = *body_text++; | |
2076 | lo = *body_text++; | |
2077 | ||
2078 | if (hi == 0 || lo == 0) | |
2079 | gdb_xml_error (parser, _("Bad hex encoding.")); | |
2080 | ||
2081 | *bin++ = fromhex (hi) * 16 + fromhex (lo); | |
2082 | len -= 2; | |
2083 | } | |
2084 | ||
2085 | discard_cleanups (cleanup); | |
2086 | ||
2087 | *pdata = data; | |
2088 | *psize = size; | |
2089 | } | |
2090 | ||
2091 | /* Parse a btrace pt-config "cpu" xml record. */ | |
2092 | ||
2093 | static void | |
2094 | parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser, | |
2095 | const struct gdb_xml_element *element, | |
2096 | void *user_data, | |
4d0fdd9b | 2097 | std::vector<gdb_xml_value> &attributes) |
b20a6524 MM |
2098 | { |
2099 | struct btrace_data *btrace; | |
2100 | const char *vendor; | |
2101 | ULONGEST *family, *model, *stepping; | |
2102 | ||
4d0fdd9b SM |
2103 | vendor = |
2104 | (const char *) xml_find_attribute (attributes, "vendor")->value.get (); | |
2105 | family | |
2106 | = (ULONGEST *) xml_find_attribute (attributes, "family")->value.get (); | |
2107 | model | |
2108 | = (ULONGEST *) xml_find_attribute (attributes, "model")->value.get (); | |
2109 | stepping | |
2110 | = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value.get (); | |
b20a6524 | 2111 | |
9a3c8263 | 2112 | btrace = (struct btrace_data *) user_data; |
b20a6524 MM |
2113 | |
2114 | if (strcmp (vendor, "GenuineIntel") == 0) | |
2115 | btrace->variant.pt.config.cpu.vendor = CV_INTEL; | |
2116 | ||
2117 | btrace->variant.pt.config.cpu.family = *family; | |
2118 | btrace->variant.pt.config.cpu.model = *model; | |
2119 | btrace->variant.pt.config.cpu.stepping = *stepping; | |
2120 | } | |
2121 | ||
2122 | /* Parse a btrace pt "raw" xml record. */ | |
2123 | ||
2124 | static void | |
2125 | parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser, | |
2126 | const struct gdb_xml_element *element, | |
2127 | void *user_data, const char *body_text) | |
2128 | { | |
2129 | struct btrace_data *btrace; | |
2130 | ||
9a3c8263 | 2131 | btrace = (struct btrace_data *) user_data; |
b20a6524 MM |
2132 | parse_xml_raw (parser, body_text, &btrace->variant.pt.data, |
2133 | &btrace->variant.pt.size); | |
2134 | } | |
2135 | ||
2136 | /* Parse a btrace "pt" xml record. */ | |
2137 | ||
2138 | static void | |
2139 | parse_xml_btrace_pt (struct gdb_xml_parser *parser, | |
2140 | const struct gdb_xml_element *element, | |
4d0fdd9b SM |
2141 | void *user_data, |
2142 | std::vector<gdb_xml_value> &attributes) | |
b20a6524 MM |
2143 | { |
2144 | struct btrace_data *btrace; | |
2145 | ||
9a3c8263 | 2146 | btrace = (struct btrace_data *) user_data; |
b20a6524 MM |
2147 | btrace->format = BTRACE_FORMAT_PT; |
2148 | btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN; | |
2149 | btrace->variant.pt.data = NULL; | |
2150 | btrace->variant.pt.size = 0; | |
2151 | } | |
2152 | ||
c12a2917 MM |
2153 | static const struct gdb_xml_attribute block_attributes[] = { |
2154 | { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL }, | |
2155 | { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL }, | |
2156 | { NULL, GDB_XML_AF_NONE, NULL, NULL } | |
2157 | }; | |
2158 | ||
b20a6524 MM |
2159 | static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = { |
2160 | { "vendor", GDB_XML_AF_NONE, NULL, NULL }, | |
2161 | { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL }, | |
2162 | { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL }, | |
2163 | { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL }, | |
2164 | { NULL, GDB_XML_AF_NONE, NULL, NULL } | |
2165 | }; | |
2166 | ||
2167 | static const struct gdb_xml_element btrace_pt_config_children[] = { | |
2168 | { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL, | |
2169 | parse_xml_btrace_pt_config_cpu, NULL }, | |
2170 | { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL } | |
2171 | }; | |
2172 | ||
2173 | static const struct gdb_xml_element btrace_pt_children[] = { | |
2174 | { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL, | |
2175 | NULL }, | |
2176 | { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw }, | |
2177 | { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL } | |
2178 | }; | |
2179 | ||
c12a2917 MM |
2180 | static const struct gdb_xml_attribute btrace_attributes[] = { |
2181 | { "version", GDB_XML_AF_NONE, NULL, NULL }, | |
2182 | { NULL, GDB_XML_AF_NONE, NULL, NULL } | |
2183 | }; | |
2184 | ||
2185 | static const struct gdb_xml_element btrace_children[] = { | |
2186 | { "block", block_attributes, NULL, | |
2187 | GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL }, | |
b20a6524 MM |
2188 | { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt, |
2189 | NULL }, | |
c12a2917 MM |
2190 | { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL } |
2191 | }; | |
2192 | ||
2193 | static const struct gdb_xml_element btrace_elements[] = { | |
2194 | { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE, | |
2195 | check_xml_btrace_version, NULL }, | |
2196 | { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL } | |
2197 | }; | |
2198 | ||
2199 | #endif /* defined (HAVE_LIBEXPAT) */ | |
2200 | ||
2201 | /* See btrace.h. */ | |
2202 | ||
734b0e4b MM |
2203 | void |
2204 | parse_xml_btrace (struct btrace_data *btrace, const char *buffer) | |
c12a2917 | 2205 | { |
c12a2917 MM |
2206 | struct cleanup *cleanup; |
2207 | int errcode; | |
2208 | ||
2209 | #if defined (HAVE_LIBEXPAT) | |
2210 | ||
734b0e4b MM |
2211 | btrace->format = BTRACE_FORMAT_NONE; |
2212 | ||
2213 | cleanup = make_cleanup_btrace_data (btrace); | |
c12a2917 | 2214 | errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements, |
734b0e4b | 2215 | buffer, btrace); |
c12a2917 | 2216 | if (errcode != 0) |
969c39fb | 2217 | error (_("Error parsing branch trace.")); |
c12a2917 MM |
2218 | |
2219 | /* Keep parse results. */ | |
2220 | discard_cleanups (cleanup); | |
2221 | ||
2222 | #else /* !defined (HAVE_LIBEXPAT) */ | |
2223 | ||
2224 | error (_("Cannot process branch trace. XML parsing is not supported.")); | |
2225 | ||
2226 | #endif /* !defined (HAVE_LIBEXPAT) */ | |
c12a2917 | 2227 | } |
23a7fe75 | 2228 | |
f4abbc16 MM |
2229 | #if defined (HAVE_LIBEXPAT) |
2230 | ||
2231 | /* Parse a btrace-conf "bts" xml record. */ | |
2232 | ||
2233 | static void | |
2234 | parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser, | |
2235 | const struct gdb_xml_element *element, | |
4d0fdd9b SM |
2236 | void *user_data, |
2237 | std::vector<gdb_xml_value> &attributes) | |
f4abbc16 MM |
2238 | { |
2239 | struct btrace_config *conf; | |
d33501a5 | 2240 | struct gdb_xml_value *size; |
f4abbc16 | 2241 | |
9a3c8263 | 2242 | conf = (struct btrace_config *) user_data; |
f4abbc16 | 2243 | conf->format = BTRACE_FORMAT_BTS; |
d33501a5 MM |
2244 | conf->bts.size = 0; |
2245 | ||
2246 | size = xml_find_attribute (attributes, "size"); | |
2247 | if (size != NULL) | |
4d0fdd9b | 2248 | conf->bts.size = (unsigned int) *(ULONGEST *) size->value.get (); |
f4abbc16 MM |
2249 | } |
2250 | ||
b20a6524 MM |
2251 | /* Parse a btrace-conf "pt" xml record. */ |
2252 | ||
2253 | static void | |
2254 | parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser, | |
2255 | const struct gdb_xml_element *element, | |
4d0fdd9b SM |
2256 | void *user_data, |
2257 | std::vector<gdb_xml_value> &attributes) | |
b20a6524 MM |
2258 | { |
2259 | struct btrace_config *conf; | |
2260 | struct gdb_xml_value *size; | |
2261 | ||
9a3c8263 | 2262 | conf = (struct btrace_config *) user_data; |
b20a6524 MM |
2263 | conf->format = BTRACE_FORMAT_PT; |
2264 | conf->pt.size = 0; | |
2265 | ||
2266 | size = xml_find_attribute (attributes, "size"); | |
2267 | if (size != NULL) | |
4d0fdd9b | 2268 | conf->pt.size = (unsigned int) *(ULONGEST *) size->value.get (); |
b20a6524 MM |
2269 | } |
2270 | ||
2271 | static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = { | |
2272 | { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL }, | |
2273 | { NULL, GDB_XML_AF_NONE, NULL, NULL } | |
2274 | }; | |
2275 | ||
d33501a5 MM |
2276 | static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = { |
2277 | { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL }, | |
2278 | { NULL, GDB_XML_AF_NONE, NULL, NULL } | |
2279 | }; | |
2280 | ||
f4abbc16 | 2281 | static const struct gdb_xml_element btrace_conf_children[] = { |
d33501a5 MM |
2282 | { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL, |
2283 | parse_xml_btrace_conf_bts, NULL }, | |
b20a6524 MM |
2284 | { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL, |
2285 | parse_xml_btrace_conf_pt, NULL }, | |
f4abbc16 MM |
2286 | { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL } |
2287 | }; | |
2288 | ||
2289 | static const struct gdb_xml_attribute btrace_conf_attributes[] = { | |
2290 | { "version", GDB_XML_AF_NONE, NULL, NULL }, | |
2291 | { NULL, GDB_XML_AF_NONE, NULL, NULL } | |
2292 | }; | |
2293 | ||
2294 | static const struct gdb_xml_element btrace_conf_elements[] = { | |
2295 | { "btrace-conf", btrace_conf_attributes, btrace_conf_children, | |
2296 | GDB_XML_EF_NONE, NULL, NULL }, | |
2297 | { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL } | |
2298 | }; | |
2299 | ||
2300 | #endif /* defined (HAVE_LIBEXPAT) */ | |
2301 | ||
2302 | /* See btrace.h. */ | |
2303 | ||
2304 | void | |
2305 | parse_xml_btrace_conf (struct btrace_config *conf, const char *xml) | |
2306 | { | |
2307 | int errcode; | |
2308 | ||
2309 | #if defined (HAVE_LIBEXPAT) | |
2310 | ||
2311 | errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd", | |
2312 | btrace_conf_elements, xml, conf); | |
2313 | if (errcode != 0) | |
2314 | error (_("Error parsing branch trace configuration.")); | |
2315 | ||
2316 | #else /* !defined (HAVE_LIBEXPAT) */ | |
2317 | ||
2318 | error (_("XML parsing is not supported.")); | |
2319 | ||
2320 | #endif /* !defined (HAVE_LIBEXPAT) */ | |
2321 | } | |
2322 | ||
23a7fe75 MM |
2323 | /* See btrace.h. */ |
2324 | ||
2325 | const struct btrace_insn * | |
2326 | btrace_insn_get (const struct btrace_insn_iterator *it) | |
2327 | { | |
2328 | const struct btrace_function *bfun; | |
2329 | unsigned int index, end; | |
2330 | ||
a0f1b963 | 2331 | index = it->insn_index; |
08c3f6d2 | 2332 | bfun = &it->btinfo->functions[it->call_index]; |
23a7fe75 | 2333 | |
31fd9caa MM |
2334 | /* Check if the iterator points to a gap in the trace. */ |
2335 | if (bfun->errcode != 0) | |
2336 | return NULL; | |
2337 | ||
23a7fe75 | 2338 | /* The index is within the bounds of this function's instruction vector. */ |
0860c437 | 2339 | end = bfun->insn.size (); |
23a7fe75 MM |
2340 | gdb_assert (0 < end); |
2341 | gdb_assert (index < end); | |
2342 | ||
0860c437 | 2343 | return &bfun->insn[index]; |
23a7fe75 MM |
2344 | } |
2345 | ||
2346 | /* See btrace.h. */ | |
2347 | ||
69090cee TW |
2348 | int |
2349 | btrace_insn_get_error (const struct btrace_insn_iterator *it) | |
23a7fe75 | 2350 | { |
08c3f6d2 | 2351 | return it->btinfo->functions[it->call_index].errcode; |
69090cee | 2352 | } |
31fd9caa | 2353 | |
69090cee | 2354 | /* See btrace.h. */ |
31fd9caa | 2355 | |
69090cee TW |
2356 | unsigned int |
2357 | btrace_insn_number (const struct btrace_insn_iterator *it) | |
2358 | { | |
08c3f6d2 | 2359 | return it->btinfo->functions[it->call_index].insn_offset + it->insn_index; |
23a7fe75 MM |
2360 | } |
2361 | ||
2362 | /* See btrace.h. */ | |
2363 | ||
2364 | void | |
2365 | btrace_insn_begin (struct btrace_insn_iterator *it, | |
2366 | const struct btrace_thread_info *btinfo) | |
2367 | { | |
b54b03bd | 2368 | if (btinfo->functions.empty ()) |
23a7fe75 MM |
2369 | error (_("No trace.")); |
2370 | ||
521103fd | 2371 | it->btinfo = btinfo; |
a0f1b963 TW |
2372 | it->call_index = 0; |
2373 | it->insn_index = 0; | |
23a7fe75 MM |
2374 | } |
2375 | ||
2376 | /* See btrace.h. */ | |
2377 | ||
2378 | void | |
2379 | btrace_insn_end (struct btrace_insn_iterator *it, | |
2380 | const struct btrace_thread_info *btinfo) | |
2381 | { | |
2382 | const struct btrace_function *bfun; | |
2383 | unsigned int length; | |
2384 | ||
b54b03bd | 2385 | if (btinfo->functions.empty ()) |
23a7fe75 MM |
2386 | error (_("No trace.")); |
2387 | ||
08c3f6d2 | 2388 | bfun = &btinfo->functions.back (); |
0860c437 | 2389 | length = bfun->insn.size (); |
23a7fe75 | 2390 | |
31fd9caa MM |
2391 | /* The last function may either be a gap or it contains the current |
2392 | instruction, which is one past the end of the execution trace; ignore | |
2393 | it. */ | |
2394 | if (length > 0) | |
2395 | length -= 1; | |
2396 | ||
521103fd | 2397 | it->btinfo = btinfo; |
a0f1b963 TW |
2398 | it->call_index = bfun->number - 1; |
2399 | it->insn_index = length; | |
23a7fe75 MM |
2400 | } |
2401 | ||
2402 | /* See btrace.h. */ | |
2403 | ||
2404 | unsigned int | |
2405 | btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride) | |
2406 | { | |
2407 | const struct btrace_function *bfun; | |
2408 | unsigned int index, steps; | |
2409 | ||
08c3f6d2 | 2410 | bfun = &it->btinfo->functions[it->call_index]; |
23a7fe75 | 2411 | steps = 0; |
a0f1b963 | 2412 | index = it->insn_index; |
23a7fe75 MM |
2413 | |
2414 | while (stride != 0) | |
2415 | { | |
2416 | unsigned int end, space, adv; | |
2417 | ||
0860c437 | 2418 | end = bfun->insn.size (); |
23a7fe75 | 2419 | |
31fd9caa MM |
2420 | /* An empty function segment represents a gap in the trace. We count |
2421 | it as one instruction. */ | |
2422 | if (end == 0) | |
2423 | { | |
2424 | const struct btrace_function *next; | |
2425 | ||
eb8f2b9c | 2426 | next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1); |
31fd9caa MM |
2427 | if (next == NULL) |
2428 | break; | |
2429 | ||
2430 | stride -= 1; | |
2431 | steps += 1; | |
2432 | ||
2433 | bfun = next; | |
2434 | index = 0; | |
2435 | ||
2436 | continue; | |
2437 | } | |
2438 | ||
23a7fe75 MM |
2439 | gdb_assert (0 < end); |
2440 | gdb_assert (index < end); | |
2441 | ||
2442 | /* Compute the number of instructions remaining in this segment. */ | |
2443 | space = end - index; | |
2444 | ||
2445 | /* Advance the iterator as far as possible within this segment. */ | |
325fac50 | 2446 | adv = std::min (space, stride); |
23a7fe75 MM |
2447 | stride -= adv; |
2448 | index += adv; | |
2449 | steps += adv; | |
2450 | ||
2451 | /* Move to the next function if we're at the end of this one. */ | |
2452 | if (index == end) | |
2453 | { | |
2454 | const struct btrace_function *next; | |
2455 | ||
eb8f2b9c | 2456 | next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1); |
23a7fe75 MM |
2457 | if (next == NULL) |
2458 | { | |
2459 | /* We stepped past the last function. | |
2460 | ||
2461 | Let's adjust the index to point to the last instruction in | |
2462 | the previous function. */ | |
2463 | index -= 1; | |
2464 | steps -= 1; | |
2465 | break; | |
2466 | } | |
2467 | ||
2468 | /* We now point to the first instruction in the new function. */ | |
2469 | bfun = next; | |
2470 | index = 0; | |
2471 | } | |
2472 | ||
2473 | /* We did make progress. */ | |
2474 | gdb_assert (adv > 0); | |
2475 | } | |
2476 | ||
2477 | /* Update the iterator. */ | |
a0f1b963 TW |
2478 | it->call_index = bfun->number - 1; |
2479 | it->insn_index = index; | |
23a7fe75 MM |
2480 | |
2481 | return steps; | |
2482 | } | |
2483 | ||
2484 | /* See btrace.h. */ | |
2485 | ||
2486 | unsigned int | |
2487 | btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride) | |
2488 | { | |
2489 | const struct btrace_function *bfun; | |
2490 | unsigned int index, steps; | |
2491 | ||
08c3f6d2 | 2492 | bfun = &it->btinfo->functions[it->call_index]; |
23a7fe75 | 2493 | steps = 0; |
a0f1b963 | 2494 | index = it->insn_index; |
23a7fe75 MM |
2495 | |
2496 | while (stride != 0) | |
2497 | { | |
2498 | unsigned int adv; | |
2499 | ||
2500 | /* Move to the previous function if we're at the start of this one. */ | |
2501 | if (index == 0) | |
2502 | { | |
2503 | const struct btrace_function *prev; | |
2504 | ||
eb8f2b9c | 2505 | prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1); |
23a7fe75 MM |
2506 | if (prev == NULL) |
2507 | break; | |
2508 | ||
2509 | /* We point to one after the last instruction in the new function. */ | |
2510 | bfun = prev; | |
0860c437 | 2511 | index = bfun->insn.size (); |
23a7fe75 | 2512 | |
31fd9caa MM |
2513 | /* An empty function segment represents a gap in the trace. We count |
2514 | it as one instruction. */ | |
2515 | if (index == 0) | |
2516 | { | |
2517 | stride -= 1; | |
2518 | steps += 1; | |
2519 | ||
2520 | continue; | |
2521 | } | |
23a7fe75 MM |
2522 | } |
2523 | ||
2524 | /* Advance the iterator as far as possible within this segment. */ | |
325fac50 | 2525 | adv = std::min (index, stride); |
31fd9caa | 2526 | |
23a7fe75 MM |
2527 | stride -= adv; |
2528 | index -= adv; | |
2529 | steps += adv; | |
2530 | ||
2531 | /* We did make progress. */ | |
2532 | gdb_assert (adv > 0); | |
2533 | } | |
2534 | ||
2535 | /* Update the iterator. */ | |
a0f1b963 TW |
2536 | it->call_index = bfun->number - 1; |
2537 | it->insn_index = index; | |
23a7fe75 MM |
2538 | |
2539 | return steps; | |
2540 | } | |
2541 | ||
2542 | /* See btrace.h. */ | |
2543 | ||
2544 | int | |
2545 | btrace_insn_cmp (const struct btrace_insn_iterator *lhs, | |
2546 | const struct btrace_insn_iterator *rhs) | |
2547 | { | |
a0f1b963 | 2548 | gdb_assert (lhs->btinfo == rhs->btinfo); |
23a7fe75 | 2549 | |
a0f1b963 TW |
2550 | if (lhs->call_index != rhs->call_index) |
2551 | return lhs->call_index - rhs->call_index; | |
23a7fe75 | 2552 | |
a0f1b963 | 2553 | return lhs->insn_index - rhs->insn_index; |
23a7fe75 MM |
2554 | } |
2555 | ||
2556 | /* See btrace.h. */ | |
2557 | ||
2558 | int | |
2559 | btrace_find_insn_by_number (struct btrace_insn_iterator *it, | |
2560 | const struct btrace_thread_info *btinfo, | |
2561 | unsigned int number) | |
2562 | { | |
2563 | const struct btrace_function *bfun; | |
fdd2bd92 | 2564 | unsigned int upper, lower; |
23a7fe75 | 2565 | |
2b51eddc | 2566 | if (btinfo->functions.empty ()) |
fdd2bd92 | 2567 | return 0; |
23a7fe75 | 2568 | |
fdd2bd92 | 2569 | lower = 0; |
08c3f6d2 | 2570 | bfun = &btinfo->functions[lower]; |
fdd2bd92 | 2571 | if (number < bfun->insn_offset) |
23a7fe75 MM |
2572 | return 0; |
2573 | ||
2b51eddc | 2574 | upper = btinfo->functions.size () - 1; |
08c3f6d2 | 2575 | bfun = &btinfo->functions[upper]; |
fdd2bd92 | 2576 | if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun)) |
23a7fe75 MM |
2577 | return 0; |
2578 | ||
fdd2bd92 TW |
2579 | /* We assume that there are no holes in the numbering. */ |
2580 | for (;;) | |
2581 | { | |
2582 | const unsigned int average = lower + (upper - lower) / 2; | |
2583 | ||
08c3f6d2 | 2584 | bfun = &btinfo->functions[average]; |
fdd2bd92 TW |
2585 | |
2586 | if (number < bfun->insn_offset) | |
2587 | { | |
2588 | upper = average - 1; | |
2589 | continue; | |
2590 | } | |
2591 | ||
2592 | if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun)) | |
2593 | { | |
2594 | lower = average + 1; | |
2595 | continue; | |
2596 | } | |
2597 | ||
2598 | break; | |
2599 | } | |
2600 | ||
521103fd | 2601 | it->btinfo = btinfo; |
a0f1b963 TW |
2602 | it->call_index = bfun->number - 1; |
2603 | it->insn_index = number - bfun->insn_offset; | |
23a7fe75 MM |
2604 | return 1; |
2605 | } | |
2606 | ||
f158f208 TW |
2607 | /* Returns true if the recording ends with a function segment that |
2608 | contains only a single (i.e. the current) instruction. */ | |
2609 | ||
2610 | static bool | |
2611 | btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo) | |
2612 | { | |
2613 | const btrace_function *bfun; | |
2614 | ||
2615 | if (btinfo->functions.empty ()) | |
2616 | return false; | |
2617 | ||
08c3f6d2 | 2618 | bfun = &btinfo->functions.back (); |
f158f208 TW |
2619 | if (bfun->errcode != 0) |
2620 | return false; | |
2621 | ||
2622 | return ftrace_call_num_insn (bfun) == 1; | |
2623 | } | |
2624 | ||
23a7fe75 MM |
2625 | /* See btrace.h. */ |
2626 | ||
2627 | const struct btrace_function * | |
2628 | btrace_call_get (const struct btrace_call_iterator *it) | |
2629 | { | |
f158f208 TW |
2630 | if (it->index >= it->btinfo->functions.size ()) |
2631 | return NULL; | |
2632 | ||
08c3f6d2 | 2633 | return &it->btinfo->functions[it->index]; |
23a7fe75 MM |
2634 | } |
2635 | ||
2636 | /* See btrace.h. */ | |
2637 | ||
2638 | unsigned int | |
2639 | btrace_call_number (const struct btrace_call_iterator *it) | |
2640 | { | |
f158f208 | 2641 | const unsigned int length = it->btinfo->functions.size (); |
23a7fe75 | 2642 | |
f158f208 TW |
2643 | /* If the last function segment contains only a single instruction (i.e. the |
2644 | current instruction), skip it. */ | |
2645 | if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo)) | |
2646 | return length; | |
23a7fe75 | 2647 | |
f158f208 | 2648 | return it->index + 1; |
23a7fe75 MM |
2649 | } |
2650 | ||
2651 | /* See btrace.h. */ | |
2652 | ||
2653 | void | |
2654 | btrace_call_begin (struct btrace_call_iterator *it, | |
2655 | const struct btrace_thread_info *btinfo) | |
2656 | { | |
f158f208 | 2657 | if (btinfo->functions.empty ()) |
23a7fe75 MM |
2658 | error (_("No trace.")); |
2659 | ||
2660 | it->btinfo = btinfo; | |
f158f208 | 2661 | it->index = 0; |
23a7fe75 MM |
2662 | } |
2663 | ||
2664 | /* See btrace.h. */ | |
2665 | ||
2666 | void | |
2667 | btrace_call_end (struct btrace_call_iterator *it, | |
2668 | const struct btrace_thread_info *btinfo) | |
2669 | { | |
f158f208 | 2670 | if (btinfo->functions.empty ()) |
23a7fe75 MM |
2671 | error (_("No trace.")); |
2672 | ||
2673 | it->btinfo = btinfo; | |
f158f208 | 2674 | it->index = btinfo->functions.size (); |
23a7fe75 MM |
2675 | } |
2676 | ||
2677 | /* See btrace.h. */ | |
2678 | ||
2679 | unsigned int | |
2680 | btrace_call_next (struct btrace_call_iterator *it, unsigned int stride) | |
2681 | { | |
f158f208 | 2682 | const unsigned int length = it->btinfo->functions.size (); |
23a7fe75 | 2683 | |
f158f208 TW |
2684 | if (it->index + stride < length - 1) |
2685 | /* Default case: Simply advance the iterator. */ | |
2686 | it->index += stride; | |
2687 | else if (it->index + stride == length - 1) | |
23a7fe75 | 2688 | { |
f158f208 TW |
2689 | /* We land exactly at the last function segment. If it contains only one |
2690 | instruction (i.e. the current instruction) it is not actually part of | |
2691 | the trace. */ | |
2692 | if (btrace_ends_with_single_insn (it->btinfo)) | |
2693 | it->index = length; | |
2694 | else | |
2695 | it->index = length - 1; | |
2696 | } | |
2697 | else | |
2698 | { | |
2699 | /* We land past the last function segment and have to adjust the stride. | |
2700 | If the last function segment contains only one instruction (i.e. the | |
2701 | current instruction) it is not actually part of the trace. */ | |
2702 | if (btrace_ends_with_single_insn (it->btinfo)) | |
2703 | stride = length - it->index - 1; | |
2704 | else | |
2705 | stride = length - it->index; | |
23a7fe75 | 2706 | |
f158f208 | 2707 | it->index = length; |
23a7fe75 MM |
2708 | } |
2709 | ||
f158f208 | 2710 | return stride; |
23a7fe75 MM |
2711 | } |
2712 | ||
2713 | /* See btrace.h. */ | |
2714 | ||
2715 | unsigned int | |
2716 | btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride) | |
2717 | { | |
f158f208 TW |
2718 | const unsigned int length = it->btinfo->functions.size (); |
2719 | int steps = 0; | |
23a7fe75 | 2720 | |
f158f208 | 2721 | gdb_assert (it->index <= length); |
23a7fe75 | 2722 | |
f158f208 TW |
2723 | if (stride == 0 || it->index == 0) |
2724 | return 0; | |
23a7fe75 | 2725 | |
f158f208 TW |
2726 | /* If we are at the end, the first step is a special case. If the last |
2727 | function segment contains only one instruction (i.e. the current | |
2728 | instruction) it is not actually part of the trace. To be able to step | |
2729 | over this instruction, we need at least one more function segment. */ | |
2730 | if ((it->index == length) && (length > 1)) | |
23a7fe75 | 2731 | { |
f158f208 TW |
2732 | if (btrace_ends_with_single_insn (it->btinfo)) |
2733 | it->index = length - 2; | |
2734 | else | |
2735 | it->index = length - 1; | |
23a7fe75 | 2736 | |
f158f208 TW |
2737 | steps = 1; |
2738 | stride -= 1; | |
23a7fe75 MM |
2739 | } |
2740 | ||
f158f208 TW |
2741 | stride = std::min (stride, it->index); |
2742 | ||
2743 | it->index -= stride; | |
2744 | return steps + stride; | |
23a7fe75 MM |
2745 | } |
2746 | ||
2747 | /* See btrace.h. */ | |
2748 | ||
2749 | int | |
2750 | btrace_call_cmp (const struct btrace_call_iterator *lhs, | |
2751 | const struct btrace_call_iterator *rhs) | |
2752 | { | |
f158f208 TW |
2753 | gdb_assert (lhs->btinfo == rhs->btinfo); |
2754 | return (int) (lhs->index - rhs->index); | |
23a7fe75 MM |
2755 | } |
2756 | ||
2757 | /* See btrace.h. */ | |
2758 | ||
2759 | int | |
2760 | btrace_find_call_by_number (struct btrace_call_iterator *it, | |
2761 | const struct btrace_thread_info *btinfo, | |
2762 | unsigned int number) | |
2763 | { | |
f158f208 | 2764 | const unsigned int length = btinfo->functions.size (); |
23a7fe75 | 2765 | |
f158f208 TW |
2766 | if ((number == 0) || (number > length)) |
2767 | return 0; | |
23a7fe75 | 2768 | |
f158f208 TW |
2769 | it->btinfo = btinfo; |
2770 | it->index = number - 1; | |
2771 | return 1; | |
23a7fe75 MM |
2772 | } |
2773 | ||
2774 | /* See btrace.h. */ | |
2775 | ||
2776 | void | |
2777 | btrace_set_insn_history (struct btrace_thread_info *btinfo, | |
2778 | const struct btrace_insn_iterator *begin, | |
2779 | const struct btrace_insn_iterator *end) | |
2780 | { | |
2781 | if (btinfo->insn_history == NULL) | |
8d749320 | 2782 | btinfo->insn_history = XCNEW (struct btrace_insn_history); |
23a7fe75 MM |
2783 | |
2784 | btinfo->insn_history->begin = *begin; | |
2785 | btinfo->insn_history->end = *end; | |
2786 | } | |
2787 | ||
2788 | /* See btrace.h. */ | |
2789 | ||
2790 | void | |
2791 | btrace_set_call_history (struct btrace_thread_info *btinfo, | |
2792 | const struct btrace_call_iterator *begin, | |
2793 | const struct btrace_call_iterator *end) | |
2794 | { | |
2795 | gdb_assert (begin->btinfo == end->btinfo); | |
2796 | ||
2797 | if (btinfo->call_history == NULL) | |
8d749320 | 2798 | btinfo->call_history = XCNEW (struct btrace_call_history); |
23a7fe75 MM |
2799 | |
2800 | btinfo->call_history->begin = *begin; | |
2801 | btinfo->call_history->end = *end; | |
2802 | } | |
07bbe694 MM |
2803 | |
2804 | /* See btrace.h. */ | |
2805 | ||
2806 | int | |
2807 | btrace_is_replaying (struct thread_info *tp) | |
2808 | { | |
2809 | return tp->btrace.replay != NULL; | |
2810 | } | |
6e07b1d2 MM |
2811 | |
2812 | /* See btrace.h. */ | |
2813 | ||
2814 | int | |
2815 | btrace_is_empty (struct thread_info *tp) | |
2816 | { | |
2817 | struct btrace_insn_iterator begin, end; | |
2818 | struct btrace_thread_info *btinfo; | |
2819 | ||
2820 | btinfo = &tp->btrace; | |
2821 | ||
b54b03bd | 2822 | if (btinfo->functions.empty ()) |
6e07b1d2 MM |
2823 | return 1; |
2824 | ||
2825 | btrace_insn_begin (&begin, btinfo); | |
2826 | btrace_insn_end (&end, btinfo); | |
2827 | ||
2828 | return btrace_insn_cmp (&begin, &end) == 0; | |
2829 | } | |
734b0e4b MM |
2830 | |
2831 | /* Forward the cleanup request. */ | |
2832 | ||
2833 | static void | |
2834 | do_btrace_data_cleanup (void *arg) | |
2835 | { | |
9a3c8263 | 2836 | btrace_data_fini ((struct btrace_data *) arg); |
734b0e4b MM |
2837 | } |
2838 | ||
2839 | /* See btrace.h. */ | |
2840 | ||
2841 | struct cleanup * | |
2842 | make_cleanup_btrace_data (struct btrace_data *data) | |
2843 | { | |
2844 | return make_cleanup (do_btrace_data_cleanup, data); | |
2845 | } | |
b0627500 MM |
2846 | |
2847 | #if defined (HAVE_LIBIPT) | |
2848 | ||
2849 | /* Print a single packet. */ | |
2850 | ||
2851 | static void | |
2852 | pt_print_packet (const struct pt_packet *packet) | |
2853 | { | |
2854 | switch (packet->type) | |
2855 | { | |
2856 | default: | |
2857 | printf_unfiltered (("[??: %x]"), packet->type); | |
2858 | break; | |
2859 | ||
2860 | case ppt_psb: | |
2861 | printf_unfiltered (("psb")); | |
2862 | break; | |
2863 | ||
2864 | case ppt_psbend: | |
2865 | printf_unfiltered (("psbend")); | |
2866 | break; | |
2867 | ||
2868 | case ppt_pad: | |
2869 | printf_unfiltered (("pad")); | |
2870 | break; | |
2871 | ||
2872 | case ppt_tip: | |
2873 | printf_unfiltered (("tip %u: 0x%" PRIx64 ""), | |
2874 | packet->payload.ip.ipc, | |
2875 | packet->payload.ip.ip); | |
2876 | break; | |
2877 | ||
2878 | case ppt_tip_pge: | |
2879 | printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""), | |
2880 | packet->payload.ip.ipc, | |
2881 | packet->payload.ip.ip); | |
2882 | break; | |
2883 | ||
2884 | case ppt_tip_pgd: | |
2885 | printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""), | |
2886 | packet->payload.ip.ipc, | |
2887 | packet->payload.ip.ip); | |
2888 | break; | |
2889 | ||
2890 | case ppt_fup: | |
2891 | printf_unfiltered (("fup %u: 0x%" PRIx64 ""), | |
2892 | packet->payload.ip.ipc, | |
2893 | packet->payload.ip.ip); | |
2894 | break; | |
2895 | ||
2896 | case ppt_tnt_8: | |
2897 | printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""), | |
2898 | packet->payload.tnt.bit_size, | |
2899 | packet->payload.tnt.payload); | |
2900 | break; | |
2901 | ||
2902 | case ppt_tnt_64: | |
2903 | printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""), | |
2904 | packet->payload.tnt.bit_size, | |
2905 | packet->payload.tnt.payload); | |
2906 | break; | |
2907 | ||
2908 | case ppt_pip: | |
37fdfe4c MM |
2909 | printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3, |
2910 | packet->payload.pip.nr ? (" nr") : ("")); | |
b0627500 MM |
2911 | break; |
2912 | ||
2913 | case ppt_tsc: | |
2914 | printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc); | |
2915 | break; | |
2916 | ||
2917 | case ppt_cbr: | |
2918 | printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio); | |
2919 | break; | |
2920 | ||
2921 | case ppt_mode: | |
2922 | switch (packet->payload.mode.leaf) | |
2923 | { | |
2924 | default: | |
2925 | printf_unfiltered (("mode %u"), packet->payload.mode.leaf); | |
2926 | break; | |
2927 | ||
2928 | case pt_mol_exec: | |
2929 | printf_unfiltered (("mode.exec%s%s"), | |
2930 | packet->payload.mode.bits.exec.csl | |
2931 | ? (" cs.l") : (""), | |
2932 | packet->payload.mode.bits.exec.csd | |
2933 | ? (" cs.d") : ("")); | |
2934 | break; | |
2935 | ||
2936 | case pt_mol_tsx: | |
2937 | printf_unfiltered (("mode.tsx%s%s"), | |
2938 | packet->payload.mode.bits.tsx.intx | |
2939 | ? (" intx") : (""), | |
2940 | packet->payload.mode.bits.tsx.abrt | |
2941 | ? (" abrt") : ("")); | |
2942 | break; | |
2943 | } | |
2944 | break; | |
2945 | ||
2946 | case ppt_ovf: | |
2947 | printf_unfiltered (("ovf")); | |
2948 | break; | |
2949 | ||
37fdfe4c MM |
2950 | case ppt_stop: |
2951 | printf_unfiltered (("stop")); | |
2952 | break; | |
2953 | ||
2954 | case ppt_vmcs: | |
2955 | printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base); | |
2956 | break; | |
2957 | ||
2958 | case ppt_tma: | |
2959 | printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc, | |
2960 | packet->payload.tma.fc); | |
2961 | break; | |
2962 | ||
2963 | case ppt_mtc: | |
2964 | printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc); | |
2965 | break; | |
2966 | ||
2967 | case ppt_cyc: | |
2968 | printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value); | |
2969 | break; | |
2970 | ||
2971 | case ppt_mnt: | |
2972 | printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload); | |
2973 | break; | |
b0627500 MM |
2974 | } |
2975 | } | |
2976 | ||
2977 | /* Decode packets into MAINT using DECODER. */ | |
2978 | ||
2979 | static void | |
2980 | btrace_maint_decode_pt (struct btrace_maint_info *maint, | |
2981 | struct pt_packet_decoder *decoder) | |
2982 | { | |
2983 | int errcode; | |
2984 | ||
2985 | for (;;) | |
2986 | { | |
2987 | struct btrace_pt_packet packet; | |
2988 | ||
2989 | errcode = pt_pkt_sync_forward (decoder); | |
2990 | if (errcode < 0) | |
2991 | break; | |
2992 | ||
2993 | for (;;) | |
2994 | { | |
2995 | pt_pkt_get_offset (decoder, &packet.offset); | |
2996 | ||
2997 | errcode = pt_pkt_next (decoder, &packet.packet, | |
2998 | sizeof(packet.packet)); | |
2999 | if (errcode < 0) | |
3000 | break; | |
3001 | ||
3002 | if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad) | |
3003 | { | |
3004 | packet.errcode = pt_errcode (errcode); | |
3005 | VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets, | |
3006 | &packet); | |
3007 | } | |
3008 | } | |
3009 | ||
3010 | if (errcode == -pte_eos) | |
3011 | break; | |
3012 | ||
3013 | packet.errcode = pt_errcode (errcode); | |
3014 | VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets, | |
3015 | &packet); | |
3016 | ||
3017 | warning (_("Error at trace offset 0x%" PRIx64 ": %s."), | |
3018 | packet.offset, pt_errstr (packet.errcode)); | |
3019 | } | |
3020 | ||
3021 | if (errcode != -pte_eos) | |
bc504a31 | 3022 | warning (_("Failed to synchronize onto the Intel Processor Trace " |
b0627500 MM |
3023 | "stream: %s."), pt_errstr (pt_errcode (errcode))); |
3024 | } | |
3025 | ||
3026 | /* Update the packet history in BTINFO. */ | |
3027 | ||
3028 | static void | |
3029 | btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo) | |
3030 | { | |
b0627500 MM |
3031 | struct pt_packet_decoder *decoder; |
3032 | struct btrace_data_pt *pt; | |
3033 | struct pt_config config; | |
3034 | int errcode; | |
3035 | ||
3036 | pt = &btinfo->data.variant.pt; | |
3037 | ||
3038 | /* Nothing to do if there is no trace. */ | |
3039 | if (pt->size == 0) | |
3040 | return; | |
3041 | ||
3042 | memset (&config, 0, sizeof(config)); | |
3043 | ||
3044 | config.size = sizeof (config); | |
3045 | config.begin = pt->data; | |
3046 | config.end = pt->data + pt->size; | |
3047 | ||
3048 | config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor); | |
3049 | config.cpu.family = pt->config.cpu.family; | |
3050 | config.cpu.model = pt->config.cpu.model; | |
3051 | config.cpu.stepping = pt->config.cpu.stepping; | |
3052 | ||
3053 | errcode = pt_cpu_errata (&config.errata, &config.cpu); | |
3054 | if (errcode < 0) | |
bc504a31 | 3055 | error (_("Failed to configure the Intel Processor Trace decoder: %s."), |
b0627500 MM |
3056 | pt_errstr (pt_errcode (errcode))); |
3057 | ||
3058 | decoder = pt_pkt_alloc_decoder (&config); | |
3059 | if (decoder == NULL) | |
bc504a31 | 3060 | error (_("Failed to allocate the Intel Processor Trace decoder.")); |
b0627500 MM |
3061 | |
3062 | TRY | |
3063 | { | |
3064 | btrace_maint_decode_pt (&btinfo->maint, decoder); | |
3065 | } | |
3066 | CATCH (except, RETURN_MASK_ALL) | |
3067 | { | |
3068 | pt_pkt_free_decoder (decoder); | |
3069 | ||
3070 | if (except.reason < 0) | |
3071 | throw_exception (except); | |
3072 | } | |
3073 | END_CATCH | |
3074 | ||
3075 | pt_pkt_free_decoder (decoder); | |
3076 | } | |
3077 | ||
3078 | #endif /* !defined (HAVE_LIBIPT) */ | |
3079 | ||
3080 | /* Update the packet maintenance information for BTINFO and store the | |
3081 | low and high bounds into BEGIN and END, respectively. | |
3082 | Store the current iterator state into FROM and TO. */ | |
3083 | ||
3084 | static void | |
3085 | btrace_maint_update_packets (struct btrace_thread_info *btinfo, | |
3086 | unsigned int *begin, unsigned int *end, | |
3087 | unsigned int *from, unsigned int *to) | |
3088 | { | |
3089 | switch (btinfo->data.format) | |
3090 | { | |
3091 | default: | |
3092 | *begin = 0; | |
3093 | *end = 0; | |
3094 | *from = 0; | |
3095 | *to = 0; | |
3096 | break; | |
3097 | ||
3098 | case BTRACE_FORMAT_BTS: | |
3099 | /* Nothing to do - we operate directly on BTINFO->DATA. */ | |
3100 | *begin = 0; | |
3101 | *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks); | |
3102 | *from = btinfo->maint.variant.bts.packet_history.begin; | |
3103 | *to = btinfo->maint.variant.bts.packet_history.end; | |
3104 | break; | |
3105 | ||
3106 | #if defined (HAVE_LIBIPT) | |
3107 | case BTRACE_FORMAT_PT: | |
3108 | if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets)) | |
3109 | btrace_maint_update_pt_packets (btinfo); | |
3110 | ||
3111 | *begin = 0; | |
3112 | *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets); | |
3113 | *from = btinfo->maint.variant.pt.packet_history.begin; | |
3114 | *to = btinfo->maint.variant.pt.packet_history.end; | |
3115 | break; | |
3116 | #endif /* defined (HAVE_LIBIPT) */ | |
3117 | } | |
3118 | } | |
3119 | ||
3120 | /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and | |
3121 | update the current iterator position. */ | |
3122 | ||
3123 | static void | |
3124 | btrace_maint_print_packets (struct btrace_thread_info *btinfo, | |
3125 | unsigned int begin, unsigned int end) | |
3126 | { | |
3127 | switch (btinfo->data.format) | |
3128 | { | |
3129 | default: | |
3130 | break; | |
3131 | ||
3132 | case BTRACE_FORMAT_BTS: | |
3133 | { | |
3134 | VEC (btrace_block_s) *blocks; | |
3135 | unsigned int blk; | |
3136 | ||
3137 | blocks = btinfo->data.variant.bts.blocks; | |
3138 | for (blk = begin; blk < end; ++blk) | |
3139 | { | |
3140 | const btrace_block_s *block; | |
3141 | ||
3142 | block = VEC_index (btrace_block_s, blocks, blk); | |
3143 | ||
3144 | printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk, | |
3145 | core_addr_to_string_nz (block->begin), | |
3146 | core_addr_to_string_nz (block->end)); | |
3147 | } | |
3148 | ||
3149 | btinfo->maint.variant.bts.packet_history.begin = begin; | |
3150 | btinfo->maint.variant.bts.packet_history.end = end; | |
3151 | } | |
3152 | break; | |
3153 | ||
3154 | #if defined (HAVE_LIBIPT) | |
3155 | case BTRACE_FORMAT_PT: | |
3156 | { | |
3157 | VEC (btrace_pt_packet_s) *packets; | |
3158 | unsigned int pkt; | |
3159 | ||
3160 | packets = btinfo->maint.variant.pt.packets; | |
3161 | for (pkt = begin; pkt < end; ++pkt) | |
3162 | { | |
3163 | const struct btrace_pt_packet *packet; | |
3164 | ||
3165 | packet = VEC_index (btrace_pt_packet_s, packets, pkt); | |
3166 | ||
3167 | printf_unfiltered ("%u\t", pkt); | |
3168 | printf_unfiltered ("0x%" PRIx64 "\t", packet->offset); | |
3169 | ||
3170 | if (packet->errcode == pte_ok) | |
3171 | pt_print_packet (&packet->packet); | |
3172 | else | |
3173 | printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode)); | |
3174 | ||
3175 | printf_unfiltered ("\n"); | |
3176 | } | |
3177 | ||
3178 | btinfo->maint.variant.pt.packet_history.begin = begin; | |
3179 | btinfo->maint.variant.pt.packet_history.end = end; | |
3180 | } | |
3181 | break; | |
3182 | #endif /* defined (HAVE_LIBIPT) */ | |
3183 | } | |
3184 | } | |
3185 | ||
3186 | /* Read a number from an argument string. */ | |
3187 | ||
3188 | static unsigned int | |
f938677d | 3189 | get_uint (const char **arg) |
b0627500 | 3190 | { |
f938677d TT |
3191 | const char *begin, *pos; |
3192 | char *end; | |
b0627500 MM |
3193 | unsigned long number; |
3194 | ||
3195 | begin = *arg; | |
3196 | pos = skip_spaces (begin); | |
3197 | ||
3198 | if (!isdigit (*pos)) | |
3199 | error (_("Expected positive number, got: %s."), pos); | |
3200 | ||
3201 | number = strtoul (pos, &end, 10); | |
3202 | if (number > UINT_MAX) | |
3203 | error (_("Number too big.")); | |
3204 | ||
3205 | *arg += (end - begin); | |
3206 | ||
3207 | return (unsigned int) number; | |
3208 | } | |
3209 | ||
3210 | /* Read a context size from an argument string. */ | |
3211 | ||
3212 | static int | |
f938677d | 3213 | get_context_size (const char **arg) |
b0627500 | 3214 | { |
f938677d | 3215 | const char *pos = skip_spaces (*arg); |
b0627500 MM |
3216 | |
3217 | if (!isdigit (*pos)) | |
3218 | error (_("Expected positive number, got: %s."), pos); | |
3219 | ||
f938677d TT |
3220 | char *end; |
3221 | long result = strtol (pos, &end, 10); | |
3222 | *arg = end; | |
3223 | return result; | |
b0627500 MM |
3224 | } |
3225 | ||
3226 | /* Complain about junk at the end of an argument string. */ | |
3227 | ||
3228 | static void | |
f938677d | 3229 | no_chunk (const char *arg) |
b0627500 MM |
3230 | { |
3231 | if (*arg != 0) | |
3232 | error (_("Junk after argument: %s."), arg); | |
3233 | } | |
3234 | ||
3235 | /* The "maintenance btrace packet-history" command. */ | |
3236 | ||
3237 | static void | |
f938677d | 3238 | maint_btrace_packet_history_cmd (const char *arg, int from_tty) |
b0627500 MM |
3239 | { |
3240 | struct btrace_thread_info *btinfo; | |
3241 | struct thread_info *tp; | |
3242 | unsigned int size, begin, end, from, to; | |
3243 | ||
3244 | tp = find_thread_ptid (inferior_ptid); | |
3245 | if (tp == NULL) | |
3246 | error (_("No thread.")); | |
3247 | ||
3248 | size = 10; | |
3249 | btinfo = &tp->btrace; | |
3250 | ||
3251 | btrace_maint_update_packets (btinfo, &begin, &end, &from, &to); | |
3252 | if (begin == end) | |
3253 | { | |
3254 | printf_unfiltered (_("No trace.\n")); | |
3255 | return; | |
3256 | } | |
3257 | ||
3258 | if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0) | |
3259 | { | |
3260 | from = to; | |
3261 | ||
3262 | if (end - from < size) | |
3263 | size = end - from; | |
3264 | to = from + size; | |
3265 | } | |
3266 | else if (strcmp (arg, "-") == 0) | |
3267 | { | |
3268 | to = from; | |
3269 | ||
3270 | if (to - begin < size) | |
3271 | size = to - begin; | |
3272 | from = to - size; | |
3273 | } | |
3274 | else | |
3275 | { | |
3276 | from = get_uint (&arg); | |
3277 | if (end <= from) | |
3278 | error (_("'%u' is out of range."), from); | |
3279 | ||
3280 | arg = skip_spaces (arg); | |
3281 | if (*arg == ',') | |
3282 | { | |
3283 | arg = skip_spaces (++arg); | |
3284 | ||
3285 | if (*arg == '+') | |
3286 | { | |
3287 | arg += 1; | |
3288 | size = get_context_size (&arg); | |
3289 | ||
3290 | no_chunk (arg); | |
3291 | ||
3292 | if (end - from < size) | |
3293 | size = end - from; | |
3294 | to = from + size; | |
3295 | } | |
3296 | else if (*arg == '-') | |
3297 | { | |
3298 | arg += 1; | |
3299 | size = get_context_size (&arg); | |
3300 | ||
3301 | no_chunk (arg); | |
3302 | ||
3303 | /* Include the packet given as first argument. */ | |
3304 | from += 1; | |
3305 | to = from; | |
3306 | ||
3307 | if (to - begin < size) | |
3308 | size = to - begin; | |
3309 | from = to - size; | |
3310 | } | |
3311 | else | |
3312 | { | |
3313 | to = get_uint (&arg); | |
3314 | ||
3315 | /* Include the packet at the second argument and silently | |
3316 | truncate the range. */ | |
3317 | if (to < end) | |
3318 | to += 1; | |
3319 | else | |
3320 | to = end; | |
3321 | ||
3322 | no_chunk (arg); | |
3323 | } | |
3324 | } | |
3325 | else | |
3326 | { | |
3327 | no_chunk (arg); | |
3328 | ||
3329 | if (end - from < size) | |
3330 | size = end - from; | |
3331 | to = from + size; | |
3332 | } | |
3333 | ||
3334 | dont_repeat (); | |
3335 | } | |
3336 | ||
3337 | btrace_maint_print_packets (btinfo, from, to); | |
3338 | } | |
3339 | ||
3340 | /* The "maintenance btrace clear-packet-history" command. */ | |
3341 | ||
3342 | static void | |
f938677d | 3343 | maint_btrace_clear_packet_history_cmd (const char *args, int from_tty) |
b0627500 MM |
3344 | { |
3345 | struct btrace_thread_info *btinfo; | |
3346 | struct thread_info *tp; | |
3347 | ||
3348 | if (args != NULL && *args != 0) | |
3349 | error (_("Invalid argument.")); | |
3350 | ||
3351 | tp = find_thread_ptid (inferior_ptid); | |
3352 | if (tp == NULL) | |
3353 | error (_("No thread.")); | |
3354 | ||
3355 | btinfo = &tp->btrace; | |
3356 | ||
3357 | /* Must clear the maint data before - it depends on BTINFO->DATA. */ | |
3358 | btrace_maint_clear (btinfo); | |
3359 | btrace_data_clear (&btinfo->data); | |
3360 | } | |
3361 | ||
3362 | /* The "maintenance btrace clear" command. */ | |
3363 | ||
3364 | static void | |
f938677d | 3365 | maint_btrace_clear_cmd (const char *args, int from_tty) |
b0627500 | 3366 | { |
b0627500 MM |
3367 | struct thread_info *tp; |
3368 | ||
3369 | if (args != NULL && *args != 0) | |
3370 | error (_("Invalid argument.")); | |
3371 | ||
3372 | tp = find_thread_ptid (inferior_ptid); | |
3373 | if (tp == NULL) | |
3374 | error (_("No thread.")); | |
3375 | ||
3376 | btrace_clear (tp); | |
3377 | } | |
3378 | ||
3379 | /* The "maintenance btrace" command. */ | |
3380 | ||
3381 | static void | |
981a3fb3 | 3382 | maint_btrace_cmd (const char *args, int from_tty) |
b0627500 MM |
3383 | { |
3384 | help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands, | |
3385 | gdb_stdout); | |
3386 | } | |
3387 | ||
3388 | /* The "maintenance set btrace" command. */ | |
3389 | ||
3390 | static void | |
981a3fb3 | 3391 | maint_btrace_set_cmd (const char *args, int from_tty) |
b0627500 MM |
3392 | { |
3393 | help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands, | |
3394 | gdb_stdout); | |
3395 | } | |
3396 | ||
3397 | /* The "maintenance show btrace" command. */ | |
3398 | ||
3399 | static void | |
981a3fb3 | 3400 | maint_btrace_show_cmd (const char *args, int from_tty) |
b0627500 MM |
3401 | { |
3402 | help_list (maint_btrace_show_cmdlist, "maintenance show btrace ", | |
3403 | all_commands, gdb_stdout); | |
3404 | } | |
3405 | ||
3406 | /* The "maintenance set btrace pt" command. */ | |
3407 | ||
3408 | static void | |
981a3fb3 | 3409 | maint_btrace_pt_set_cmd (const char *args, int from_tty) |
b0627500 MM |
3410 | { |
3411 | help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ", | |
3412 | all_commands, gdb_stdout); | |
3413 | } | |
3414 | ||
3415 | /* The "maintenance show btrace pt" command. */ | |
3416 | ||
3417 | static void | |
981a3fb3 | 3418 | maint_btrace_pt_show_cmd (const char *args, int from_tty) |
b0627500 MM |
3419 | { |
3420 | help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ", | |
3421 | all_commands, gdb_stdout); | |
3422 | } | |
3423 | ||
3424 | /* The "maintenance info btrace" command. */ | |
3425 | ||
3426 | static void | |
f938677d | 3427 | maint_info_btrace_cmd (const char *args, int from_tty) |
b0627500 MM |
3428 | { |
3429 | struct btrace_thread_info *btinfo; | |
3430 | struct thread_info *tp; | |
3431 | const struct btrace_config *conf; | |
3432 | ||
3433 | if (args != NULL && *args != 0) | |
3434 | error (_("Invalid argument.")); | |
3435 | ||
3436 | tp = find_thread_ptid (inferior_ptid); | |
3437 | if (tp == NULL) | |
3438 | error (_("No thread.")); | |
3439 | ||
3440 | btinfo = &tp->btrace; | |
3441 | ||
3442 | conf = btrace_conf (btinfo); | |
3443 | if (conf == NULL) | |
3444 | error (_("No btrace configuration.")); | |
3445 | ||
3446 | printf_unfiltered (_("Format: %s.\n"), | |
3447 | btrace_format_string (conf->format)); | |
3448 | ||
3449 | switch (conf->format) | |
3450 | { | |
3451 | default: | |
3452 | break; | |
3453 | ||
3454 | case BTRACE_FORMAT_BTS: | |
3455 | printf_unfiltered (_("Number of packets: %u.\n"), | |
3456 | VEC_length (btrace_block_s, | |
3457 | btinfo->data.variant.bts.blocks)); | |
3458 | break; | |
3459 | ||
3460 | #if defined (HAVE_LIBIPT) | |
3461 | case BTRACE_FORMAT_PT: | |
3462 | { | |
3463 | struct pt_version version; | |
3464 | ||
3465 | version = pt_library_version (); | |
3466 | printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major, | |
3467 | version.minor, version.build, | |
3468 | version.ext != NULL ? version.ext : ""); | |
3469 | ||
3470 | btrace_maint_update_pt_packets (btinfo); | |
3471 | printf_unfiltered (_("Number of packets: %u.\n"), | |
3472 | VEC_length (btrace_pt_packet_s, | |
3473 | btinfo->maint.variant.pt.packets)); | |
3474 | } | |
3475 | break; | |
3476 | #endif /* defined (HAVE_LIBIPT) */ | |
3477 | } | |
3478 | } | |
3479 | ||
3480 | /* The "maint show btrace pt skip-pad" show value function. */ | |
3481 | ||
3482 | static void | |
3483 | show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty, | |
3484 | struct cmd_list_element *c, | |
3485 | const char *value) | |
3486 | { | |
3487 | fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value); | |
3488 | } | |
3489 | ||
3490 | ||
3491 | /* Initialize btrace maintenance commands. */ | |
3492 | ||
b0627500 MM |
3493 | void |
3494 | _initialize_btrace (void) | |
3495 | { | |
3496 | add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd, | |
3497 | _("Info about branch tracing data."), &maintenanceinfolist); | |
3498 | ||
3499 | add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd, | |
3500 | _("Branch tracing maintenance commands."), | |
3501 | &maint_btrace_cmdlist, "maintenance btrace ", | |
3502 | 0, &maintenancelist); | |
3503 | ||
3504 | add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\ | |
3505 | Set branch tracing specific variables."), | |
3506 | &maint_btrace_set_cmdlist, "maintenance set btrace ", | |
3507 | 0, &maintenance_set_cmdlist); | |
3508 | ||
3509 | add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\ | |
bc504a31 | 3510 | Set Intel Processor Trace specific variables."), |
b0627500 MM |
3511 | &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ", |
3512 | 0, &maint_btrace_set_cmdlist); | |
3513 | ||
3514 | add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\ | |
3515 | Show branch tracing specific variables."), | |
3516 | &maint_btrace_show_cmdlist, "maintenance show btrace ", | |
3517 | 0, &maintenance_show_cmdlist); | |
3518 | ||
3519 | add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\ | |
bc504a31 | 3520 | Show Intel Processor Trace specific variables."), |
b0627500 MM |
3521 | &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ", |
3522 | 0, &maint_btrace_show_cmdlist); | |
3523 | ||
3524 | add_setshow_boolean_cmd ("skip-pad", class_maintenance, | |
3525 | &maint_btrace_pt_skip_pad, _("\ | |
3526 | Set whether PAD packets should be skipped in the btrace packet history."), _("\ | |
3527 | Show whether PAD packets should be skipped in the btrace packet history."),_("\ | |
3528 | When enabled, PAD packets are ignored in the btrace packet history."), | |
3529 | NULL, show_maint_btrace_pt_skip_pad, | |
3530 | &maint_btrace_pt_set_cmdlist, | |
3531 | &maint_btrace_pt_show_cmdlist); | |
3532 | ||
3533 | add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd, | |
3534 | _("Print the raw branch tracing data.\n\ | |
3535 | With no argument, print ten more packets after the previous ten-line print.\n\ | |
3536 | With '-' as argument print ten packets before a previous ten-line print.\n\ | |
3537 | One argument specifies the starting packet of a ten-line print.\n\ | |
3538 | Two arguments with comma between specify starting and ending packets to \ | |
3539 | print.\n\ | |
3540 | Preceded with '+'/'-' the second argument specifies the distance from the \ | |
3541 | first.\n"), | |
3542 | &maint_btrace_cmdlist); | |
3543 | ||
3544 | add_cmd ("clear-packet-history", class_maintenance, | |
3545 | maint_btrace_clear_packet_history_cmd, | |
3546 | _("Clears the branch tracing packet history.\n\ | |
3547 | Discards the raw branch tracing data but not the execution history data.\n\ | |
3548 | "), | |
3549 | &maint_btrace_cmdlist); | |
3550 | ||
3551 | add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd, | |
3552 | _("Clears the branch tracing data.\n\ | |
3553 | Discards the raw branch tracing data and the execution history data.\n\ | |
3554 | The next 'record' command will fetch the branch tracing data anew.\n\ | |
3555 | "), | |
3556 | &maint_btrace_cmdlist); | |
3557 | ||
3558 | } |