1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2021 Free Software Foundation, Inc.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
31 #include "filenames.h"
32 #include "xml-support.h"
34 #include "gdbsupport/rsp-low.h"
36 #include "cli/cli-utils.h"
39 /* For maintenance commands. */
40 #include "record-btrace.h"
46 /* Command lists for btrace maintenance commands. */
47 static struct cmd_list_element *maint_btrace_cmdlist;
48 static struct cmd_list_element *maint_btrace_set_cmdlist;
49 static struct cmd_list_element *maint_btrace_show_cmdlist;
50 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
51 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
53 /* Control whether to skip PAD packets when computing the packet history. */
54 static bool maint_btrace_pt_skip_pad = true;
56 static void btrace_add_pc (struct thread_info *tp);
58 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
59 when used in if statements. */
61 #define DEBUG(msg, args...) \
62 do \
63 { \
64 if (record_debug != 0) \
65 fprintf_unfiltered (gdb_stdlog, \
66 "[btrace] " msg "\n", ##args); \
67 } \
68 while (0)
70 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
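/* A minimal usage sketch (illustrative, not part of the original file):
   because DEBUG expands to a single do ... while (0) statement, it is safe
   in an unbraced if/else without dangling-else surprises.  This helper and
   its STATUS parameter are hypothetical and exist only for this example;
   ATTRIBUTE_UNUSED (from ansidecl.h) merely silences -Wunused-function.  */

static void ATTRIBUTE_UNUSED
debug_usage_sketch (int status)
{
  if (status != 0)
    DEBUG ("operation failed with status %d", status);
  else
    DEBUG_FTRACE ("operation succeeded");
}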
72 /* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
76 ftrace_print_function_name (const struct btrace_function *bfun)
78 struct minimal_symbol *msym;
85 return sym->print_name ();
88 return msym->print_name ();
93 /* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
97 ftrace_print_filename (const struct btrace_function *bfun)
100 const char *filename;
105 filename = symtab_to_filename_for_display (symbol_symtab (sym));
107 filename = "<unknown>";
112 /* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
116 ftrace_print_insn_addr (const struct btrace_insn *insn)
121 return core_addr_to_string_nz (insn->pc);
124 /* Print an ftrace debug status message. */
127 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
129 const char *fun, *file;
130 unsigned int ibegin, iend;
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
137 ibegin = bfun->insn_offset;
138 iend = ibegin + bfun->insn.size ();
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
144 /* Return the number of instructions in a given function call segment. */
147 ftrace_call_num_insn (const struct btrace_function* bfun)
152 /* A gap is always counted as one instruction. */
153 if (bfun->errcode != 0)
156 return bfun->insn.size ();
159 /* Return the function segment with the given NUMBER or NULL if no such segment
160 exists. BTINFO is the branch trace information for the current thread. */
162 static struct btrace_function *
163 ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
166 if (number == 0 || number > btinfo->functions.size ())
169 return &btinfo->functions[number - 1];
172 /* A const version of the function above. */
174 static const struct btrace_function *
175 ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
178 if (number == 0 || number > btinfo->functions.size ())
181 return &btinfo->functions[number - 1];
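/* Illustrative sketch (not part of the original file): function segments
   reference one another by 1-based number instead of by pointer, since the
   FUNCTIONS vector may reallocate.  A walk over a caller chain therefore
   re-resolves the pointer at every step; ftrace_find_call_by_number
   returns NULL for number 0, which terminates the loop at the top.  This
   helper is hypothetical and exists only for this example.  */

static void ATTRIBUTE_UNUSED
ftrace_walk_callers_sketch (const struct btrace_thread_info *btinfo,
			    const struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    ftrace_debug (bfun, "caller");
}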
184 /* Return non-zero if BFUN does not match MFUN and FUN;
185 return zero otherwise. */
188 ftrace_function_switched (const struct btrace_function *bfun,
189 const struct minimal_symbol *mfun,
190 const struct symbol *fun)
192 struct minimal_symbol *msym;
198 /* If the minimal symbol changed, we certainly switched functions. */
199 if (mfun != NULL && msym != NULL
200 && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
203 /* If the symbol changed, we certainly switched functions. */
204 if (fun != NULL && sym != NULL)
206 const char *bfname, *fname;
208 /* Check the function name. */
209 if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
212 /* Check the location of those functions, as well. */
213 bfname = symtab_to_fullname (symbol_symtab (sym));
214 fname = symtab_to_fullname (symbol_symtab (fun));
215 if (filename_cmp (fname, bfname) != 0)
219 /* If we lost symbol information, we switched functions. */
220 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
223 /* If we gained symbol information, we switched functions. */
224 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
230 /* Allocate and initialize a new branch trace function segment at the end of
231 the trace.
232 BTINFO is the branch trace information for the current thread.
233 MFUN and FUN are the symbol information we have for this function.
234 This invalidates all struct btrace_function pointers currently held. */
236 static struct btrace_function *
237 ftrace_new_function (struct btrace_thread_info *btinfo,
238 struct minimal_symbol *mfun,
242 unsigned int number, insn_offset;
244 if (btinfo->functions.empty ())
246 /* Start counting NUMBER and INSN_OFFSET at one. */
253 const struct btrace_function *prev = &btinfo->functions.back ();
255 number = prev->number + 1;
256 insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
259 btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
260 return &btinfo->functions.back ();
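/* Illustrative sketch (not part of the original file): because the call
   above may reallocate BTINFO->FUNCTIONS, callers that need to keep an
   existing segment across an insertion save its NUMBER and re-fetch the
   pointer afterwards, e.g.:

     unsigned int caller_number = caller->number;
     bfun = ftrace_new_function (btinfo, mfun, fun);
     caller = ftrace_find_call_by_number (btinfo, caller_number);

   CALLER_NUMBER is a hypothetical local used only for this example.  */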
263 /* Update the UP field of a function segment. */
266 ftrace_update_caller (struct btrace_function *bfun,
267 struct btrace_function *caller,
268 btrace_function_flags flags)
271 ftrace_debug (bfun, "updating caller");
273 bfun->up = caller->number;
276 ftrace_debug (bfun, "set caller");
277 ftrace_debug (caller, "..to");
280 /* Fix up the caller for all segments of a function. */
283 ftrace_fixup_caller (struct btrace_thread_info *btinfo,
284 struct btrace_function *bfun,
285 struct btrace_function *caller,
286 btrace_function_flags flags)
288 unsigned int prev, next;
292 ftrace_update_caller (bfun, caller, flags);
294 /* Update all function segments belonging to the same function. */
295 for (; prev != 0; prev = bfun->prev)
297 bfun = ftrace_find_call_by_number (btinfo, prev);
298 ftrace_update_caller (bfun, caller, flags);
301 for (; next != 0; next = bfun->next)
303 bfun = ftrace_find_call_by_number (btinfo, next);
304 ftrace_update_caller (bfun, caller, flags);
308 /* Add a new function segment for a call at the end of the trace.
309 BTINFO is the branch trace information for the current thread.
310 MFUN and FUN are the symbol information we have for this function. */
312 static struct btrace_function *
313 ftrace_new_call (struct btrace_thread_info *btinfo,
314 struct minimal_symbol *mfun,
317 const unsigned int length = btinfo->functions.size ();
318 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
323 ftrace_debug (bfun, "new call");
328 /* Add a new function segment for a tail call at the end of the trace.
329 BTINFO is the branch trace information for the current thread.
330 MFUN and FUN are the symbol information we have for this function. */
332 static struct btrace_function *
333 ftrace_new_tailcall (struct btrace_thread_info *btinfo,
334 struct minimal_symbol *mfun,
337 const unsigned int length = btinfo->functions.size ();
338 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
342 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
344 ftrace_debug (bfun, "new tail call");
349 /* Return the caller of BFUN or NULL if there is none. This function skips
350 tail calls in the call chain. BTINFO is the branch trace information for
351 the current thread. */
352 static struct btrace_function *
353 ftrace_get_caller (struct btrace_thread_info *btinfo,
354 struct btrace_function *bfun)
356 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
357 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
358 return ftrace_find_call_by_number (btinfo, bfun->up);
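/* For illustration (not from the original file, hypothetical segments): if
   segment A calls B and B tail-calls C, then C's up link names B with
   BFUN_UP_LINKS_TO_TAILCALL set.  ftrace_find_call_by_number (btinfo,
   C->up) would yield B, whereas ftrace_get_caller (btinfo, C) skips the
   tail call and yields A, which is the frame a back trace from C should
   show.  */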
363 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
364 symbol information. BTINFO is the branch trace information for the current
365 thread. */
367 static struct btrace_function *
368 ftrace_find_caller (struct btrace_thread_info *btinfo,
369 struct btrace_function *bfun,
370 struct minimal_symbol *mfun,
373 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
375 /* Skip functions with incompatible symbol information. */
376 if (ftrace_function_switched (bfun, mfun, fun))
379 /* This is the function segment we're looking for. */
386 /* Find the innermost caller in the back trace of BFUN, skipping all
387 function segments that do not end with a call instruction (e.g.
388 tail calls ending with a jump). BTINFO is the branch trace information for
389 the current thread. */
391 static struct btrace_function *
392 ftrace_find_call (struct btrace_thread_info *btinfo,
393 struct btrace_function *bfun)
395 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
398 if (bfun->errcode != 0)
401 btrace_insn &last = bfun->insn.back ();
403 if (last.iclass == BTRACE_INSN_CALL)
410 /* Add a continuation segment for a function into which we return at the end of
411 the trace.
412 BTINFO is the branch trace information for the current thread.
413 MFUN and FUN are the symbol information we have for this function. */
415 static struct btrace_function *
416 ftrace_new_return (struct btrace_thread_info *btinfo,
417 struct minimal_symbol *mfun,
420 struct btrace_function *prev, *bfun, *caller;
422 bfun = ftrace_new_function (btinfo, mfun, fun);
423 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
425 /* It is important to start at PREV's caller. Otherwise, we might find
426 PREV itself, if PREV is a recursive function. */
427 caller = ftrace_find_call_by_number (btinfo, prev->up);
428 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
431 /* The caller of PREV is the preceding btrace function segment in this
432 function instance. */
433 gdb_assert (caller->next == 0);
435 caller->next = bfun->number;
436 bfun->prev = caller->number;
438 /* Maintain the function level. */
439 bfun->level = caller->level;
441 /* Maintain the call stack. */
442 bfun->up = caller->up;
443 bfun->flags = caller->flags;
445 ftrace_debug (bfun, "new return");
449 /* We did not find a caller. This could mean that something went
450 wrong or that the call is simply not included in the trace. */
452 /* Let's search for some actual call. */
453 caller = ftrace_find_call_by_number (btinfo, prev->up);
454 caller = ftrace_find_call (btinfo, caller);
457 /* There is no call in PREV's back trace. We assume that the
458 branch trace did not include it. */
460 /* Let's find the topmost function and add a new caller for it.
461 This should handle a series of initial tail calls. */
462 while (prev->up != 0)
463 prev = ftrace_find_call_by_number (btinfo, prev->up);
465 bfun->level = prev->level - 1;
467 /* Fix up the call stack for PREV. */
468 ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
470 ftrace_debug (bfun, "new return - no caller");
474 /* There is a call in PREV's back trace to which we should have
475 returned but didn't. Let's start a new, separate back trace
476 from PREV's level. */
477 bfun->level = prev->level - 1;
479 /* We fix up the back trace for PREV but leave other function segments
480 on the same level as they are.
481 This should handle things like schedule () correctly where we're
482 switching contexts. */
483 prev->up = bfun->number;
484 prev->flags = BFUN_UP_LINKS_TO_RET;
486 ftrace_debug (bfun, "new return - unknown caller");
493 /* Add a new function segment for a function switch at the end of the trace.
494 BTINFO is the branch trace information for the current thread.
495 MFUN and FUN are the symbol information we have for this function. */
497 static struct btrace_function *
498 ftrace_new_switch (struct btrace_thread_info *btinfo,
499 struct minimal_symbol *mfun,
502 struct btrace_function *prev, *bfun;
504 /* This is an unexplained function switch. We can't really be sure about the
505 call stack; the best we can do right now is to preserve it. */
506 bfun = ftrace_new_function (btinfo, mfun, fun);
507 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
509 bfun->flags = prev->flags;
511 ftrace_debug (bfun, "new switch");
516 /* Add a new function segment for a gap in the trace due to a decode error at
517 the end of the trace.
518 BTINFO is the branch trace information for the current thread.
519 ERRCODE is the format-specific error code. */
521 static struct btrace_function *
522 ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
523 std::vector<unsigned int> &gaps)
525 struct btrace_function *bfun;
527 if (btinfo->functions.empty ())
528 bfun = ftrace_new_function (btinfo, NULL, NULL);
531 /* We hijack the previous function segment if it was empty. */
532 bfun = &btinfo->functions.back ();
533 if (bfun->errcode != 0 || !bfun->insn.empty ())
534 bfun = ftrace_new_function (btinfo, NULL, NULL);
537 bfun->errcode = errcode;
538 gaps.push_back (bfun->number);
540 ftrace_debug (bfun, "new gap");
545 /* Update the current function segment at the end of the trace in BTINFO with
546 respect to the instruction at PC. This may create new function segments.
547 Return the chronologically latest function segment, never NULL. */
549 static struct btrace_function *
550 ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
552 struct bound_minimal_symbol bmfun;
553 struct minimal_symbol *mfun;
555 struct btrace_function *bfun;
557 /* Try to determine the function we're in. We use both types of symbols
558 to avoid surprises when we sometimes get a full symbol and sometimes
559 only a minimal symbol. */
560 fun = find_pc_function (pc);
561 bmfun = lookup_minimal_symbol_by_pc (pc);
564 if (fun == NULL && mfun == NULL)
565 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
567 /* If we didn't have a function, we create one. */
568 if (btinfo->functions.empty ())
569 return ftrace_new_function (btinfo, mfun, fun);
571 /* If we had a gap before, we create a function. */
572 bfun = &btinfo->functions.back ();
573 if (bfun->errcode != 0)
574 return ftrace_new_function (btinfo, mfun, fun);
576 /* Check the last instruction, if we have one.
577 We do this check first, since it allows us to fill in the call stack
578 links in addition to the normal flow links. */
579 btrace_insn *last = NULL;
580 if (!bfun->insn.empty ())
581 last = &bfun->insn.back ();
585 switch (last->iclass)
587 case BTRACE_INSN_RETURN:
591 /* On some systems, _dl_runtime_resolve returns to the resolved
592 function instead of jumping to it. From our perspective,
593 however, this is a tailcall.
594 If we treated it as return, we wouldn't be able to find the
595 resolved function in our stack back trace. Hence, we would
596 lose the current stack back trace and start anew with an empty
597 back trace. When the resolved function returns, we would then
598 create a stack back trace with the same function names but
599 different frame IDs. This will confuse stepping. */
600 fname = ftrace_print_function_name (bfun);
601 if (strcmp (fname, "_dl_runtime_resolve") == 0)
602 return ftrace_new_tailcall (btinfo, mfun, fun);
604 return ftrace_new_return (btinfo, mfun, fun);
607 case BTRACE_INSN_CALL:
608 /* Ignore calls to the next instruction. They are used for PIC. */
609 if (last->pc + last->size == pc)
612 return ftrace_new_call (btinfo, mfun, fun);
614 case BTRACE_INSN_JUMP:
618 start = get_pc_function_start (pc);
620 /* A jump to the start of a function is (typically) a tail call. */
622 return ftrace_new_tailcall (btinfo, mfun, fun);
624 /* Some versions of _Unwind_RaiseException use an indirect
625 jump to 'return' to the exception handler of the caller
626 handling the exception instead of a return. Let's restrict
627 this heuristic to that and related functions. */
628 const char *fname = ftrace_print_function_name (bfun);
629 if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
631 struct btrace_function *caller
632 = ftrace_find_call_by_number (btinfo, bfun->up);
633 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
635 return ftrace_new_return (btinfo, mfun, fun);
638 /* If we can't determine the function for PC, we treat a jump at
639 the end of the block as a tail call if we're switching functions
640 and as an intra-function branch if we're not. */
641 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
642 return ftrace_new_tailcall (btinfo, mfun, fun);
649 /* Check if we're switching functions for some other reason. */
650 if (ftrace_function_switched (bfun, mfun, fun))
652 DEBUG_FTRACE ("switching from %s in %s at %s",
653 ftrace_print_insn_addr (last),
654 ftrace_print_function_name (bfun),
655 ftrace_print_filename (bfun));
657 return ftrace_new_switch (btinfo, mfun, fun);
663 /* Add the instruction at PC to BFUN's instructions. */
666 ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
668 bfun->insn.push_back (insn);
670 if (record_debug > 1)
671 ftrace_debug (bfun, "update insn");
674 /* Classify the instruction at PC. */
676 static enum btrace_insn_class
677 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
679 enum btrace_insn_class iclass;
681 iclass = BTRACE_INSN_OTHER;
684 if (gdbarch_insn_is_call (gdbarch, pc))
685 iclass = BTRACE_INSN_CALL;
686 else if (gdbarch_insn_is_ret (gdbarch, pc))
687 iclass = BTRACE_INSN_RETURN;
688 else if (gdbarch_insn_is_jump (gdbarch, pc))
689 iclass = BTRACE_INSN_JUMP;
691 catch (const gdb_exception_error &error)
698 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
699 number of matching function segments or zero if the back traces do not
700 match. BTINFO is the branch trace information for the current thread. */
703 ftrace_match_backtrace (struct btrace_thread_info *btinfo,
704 struct btrace_function *lhs,
705 struct btrace_function *rhs)
709 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
711 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
714 lhs = ftrace_get_caller (btinfo, lhs);
715 rhs = ftrace_get_caller (btinfo, rhs);
721 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
722 BTINFO is the branch trace information for the current thread. */
725 ftrace_fixup_level (struct btrace_thread_info *btinfo,
726 struct btrace_function *bfun, int adjustment)
731 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
732 ftrace_debug (bfun, "..bfun");
736 bfun->level += adjustment;
737 bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
741 /* Recompute the global level offset. Traverse the function trace and compute
742 the global level offset as the negative of the minimal function level. */
745 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
752 if (btinfo->functions.empty ())
755 unsigned int length = btinfo->functions.size () - 1;
756 for (unsigned int i = 0; i < length; ++i)
757 level = std::min (level, btinfo->functions[i].level);
759 /* The last function segment contains the current instruction, which is not
760 really part of the trace. If it contains just this one instruction, we
761 ignore the segment. */
762 struct btrace_function *last = &btinfo->functions.back ();
763 if (last->insn.size () != 1)
764 level = std::min (level, last->level);
766 DEBUG_FTRACE ("setting global level offset: %d", -level);
767 btinfo->level = -level;
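/* For illustration (not from the original file): if the recorded segments
   sit at levels 1, 0 and -2, the minimal level is -2 and BTINFO->LEVEL
   becomes 2, so that normalized function levels start at zero.  */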
770 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
771 ftrace_connect_backtrace. BTINFO is the branch trace information for the
772 current thread. */
775 ftrace_connect_bfun (struct btrace_thread_info *btinfo,
776 struct btrace_function *prev,
777 struct btrace_function *next)
779 DEBUG_FTRACE ("connecting...");
780 ftrace_debug (prev, "..prev");
781 ftrace_debug (next, "..next");
783 /* The function segments are not yet connected. */
784 gdb_assert (prev->next == 0);
785 gdb_assert (next->prev == 0);
787 prev->next = next->number;
788 next->prev = prev->number;
790 /* We may have moved NEXT to a different function level. */
791 ftrace_fixup_level (btinfo, next, prev->level - next->level);
793 /* If we run out of back trace for one, let's use the other's. */
796 const btrace_function_flags flags = next->flags;
798 next = ftrace_find_call_by_number (btinfo, next->up);
801 DEBUG_FTRACE ("using next's callers");
802 ftrace_fixup_caller (btinfo, prev, next, flags);
805 else if (next->up == 0)
807 const btrace_function_flags flags = prev->flags;
809 prev = ftrace_find_call_by_number (btinfo, prev->up);
812 DEBUG_FTRACE ("using prev's callers");
813 ftrace_fixup_caller (btinfo, next, prev, flags);
818 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
819 link to add the tail callers to NEXT's back trace.
821 This removes NEXT->UP from NEXT's back trace. It will be added back
822 when connecting NEXT and PREV's callers - provided they exist.
824 If PREV's back trace consists of a series of tail calls without an
825 actual call, there will be no further connection and NEXT's caller will
826 be removed for good. To catch this case, we handle it here and connect
827 the top of PREV's back trace to NEXT's caller. */
828 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
830 struct btrace_function *caller;
831 btrace_function_flags next_flags, prev_flags;
833 /* We checked NEXT->UP above so CALLER can't be NULL. */
834 caller = ftrace_find_call_by_number (btinfo, next->up);
835 next_flags = next->flags;
836 prev_flags = prev->flags;
838 DEBUG_FTRACE ("adding prev's tail calls to next");
840 prev = ftrace_find_call_by_number (btinfo, prev->up);
841 ftrace_fixup_caller (btinfo, next, prev, prev_flags);
843 for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
846 /* At the end of PREV's back trace, continue with CALLER. */
849 DEBUG_FTRACE ("fixing up link for tailcall chain");
850 ftrace_debug (prev, "..top");
851 ftrace_debug (caller, "..up");
853 ftrace_fixup_caller (btinfo, prev, caller, next_flags);
855 /* If we skipped any tail calls, this may move CALLER to a
856 different function level.
858 Note that changing CALLER's level is only OK because we
859 know that this is the last iteration of the bottom-to-top
860 walk in ftrace_connect_backtrace.
862 Otherwise we will fix up CALLER's level when we connect it
863 to PREV's caller in the next iteration. */
864 ftrace_fixup_level (btinfo, caller,
865 prev->level - caller->level - 1);
869 /* There's nothing to do if we find a real call. */
870 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
872 DEBUG_FTRACE ("will fix up link in next iteration");
880 /* Connect function segments on the same level in the back trace at LHS and RHS.
881 The back traces at LHS and RHS are expected to match according to
882 ftrace_match_backtrace. BTINFO is the branch trace information for the
883 current thread. */
886 ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
887 struct btrace_function *lhs,
888 struct btrace_function *rhs)
890 while (lhs != NULL && rhs != NULL)
892 struct btrace_function *prev, *next;
894 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
896 /* Connecting LHS and RHS may change the up link. */
900 lhs = ftrace_get_caller (btinfo, lhs);
901 rhs = ftrace_get_caller (btinfo, rhs);
903 ftrace_connect_bfun (btinfo, prev, next);
907 /* Bridge the gap between two function segments left and right of a gap if their
908 respective back traces match in at least MIN_MATCHES functions. BTINFO is
909 the branch trace information for the current thread.
911 Returns non-zero if the gap could be bridged, zero otherwise. */
914 ftrace_bridge_gap (struct btrace_thread_info *btinfo,
915 struct btrace_function *lhs, struct btrace_function *rhs,
918 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
921 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
922 rhs->insn_offset - 1, min_matches);
928 /* We search the back traces of LHS and RHS for valid connections and connect
929 the two function segments that give the longest combined back trace. */
931 for (cand_l = lhs; cand_l != NULL;
932 cand_l = ftrace_get_caller (btinfo, cand_l))
933 for (cand_r = rhs; cand_r != NULL;
934 cand_r = ftrace_get_caller (btinfo, cand_r))
938 matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
939 if (best_matches < matches)
941 best_matches = matches;
947 /* We need at least MIN_MATCHES matches. */
948 gdb_assert (min_matches > 0);
949 if (best_matches < min_matches)
952 DEBUG_FTRACE ("..matches: %d", best_matches);
954 /* We will fix up the level of BEST_R and succeeding function segments such
955 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
957 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
958 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
960 To catch this, we already fix up the level here where we can start at RHS
961 instead of at BEST_R. We will ignore the level fixup when connecting
962 BEST_L to BEST_R as they will already be on the same level. */
963 ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
965 ftrace_connect_backtrace (btinfo, best_l, best_r);
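/* For illustration (not from the original file, hypothetical function
   names): if LHS's back trace is bar <- foo <- main and RHS's is
   baz <- foo <- main, the candidate pair (foo, foo) matches in two
   segments (foo and main).  That is the longest combined back trace, so
   those two segments are the ones that get connected.  */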
970 /* Try to bridge gaps due to overflow or decode errors by connecting the
971 function segments that are separated by the gap. */
974 btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
976 struct btrace_thread_info *btinfo = &tp->btrace;
977 std::vector<unsigned int> remaining;
980 DEBUG ("bridge gaps");
982 /* We require a minimum number of matches for bridging a gap. The number of
983 required matches will be lowered with each iteration.
985 The more matches the higher our confidence that the bridging is correct.
986 For big gaps or small traces, however, it may not be feasible to require a
987 high number of matches. */
988 for (min_matches = 5; min_matches > 0; --min_matches)
990 /* Let's try to bridge as many gaps as we can. In some cases, we need to
991 skip a gap and revisit it again after we closed later gaps. */
992 while (!gaps.empty ())
994 for (const unsigned int number : gaps)
996 struct btrace_function *gap, *lhs, *rhs;
999 gap = ftrace_find_call_by_number (btinfo, number);
1001 /* We may have a sequence of gaps if we run from one error into
1002 the next as we try to re-sync onto the trace stream. Ignore
1003 all but the leftmost gap in such a sequence.
1005 Also ignore gaps at the beginning of the trace. */
1006 lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
1007 if (lhs == NULL || lhs->errcode != 0)
1010 /* Skip gaps to the right. */
1011 rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
1012 while (rhs != NULL && rhs->errcode != 0)
1013 rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
1015 /* Ignore gaps at the end of the trace. */
1019 bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
1021 /* Keep track of gaps we were not able to bridge and try again.
1022 If we just pushed them to the end of GAPS we would risk an
1023 infinite loop in case we simply cannot bridge a gap. */
1025 remaining.push_back (number);
1028 /* Let's see if we made any progress. */
1029 if (remaining.size () == gaps.size ())
1033 gaps.swap (remaining);
1036 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
1043 /* We may omit this in some cases. Not sure it is worth the extra
1044 complication, though. */
1045 ftrace_compute_global_level_offset (btinfo);
1048 /* Compute the function branch trace from BTS trace. */
1051 btrace_compute_ftrace_bts (struct thread_info *tp,
1052 const struct btrace_data_bts *btrace,
1053 std::vector<unsigned int> &gaps)
1055 struct btrace_thread_info *btinfo;
1056 struct gdbarch *gdbarch;
1060 gdbarch = target_gdbarch ();
1061 btinfo = &tp->btrace;
1062 blk = btrace->blocks->size ();
1064 if (btinfo->functions.empty ())
1067 level = -btinfo->level;
1075 const btrace_block &block = btrace->blocks->at (blk);
1080 struct btrace_function *bfun;
1081 struct btrace_insn insn;
1084 /* We should hit the end of the block. Warn if we went too far. */
1087 /* Indicate the gap in the trace. */
1088 bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
1090 warning (_("Recorded trace may be corrupted at instruction "
1091 "%u (pc = %s)."), bfun->insn_offset - 1,
1092 core_addr_to_string_nz (pc));
1097 bfun = ftrace_update_function (btinfo, pc);
1099 /* Maintain the function level offset.
1100 For all but the last block, we do it here. */
1102 level = std::min (level, bfun->level);
1107 size = gdb_insn_length (gdbarch, pc);
1109 catch (const gdb_exception_error &error)
1115 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1118 ftrace_update_insns (bfun, insn);
1120 /* We're done once we pushed the instruction at the end. */
1121 if (block.end == pc)
1124 /* We can't continue if we fail to compute the size. */
1127 /* Indicate the gap in the trace. We just added INSN so we're
1128 not at the beginning. */
1129 bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
1131 warning (_("Recorded trace may be incomplete at instruction %u "
1132 "(pc = %s)."), bfun->insn_offset - 1,
1133 core_addr_to_string_nz (pc));
1140 /* Maintain the function level offset.
1141 For the last block, we do it here to not consider the last
1142 instruction.
1143 Since the last instruction corresponds to the current instruction
1144 and is not really part of the execution history, it shouldn't
1145 affect the level. */
1147 level = std::min (level, bfun->level);
1151 /* LEVEL is the minimal function level of all btrace function segments.
1152 Define the global level offset to -LEVEL so all function levels are
1153 normalized to start at zero. */
1154 btinfo->level = -level;
1157 #if defined (HAVE_LIBIPT)
1159 static enum btrace_insn_class
1160 pt_reclassify_insn (enum pt_insn_class iclass)
1165 return BTRACE_INSN_CALL;
1168 return BTRACE_INSN_RETURN;
1171 return BTRACE_INSN_JUMP;
1174 return BTRACE_INSN_OTHER;
1178 /* Return the btrace instruction flags for INSN. */
1180 static btrace_insn_flags
1181 pt_btrace_insn_flags (const struct pt_insn &insn)
1183 btrace_insn_flags flags = 0;
1185 if (insn.speculative)
1186 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1191 /* Return the btrace instruction for INSN. */
1194 pt_btrace_insn (const struct pt_insn &insn)
1196 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1197 pt_reclassify_insn (insn.iclass),
1198 pt_btrace_insn_flags (insn)};
1201 /* Handle instruction decode events (libipt-v2). */
1204 handle_pt_insn_events (struct btrace_thread_info *btinfo,
1205 struct pt_insn_decoder *decoder,
1206 std::vector<unsigned int> &gaps, int status)
1208 #if defined (HAVE_PT_INSN_EVENT)
1209 while (status & pts_event_pending)
1211 struct btrace_function *bfun;
1212 struct pt_event event;
1215 status = pt_insn_event (decoder, &event, sizeof (event));
1225 if (event.status_update != 0)
1228 if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
1230 bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
1232 pt_insn_get_offset (decoder, &offset);
1234 warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
1235 PRIx64 ")."), bfun->insn_offset - 1, offset);
1241 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
1243 pt_insn_get_offset (decoder, &offset);
1245 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
1246 bfun->insn_offset - 1, offset);
1251 #endif /* defined (HAVE_PT_INSN_EVENT) */
1256 /* Handle events indicated by flags in INSN (libipt-v1). */
1259 handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
1260 struct pt_insn_decoder *decoder,
1261 const struct pt_insn &insn,
1262 std::vector<unsigned int> &gaps)
1264 #if defined (HAVE_STRUCT_PT_INSN_ENABLED)
1265 /* Tracing is disabled and re-enabled each time we enter the kernel. Most
1266 times, we continue from the same instruction we stopped before. This is
1267 indicated via the RESUMED instruction flag. The ENABLED instruction flag
1268 means that we continued from some other instruction. Indicate this as a
1269 trace gap except when tracing just started. */
1270 if (insn.enabled && !btinfo->functions.empty ())
1272 struct btrace_function *bfun;
1275 bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
1277 pt_insn_get_offset (decoder, &offset);
1279 warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
1280 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
1283 #endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */
1285 #if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
1286 /* Indicate trace overflows. */
1289 struct btrace_function *bfun;
1292 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
1294 pt_insn_get_offset (decoder, &offset);
1296 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
1297 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
1299 #endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
1302 /* Add function branch trace to BTINFO using DECODER. */
1305 ftrace_add_pt (struct btrace_thread_info *btinfo,
1306 struct pt_insn_decoder *decoder,
1308 std::vector<unsigned int> &gaps)
1310 struct btrace_function *bfun;
1316 struct pt_insn insn;
1318 status = pt_insn_sync_forward (decoder);
1321 if (status != -pte_eos)
1322 warning (_("Failed to synchronize onto the Intel Processor "
1323 "Trace stream: %s."), pt_errstr (pt_errcode (status)));
1329 /* Handle events from the previous iteration or synchronization. */
1330 status = handle_pt_insn_events (btinfo, decoder, gaps, status);
1334 status = pt_insn_next (decoder, &insn, sizeof (insn));
1338 /* Handle events indicated by flags in INSN. */
1339 handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);
1341 bfun = ftrace_update_function (btinfo, insn.ip);
1343 /* Maintain the function level offset. */
1344 *plevel = std::min (*plevel, bfun->level);
1346 ftrace_update_insns (bfun, pt_btrace_insn (insn));
1349 if (status == -pte_eos)
1352 /* Indicate the gap in the trace. */
1353 bfun = ftrace_new_gap (btinfo, status, gaps);
1355 pt_insn_get_offset (decoder, &offset);
1357 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1358 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
1359 offset, insn.ip, pt_errstr (pt_errcode (status)));
1363 /* A callback function to allow the trace decoder to read the inferior's
1364 memory. */
1367 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1368 const struct pt_asid *asid, uint64_t pc,
1371 int result, errcode;
1373 result = (int) size;
1376 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1378 result = -pte_nomap;
1380 catch (const gdb_exception_error &error)
1382 result = -pte_nomap;
1388 /* Translate the vendor from one enum to another. */
1390 static enum pt_cpu_vendor
1391 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1403 /* Finalize the function branch trace after decode. */
1405 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1406 struct thread_info *tp, int level)
1408 pt_insn_free_decoder (decoder);
1410 /* LEVEL is the minimal function level of all btrace function segments.
1411 Define the global level offset to -LEVEL so all function levels are
1412 normalized to start at zero. */
1413 tp->btrace.level = -level;
1415 /* Add a single last instruction entry for the current PC.
1416 This allows us to compute the backtrace at the current PC using both
1417 standard unwind and btrace unwind.
1418 This extra entry is ignored by all record commands. */
1422 /* Compute the function branch trace from Intel Processor Trace
1423 format. */
1426 btrace_compute_ftrace_pt (struct thread_info *tp,
1427 const struct btrace_data_pt *btrace,
1428 std::vector<unsigned int> &gaps)
1430 struct btrace_thread_info *btinfo;
1431 struct pt_insn_decoder *decoder;
1432 struct pt_config config;
1435 if (btrace->size == 0)
1438 btinfo = &tp->btrace;
1439 if (btinfo->functions.empty ())
1442 level = -btinfo->level;
1444 pt_config_init (&config);
1445 config.begin = btrace->data;
1446 config.end = btrace->data + btrace->size;
1448 /* We treat an unknown vendor as 'no errata'. */
1449 if (btrace->config.cpu.vendor != CV_UNKNOWN)
1452 = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1453 config.cpu.family = btrace->config.cpu.family;
1454 config.cpu.model = btrace->config.cpu.model;
1455 config.cpu.stepping = btrace->config.cpu.stepping;
1457 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1459 error (_("Failed to configure the Intel Processor Trace "
1460 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
1463 decoder = pt_insn_alloc_decoder (&config);
1464 if (decoder == NULL)
1465 error (_("Failed to allocate the Intel Processor Trace decoder."));
1469 struct pt_image *image;
1471 image = pt_insn_get_image (decoder);
1473 error (_("Failed to configure the Intel Processor Trace decoder."));
1475 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
1477 error (_("Failed to configure the Intel Processor Trace decoder: "
1478 "%s."), pt_errstr (pt_errcode (errcode)));
1480 ftrace_add_pt (btinfo, decoder, &level, gaps);
1482 catch (const gdb_exception &error)
1484 /* Indicate a gap in the trace if we quit trace processing. */
1485 if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
1486 ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
1488 btrace_finalize_ftrace_pt (decoder, tp, level);
1493 btrace_finalize_ftrace_pt (decoder, tp, level);
1496 #else /* defined (HAVE_LIBIPT) */
1499 btrace_compute_ftrace_pt (struct thread_info *tp,
1500 const struct btrace_data_pt *btrace,
1501 std::vector<unsigned int> &gaps)
1503 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1506 #endif /* defined (HAVE_LIBIPT) */
1508 /* Compute the function branch trace from a block branch trace BTRACE for
1509 a thread given by BTINFO. If CPU is not NULL, overwrite the cpu in the
1510 branch trace configuration. This is currently only used for the PT
1511 format. */
1514 btrace_compute_ftrace_1 (struct thread_info *tp,
1515 struct btrace_data *btrace,
1516 const struct btrace_cpu *cpu,
1517 std::vector<unsigned int> &gaps)
1519 DEBUG ("compute ftrace");
1521 switch (btrace->format)
1523 case BTRACE_FORMAT_NONE:
1526 case BTRACE_FORMAT_BTS:
1527 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1530 case BTRACE_FORMAT_PT:
1531 /* Overwrite the cpu we use for enabling errata workarounds. */
1533 btrace->variant.pt.config.cpu = *cpu;
1535 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1539 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1543 btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
1547 tp->btrace.ngaps += gaps.size ();
1548 btrace_bridge_gaps (tp, gaps);
1553 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
1554 const struct btrace_cpu *cpu)
1556 std::vector<unsigned int> gaps;
1560 btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
1562 catch (const gdb_exception &error)
1564 btrace_finalize_ftrace (tp, gaps);
1569 btrace_finalize_ftrace (tp, gaps);
1572 /* Add an entry for the current PC. */
1575 btrace_add_pc (struct thread_info *tp)
1577 struct btrace_data btrace;
1578 struct regcache *regcache;
1581 regcache = get_thread_regcache (tp);
1582 pc = regcache_read_pc (regcache);
1584 btrace.format = BTRACE_FORMAT_BTS;
1585 btrace.variant.bts.blocks = new std::vector<btrace_block>;
1587 btrace.variant.bts.blocks->emplace_back (pc, pc);
1589 btrace_compute_ftrace (tp, &btrace, NULL);
1595 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1597 if (tp->btrace.target != NULL)
1598 error (_("Recording already enabled on thread %s (%s)."),
1599 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
1601 #if !defined (HAVE_LIBIPT)
1602 if (conf->format == BTRACE_FORMAT_PT)
1603 error (_("Intel Processor Trace support was disabled at compile time."));
1604 #endif /* !defined (HAVE_LIBIPT) */
1606 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1607 target_pid_to_str (tp->ptid).c_str ());
1609 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1611 if (tp->btrace.target == NULL)
1612 error (_("Failed to enable recording on thread %s (%s)."),
1613 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
1615 /* We need to undo the enable in case of errors. */
1618 /* Add an entry for the current PC so we start tracing from where we
1621 If we can't access TP's registers, TP is most likely running. In this
1622 case, we can't really say where tracing was enabled so it should be
1623 safe to simply skip this step.
1625 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1626 start at the PC at which tracing was enabled. */
1627 if (conf->format != BTRACE_FORMAT_PT
1628 && can_access_registers_thread (tp))
1631 catch (const gdb_exception &exception)
1633 btrace_disable (tp);
1641 const struct btrace_config *
1642 btrace_conf (const struct btrace_thread_info *btinfo)
1644 if (btinfo->target == NULL)
1647 return target_btrace_conf (btinfo->target);
1653 btrace_disable (struct thread_info *tp)
1655 struct btrace_thread_info *btp = &tp->btrace;
1657 if (btp->target == NULL)
1658 error (_("Recording not enabled on thread %s (%s)."),
1659 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
1661 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1662 target_pid_to_str (tp->ptid).c_str ());
1664 target_disable_btrace (btp->target);
1673 btrace_teardown (struct thread_info *tp)
1675 struct btrace_thread_info *btp = &tp->btrace;
1677 if (btp->target == NULL)
1680 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1681 target_pid_to_str (tp->ptid).c_str ());
1683 target_teardown_btrace (btp->target);
1689 /* Stitch branch trace in BTS format. */
1692 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1694 struct btrace_thread_info *btinfo;
1695 struct btrace_function *last_bfun;
1696 btrace_block *first_new_block;
1698 btinfo = &tp->btrace;
1699 gdb_assert (!btinfo->functions.empty ());
1700 gdb_assert (!btrace->blocks->empty ());
1702 last_bfun = &btinfo->functions.back ();
1704 /* If the existing trace ends with a gap, we just glue the traces
1705 together. We need to drop the last (i.e. chronologically first) block
1706 of the new trace, though, since we can't fill in the start address. */
1707 if (last_bfun->insn.empty ())
1709 btrace->blocks->pop_back ();
1713 /* Beware that block trace starts with the most recent block, so the
1714 chronologically first block in the new trace is the last block in
1715 the new trace's block vector. */
1716 first_new_block = &btrace->blocks->back ();
1717 const btrace_insn &last_insn = last_bfun->insn.back ();
1719 /* If the current PC at the end of the block is the same as in our current
1720 trace, there are two explanations:
1721 1. we executed the instruction and some branch brought us back.
1722 2. we have not made any progress.
1723 In the first case, the delta trace vector should contain at least two
1724 entries.
1725 In the second case, the delta trace vector should contain exactly one
1726 entry for the partial block containing the current PC. Remove it. */
1727 if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
1729 btrace->blocks->pop_back ();
1733 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
1734 core_addr_to_string_nz (first_new_block->end));
1736 /* Do a simple sanity check to make sure we don't accidentally end up
1737 with a bad block. This should not occur in practice. */
1738 if (first_new_block->end < last_insn.pc)
1740 warning (_("Error while trying to read delta trace. Falling back to "
1745 /* We adjust the last block to start at the end of our current trace. */
1746 gdb_assert (first_new_block->begin == 0);
1747 first_new_block->begin = last_insn.pc;
1749 /* We simply pop the last insn so we can insert it again as part of
1750 the normal branch trace computation.
1751 Since instruction iterators are based on indices in the instructions
1752 vector, we don't leave any pointers dangling. */
1753 DEBUG ("pruning insn at %s for stitching",
1754 ftrace_print_insn_addr (&last_insn));
1756 last_bfun->insn.pop_back ();
1758 /* The instructions vector may become empty temporarily if this has
1759 been the only instruction in this function segment.
1760 This violates the invariant but will be remedied shortly by
1761 btrace_compute_ftrace when we add the new trace. */
1763 /* The only case where this would hurt is if the entire trace consisted
1764 of just that one instruction. If we remove it, we might turn the now
1765 empty btrace function segment into a gap. But we don't want gaps at
1766 the beginning. To avoid this, we remove the entire old trace. */
1767 if (last_bfun->number == 1 && last_bfun->insn.empty ())
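/* For illustration (not from the original file, addresses hypothetical):
   suppose the existing trace ends with an instruction at pc 0x400520 and a
   delta read returns the blocks [0x400530; 0x400540] and [0; 0x400528],
   most recent first.  The chronologically first block is the last one in
   the vector; its unknown begin address is set to 0x400520, the
   instruction at 0x400520 is popped from the old trace, and the adjusted
   delta is then decoded by the normal branch trace computation.  */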
1773 /* Adjust the block trace in order to stitch old and new trace together.
1774 BTRACE is the new delta trace between the last and the current stop.
1775 TP is the traced thread.
1776 May modify BTRACE as well as the existing trace in TP.
1777 Return 0 on success, -1 otherwise. */
1780 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1782 /* If we don't have trace, there's nothing to do. */
1783 if (btrace->empty ())
1786 switch (btrace->format)
1788 case BTRACE_FORMAT_NONE:
1791 case BTRACE_FORMAT_BTS:
1792 return btrace_stitch_bts (&btrace->variant.bts, tp);
1794 case BTRACE_FORMAT_PT:
1795 /* Delta reads are not supported. */
1799 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1802 /* Clear the branch trace histories in BTINFO. */
1805 btrace_clear_history (struct btrace_thread_info *btinfo)
1807 xfree (btinfo->insn_history);
1808 xfree (btinfo->call_history);
1809 xfree (btinfo->replay);
1811 btinfo->insn_history = NULL;
1812 btinfo->call_history = NULL;
1813 btinfo->replay = NULL;
1816 /* Clear the branch trace maintenance histories in BTINFO. */
1819 btrace_maint_clear (struct btrace_thread_info *btinfo)
1821 switch (btinfo->data.format)
1826 case BTRACE_FORMAT_BTS:
1827 btinfo->maint.variant.bts.packet_history.begin = 0;
1828 btinfo->maint.variant.bts.packet_history.end = 0;
1831 #if defined (HAVE_LIBIPT)
1832 case BTRACE_FORMAT_PT:
1833 delete btinfo->maint.variant.pt.packets;
1835 btinfo->maint.variant.pt.packets = NULL;
1836 btinfo->maint.variant.pt.packet_history.begin = 0;
1837 btinfo->maint.variant.pt.packet_history.end = 0;
1839 #endif /* defined (HAVE_LIBIPT) */
1846 btrace_decode_error (enum btrace_format format, int errcode)
1850 case BTRACE_FORMAT_BTS:
1853 case BDE_BTS_OVERFLOW:
1854 return _("instruction overflow");
1856 case BDE_BTS_INSN_SIZE:
1857 return _("unknown instruction");
1864 #if defined (HAVE_LIBIPT)
1865 case BTRACE_FORMAT_PT:
1868 case BDE_PT_USER_QUIT:
1869 return _("trace decode cancelled");
1871 case BDE_PT_DISABLED:
1872 return _("disabled");
1874 case BDE_PT_OVERFLOW:
1875 return _("overflow");
1879 return pt_errstr (pt_errcode (errcode));
1883 #endif /* defined (HAVE_LIBIPT) */
1889 return _("unknown");
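/* Illustrative sketch (not part of the original file): a typical use of
   btrace_decode_error is to render the ERRCODE stored in a gap segment as
   a human-readable string.  This helper is hypothetical and exists only
   for this example.  */

static void ATTRIBUTE_UNUSED
print_gap_sketch (const struct btrace_thread_info *btinfo,
		  const struct btrace_function *gap)
{
  if (gap->errcode != 0)
    DEBUG ("gap at insn %u: %s", gap->insn_offset,
	   btrace_decode_error (btinfo->data.format, gap->errcode));
}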
1895 btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
1897 struct btrace_thread_info *btinfo;
1898 struct btrace_target_info *tinfo;
1899 struct btrace_data btrace;
1902 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1903 target_pid_to_str (tp->ptid).c_str ());
1905 btinfo = &tp->btrace;
1906 tinfo = btinfo->target;
1910 /* There's no way we could get new trace while replaying.
1911 On the other hand, delta trace would return a partial record with the
1912 current PC, which is the replay PC, not the last PC, as expected. */
1913 if (btinfo->replay != NULL)
1916 /* With CLI usage, TP is always the current thread when we get here.
1917 However, since we can also store a gdb.Record object in Python
1918 referring to a different thread than the current one, we need to
1919 temporarily set the current thread. */
1920 scoped_restore_current_thread restore_thread;
1921 switch_to_thread (tp);
1923 /* We should not be called on running or exited threads. */
1924 gdb_assert (can_access_registers_thread (tp));
1926 /* Let's first try to extend the trace we already have. */
1927 if (!btinfo->functions.empty ())
1929 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1932 /* Success. Let's try to stitch the traces together. */
1933 errcode = btrace_stitch_trace (&btrace, tp);
1937 /* We failed to read delta trace. Let's try to read new trace. */
1938 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1940 /* If we got any new trace, discard what we have. */
1941 if (errcode == 0 && !btrace.empty ())
1945 /* If we were not able to read the trace, we start over. */
1949 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1953 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1955 /* If we were not able to read the branch trace, signal an error. */
1957 error (_("Failed to read branch trace."));
1959 /* Compute the trace, provided we have any. */
1960 if (!btrace.empty ())
1962 /* Store the raw trace data. The stored data will be cleared in
1963 btrace_clear, so we always append the new trace. */
1964 btrace_data_append (&btinfo->data, &btrace);
1965 btrace_maint_clear (btinfo);
1967 btrace_clear_history (btinfo);
1968 btrace_compute_ftrace (tp, &btrace, cpu);
1975 btrace_clear (struct thread_info *tp)
1977 struct btrace_thread_info *btinfo;
1979 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1980 target_pid_to_str (tp->ptid).c_str ());
1982 /* Make sure btrace frames that may hold a pointer into the branch
1983 trace data are destroyed. */
1984 reinit_frame_cache ();
1986 btinfo = &tp->btrace;
1988 btinfo->functions.clear ();
1991 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1992 btrace_maint_clear (btinfo);
1993 btinfo->data.clear ();
1994 btrace_clear_history (btinfo);
2000 btrace_free_objfile (struct objfile *objfile)
2002 DEBUG ("free objfile");
2004 for (thread_info *tp : all_non_exited_threads ())
2008 #if defined (HAVE_LIBEXPAT)
2010 /* Check the btrace document version. */
2013 check_xml_btrace_version (struct gdb_xml_parser *parser,
2014 const struct gdb_xml_element *element,
2016 std::vector<gdb_xml_value> &attributes)
2019 = (const char *) xml_find_attribute (attributes, "version")->value.get ();
2021 if (strcmp (version, "1.0") != 0)
2022 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
2025 /* Parse a btrace "block" xml record. */
2028 parse_xml_btrace_block (struct gdb_xml_parser *parser,
2029 const struct gdb_xml_element *element,
2031 std::vector<gdb_xml_value> &attributes)
2033 struct btrace_data *btrace;
2034 ULONGEST *begin, *end;
2036 btrace = (struct btrace_data *) user_data;
2038 switch (btrace->format)
2040 case BTRACE_FORMAT_BTS:
2043 case BTRACE_FORMAT_NONE:
2044 btrace->format = BTRACE_FORMAT_BTS;
2045 btrace->variant.bts.blocks = new std::vector<btrace_block>;
2049 gdb_xml_error (parser, _("Btrace format error."));
2052 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value.get ();
2053 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value.get ();
2054 btrace->variant.bts.blocks->emplace_back (*begin, *end);
2057 /* Parse a "raw" xml record. */
2060 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
2061 gdb_byte **pdata, size_t *psize)
2066 len = strlen (body_text);
2068 gdb_xml_error (parser, _("Bad raw data size."));
2072 gdb::unique_xmalloc_ptr<gdb_byte> data ((gdb_byte *) xmalloc (size));
2075 /* We use hex encoding - see gdbsupport/rsp-low.h. */
2083 if (hi == 0 || lo == 0)
2084 gdb_xml_error (parser, _("Bad hex encoding."));
2086 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2090 *pdata = data.release ();
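/* For illustration (not from the original file): the decoding above turns
   two hex digits into one byte, so a body text of "0fb2" yields the bytes
   { 0x0f, 0xb2 } and a size of 2, while any odd-length body is rejected
   with "Bad raw data size".  */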
2094 /* Parse a btrace pt-config "cpu" xml record. */
2097 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2098 const struct gdb_xml_element *element,
2100 std::vector<gdb_xml_value> &attributes)
2102 struct btrace_data *btrace;
2104 ULONGEST *family, *model, *stepping;
2107 (const char *) xml_find_attribute (attributes, "vendor")->value.get ();
2109 = (ULONGEST *) xml_find_attribute (attributes, "family")->value.get ();
2111 = (ULONGEST *) xml_find_attribute (attributes, "model")->value.get ();
2113 = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value.get ();
2115 btrace = (struct btrace_data *) user_data;
2117 if (strcmp (vendor, "GenuineIntel") == 0)
2118 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2120 btrace->variant.pt.config.cpu.family = *family;
2121 btrace->variant.pt.config.cpu.model = *model;
2122 btrace->variant.pt.config.cpu.stepping = *stepping;
2125 /* Parse a btrace pt "raw" xml record. */
2128 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2129 const struct gdb_xml_element *element,
2130 void *user_data, const char *body_text)
2132 struct btrace_data *btrace;
2134 btrace = (struct btrace_data *) user_data;
2135 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2136 &btrace->variant.pt.size);
2139 /* Parse a btrace "pt" xml record. */
2142 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2143 const struct gdb_xml_element *element,
2145 std::vector<gdb_xml_value> &attributes)
2147 struct btrace_data *btrace;
2149 btrace = (struct btrace_data *) user_data;
2150 btrace->format = BTRACE_FORMAT_PT;
2151 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2152 btrace->variant.pt.data = NULL;
2153 btrace->variant.pt.size = 0;
2156 static const struct gdb_xml_attribute block_attributes[] = {
2157 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2158 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2159 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2162 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2163 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2164 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2165 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2166 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2167 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2170 static const struct gdb_xml_element btrace_pt_config_children[] = {
2171 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2172 parse_xml_btrace_pt_config_cpu, NULL },
2173 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2176 static const struct gdb_xml_element btrace_pt_children[] = {
2177 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2179 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2180 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2183 static const struct gdb_xml_attribute btrace_attributes[] = {
2184 { "version", GDB_XML_AF_NONE, NULL, NULL },
2185 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2188 static const struct gdb_xml_element btrace_children[] = {
2189 { "block", block_attributes, NULL,
2190 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2191 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2193 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2196 static const struct gdb_xml_element btrace_elements[] = {
2197 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2198 check_xml_btrace_version, NULL },
2199 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2202 #endif /* defined (HAVE_LIBEXPAT) */
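/* For illustration (not from the original file, addresses hypothetical):
   a minimal BTS document accepted by the element tables above looks like

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
     </btrace>

   while a PT document instead carries a "pt" element wrapping an optional
   "pt-config" and a hex-encoded "raw" element.  */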
2207 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2209 #if defined (HAVE_LIBEXPAT)
2213 result.format = BTRACE_FORMAT_NONE;
2215 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2218 error (_("Error parsing branch trace."));
2220 /* Keep parse results. */
2221 *btrace = std::move (result);
2223 #else /* !defined (HAVE_LIBEXPAT) */
2225 error (_("Cannot process branch trace. XML support was disabled at "
2228 #endif /* !defined (HAVE_LIBEXPAT) */
2231 #if defined (HAVE_LIBEXPAT)
2233 /* Parse a btrace-conf "bts" xml record. */
2236 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2237 const struct gdb_xml_element *element,
2239 std::vector<gdb_xml_value> &attributes)
2241 struct btrace_config *conf;
2242 struct gdb_xml_value *size;
2244 conf = (struct btrace_config *) user_data;
2245 conf->format = BTRACE_FORMAT_BTS;
2248 size = xml_find_attribute (attributes, "size");
2250 conf->bts.size = (unsigned int) *(ULONGEST *) size->value.get ();
/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data,
			  std::vector<gdb_xml_value> &attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value.get ();
}
static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
#if defined (HAVE_LIBEXPAT)

  int errcode;
  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process the branch trace configuration.  XML support "
	   "was disabled at compile time."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
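/* For illustration, the configuration document parsed above is tiny; per
   btrace-conf.dtd it looks roughly like:

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>  */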
/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->insn_index;
  bfun = &it->btinfo->functions[it->call_index];

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = bfun->insn.size ();
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return &bfun->insn[index];
}
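/* Callers are expected to check for a NULL result; a sketch, where
   HANDLE_GAP stands in for whatever gap handling the caller needs:

     const struct btrace_insn *insn = btrace_insn_get (&it);
     if (insn == NULL)
       handle_gap (btrace_insn_get_error (&it));  */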
/* See btrace.h.  */

int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].errcode;
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}
/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->call_index = 0;
  it->insn_index = 0;
}
/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  if (btinfo->functions.empty ())
    error (_("No trace."));

  bfun = &btinfo->functions.back ();
  length = bfun->insn.size ();

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = length;
}
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = bfun->insn.size ();

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    break;

	  bfun = next;
	  index = 0;

	  stride -= 1;
	  steps += 1;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
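/* Taken together, the iterator functions support the usual traversal
   idiom; a sketch over a given BTINFO:

     struct btrace_insn_iterator it, end;
     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
	 const struct btrace_insn *insn = btrace_insn_get (&it);
	 ... use INSN, minding that NULL indicates a gap ...
	 if (btrace_insn_next (&it, 1) == 0)
	   break;
       }  */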
/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = bfun->insn.size ();

	  /* An empty function segment represents a gap in the trace.  We count
	     it as one instruction.  */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);

  if (lhs->call_index != rhs->call_index)
    return lhs->call_index - rhs->call_index;

  return lhs->insn_index - rhs->insn_index;
}
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  lower = 0;
  bfun = &btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  upper = btinfo->functions.size () - 1;
  bfun = &btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = &btinfo->functions[average];

      if (number < bfun->insn_offset)
	{
	  upper = average - 1;
	  continue;
	}

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
	{
	  lower = average + 1;
	  continue;
	}

      break;
    }

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;

  return 1;
}
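/* The loop above is a plain binary search over the function segments,
   which are ordered by INSN_OFFSET; since the numbering has no holes,
   every round shrinks [LOWER, UPPER] and the search terminates after
   O(log N) steps.  */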
/* Returns true if the recording ends with a function segment that
   contains only a single (i.e. the current) instruction.  */

static bool
btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
{
  const btrace_function *bfun;

  if (btinfo->functions.empty ())
    return false;

  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return false;

  return ftrace_call_num_insn (bfun) == 1;
}
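/* That single-instruction tail is the instruction the thread is currently
   stopped at; it has not been executed yet, so the call iterator functions
   below hide it from the call history.  */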
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  if (it->index >= it->btinfo->functions.size ())
    return NULL;

  return &it->btinfo->functions[it->index];
}
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const unsigned int length = it->btinfo->functions.size ();

  /* If the last function segment contains only a single instruction (i.e. the
     current instruction), skip it.  */
  if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
    return length;

  return it->index + 1;
}
/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = 0;
}
/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = btinfo->functions.size ();
}
/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();

  if (it->index + stride < length - 1)
    /* Default case: Simply advance the iterator.  */
    it->index += stride;
  else if (it->index + stride == length - 1)
    {
      /* We land exactly at the last function segment.  If it contains only one
	 instruction (i.e. the current instruction) it is not actually part of
	 the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length;
      else
	it->index = length - 1;
    }
  else
    {
      /* We land past the last function segment and have to adjust the stride.
	 If the last function segment contains only one instruction (i.e. the
	 current instruction) it is not actually part of the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	stride = length - it->index - 1;
      else
	stride = length - it->index;

      it->index = length;
    }

  return stride;
}
/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
  int steps = 0;

  gdb_assert (it->index <= length);

  if (stride == 0 || it->index == 0)
    return 0;

  /* If we are at the end, the first step is a special case.  If the last
     function segment contains only one instruction (i.e. the current
     instruction) it is not actually part of the trace.  To be able to step
     over this instruction, we need at least one more function segment.  */
  if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length - 2;
      else
	it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);
  return (int) (lhs->index - rhs->index);
}
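/* The call iterators mirror the instruction iterators; a traversal sketch
   over a given BTINFO:

     struct btrace_call_iterator it, end;
     btrace_call_begin (&it, btinfo);
     btrace_call_end (&end, btinfo);

     while (btrace_call_cmp (&it, &end) < 0)
       {
	 const struct btrace_function *bfun = btrace_call_get (&it);
	 ... use BFUN ...
	 if (btrace_call_next (&it, 1) == 0)
	   break;
       }  */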
/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const unsigned int length = btinfo->functions.size ();

  if ((number == 0) || (number > length))
    return 0;

  it->btinfo = btinfo;
  it->index = number - 1;
  return 1;
}
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
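/* Both setters above cache an iterator range so that consecutive
   "record instruction-history" and "record function-call-history"
   commands can pick up where the previous invocation stopped.  */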
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}
/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->functions.empty ())
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
			 packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
	{
	default:
	  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  printf_unfiltered (("mode.exec%s%s"),
			     packet->payload.mode.bits.exec.csl
			     ? (" cs.l") : (""),
			     packet->payload.mode.bits.exec.csd
			     ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  printf_unfiltered (("mode.tsx%s%s"),
			     packet->payload.mode.bits.tsx.intx
			     ? (" intx") : (""),
			     packet->payload.mode.bits.tsx.abrt
			     ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
			 packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  if (maint->variant.pt.packets == NULL)
    maint->variant.pt.packets = new std::vector<btrace_pt_packet>;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      maint->variant.pt.packets->push_back (packet);
	    }
	}

      if (errcode == -pte_eos)
	break;

      packet.errcode = pt_errcode (errcode);
      maint->variant.pt.packets->push_back (packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
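/* The outer loop above re-synchronizes onto the next PSB packet after an
   error; the inner loop drains packets until the decoder fails or the
   trace ends (-pte_eos).  Failed packets are appended with their error
   code so the packet history can show where decoding went wrong.  */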
/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  struct pt_packet_decoder *decoder;
  const struct btrace_cpu *cpu;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  cpu = record_btrace_get_cpu ();
  if (cpu == nullptr)
    cpu = &pt->config.cpu;

  /* We treat an unknown vendor as 'no errata'.  */
  if (cpu->vendor != CV_UNKNOWN)
    {
      config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
      config.cpu.family = cpu->family;
      config.cpu.model = cpu->model;
      config.cpu.stepping = cpu->stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  catch (const gdb_exception &except)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
	throw;
    }

  pt_pkt_free_decoder (decoder);
}
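/* The decoder is configured with the CPU the trace was recorded on, or
   with the CPU override from "set record btrace cpu" if there is one, so
   that libipt applies the matching errata workarounds.  */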
#endif /* defined (HAVE_LIBIPT) */
/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
			     unsigned int *begin, unsigned int *end,
			     unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = btinfo->data.variant.bts.blocks->size ();
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (btinfo->maint.variant.pt.packets == nullptr)
	btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;

      if (btinfo->maint.variant.pt.packets->empty ())
	btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = btinfo->maint.variant.pt.packets->size ();
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
			    unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
	const std::vector<btrace_block> &blocks
	  = *btinfo->data.variant.bts.blocks;
	unsigned int blk;

	for (blk = begin; blk < end; ++blk)
	  {
	    const btrace_block &block = blocks.at (blk);

	    printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
			       core_addr_to_string_nz (block.begin),
			       core_addr_to_string_nz (block.end));
	  }

	btinfo->maint.variant.bts.packet_history.begin = begin;
	btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	const std::vector<btrace_pt_packet> &packets
	  = *btinfo->maint.variant.pt.packets;
	unsigned int pkt;

	for (pkt = begin; pkt < end; ++pkt)
	  {
	    const struct btrace_pt_packet &packet = packets.at (pkt);

	    printf_unfiltered ("%u\t", pkt);
	    printf_unfiltered ("0x%" PRIx64 "\t", packet.offset);

	    if (packet.errcode == pte_ok)
	      pt_print_packet (&packet.packet);
	    else
	      printf_unfiltered ("[error: %s]", pt_errstr (packet.errcode));

	    printf_unfiltered ("\n");
	  }

	btinfo->maint.variant.pt.packet_history.begin = begin;
	btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Read a number from an argument string.  */

static unsigned int
get_uint (const char **arg)
{
  const char *begin, *pos;
  char *end;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
/* Read a context size from an argument string.  */

static int
get_context_size (const char **arg)
{
  const char *pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  char *end;
  long result = strtol (pos, &end, 10);
  *arg = end;
  return result;
}
/* Complain about junk at the end of an argument string.  */

static void
no_chunk (const char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
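/* Together, get_uint, get_context_size and no_chunk parse the argument
   forms accepted by "maint btrace packet-history":

     <nothing> or +     the next ten packets
     -                  the previous ten packets
     N                  ten packets starting at N
     N,M                packets N up to and including M
     N,+C or N,-C       C packets after resp. before N  */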
/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (const char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  unsigned int size, begin, end, from, to;

  thread_info *tp = find_thread_ptid (current_inferior (), inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
	size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
	size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (from >= end)
	error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
	{
	  arg = skip_spaces (++arg);

	  if (*arg == '+')
	    {
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      if (end - from < size)
		size = end - from;
	      to = from + size;
	    }
	  else if (*arg == '-')
	    {
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      /* Include the packet given as first argument.  */
	      from += 1;
	      to = from;

	      if (to - begin < size)
		size = to - begin;
	      from = to - size;
	    }
	  else
	    {
	      to = get_uint (&arg);

	      /* Include the packet at the second argument and silently
		 truncate the range.  */
	      if (to < end)
		to += 1;
	      else
		to = end;

	      no_chunk (arg);
	    }
	}
      else
	{
	  no_chunk (arg);

	  if (end - from < size)
	    size = end - from;
	  to = from + size;
	}

      dont_repeat ();
    }

  btrace_maint_print_packets (btinfo, from, to);
}
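/* The chosen range is remembered via btrace_maint_print_packets, so a
   subsequent plain "maint btrace packet-history" invocation continues
   right after the packets printed here.  */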
/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_thread_info *btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
}
/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_clear (tp);
}
/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (const char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %zu.\n"),
			 btinfo->data.variant.bts.blocks->size ());
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	struct pt_version version;

	version = pt_library_version ();
	printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
			   version.minor, version.build,
			   version.ext != NULL ? version.ext : "");

	btrace_maint_update_pt_packets (btinfo);
	printf_unfiltered (_("Number of packets: %zu.\n"),
			   ((btinfo->maint.variant.pt.packets == nullptr)
			    ? 0 : btinfo->maint.variant.pt.packets->size ()));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c,
			       const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
/* Initialize btrace maintenance commands.  */

void _initialize_btrace ();
void
_initialize_btrace ()
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Info about branch tracing data."), &maintenanceinfolist);

  add_basic_prefix_cmd ("btrace", class_maintenance,
			_("Branch tracing maintenance commands."),
			&maint_btrace_cmdlist, 0, &maintenancelist);

  add_basic_prefix_cmd ("btrace", class_maintenance, _("\
Set branch tracing specific variables."),
			&maint_btrace_set_cmdlist,
			0, &maintenance_set_cmdlist);

  add_basic_prefix_cmd ("pt", class_maintenance, _("\
Set Intel Processor Trace specific variables."),
			&maint_btrace_pt_set_cmdlist,
			0, &maint_btrace_set_cmdlist);

  add_show_prefix_cmd ("btrace", class_maintenance, _("\
Show branch tracing specific variables."),
		       &maint_btrace_show_cmdlist,
		       0, &maintenance_show_cmdlist);

  add_show_prefix_cmd ("pt", class_maintenance, _("\
Show Intel Processor Trace specific variables."),
		       &maint_btrace_pt_show_cmdlist,
		       0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
			   &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
			   NULL, show_maint_btrace_pt_skip_pad,
			   &maint_btrace_pt_set_cmdlist,
			   &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
	   _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first argument."),
	   &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
	   maint_btrace_clear_packet_history_cmd,
	   _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data."),
	   &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
	   _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew."),
	   &maint_btrace_cmdlist);
}