1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
31 #include "filenames.h"
32 #include "xml-support.h"
35 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
36 when used in if statements. */
/* NOTE(review): the do {...} while (0) wrapper lines of DEBUG (original
   lines 39-40 and 44-46) are absent from this extract -- verify against
   upstream before relying on this listing.  */
38 #define DEBUG(msg, args...) \
41 if (record_debug != 0) \
42 fprintf_unfiltered (gdb_stdlog, \
43 "[btrace] " msg "\n", ##args); \
47 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
/* NOTE(review): this extract drops the return-type line, braces, the SYM
   declaration and the symbol/minimal-symbol lookups (original lines 51-61),
   and the final fallback return -- verify against upstream.  */
49 /* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
53 ftrace_print_function_name (const struct btrace_function *bfun)
55 struct minimal_symbol *msym;
62 return SYMBOL_PRINT_NAME (sym);
65 return MSYMBOL_PRINT_NAME (msym);
/* NOTE(review): the return-type line, braces, FILENAME/SYM declarations and
   the branch structure around the two assignments (original lines 72-88) are
   missing from this extract -- verify against upstream.  */
70 /* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
74 ftrace_print_filename (const struct btrace_function *bfun)
82 filename = symtab_to_filename_for_display (symbol_symtab (sym));
84 filename = "<unknown>";
/* NOTE(review): the return-type line, braces and the INSN == NULL guard
   (original lines 91-97) are missing from this extract.  */
89 /* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
93 ftrace_print_insn_addr (const struct btrace_insn *insn)
98 return core_addr_to_string_nz (insn->pc);
/* NOTE(review): LEVEL is printed at line 118 but its declaration and the
   assignment from bfun->level (original lines 108-113) are absent from this
   extract -- verify against upstream.  */
101 /* Print an ftrace debug status message. */
104 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
106 const char *fun, *file;
107 unsigned int ibegin, iend;
110 fun = ftrace_print_function_name (bfun);
111 file = ftrace_print_filename (bfun);
114 ibegin = bfun->insn_offset;
115 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
117 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
118 prefix, fun, file, level, ibegin, iend);
/* NOTE(review): the initialization of MSYM/SYM from BFUN (original lines
   130-134) and all of the return statements inside the comparison branches
   are missing from this extract -- verify against upstream.  */
121 /* Return non-zero if BFUN does not match MFUN and FUN,
122 return zero otherwise. */
125 ftrace_function_switched (const struct btrace_function *bfun,
126 const struct minimal_symbol *mfun,
127 const struct symbol *fun)
129 struct minimal_symbol *msym;
135 /* If the minimal symbol changed, we certainly switched functions. */
136 if (mfun != NULL && msym != NULL
137 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
140 /* If the symbol changed, we certainly switched functions. */
141 if (fun != NULL && sym != NULL)
143 const char *bfname, *fname;
145 /* Check the function name. */
146 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
149 /* Check the location of those functions, as well. */
150 bfname = symtab_to_fullname (symbol_symtab (sym));
151 fname = symtab_to_fullname (symbol_symtab (fun));
152 if (filename_cmp (fname, bfname) != 0)
156 /* If we lost symbol information, we switched functions. */
157 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
160 /* If we gained symbol information, we switched functions. */
161 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
/* NOTE(review): the FUN parameter line (original line 174), the assignments
   of msym/sym into BFUN, the prev == NULL branch structure, and the final
   return of BFUN are missing from this extract -- verify against upstream.  */
167 /* Allocate and initialize a new branch trace function segment.
168 PREV is the chronologically preceding function segment.
169 MFUN and FUN are the symbol information we have for this function. */
171 static struct btrace_function *
172 ftrace_new_function (struct btrace_function *prev,
173 struct minimal_symbol *mfun,
176 struct btrace_function *bfun;
178 bfun = xzalloc (sizeof (*bfun));
182 bfun->flow.prev = prev;
/* Instruction numbering within a segment is 1-based for the first segment;
   later segments continue from the predecessor's offset (below).  */
186 /* Start counting at one. */
188 bfun->insn_offset = 1;
192 gdb_assert (prev->flow.next == NULL);
193 prev->flow.next = bfun;
195 bfun->number = prev->number + 1;
196 bfun->insn_offset = (prev->insn_offset
197 + VEC_length (btrace_insn_s, prev->insn));
198 bfun->level = prev->level;
/* NOTE(review): the actual assignments of bfun->up = caller and
   bfun->flags = flags (original lines 213-216) are missing from this
   extract; only the debug messages remain -- verify against upstream.  */
204 /* Update the UP field of a function segment. */
207 ftrace_update_caller (struct btrace_function *bfun,
208 struct btrace_function *caller,
209 enum btrace_function_flag flags)
211 if (bfun->up != NULL)
212 ftrace_debug (bfun, "updating caller");
217 ftrace_debug (bfun, "set caller");
/* Propagate a caller update to every segment of the same function: the
   segment list is doubly linked via segment.prev/segment.next, so walk in
   both directions from BFUN.  */
220 /* Fix up the caller for all segments of a function. */
223 ftrace_fixup_caller (struct btrace_function *bfun,
224 struct btrace_function *caller,
225 enum btrace_function_flag flags)
227 struct btrace_function *prev, *next;
229 ftrace_update_caller (bfun, caller, flags);
231 /* Update all function segments belonging to the same function. */
232 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
233 ftrace_update_caller (prev, caller, flags);
235 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
236 ftrace_update_caller (next, caller, flags);
/* NOTE(review): the FUN parameter line, the caller/level bookkeeping
   (original lines 251-253, presumably bfun->up and level adjustments) and
   the return statement are missing from this extract.  */
239 /* Add a new function segment for a call.
240 CALLER is the chronologically preceding function segment.
241 MFUN and FUN are the symbol information we have for this function. */
243 static struct btrace_function *
244 ftrace_new_call (struct btrace_function *caller,
245 struct minimal_symbol *mfun,
248 struct btrace_function *bfun;
250 bfun = ftrace_new_function (caller, mfun, fun);
254 ftrace_debug (bfun, "new call");
/* NOTE(review): the FUN parameter line, the up/level assignments (original
   lines 271-272) and the return statement are missing from this extract;
   only the BFUN_UP_LINKS_TO_TAILCALL flag set remains.  */
259 /* Add a new function segment for a tail call.
260 CALLER is the chronologically preceding function segment.
261 MFUN and FUN are the symbol information we have for this function. */
263 static struct btrace_function *
264 ftrace_new_tailcall (struct btrace_function *caller,
265 struct minimal_symbol *mfun,
268 struct btrace_function *bfun;
270 bfun = ftrace_new_function (caller, mfun, fun);
273 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
275 ftrace_debug (bfun, "new tail call");
/* NOTE(review): the FUN parameter line, the `continue` in the mismatch
   branch, the `break` for a match, and the final `return bfun` (original
   lines 286-300) are missing from this extract.  */
280 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
281 symbol information. */
283 static struct btrace_function *
284 ftrace_find_caller (struct btrace_function *bfun,
285 struct minimal_symbol *mfun,
288 for (; bfun != NULL; bfun = bfun->up)
290 /* Skip functions with incompatible symbol information. */
291 if (ftrace_function_switched (bfun, mfun, fun))
294 /* This is the function segment we're looking for. */
/* NOTE(review): the guard that skips empty segments before VEC_last, the
   `break` after a matching call instruction, and the final `return bfun`
   (original lines 309-324) are missing from this extract.  */
301 /* Find the innermost caller in the back trace of BFUN, skipping all
302 function segments that do not end with a call instruction (e.g.
303 tail calls ending with a jump). */
305 static struct btrace_function *
306 ftrace_find_call (struct btrace_function *bfun)
308 for (; bfun != NULL; bfun = bfun->up)
310 struct btrace_insn *last;
/* Skip gaps: a segment with a non-zero error code represents a decode
   error, not real instructions.  */
313 if (bfun->errcode != 0)
316 last = VEC_last (btrace_insn_s, bfun->insn)
318 if (last->iclass == BTRACE_INSN_CALL)
/* NOTE(review): branch structure (the `if (caller != NULL) ... else ...`
   frame, the FUN parameter line, `prev = prev->up` in the topmost-call loop,
   and the final `return bfun`) is missing from this extract -- verify the
   control flow against upstream before relying on this listing.  */
325 /* Add a continuation segment for a function into which we return.
326 PREV is the chronologically preceding function segment.
327 MFUN and FUN are the symbol information we have for this function. */
329 static struct btrace_function *
330 ftrace_new_return (struct btrace_function *prev,
331 struct minimal_symbol *mfun,
334 struct btrace_function *bfun, *caller;
336 bfun = ftrace_new_function (prev, mfun, fun);
338 /* It is important to start at PREV's caller. Otherwise, we might find
339 PREV itself, if PREV is a recursive function. */
340 caller = ftrace_find_caller (prev->up, mfun, fun);
343 /* The caller of PREV is the preceding btrace function segment in this
344 function instance. */
345 gdb_assert (caller->segment.next == NULL);
347 caller->segment.next = bfun;
348 bfun->segment.prev = caller;
350 /* Maintain the function level. */
351 bfun->level = caller->level;
353 /* Maintain the call stack. */
354 bfun->up = caller->up;
355 bfun->flags = caller->flags;
357 ftrace_debug (bfun, "new return");
361 /* We did not find a caller. This could mean that something went
362 wrong or that the call is simply not included in the trace. */
364 /* Let's search for some actual call. */
365 caller = ftrace_find_call (prev->up);
368 /* There is no call in PREV's back trace. We assume that the
369 branch trace did not include it. */
371 /* Let's find the topmost call function - this skips tail calls. */
372 while (prev->up != NULL)
375 /* We maintain levels for a series of returns for which we have
377 We start at the preceding function's level in case this has
378 already been a return for which we have not seen the call.
379 We start at level 0 otherwise, to handle tail calls correctly. */
380 bfun->level = min (0, prev->level) - 1;
382 /* Fix up the call stack for PREV. */
383 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
385 ftrace_debug (bfun, "new return - no caller");
389 /* There is a call in PREV's back trace to which we should have
390 returned. Let's remain at this level. */
391 bfun->level = prev->level;
393 ftrace_debug (bfun, "new return - unknown caller");
/* NOTE(review): the FUN parameter line (original line 407) and the final
   `return bfun` are missing from this extract.  */
400 /* Add a new function segment for a function switch.
401 PREV is the chronologically preceding function segment.
402 MFUN and FUN are the symbol information we have for this function. */
404 static struct btrace_function *
405 ftrace_new_switch (struct btrace_function *prev,
406 struct minimal_symbol *mfun,
409 struct btrace_function *bfun;
411 /* This is an unexplained function switch. The call stack will likely
412 be wrong at this point. */
413 bfun = ftrace_new_function (prev, mfun, fun);
415 ftrace_debug (bfun, "new switch");
/* NOTE(review): the body of the "hijack prev if empty" branch (original
   lines 432-433, presumably `bfun = prev;` and an else for the new-function
   path) and the final `return bfun` are missing from this extract.  */
420 /* Add a new function segment for a gap in the trace due to a decode error.
421 PREV is the chronologically preceding function segment.
422 ERRCODE is the format-specific error code. */
424 static struct btrace_function *
425 ftrace_new_gap (struct btrace_function *prev, int errcode)
427 struct btrace_function *bfun;
429 /* We hijack prev if it was empty. */
430 if (prev != NULL && prev->errcode == 0
431 && VEC_empty (btrace_insn_s, prev->insn))
434 bfun = ftrace_new_function (prev, NULL, NULL);
436 bfun->errcode = errcode;
438 ftrace_debug (bfun, "new gap");
/* NOTE(review): several structural lines are missing from this extract --
   the `struct symbol *fun` declaration, `mfun = bmfun.minsym`, the guard
   around VEC_last, the jump-handling `start`/`break` details, and the final
   `return bfun` fallthrough.  Verify the switch structure against upstream
   before relying on this listing.  */
443 /* Update BFUN with respect to the instruction at PC. This may create new
445 Return the chronologically latest function segment, never NULL. */
447 static struct btrace_function *
448 ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
450 struct bound_minimal_symbol bmfun;
451 struct minimal_symbol *mfun;
453 struct btrace_insn *last;
455 /* Try to determine the function we're in. We use both types of symbols
456 to avoid surprises when we sometimes get a full symbol and sometimes
457 only a minimal symbol. */
458 fun = find_pc_function (pc);
459 bmfun = lookup_minimal_symbol_by_pc (pc);
462 if (fun == NULL && mfun == NULL)
463 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
465 /* If we didn't have a function or if we had a gap before, we create one. */
466 if (bfun == NULL || bfun->errcode != 0)
467 return ftrace_new_function (bfun, mfun, fun);
469 /* Check the last instruction, if we have one.
470 We do this check first, since it allows us to fill in the call stack
471 links in addition to the normal flow links. */
473 if (!VEC_empty (btrace_insn_s, bfun->insn))
474 last = VEC_last (btrace_insn_s, bfun->insn);
478 switch (last->iclass)
480 case BTRACE_INSN_RETURN:
484 /* On some systems, _dl_runtime_resolve returns to the resolved
485 function instead of jumping to it. From our perspective,
486 however, this is a tailcall.
487 If we treated it as return, we wouldn't be able to find the
488 resolved function in our stack back trace. Hence, we would
489 lose the current stack back trace and start anew with an empty
490 back trace. When the resolved function returns, we would then
491 create a stack back trace with the same function names but
492 different frame id's. This will confuse stepping. */
493 fname = ftrace_print_function_name (bfun);
494 if (strcmp (fname, "_dl_runtime_resolve") == 0)
495 return ftrace_new_tailcall (bfun, mfun, fun);
497 return ftrace_new_return (bfun, mfun, fun);
500 case BTRACE_INSN_CALL:
501 /* Ignore calls to the next instruction. They are used for PIC. */
502 if (last->pc + last->size == pc)
505 return ftrace_new_call (bfun, mfun, fun);
507 case BTRACE_INSN_JUMP:
511 start = get_pc_function_start (pc);
513 /* If we can't determine the function for PC, we treat a jump at
514 the end of the block as tail call. */
515 if (start == 0 || start == pc)
516 return ftrace_new_tailcall (bfun, mfun, fun);
521 /* Check if we're switching functions for some other reason. */
522 if (ftrace_function_switched (bfun, mfun, fun))
524 DEBUG_FTRACE ("switching from %s in %s at %s",
525 ftrace_print_insn_addr (last),
526 ftrace_print_function_name (bfun),
527 ftrace_print_filename (bfun));
529 return ftrace_new_switch (bfun, mfun, fun);
/* NOTE(review): the `static void` line and braces are missing from this
   extract; the visible body appends INSN to BFUN's instruction vector and
   emits a verbose debug message.  */
535 /* Add the instruction at PC to BFUN's instructions. */
538 ftrace_update_insns (struct btrace_function *bfun,
539 const struct btrace_insn *insn)
541 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
543 if (record_debug > 1)
544 ftrace_debug (bfun, "update insn");
/* NOTE(review): the TRY block opening (original lines 555-556), the CATCH
   body and END_CATCH, and the final `return iclass` are missing from this
   extract.  The CATCH presumably leaves ICLASS at BTRACE_INSN_OTHER on a
   decode error -- confirm against upstream.  */
547 /* Classify the instruction at PC. */
549 static enum btrace_insn_class
550 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
552 enum btrace_insn_class iclass;
554 iclass = BTRACE_INSN_OTHER;
557 if (gdbarch_insn_is_call (gdbarch, pc))
558 iclass = BTRACE_INSN_CALL;
559 else if (gdbarch_insn_is_ret (gdbarch, pc))
560 iclass = BTRACE_INSN_RETURN;
561 else if (gdbarch_insn_is_jump (gdbarch, pc))
562 iclass = BTRACE_INSN_JUMP;
564 CATCH (error, RETURN_MASK_ERROR)
/* NOTE(review): this extract omits major structure -- the `int level`/
   `CORE_ADDR pc`/`int size` declarations, the outer block loop and inner
   instruction loop headers, the TRY around gdb_insn_length, break/continue
   statements, and `btinfo->end = end`.  Do not treat the visible lines as a
   complete control flow; verify against upstream.  */
572 /* Compute the function branch trace from BTS trace. */
575 btrace_compute_ftrace_bts (struct thread_info *tp,
576 const struct btrace_data_bts *btrace)
578 struct btrace_thread_info *btinfo;
579 struct btrace_function *begin, *end;
580 struct gdbarch *gdbarch;
581 unsigned int blk, ngaps;
584 gdbarch = target_gdbarch ();
585 btinfo = &tp->btrace;
586 begin = btinfo->begin;
588 ngaps = btinfo->ngaps;
/* LEVEL starts at INT_MAX for a fresh trace so min() below finds the
   smallest function level; otherwise continue from the stored offset.  */
589 level = begin != NULL ? -btinfo->level : INT_MAX;
590 blk = VEC_length (btrace_block_s, btrace->blocks);
594 btrace_block_s *block;
599 block = VEC_index (btrace_block_s, btrace->blocks, blk);
604 struct btrace_insn insn;
607 /* We should hit the end of the block. Warn if we went too far. */
610 /* Indicate the gap in the trace - unless we're at the
614 warning (_("Recorded trace may be corrupted around %s."),
615 core_addr_to_string_nz (pc));
617 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
623 end = ftrace_update_function (end, pc);
627 /* Maintain the function level offset.
628 For all but the last block, we do it here. */
630 level = min (level, end->level);
635 size = gdb_insn_length (gdbarch, pc);
637 CATCH (error, RETURN_MASK_ERROR)
644 insn.iclass = ftrace_classify_insn (gdbarch, pc);
646 ftrace_update_insns (end, &insn);
648 /* We're done once we pushed the instruction at the end. */
649 if (block->end == pc)
652 /* We can't continue if we fail to compute the size. */
655 warning (_("Recorded trace may be incomplete around %s."),
656 core_addr_to_string_nz (pc));
658 /* Indicate the gap in the trace. We just added INSN so we're
659 not at the beginning. */
660 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
668 /* Maintain the function level offset.
669 For the last block, we do it here to not consider the last
671 Since the last instruction corresponds to the current instruction
672 and is not really part of the execution history, it shouldn't
675 level = min (level, end->level);
679 btinfo->begin = begin;
681 btinfo->ngaps = ngaps;
683 /* LEVEL is the minimal function level of all btrace function segments.
684 Define the global level offset to -LEVEL so all function levels are
685 normalized to start at zero. */
686 btinfo->level = -level;
689 /* Compute the function branch trace from a block branch trace BTRACE for
690 a thread given by BTINFO. */
693 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
695 DEBUG ("compute ftrace");
697 switch (btrace->format)
699 case BTRACE_FORMAT_NONE:
702 case BTRACE_FORMAT_BTS:
703 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
707 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* NOTE(review): the `static void` line, braces, the `CORE_ADDR pc`
   declaration, and the assignments filling BLOCK->begin/end with PC
   (original lines 731-733) are missing from this extract -- verify against
   upstream.  */
710 /* Add an entry for the current PC. */
713 btrace_add_pc (struct thread_info *tp)
715 struct btrace_data btrace;
716 struct btrace_block *block;
717 struct regcache *regcache;
718 struct cleanup *cleanup;
721 regcache = get_thread_regcache (tp->ptid);
722 pc = regcache_read_pc (regcache);
724 btrace_data_init (&btrace);
725 btrace.format = BTRACE_FORMAT_BTS;
726 btrace.variant.bts.blocks = NULL;
728 cleanup = make_cleanup_btrace_data (&btrace);
730 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
734 btrace_compute_ftrace (tp, &btrace);
736 do_cleanups (cleanup);
/* NOTE(review): the "See btrace.h" comment, the `void` return-type line,
   braces, the early `return` when tracing is already enabled, and the
   btrace_add_pc call in the final branch (original line 757) are missing
   from this extract.  */
742 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
744 if (tp->btrace.target != NULL)
747 if (!target_supports_btrace (conf->format))
748 error (_("Target does not support branch tracing."));
750 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
752 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
754 /* Add an entry for the current PC so we start tracing from where we
756 if (tp->btrace.target != NULL)
/* NOTE(review): braces and the `return NULL` for the no-target case
   (original lines 764-767) are missing from this extract.  Returns the
   branch trace configuration for BTINFO's target, or NULL.  */
762 const struct btrace_config *
763 btrace_conf (const struct btrace_thread_info *btinfo)
765 if (btinfo->target == NULL)
768 return target_btrace_conf (btinfo->target);
/* NOTE(review): the `void` return-type line, braces, the early `return`
   when not enabled, the btp->target = NULL reset and the btrace_clear call
   (original lines 785-787) are missing from this extract.  */
774 btrace_disable (struct thread_info *tp)
776 struct btrace_thread_info *btp = &tp->btrace;
779 if (btp->target == NULL)
782 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
784 target_disable_btrace (btp->target);
/* NOTE(review): as with btrace_disable above, the return-type line, braces,
   the early return, the btp->target = NULL reset and the trailing
   btrace_clear (original lines 804-806) are missing from this extract.  */
793 btrace_teardown (struct thread_info *tp)
795 struct btrace_thread_info *btp = &tp->btrace;
798 if (btp->target == NULL)
801 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
803 target_teardown_btrace (btp->target);
/* NOTE(review): this extract omits the `static int` line, braces, all
   `return 0`/`return -1` statements, and the btrace_clear/error-path bodies
   (e.g. after the "Falling back" warning and in the remove-entire-old-trace
   branch at the end).  The visible lines show the stitching logic only;
   verify the return paths against upstream.  */
809 /* Stitch branch trace in BTS format. */
812 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
814 struct btrace_thread_info *btinfo;
815 struct btrace_function *last_bfun;
816 struct btrace_insn *last_insn;
817 btrace_block_s *first_new_block;
819 btinfo = &tp->btrace;
820 last_bfun = btinfo->end;
821 gdb_assert (last_bfun != NULL);
822 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
824 /* If the existing trace ends with a gap, we just glue the traces
825 together. We need to drop the last (i.e. chronologically first) block
826 of the new trace, though, since we can't fill in the start address.*/
827 if (VEC_empty (btrace_insn_s, last_bfun->insn))
829 VEC_pop (btrace_block_s, btrace->blocks);
833 /* Beware that block trace starts with the most recent block, so the
834 chronologically first block in the new trace is the last block in
835 the new trace's block vector. */
836 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
837 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
839 /* If the current PC at the end of the block is the same as in our current
840 trace, there are two explanations:
841 1. we executed the instruction and some branch brought us back.
842 2. we have not made any progress.
843 In the first case, the delta trace vector should contain at least two
845 In the second case, the delta trace vector should contain exactly one
846 entry for the partial block containing the current PC. Remove it. */
847 if (first_new_block->end == last_insn->pc
848 && VEC_length (btrace_block_s, btrace->blocks) == 1)
850 VEC_pop (btrace_block_s, btrace->blocks);
854 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
855 core_addr_to_string_nz (first_new_block->end));
857 /* Do a simple sanity check to make sure we don't accidentally end up
858 with a bad block. This should not occur in practice. */
859 if (first_new_block->end < last_insn->pc)
861 warning (_("Error while trying to read delta trace. Falling back to "
866 /* We adjust the last block to start at the end of our current trace. */
867 gdb_assert (first_new_block->begin == 0);
868 first_new_block->begin = last_insn->pc;
870 /* We simply pop the last insn so we can insert it again as part of
871 the normal branch trace computation.
872 Since instruction iterators are based on indices in the instructions
873 vector, we don't leave any pointers dangling. */
874 DEBUG ("pruning insn at %s for stitching",
875 ftrace_print_insn_addr (last_insn));
877 VEC_pop (btrace_insn_s, last_bfun->insn);
879 /* The instructions vector may become empty temporarily if this has
880 been the only instruction in this function segment.
881 This violates the invariant but will be remedied shortly by
882 btrace_compute_ftrace when we add the new trace. */
884 /* The only case where this would hurt is if the entire trace consisted
885 of just that one instruction. If we remove it, we might turn the now
886 empty btrace function segment into a gap. But we don't want gaps at
887 the beginning. To avoid this, we remove the entire old trace. */
888 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
894 /* Adjust the block trace in order to stitch old and new trace together.
895 BTRACE is the new delta trace between the last and the current stop.
896 TP is the traced thread.
897 May modifx BTRACE as well as the existing trace in TP.
898 Return 0 on success, -1 otherwise. */
901 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
903 /* If we don't have trace, there's nothing to do. */
904 if (btrace_data_empty (btrace))
907 switch (btrace->format)
909 case BTRACE_FORMAT_NONE:
912 case BTRACE_FORMAT_BTS:
913 return btrace_stitch_bts (&btrace->variant.bts, tp);
916 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* NOTE(review): the `static void` line and braces are missing from this
   extract.  Frees the iterator/replay state and resets the pointers so the
   histories are rebuilt on the next request.  */
919 /* Clear the branch trace histories in BTINFO. */
922 btrace_clear_history (struct btrace_thread_info *btinfo)
924 xfree (btinfo->insn_history);
925 xfree (btinfo->call_history);
926 xfree (btinfo->replay);
928 btinfo->insn_history = NULL;
929 btinfo->call_history = NULL;
930 btinfo->replay = NULL;
/* NOTE(review): this extract omits the `void` return-type line, braces,
   the `int errcode` declaration, the early return for tinfo == NULL or an
   active replay, the errcode checks between the read attempts, and the
   btrace_clear calls on the start-over paths.  The visible lines show the
   delta -> new -> all fallback strategy; verify control flow upstream.  */
936 btrace_fetch (struct thread_info *tp)
938 struct btrace_thread_info *btinfo;
939 struct btrace_target_info *tinfo;
940 struct btrace_data btrace;
941 struct cleanup *cleanup;
944 DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
946 btinfo = &tp->btrace;
947 tinfo = btinfo->target;
951 /* There's no way we could get new trace while replaying.
952 On the other hand, delta trace would return a partial record with the
953 current PC, which is the replay PC, not the last PC, as expected. */
954 if (btinfo->replay != NULL)
957 btrace_data_init (&btrace);
958 cleanup = make_cleanup_btrace_data (&btrace);
960 /* Let's first try to extend the trace we already have. */
961 if (btinfo->end != NULL)
963 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
966 /* Success. Let's try to stitch the traces together. */
967 errcode = btrace_stitch_trace (&btrace, tp);
971 /* We failed to read delta trace. Let's try to read new trace. */
972 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
974 /* If we got any new trace, discard what we have. */
975 if (errcode == 0 && !btrace_data_empty (&btrace))
979 /* If we were not able to read the trace, we start over. */
983 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
987 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
989 /* If we were not able to read the branch trace, signal an error. */
991 error (_("Failed to read branch trace."));
993 /* Compute the trace, provided we have any. */
994 if (!btrace_data_empty (&btrace))
996 btrace_clear_history (btinfo);
997 btrace_compute_ftrace (tp, &btrace);
1000 do_cleanups (cleanup);
/* NOTE(review): the `void` return-type line, braces, and the loop that
   walks btinfo->begin freeing each segment via IT/TRASH (original lines
   1018-1027), plus the btinfo->end/ngaps resets, are missing from this
   extract -- verify against upstream.  */
1006 btrace_clear (struct thread_info *tp)
1008 struct btrace_thread_info *btinfo;
1009 struct btrace_function *it, *trash;
1011 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1013 /* Make sure btrace frames that may hold a pointer into the branch
1014 trace data are destroyed. */
1015 reinit_frame_cache ();
1017 btinfo = &tp->btrace;
1028 btinfo->begin = NULL;
1032 btrace_clear_history (btinfo);
/* NOTE(review): the `void` return-type line, braces, and the per-thread
   btrace_clear call in the ALL_NON_EXITED_THREADS loop body (original line
   1045) are missing from this extract.  */
1038 btrace_free_objfile (struct objfile *objfile)
1040 struct thread_info *tp;
1042 DEBUG ("free objfile");
1044 ALL_NON_EXITED_THREADS (tp)
1048 #if defined (HAVE_LIBEXPAT)
/* NOTE(review): the `static void` line and braces (original lines 1052,
   1056, 1061) are missing from this extract.  Rejects any btrace document
   whose version attribute is not exactly "1.0".  */
1050 /* Check the btrace document version. */
1053 check_xml_btrace_version (struct gdb_xml_parser *parser,
1054 const struct gdb_xml_element *element,
1055 void *user_data, VEC (gdb_xml_value_s) *attributes)
1057 const char *version = xml_find_attribute (attributes, "version")->value;
1059 if (strcmp (version, "1.0") != 0)
1060 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
/* NOTE(review): the `static void` line, braces, the assignment of BTRACE
   from USER_DATA (original line ~1074), the `break` statements in the
   switch, and `block->end = *end` are missing from this extract -- verify
   against upstream.  */
1063 /* Parse a btrace "block" xml record. */
1066 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1067 const struct gdb_xml_element *element,
1068 void *user_data, VEC (gdb_xml_value_s) *attributes)
1070 struct btrace_data *btrace;
1071 struct btrace_block *block;
1072 ULONGEST *begin, *end;
1076 switch (btrace->format)
1078 case BTRACE_FORMAT_BTS:
/* First block record: switch the still-unset data to BTS format.  */
1081 case BTRACE_FORMAT_NONE:
1082 btrace->format = BTRACE_FORMAT_BTS;
1083 btrace->variant.bts.blocks = NULL;
1087 gdb_xml_error (parser, _("Btrace format error."));
1090 begin = xml_find_attribute (attributes, "begin")->value;
1091 end = xml_find_attribute (attributes, "end")->value;
1093 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1094 block->begin = *begin;
/* XML schema tables for the btrace document: a <btrace version="1.0">
   element containing repeatable optional <block begin=... end=...>
   children.  NOTE(review): the closing `};` lines of each table are
   missing from this extract.  */
1098 static const struct gdb_xml_attribute block_attributes[] = {
1099 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1100 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1101 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1104 static const struct gdb_xml_attribute btrace_attributes[] = {
1105 { "version", GDB_XML_AF_NONE, NULL, NULL },
1106 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1109 static const struct gdb_xml_element btrace_children[] = {
1110 { "block", block_attributes, NULL,
1111 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
1112 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1115 static const struct gdb_xml_element btrace_elements[] = {
1116 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
1117 check_xml_btrace_version, NULL },
1118 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1121 #endif /* defined (HAVE_LIBEXPAT) */
/* NOTE(review): the "See btrace.h" comment, the `void` return-type line,
   braces, the `int errcode` declaration, the final argument(s) of
   gdb_xml_parse_quick (buffer and user data), and the `if (errcode != 0)`
   guard before the error call are missing from this extract.  */
1126 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
1128 struct cleanup *cleanup;
1131 #if defined (HAVE_LIBEXPAT)
1133 btrace->format = BTRACE_FORMAT_NONE;
1135 cleanup = make_cleanup_btrace_data (btrace);
1136 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
1139 error (_("Error parsing branch trace."));
1141 /* Keep parse results. */
1142 discard_cleanups (cleanup);
1144 #else /* !defined (HAVE_LIBEXPAT) */
1146 error (_("Cannot process branch trace. XML parsing is not supported."));
1148 #endif /* !defined (HAVE_LIBEXPAT) */
1151 #if defined (HAVE_LIBEXPAT)
/* NOTE(review): the `static void` line, braces, the assignment of CONF
   from USER_DATA, and the `if (size != NULL)` guard before dereferencing
   SIZE are missing from this extract -- verify against upstream.  */
1153 /* Parse a btrace-conf "bts" xml record. */
1156 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1157 const struct gdb_xml_element *element,
1158 void *user_data, VEC (gdb_xml_value_s) *attributes)
1160 struct btrace_config *conf;
1161 struct gdb_xml_value *size;
1164 conf->format = BTRACE_FORMAT_BTS;
1167 size = xml_find_attribute (attributes, "size");
1169 conf->bts.size = (unsigned int) * (ULONGEST *) size->value;
/* XML schema tables for the btrace-conf document: a <btrace-conf> element
   with an optional <bts size=...> child.  NOTE(review): the closing `};`
   lines of each table are missing from this extract.  */
1172 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
1173 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1174 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1177 static const struct gdb_xml_element btrace_conf_children[] = {
1178 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
1179 parse_xml_btrace_conf_bts, NULL },
1180 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1183 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
1184 { "version", GDB_XML_AF_NONE, NULL, NULL },
1185 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1188 static const struct gdb_xml_element btrace_conf_elements[] = {
1189 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
1190 GDB_XML_EF_NONE, NULL, NULL },
1191 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1194 #endif /* defined (HAVE_LIBEXPAT) */
/* NOTE(review): the `void` return-type line, braces, the `int errcode`
   declaration, and the `if (errcode != 0)` guard before the error call
   are missing from this extract.  */
1199 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
1203 #if defined (HAVE_LIBEXPAT)
1205 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
1206 btrace_conf_elements, xml, conf);
1208 error (_("Error parsing branch trace configuration."));
1210 #else /* !defined (HAVE_LIBEXPAT) */
1212 error (_("XML parsing is not supported."));
1214 #endif /* !defined (HAVE_LIBEXPAT) */
/* NOTE(review): braces, `index = it->index`, and the `return NULL` for the
   gap case (original lines 1224-1231) are missing from this extract.
   Returns the instruction the iterator points at, or NULL for a gap.  */
1219 const struct btrace_insn *
1220 btrace_insn_get (const struct btrace_insn_iterator *it)
1222 const struct btrace_function *bfun;
1223 unsigned int index, end;
1226 bfun = it->function;
1228 /* Check if the iterator points to a gap in the trace. */
1229 if (bfun->errcode != 0)
1232 /* The index is within the bounds of this function's instruction vector. */
1233 end = VEC_length (btrace_insn_s, bfun->insn);
1234 gdb_assert (0 < end);
1235 gdb_assert (index < end);
1237 return VEC_index (btrace_insn_s, bfun->insn, index);
/* NOTE(review): the `unsigned int` return-type line, braces, and the
   `return 0` for the gap case are missing from this extract.  Returns the
   1-based global number of the instruction the iterator points at.  */
1243 btrace_insn_number (const struct btrace_insn_iterator *it)
1245 const struct btrace_function *bfun;
1247 bfun = it->function;
1249 /* Return zero if the iterator points to a gap in the trace. */
1250 if (bfun->errcode != 0)
1253 return bfun->insn_offset + it->index;
/* NOTE(review): the `void` return-type line, braces, the `if (bfun == NULL)`
   guard before the error, and `it->index = 0` are missing from this
   extract.  Positions IT at the first recorded instruction.  */
1259 btrace_insn_begin (struct btrace_insn_iterator *it,
1260 const struct btrace_thread_info *btinfo)
1262 const struct btrace_function *bfun;
1264 bfun = btinfo->begin;
1266 error (_("No trace."));
1268 it->function = bfun;
/* NOTE(review): the `void` return-type line, braces, the assignment of
   BFUN from btinfo->end, the NULL guard, the adjustment subtracting one
   for the one-past-the-end instruction, and `it->index = length` are
   missing from this extract -- verify against upstream.  */
1275 btrace_insn_end (struct btrace_insn_iterator *it,
1276 const struct btrace_thread_info *btinfo)
1278 const struct btrace_function *bfun;
1279 unsigned int length;
1283 error (_("No trace."));
1285 length = VEC_length (btrace_insn_s, bfun->insn);
1287 /* The last function may either be a gap or it contains the current
1288 instruction, which is one past the end of the execution trace; ignore
1293 it->function = bfun;
/* NOTE(review): the `unsigned int` return-type line, braces, the `steps = 0`
   initialization, `index = it->index`, the `while (steps < stride)` loop
   header, the gap-advance bookkeeping, and the final `return steps` are
   missing from this extract.  Advances IT by at most STRIDE instructions
   and returns the number actually stepped.  */
1300 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
1302 const struct btrace_function *bfun;
1303 unsigned int index, steps;
1305 bfun = it->function;
1311 unsigned int end, space, adv;
1313 end = VEC_length (btrace_insn_s, bfun->insn);
1315 /* An empty function segment represents a gap in the trace. We count
1316 it as one instruction. */
1319 const struct btrace_function *next;
1321 next = bfun->flow.next;
1334 gdb_assert (0 < end);
1335 gdb_assert (index < end);
1337 /* Compute the number of instructions remaining in this segment. */
1338 space = end - index;
1340 /* Advance the iterator as far as possible within this segment. */
1341 adv = min (space, stride);
1346 /* Move to the next function if we're at the end of this one. */
1349 const struct btrace_function *next;
1351 next = bfun->flow.next;
1354 /* We stepped past the last function.
1356 Let's adjust the index to point to the last instruction in
1357 the previous function. */
1363 /* We now point to the first instruction in the new function. */
1368 /* We did make progress. */
1369 gdb_assert (adv > 0);
1372 /* Update the iterator. */
1373 it->function = bfun;
/* NOTE(review): the `unsigned int` return-type line, braces, the loop
   header, the `break` when there is no previous function, the stride/steps
   accounting after `adv`, `it->index` update, and the final `return steps`
   are missing from this extract.  Moves IT back by at most STRIDE
   instructions and returns the number actually stepped.  */
1382 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1384 const struct btrace_function *bfun;
1385 unsigned int index, steps;
1387 bfun = it->function;
1395 /* Move to the previous function if we're at the start of this one. */
1398 const struct btrace_function *prev;
1400 prev = bfun->flow.prev;
1404 /* We point to one after the last instruction in the new function. */
1406 index = VEC_length (btrace_insn_s, bfun->insn);
1408 /* An empty function segment represents a gap in the trace. We count
1409 it as one instruction. */
1419 /* Advance the iterator as far as possible within this segment. */
1420 adv = min (index, stride);
1426 /* We did make progress. */
1427 gdb_assert (adv > 0);
1430 /* Update the iterator. */
1431 it->function = bfun;
/* Three-way compare of two instruction iterators by their global
   instruction number: the result is negative, zero, or positive as
   LHS is before, equal to, or after RHS.  Gaps report instruction
   number zero, so their position is taken from the owning segment's
   insn_offset instead.  NOTE(review): the final subtraction relies on
   unsigned wraparound cast to int; fine for nearby numbers, but could
   misorder iterators more than INT_MAX instructions apart -- confirm
   callers only compare within one trace.  */
1440 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1441 const struct btrace_insn_iterator *rhs)
1443 unsigned int lnum, rnum;
1445 lnum = btrace_insn_number (lhs);
1446 rnum = btrace_insn_number (rhs);
1448 /* A gap has an instruction number of zero. Things are getting more
1449 complicated if gaps are involved.
1451 We take the instruction number offset from the iterator's function.
1452 This is the number of the first instruction after the gap.
1454 This is OK as long as both lhs and rhs point to gaps. If only one of
1455 them does, we need to adjust the number based on the other's regular
1456 instruction number. Otherwise, a gap might compare equal to an
1459 if (lnum == 0 && rnum == 0)
/* Both sides are gaps: compare segment offsets directly.  */
1461 lnum = lhs->function->insn_offset;
1462 rnum = rhs->function->insn_offset;
/* Only one side is a gap: substitute its segment offset.  */
1466 lnum = lhs->function->insn_offset;
1473 rnum = rhs->function->insn_offset;
1479 return (int) (lnum - rnum);
/* Initialize IT to point at the instruction with global number NUMBER
   in BTINFO's trace.  Scans the segment list backward from
   btinfo->end; segments with a non-zero errcode (gaps) are skipped.
   NOTE(review): the failure path (NUMBER not found) is not visible in
   this extract -- presumably the function reports success/failure;
   confirm against btrace.h.  */
1485 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1486 const struct btrace_thread_info *btinfo,
1487 unsigned int number)
1489 const struct btrace_function *bfun;
1490 unsigned int end, length;
1492 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
/* Gaps don't contain addressable instructions; skip them.  */
1495 if (bfun->errcode != 0)
/* Segments are numbered in order: the first segment whose offset is
   <= NUMBER is the candidate containing it.  */
1498 if (bfun->insn_offset <= number)
1505 length = VEC_length (btrace_insn_s, bfun->insn);
1506 gdb_assert (length > 0)
1508 end = bfun->insn_offset + length;
/* Found it: position IT inside this segment.  */
1512 it->function = bfun;
1513 it->index = number - bfun->insn_offset;
/* Return the function segment the call iterator IT refers to.
   NULL designates the end iterator (see btrace_call_end).  */
1520 const struct btrace_function *
1521 btrace_call_get (const struct btrace_call_iterator *it)
1523 return it->function;
/* Return the ordinal number of the function segment the call iterator
   IT refers to.  For the end iterator (function == NULL), returns one
   more than the number of the last function -- unless that last
   segment holds only the single current instruction, in which case it
   is skipped and its own number is already the end number.  */
1529 btrace_call_number (const struct btrace_call_iterator *it)
1531 const struct btrace_thread_info *btinfo;
1532 const struct btrace_function *bfun;
1535 btinfo = it->btinfo;
1536 bfun = it->function;
/* Normal case: the iterator points at a real segment.  */
1538 return bfun->number;
1540 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1541 number of the last function. */
1543 insns = VEC_length (btrace_insn_s, bfun->insn);
1545 /* If the function contains only a single instruction (i.e. the current
1546 instruction), it will be skipped and its number is already the number
1549 return bfun->number;
1551 /* Otherwise, return one more than the number of the last function. */
1552 return bfun->number + 1;
/* Initialize IT to the first function segment of BTINFO's branch
   trace.  Throws an error if there is no trace recorded.  */
1558 btrace_call_begin (struct btrace_call_iterator *it,
1559 const struct btrace_thread_info *btinfo)
1561 const struct btrace_function *bfun;
1563 bfun = btinfo->begin;
/* No recorded trace at all -- nothing to iterate.  */
1565 error (_("No trace."));
1567 it->btinfo = btinfo;
1568 it->function = bfun;
/* Initialize IT to the end iterator of BTINFO's branch trace, marked
   by function == NULL (see btrace_call_get/btrace_call_number).
   Throws an error if there is no trace recorded.  */
1574 btrace_call_end (struct btrace_call_iterator *it,
1575 const struct btrace_thread_info *btinfo)
1577 const struct btrace_function *bfun;
1581 error (_("No trace."));
1583 it->btinfo = btinfo;
1584 it->function = NULL;
/* Advance the call iterator IT forward by up to STRIDE function
   segments via bfun->flow.next.  The last segment is ignored when it
   contains only the single (current) instruction.  NOTE(review): the
   return statement is not visible in this extract; presumably the
   function returns the number of steps actually taken -- confirm
   against btrace.h.  */
1590 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
1592 const struct btrace_function *bfun;
1595 bfun = it->function;
1597 while (bfun != NULL)
1599 const struct btrace_function *next;
1602 next = bfun->flow.next;
1605 /* Ignore the last function if it only contains a single
1606 (i.e. the current) instruction. */
1607 insns = VEC_length (btrace_insn_s, bfun->insn);
1612 if (stride == steps)
1619 it->function = bfun;
/* Move the call iterator IT backward by up to STRIDE function
   segments via bfun->flow.prev.  When starting from the end iterator,
   the last segment is ignored if it only contains the single
   (current) instruction.  NOTE(review): the return statement is not
   visible in this extract; presumably the function returns the number
   of steps actually taken -- confirm against btrace.h.  */
1626 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
1628 const struct btrace_thread_info *btinfo;
1629 const struct btrace_function *bfun;
1632 bfun = it->function;
/* BTINFO is needed to find the last segment when IT is the end
   iterator (function == NULL).  */
1639 btinfo = it->btinfo;
1644 /* Ignore the last function if it only contains a single
1645 (i.e. the current) instruction. */
1646 insns = VEC_length (btrace_insn_s, bfun->insn);
1648 bfun = bfun->flow.prev;
1656 while (steps < stride)
1658 const struct btrace_function *prev;
1660 prev = bfun->flow.prev;
1668 it->function = bfun;
/* Three-way compare of two call iterators by function segment number:
   negative, zero, or positive as LHS is before, equal to, or after
   RHS.  Both iterators must belong to the same trace for the numbers
   to be comparable.  */
1675 btrace_call_cmp (const struct btrace_call_iterator *lhs,
1676 const struct btrace_call_iterator *rhs)
1678 unsigned int lnum, rnum;
1680 lnum = btrace_call_number (lhs);
1681 rnum = btrace_call_number (rhs);
1683 return (int) (lnum - rnum);
/* Initialize IT to point at the function segment with number NUMBER
   in BTINFO's trace, scanning backward from btinfo->end.
   NOTE(review): the success/failure return is not visible in this
   extract -- confirm against btrace.h.  */
1689 btrace_find_call_by_number (struct btrace_call_iterator *it,
1690 const struct btrace_thread_info *btinfo,
1691 unsigned int number)
1693 const struct btrace_function *bfun;
1695 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1699 bnum = bfun->number;
/* Found the requested segment.  */
1702 it->btinfo = btinfo;
1703 it->function = bfun;
1707 /* Functions are ordered and numbered consecutively. We could bail out
1708 earlier. On the other hand, it is very unlikely that we search for
1709 a nonexistent function. */
/* Store the instruction history range [BEGIN, END) in BTINFO,
   lazily allocating (zero-initialized) the history record on first
   use.  The iterators are copied by value.  */
1718 btrace_set_insn_history (struct btrace_thread_info *btinfo,
1719 const struct btrace_insn_iterator *begin,
1720 const struct btrace_insn_iterator *end)
1722 if (btinfo->insn_history == NULL)
1723 btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
1725 btinfo->insn_history->begin = *begin;
1726 btinfo->insn_history->end = *end;
/* Store the call history range [BEGIN, END) in BTINFO, lazily
   allocating (zero-initialized) the history record on first use.
   Both iterators must refer to the same trace (asserted).  */
1732 btrace_set_call_history (struct btrace_thread_info *btinfo,
1733 const struct btrace_call_iterator *begin,
1734 const struct btrace_call_iterator *end)
1736 gdb_assert (begin->btinfo == end->btinfo);
1738 if (btinfo->call_history == NULL)
1739 btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
1741 btinfo->call_history->begin = *begin;
1742 btinfo->call_history->end = *end;
/* Return non-zero iff thread TP currently has an active replay
   position in its branch trace.  */
1748 btrace_is_replaying (struct thread_info *tp)
1750 return tp->btrace.replay != NULL;
/* Return non-zero iff thread TP's branch trace contains no
   instructions: either no trace was recorded at all (begin == NULL)
   or the begin and end instruction iterators compare equal.  */
1756 btrace_is_empty (struct thread_info *tp)
1758 struct btrace_insn_iterator begin, end;
1759 struct btrace_thread_info *btinfo;
1761 btinfo = &tp->btrace;
/* No recorded trace at all counts as empty.  */
1763 if (btinfo->begin == NULL)
1766 btrace_insn_begin (&begin, btinfo);
1767 btrace_insn_end (&end, btinfo);
1769 return btrace_insn_cmp (&begin, &end) == 0;
1772 /* Forward the cleanup request. */
/* Cleanup-callback adapter: ARG is a struct btrace_data * registered
   via make_cleanup_btrace_data below; simply finalize it.  */
1775 do_btrace_data_cleanup (void *arg)
1777 btrace_data_fini (arg);
1783 make_cleanup_btrace_data (struct btrace_data *data)
1785 return make_cleanup (do_btrace_data_cleanup, data);