/* Branch trace support for GDB, the GNU debugger.
- Copyright (C) 2013-2014 Free Software Foundation, Inc.
+ Copyright (C) 2013-2016 Free Software Foundation, Inc.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
+#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
-#include "exceptions.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
+#include "rsp-low.h"
+#include "gdbcmd.h"
+#include "cli/cli-utils.h"
+
+#include <inttypes.h>
+#include <ctype.h>
+
+/* Command lists for btrace maintenance commands. */
+static struct cmd_list_element *maint_btrace_cmdlist;
+static struct cmd_list_element *maint_btrace_set_cmdlist;
+static struct cmd_list_element *maint_btrace_show_cmdlist;
+static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
+static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
+
+/* Control whether to skip PAD packets when computing the packet history. */
+static int maint_btrace_pt_skip_pad = 1;
+
+static void btrace_add_pc (struct thread_info *tp);
/* Print a record debug message. Use do ... while (0) to avoid ambiguities
when used in if statements. */
return SYMBOL_PRINT_NAME (sym);
if (msym != NULL)
- return SYMBOL_PRINT_NAME (msym);
+ return MSYMBOL_PRINT_NAME (msym);
return "<unknown>";
}
sym = bfun->sym;
if (sym != NULL)
- filename = symtab_to_filename_for_display (sym->symtab);
+ filename = symtab_to_filename_for_display (symbol_symtab (sym));
else
filename = "<unknown>";
{
const char *fun, *file;
unsigned int ibegin, iend;
- int lbegin, lend, level;
+ int level;
fun = ftrace_print_function_name (bfun);
file = ftrace_print_filename (bfun);
level = bfun->level;
- lbegin = bfun->lbegin;
- lend = bfun->lend;
-
ibegin = bfun->insn_offset;
iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
- DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
- "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
- ibegin, iend);
+ DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
+ prefix, fun, file, level, ibegin, iend);
}
/* Return non-zero if BFUN does not match MFUN and FUN,
/* If the minimal symbol changed, we certainly switched functions. */
if (mfun != NULL && msym != NULL
- && strcmp (SYMBOL_LINKAGE_NAME (mfun), SYMBOL_LINKAGE_NAME (msym)) != 0)
+ && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
return 1;
/* If the symbol changed, we certainly switched functions. */
return 1;
/* Check the location of those functions, as well. */
- bfname = symtab_to_fullname (sym->symtab);
- fname = symtab_to_fullname (fun->symtab);
+ bfname = symtab_to_fullname (symbol_symtab (sym));
+ fname = symtab_to_fullname (symbol_symtab (fun));
if (filename_cmp (fname, bfname) != 0)
return 1;
}
return 0;
}
-/* Return non-zero if we should skip this file when generating the function
- call history, zero otherwise.
- We would want to do that if, say, a macro that is defined in another file
- is expanded in this function. */
-
-static int
-ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
-{
- struct symbol *sym;
- const char *bfile;
-
- sym = bfun->sym;
- if (sym == NULL)
- return 1;
-
- bfile = symtab_to_fullname (sym->symtab);
-
- return (filename_cmp (bfile, fullname) != 0);
-}
-
/* Allocate and initialize a new branch trace function segment.
PREV is the chronologically preceding function segment.
MFUN and FUN are the symbol information we have for this function. */
{
struct btrace_function *bfun;
- bfun = xzalloc (sizeof (*bfun));
+ bfun = XCNEW (struct btrace_function);
bfun->msym = mfun;
bfun->sym = fun;
bfun->flow.prev = prev;
- /* We start with the identities of min and max, respectively. */
- bfun->lbegin = INT_MAX;
- bfun->lend = INT_MIN;
-
if (prev == NULL)
{
/* Start counting at one. */
bfun->number = prev->number + 1;
bfun->insn_offset = (prev->insn_offset
+ VEC_length (btrace_insn_s, prev->insn));
+ bfun->level = prev->level;
}
return bfun;
bfun = ftrace_new_function (caller, mfun, fun);
bfun->up = caller;
- bfun->level = caller->level + 1;
+ bfun->level += 1;
ftrace_debug (bfun, "new call");
bfun = ftrace_new_function (caller, mfun, fun);
bfun->up = caller;
- bfun->level = caller->level + 1;
+ bfun->level += 1;
bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
ftrace_debug (bfun, "new tail call");
tail calls ending with a jump). */
static struct btrace_function *
-ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
+ftrace_find_call (struct btrace_function *bfun)
{
for (; bfun != NULL; bfun = bfun->up)
{
struct btrace_insn *last;
- CORE_ADDR pc;
- /* We do not allow empty function segments. */
- gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
+ /* Skip gaps. */
+ if (bfun->errcode != 0)
+ continue;
last = VEC_last (btrace_insn_s, bfun->insn);
- pc = last->pc;
- if (gdbarch_insn_is_call (gdbarch, pc))
+ if (last->iclass == BTRACE_INSN_CALL)
break;
}
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
-ftrace_new_return (struct gdbarch *gdbarch,
- struct btrace_function *prev,
+ftrace_new_return (struct btrace_function *prev,
struct minimal_symbol *mfun,
struct symbol *fun)
{
wrong or that the call is simply not included in the trace. */
/* Let's search for some actual call. */
- caller = ftrace_find_call (gdbarch, prev->up);
+ caller = ftrace_find_call (prev->up);
if (caller == NULL)
{
/* There is no call in PREV's back trace. We assume that the
be wrong at this point. */
bfun = ftrace_new_function (prev, mfun, fun);
- /* We keep the function level. */
- bfun->level = prev->level;
-
ftrace_debug (bfun, "new switch");
return bfun;
}
+/* Add a new function segment for a gap in the trace due to a decode error.
+ PREV is the chronologically preceding function segment.
+ ERRCODE is the format-specific error code. */
+
+static struct btrace_function *
+ftrace_new_gap (struct btrace_function *prev, int errcode)
+{
+ struct btrace_function *bfun;
+
+ /* We hijack prev if it was empty. */
+ if (prev != NULL && prev->errcode == 0
+ && VEC_empty (btrace_insn_s, prev->insn))
+ bfun = prev;
+ else
+ bfun = ftrace_new_function (prev, NULL, NULL);
+
+ bfun->errcode = errcode;
+
+ ftrace_debug (bfun, "new gap");
+
+ return bfun;
+}
+
/* Update BFUN with respect to the instruction at PC. This may create new
function segments.
Return the chronologically latest function segment, never NULL. */
static struct btrace_function *
-ftrace_update_function (struct gdbarch *gdbarch,
- struct btrace_function *bfun, CORE_ADDR pc)
+ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
struct bound_minimal_symbol bmfun;
struct minimal_symbol *mfun;
if (fun == NULL && mfun == NULL)
DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
- /* If we didn't have a function before, we create one. */
- if (bfun == NULL)
+ /* If we didn't have a function or if we had a gap before, we create one. */
+ if (bfun == NULL || bfun->errcode != 0)
return ftrace_new_function (bfun, mfun, fun);
/* Check the last instruction, if we have one.
if (last != NULL)
{
- CORE_ADDR lpc;
-
- lpc = last->pc;
+ switch (last->iclass)
+ {
+ case BTRACE_INSN_RETURN:
+ {
+ const char *fname;
+
+ /* On some systems, _dl_runtime_resolve returns to the resolved
+ function instead of jumping to it. From our perspective,
+ however, this is a tailcall.
+ If we treated it as return, we wouldn't be able to find the
+ resolved function in our stack back trace. Hence, we would
+ lose the current stack back trace and start anew with an empty
+ back trace. When the resolved function returns, we would then
+ create a stack back trace with the same function names but
+ different frame id's. This will confuse stepping. */
+ fname = ftrace_print_function_name (bfun);
+ if (strcmp (fname, "_dl_runtime_resolve") == 0)
+ return ftrace_new_tailcall (bfun, mfun, fun);
+
+ return ftrace_new_return (bfun, mfun, fun);
+ }
+
+ case BTRACE_INSN_CALL:
+ /* Ignore calls to the next instruction. They are used for PIC. */
+ if (last->pc + last->size == pc)
+ break;
- /* Check for returns. */
- if (gdbarch_insn_is_ret (gdbarch, lpc))
- return ftrace_new_return (gdbarch, bfun, mfun, fun);
+ return ftrace_new_call (bfun, mfun, fun);
- /* Check for calls. */
- if (gdbarch_insn_is_call (gdbarch, lpc))
- {
- int size;
+ case BTRACE_INSN_JUMP:
+ {
+ CORE_ADDR start;
- size = gdb_insn_length (gdbarch, lpc);
+ start = get_pc_function_start (pc);
- /* Ignore calls to the next instruction. They are used for PIC. */
- if (lpc + size != pc)
- return ftrace_new_call (bfun, mfun, fun);
+	    /* A jump at the end of the block is treated as a tail call if
+	       it targets the start of a function or if we cannot determine
+	       the function for PC.  */
+ if (start == 0 || start == pc)
+ return ftrace_new_tailcall (bfun, mfun, fun);
+ }
}
}
ftrace_print_function_name (bfun),
ftrace_print_filename (bfun));
- if (last != NULL)
- {
- CORE_ADDR start, lpc;
-
- start = get_pc_function_start (pc);
-
- /* If we can't determine the function for PC, we treat a jump at
- the end of the block as tail call. */
- if (start == 0)
- start = pc;
-
- lpc = last->pc;
-
- /* Jumps indicate optimized tail calls. */
- if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
- return ftrace_new_tailcall (bfun, mfun, fun);
- }
-
return ftrace_new_switch (bfun, mfun, fun);
}
return bfun;
}
-/* Update BFUN's source range with respect to the instruction at PC. */
+/* Add the instruction at PC to BFUN's instructions. */
static void
-ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
+ftrace_update_insns (struct btrace_function *bfun,
+ const struct btrace_insn *insn)
{
- struct symtab_and_line sal;
- const char *fullname;
-
- sal = find_pc_line (pc, 0);
- if (sal.symtab == NULL || sal.line == 0)
- {
- DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
- return;
- }
-
- /* Check if we switched files. This could happen if, say, a macro that
- is defined in another file is expanded here. */
- fullname = symtab_to_fullname (sal.symtab);
- if (ftrace_skip_file (bfun, fullname))
- {
- DEBUG_FTRACE ("ignoring file at %s, file=%s",
- core_addr_to_string_nz (pc), fullname);
- return;
- }
-
- /* Update the line range. */
- bfun->lbegin = min (bfun->lbegin, sal.line);
- bfun->lend = max (bfun->lend, sal.line);
+ VEC_safe_push (btrace_insn_s, bfun->insn, insn);
if (record_debug > 1)
- ftrace_debug (bfun, "update lines");
+ ftrace_debug (bfun, "update insn");
}
-/* Add the instruction at PC to BFUN's instructions. */
+/* Classify the instruction at PC. */
-static void
-ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
+static enum btrace_insn_class
+ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
- struct btrace_insn *insn;
+ enum btrace_insn_class iclass;
- insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
- insn->pc = pc;
+ iclass = BTRACE_INSN_OTHER;
+ TRY
+ {
+ if (gdbarch_insn_is_call (gdbarch, pc))
+ iclass = BTRACE_INSN_CALL;
+ else if (gdbarch_insn_is_ret (gdbarch, pc))
+ iclass = BTRACE_INSN_RETURN;
+ else if (gdbarch_insn_is_jump (gdbarch, pc))
+ iclass = BTRACE_INSN_JUMP;
+ }
+ CATCH (error, RETURN_MASK_ERROR)
+ {
+ }
+ END_CATCH
- if (record_debug > 1)
- ftrace_debug (bfun, "update insn");
+ return iclass;
}
-/* Compute the function branch trace from a block branch trace BTRACE for
- a thread given by BTINFO. */
+/* Compute the function branch trace from BTS trace. */
static void
-btrace_compute_ftrace (struct btrace_thread_info *btinfo,
- VEC (btrace_block_s) *btrace)
+btrace_compute_ftrace_bts (struct thread_info *tp,
+ const struct btrace_data_bts *btrace)
{
+ struct btrace_thread_info *btinfo;
struct btrace_function *begin, *end;
struct gdbarch *gdbarch;
- unsigned int blk;
+ unsigned int blk, ngaps;
int level;
- DEBUG ("compute ftrace");
-
gdbarch = target_gdbarch ();
+ btinfo = &tp->btrace;
begin = btinfo->begin;
end = btinfo->end;
+ ngaps = btinfo->ngaps;
level = begin != NULL ? -btinfo->level : INT_MAX;
- blk = VEC_length (btrace_block_s, btrace);
+ blk = VEC_length (btrace_block_s, btrace->blocks);
while (blk != 0)
{
blk -= 1;
- block = VEC_index (btrace_block_s, btrace, blk);
+ block = VEC_index (btrace_block_s, btrace->blocks, blk);
pc = block->begin;
for (;;)
{
+ struct btrace_insn insn;
int size;
/* We should hit the end of the block. Warn if we went too far. */
if (block->end < pc)
{
- warning (_("Recorded trace may be corrupted around %s."),
- core_addr_to_string_nz (pc));
+ /* Indicate the gap in the trace - unless we're at the
+ beginning. */
+ if (begin != NULL)
+ {
+ warning (_("Recorded trace may be corrupted around %s."),
+ core_addr_to_string_nz (pc));
+
+ end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
+ ngaps += 1;
+ }
break;
}
- end = ftrace_update_function (gdbarch, end, pc);
+ end = ftrace_update_function (end, pc);
if (begin == NULL)
begin = end;
if (blk != 0)
level = min (level, end->level);
- ftrace_update_insns (end, pc);
- ftrace_update_lines (end, pc);
+ size = 0;
+ TRY
+ {
+ size = gdb_insn_length (gdbarch, pc);
+ }
+ CATCH (error, RETURN_MASK_ERROR)
+ {
+ }
+ END_CATCH
+
+ insn.pc = pc;
+ insn.size = size;
+ insn.iclass = ftrace_classify_insn (gdbarch, pc);
+ insn.flags = 0;
+
+ ftrace_update_insns (end, &insn);
/* We're done once we pushed the instruction at the end. */
if (block->end == pc)
break;
- size = gdb_insn_length (gdbarch, pc);
-
- /* Make sure we terminate if we fail to compute the size. */
+ /* We can't continue if we fail to compute the size. */
if (size <= 0)
{
warning (_("Recorded trace may be incomplete around %s."),
core_addr_to_string_nz (pc));
+
+ /* Indicate the gap in the trace. We just added INSN so we're
+ not at the beginning. */
+ end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
+ ngaps += 1;
+
break;
}
btinfo->begin = begin;
btinfo->end = end;
+ btinfo->ngaps = ngaps;
/* LEVEL is the minimal function level of all btrace function segments.
Define the global level offset to -LEVEL so all function levels are
btinfo->level = -level;
}
+#if defined (HAVE_LIBIPT)
+
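+/* Translate a libipt instruction class into a btrace instruction class.  */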
+static enum btrace_insn_class
+pt_reclassify_insn (enum pt_insn_class iclass)
+{
+ switch (iclass)
+ {
+ case ptic_call:
+ return BTRACE_INSN_CALL;
+
+ case ptic_return:
+ return BTRACE_INSN_RETURN;
+
+ case ptic_jump:
+ return BTRACE_INSN_JUMP;
+
+ default:
+ return BTRACE_INSN_OTHER;
+ }
+}
+
+/* Return the btrace instruction flags for INSN. */
+
+static enum btrace_insn_flag
+pt_btrace_insn_flags (const struct pt_insn *insn)
+{
+ enum btrace_insn_flag flags = 0;
+
+ if (insn->speculative)
+ flags |= BTRACE_INSN_FLAG_SPECULATIVE;
+
+ return flags;
+}
+
+/* Add function branch trace using DECODER. */
+
+static void
+ftrace_add_pt (struct pt_insn_decoder *decoder,
+ struct btrace_function **pbegin,
+ struct btrace_function **pend, int *plevel,
+ unsigned int *ngaps)
+{
+ struct btrace_function *begin, *end, *upd;
+ uint64_t offset;
+ int errcode, nerrors;
+
+ begin = *pbegin;
+ end = *pend;
+ nerrors = 0;
+ for (;;)
+ {
+ struct btrace_insn btinsn;
+ struct pt_insn insn;
+
+ errcode = pt_insn_sync_forward (decoder);
+ if (errcode < 0)
+ {
+ if (errcode != -pte_eos)
+ warning (_("Failed to synchronize onto the Intel Processor "
+ "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
+ break;
+ }
+
+ memset (&btinsn, 0, sizeof (btinsn));
+ for (;;)
+ {
+ errcode = pt_insn_next (decoder, &insn, sizeof(insn));
+ if (errcode < 0)
+ break;
+
+ /* Look for gaps in the trace - unless we're at the beginning. */
+ if (begin != NULL)
+ {
+ /* Tracing is disabled and re-enabled each time we enter the
+ kernel. Most times, we continue from the same instruction we
+ stopped before. This is indicated via the RESUMED instruction
+ flag. The ENABLED instruction flag means that we continued
+ from some other instruction. Indicate this as a trace gap. */
+ if (insn.enabled)
+ *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
+
+ /* Indicate trace overflows. */
+ if (insn.resynced)
+ *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
+ }
+
+ upd = ftrace_update_function (end, insn.ip);
+ if (upd != end)
+ {
+ *pend = end = upd;
+
+ if (begin == NULL)
+ *pbegin = begin = upd;
+ }
+
+ /* Maintain the function level offset. */
+ *plevel = min (*plevel, end->level);
+
+ btinsn.pc = (CORE_ADDR) insn.ip;
+ btinsn.size = (gdb_byte) insn.size;
+ btinsn.iclass = pt_reclassify_insn (insn.iclass);
+ btinsn.flags = pt_btrace_insn_flags (&insn);
+
+ ftrace_update_insns (end, &btinsn);
+ }
+
+ if (errcode == -pte_eos)
+ break;
+
+ /* If the gap is at the very beginning, we ignore it - we will have
+ less trace, but we won't have any holes in the trace. */
+ if (begin == NULL)
+ continue;
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Failed to decode Intel Processor Trace near trace "
+ "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."),
+ offset, insn.ip, pt_errstr (pt_errcode (errcode)));
+
+ /* Indicate the gap in the trace. */
+      /* Indicate the gap in the trace.  */
+      *pend = end = ftrace_new_gap (end, errcode);
+      *ngaps += 1;
+      nerrors += 1;
+ }
+
+ if (nerrors > 0)
+ warning (_("The recorded execution trace may have gaps."));
+}
+
+/* A callback function to allow the trace decoder to read the inferior's
+ memory. */
+
+static int
+btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
+ const struct pt_asid *asid, uint64_t pc,
+ void *context)
+{
+ int result, errcode;
+
+ result = (int) size;
+ TRY
+ {
+ errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
+ if (errcode != 0)
+ result = -pte_nomap;
+ }
+ CATCH (error, RETURN_MASK_ERROR)
+ {
+ result = -pte_nomap;
+ }
+ END_CATCH
+
+ return result;
+}
+
+/* Translate the vendor from one enum to another. */
+
+static enum pt_cpu_vendor
+pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
+{
+ switch (vendor)
+ {
+ default:
+ return pcv_unknown;
+
+ case CV_INTEL:
+ return pcv_intel;
+ }
+}
+
+/* Finalize the function branch trace after decode. */
+
+static void
+btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
+                           struct thread_info *tp, int level)
+{
+ pt_insn_free_decoder (decoder);
+
+ /* LEVEL is the minimal function level of all btrace function segments.
+ Define the global level offset to -LEVEL so all function levels are
+ normalized to start at zero. */
+ tp->btrace.level = -level;
+
+ /* Add a single last instruction entry for the current PC.
+ This allows us to compute the backtrace at the current PC using both
+ standard unwind and btrace unwind.
+ This extra entry is ignored by all record commands. */
+ btrace_add_pc (tp);
+}
+
+/* Compute the function branch trace from Intel Processor Trace
+ format. */
+
+static void
+btrace_compute_ftrace_pt (struct thread_info *tp,
+ const struct btrace_data_pt *btrace)
+{
+ struct btrace_thread_info *btinfo;
+ struct pt_insn_decoder *decoder;
+ struct pt_config config;
+ int level, errcode;
+
+ if (btrace->size == 0)
+ return;
+
+ btinfo = &tp->btrace;
+ level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
+
+  pt_config_init (&config);
+ config.begin = btrace->data;
+ config.end = btrace->data + btrace->size;
+
+ config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
+ config.cpu.family = btrace->config.cpu.family;
+ config.cpu.model = btrace->config.cpu.model;
+ config.cpu.stepping = btrace->config.cpu.stepping;
+
+ errcode = pt_cpu_errata (&config.errata, &config.cpu);
+ if (errcode < 0)
+ error (_("Failed to configure the Intel Processor Trace decoder: %s."),
+ pt_errstr (pt_errcode (errcode)));
+
+ decoder = pt_insn_alloc_decoder (&config);
+ if (decoder == NULL)
+ error (_("Failed to allocate the Intel Processor Trace decoder."));
+
+ TRY
+ {
+ struct pt_image *image;
+
+ image = pt_insn_get_image(decoder);
+ if (image == NULL)
+ error (_("Failed to configure the Intel Processor Trace decoder."));
+
+ errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
+ if (errcode < 0)
+ error (_("Failed to configure the Intel Processor Trace decoder: "
+ "%s."), pt_errstr (pt_errcode (errcode)));
+
+ ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
+ &btinfo->ngaps);
+ }
+ CATCH (error, RETURN_MASK_ALL)
+ {
+ /* Indicate a gap in the trace if we quit trace processing. */
+ if (error.reason == RETURN_QUIT && btinfo->end != NULL)
+ {
+ btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
+ btinfo->ngaps++;
+ }
+
+ btrace_finalize_ftrace_pt (decoder, tp, level);
+
+ throw_exception (error);
+ }
+ END_CATCH
+
+ btrace_finalize_ftrace_pt (decoder, tp, level);
+}
+
+#else /* defined (HAVE_LIBIPT) */
+
+static void
+btrace_compute_ftrace_pt (struct thread_info *tp,
+ const struct btrace_data_pt *btrace)
+{
+ internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
+}
+
+#endif /* defined (HAVE_LIBIPT) */
+
+/* Compute the function branch trace from a block branch trace BTRACE for
+ a thread given by BTINFO. */
+
+static void
+btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
+{
+ DEBUG ("compute ftrace");
+
+ switch (btrace->format)
+ {
+ case BTRACE_FORMAT_NONE:
+ return;
+
+ case BTRACE_FORMAT_BTS:
+ btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
+ return;
+
+ case BTRACE_FORMAT_PT:
+ btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
+ return;
+ }
+
+  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
+}
+
/* Add an entry for the current PC. */
static void
btrace_add_pc (struct thread_info *tp)
{
- VEC (btrace_block_s) *btrace;
+ struct btrace_data btrace;
struct btrace_block *block;
struct regcache *regcache;
struct cleanup *cleanup;
regcache = get_thread_regcache (tp->ptid);
pc = regcache_read_pc (regcache);
- btrace = NULL;
- cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
+ btrace_data_init (&btrace);
+ btrace.format = BTRACE_FORMAT_BTS;
+ btrace.variant.bts.blocks = NULL;
- block = VEC_safe_push (btrace_block_s, btrace, NULL);
+ cleanup = make_cleanup_btrace_data (&btrace);
+
+ block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
block->begin = pc;
block->end = pc;
- btrace_compute_ftrace (&tp->btrace, btrace);
+ btrace_compute_ftrace (tp, &btrace);
do_cleanups (cleanup);
}
/* See btrace.h. */
void
-btrace_enable (struct thread_info *tp)
+btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
if (tp->btrace.target != NULL)
return;
- if (!target_supports_btrace ())
+#if !defined (HAVE_LIBIPT)
+ if (conf->format == BTRACE_FORMAT_PT)
+ error (_("GDB does not support Intel Processor Trace."));
+#endif /* !defined (HAVE_LIBIPT) */
+
+ if (!target_supports_btrace (conf->format))
error (_("Target does not support branch tracing."));
- DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ DEBUG ("enable thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
- tp->btrace.target = target_enable_btrace (tp->ptid);
+ tp->btrace.target = target_enable_btrace (tp->ptid, conf);
/* Add an entry for the current PC so we start tracing from where we
enabled it. */
/* See btrace.h. */
+const struct btrace_config *
+btrace_conf (const struct btrace_thread_info *btinfo)
+{
+ if (btinfo->target == NULL)
+ return NULL;
+
+ return target_btrace_conf (btinfo->target);
+}
+
+/* See btrace.h. */
+
void
btrace_disable (struct thread_info *tp)
{
if (btp->target == NULL)
return;
- DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ DEBUG ("disable thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
target_disable_btrace (btp->target);
btp->target = NULL;
if (btp->target == NULL)
return;
- DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
target_teardown_btrace (btp->target);
btp->target = NULL;
btrace_clear (tp);
}
-/* Adjust the block trace in order to stitch old and new trace together.
- BTRACE is the new delta trace between the last and the current stop.
- BTINFO is the old branch trace until the last stop.
- May modify BTRACE as well as the existing trace in BTINFO.
- Return 0 on success, -1 otherwise. */
+/* Stitch branch trace in BTS format. */
static int
-btrace_stitch_trace (VEC (btrace_block_s) **btrace,
- const struct btrace_thread_info *btinfo)
+btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
+ struct btrace_thread_info *btinfo;
struct btrace_function *last_bfun;
struct btrace_insn *last_insn;
btrace_block_s *first_new_block;
- /* If we don't have trace, there's nothing to do. */
- if (VEC_empty (btrace_block_s, *btrace))
- return 0;
-
+ btinfo = &tp->btrace;
last_bfun = btinfo->end;
gdb_assert (last_bfun != NULL);
+ gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
+
+ /* If the existing trace ends with a gap, we just glue the traces
+ together. We need to drop the last (i.e. chronologically first) block
+     of the new trace, though, since we can't fill in the start address.  */
+ if (VEC_empty (btrace_insn_s, last_bfun->insn))
+ {
+ VEC_pop (btrace_block_s, btrace->blocks);
+ return 0;
+ }
/* Beware that block trace starts with the most recent block, so the
chronologically first block in the new trace is the last block in
the new trace's block vector. */
- first_new_block = VEC_last (btrace_block_s, *btrace);
+ first_new_block = VEC_last (btrace_block_s, btrace->blocks);
last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
/* If the current PC at the end of the block is the same as in our current
In the second case, the delta trace vector should contain exactly one
entry for the partial block containing the current PC. Remove it. */
if (first_new_block->end == last_insn->pc
- && VEC_length (btrace_block_s, *btrace) == 1)
+ && VEC_length (btrace_block_s, btrace->blocks) == 1)
{
- VEC_pop (btrace_block_s, *btrace);
+ VEC_pop (btrace_block_s, btrace->blocks);
return 0;
}
been the only instruction in this function segment.
This violates the invariant but will be remedied shortly by
btrace_compute_ftrace when we add the new trace. */
+
+ /* The only case where this would hurt is if the entire trace consisted
+ of just that one instruction. If we remove it, we might turn the now
+ empty btrace function segment into a gap. But we don't want gaps at
+ the beginning. To avoid this, we remove the entire old trace. */
+ if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
+ btrace_clear (tp);
+
return 0;
}
+/* Adjust the block trace in order to stitch old and new trace together.
+ BTRACE is the new delta trace between the last and the current stop.
+ TP is the traced thread.
+   May modify BTRACE as well as the existing trace in TP.
+ Return 0 on success, -1 otherwise. */
+
+static int
+btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
+{
+ /* If we don't have trace, there's nothing to do. */
+ if (btrace_data_empty (btrace))
+ return 0;
+
+ switch (btrace->format)
+ {
+ case BTRACE_FORMAT_NONE:
+ return 0;
+
+ case BTRACE_FORMAT_BTS:
+ return btrace_stitch_bts (&btrace->variant.bts, tp);
+
+ case BTRACE_FORMAT_PT:
+ /* Delta reads are not supported. */
+ return -1;
+ }
+
+  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
+}
+
/* Clear the branch trace histories in BTINFO. */
static void
btinfo->replay = NULL;
}
-/* See btrace.h. */
+/* Clear the branch trace maintenance histories in BTINFO. */
-void
-btrace_fetch (struct thread_info *tp)
+static void
+btrace_maint_clear (struct btrace_thread_info *btinfo)
+{
+ switch (btinfo->data.format)
+ {
+ default:
+ break;
+
+ case BTRACE_FORMAT_BTS:
+ btinfo->maint.variant.bts.packet_history.begin = 0;
+ btinfo->maint.variant.bts.packet_history.end = 0;
+ break;
+
+#if defined (HAVE_LIBIPT)
+ case BTRACE_FORMAT_PT:
+ xfree (btinfo->maint.variant.pt.packets);
+
+ btinfo->maint.variant.pt.packets = NULL;
+ btinfo->maint.variant.pt.packet_history.begin = 0;
+ btinfo->maint.variant.pt.packet_history.end = 0;
+ break;
+#endif /* defined (HAVE_LIBIPT) */
+ }
+}
+
+/* See btrace.h. */
+
+void
+btrace_fetch (struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
struct btrace_target_info *tinfo;
- VEC (btrace_block_s) *btrace;
+ struct btrace_data btrace;
struct cleanup *cleanup;
int errcode;
- DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
- btrace = NULL;
btinfo = &tp->btrace;
tinfo = btinfo->target;
if (tinfo == NULL)
if (btinfo->replay != NULL)
return;
- cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
+ btrace_data_init (&btrace);
+ cleanup = make_cleanup_btrace_data (&btrace);
/* Let's first try to extend the trace we already have. */
if (btinfo->end != NULL)
if (errcode == 0)
{
/* Success. Let's try to stitch the traces together. */
- errcode = btrace_stitch_trace (&btrace, btinfo);
+ errcode = btrace_stitch_trace (&btrace, tp);
}
else
{
errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
/* If we got any new trace, discard what we have. */
- if (errcode == 0 && !VEC_empty (btrace_block_s, btrace))
+ if (errcode == 0 && !btrace_data_empty (&btrace))
btrace_clear (tp);
}
error (_("Failed to read branch trace."));
/* Compute the trace, provided we have any. */
- if (!VEC_empty (btrace_block_s, btrace))
+ if (!btrace_data_empty (&btrace))
{
+ /* Store the raw trace data. The stored data will be cleared in
+ btrace_clear, so we always append the new trace. */
+ btrace_data_append (&btinfo->data, &btrace);
+ btrace_maint_clear (btinfo);
+
btrace_clear_history (btinfo);
- btrace_compute_ftrace (btinfo, btrace);
+ btrace_compute_ftrace (tp, &btrace);
}
do_cleanups (cleanup);
struct btrace_thread_info *btinfo;
struct btrace_function *it, *trash;
- DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
+ DEBUG ("clear thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
/* Make sure btrace frames that may hold a pointer into the branch
trace data are destroyed. */
btinfo->begin = NULL;
btinfo->end = NULL;
+ btinfo->ngaps = 0;
+ /* Must clear the maint data before - it depends on BTINFO->DATA. */
+ btrace_maint_clear (btinfo);
+ btrace_data_clear (&btinfo->data);
btrace_clear_history (btinfo);
}
DEBUG ("free objfile");
- ALL_THREADS (tp)
+ ALL_NON_EXITED_THREADS (tp)
btrace_clear (tp);
}
const struct gdb_xml_element *element,
void *user_data, VEC (gdb_xml_value_s) *attributes)
{
- const char *version = xml_find_attribute (attributes, "version")->value;
+ const char *version
+ = (const char *) xml_find_attribute (attributes, "version")->value;
if (strcmp (version, "1.0") != 0)
gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
const struct gdb_xml_element *element,
void *user_data, VEC (gdb_xml_value_s) *attributes)
{
- VEC (btrace_block_s) **btrace;
+ struct btrace_data *btrace;
struct btrace_block *block;
ULONGEST *begin, *end;
- btrace = user_data;
- block = VEC_safe_push (btrace_block_s, *btrace, NULL);
+ btrace = (struct btrace_data *) user_data;
+
+ switch (btrace->format)
+ {
+ case BTRACE_FORMAT_BTS:
+ break;
+
+ case BTRACE_FORMAT_NONE:
+ btrace->format = BTRACE_FORMAT_BTS;
+ btrace->variant.bts.blocks = NULL;
+ break;
+
+ default:
+ gdb_xml_error (parser, _("Btrace format error."));
+ }
- begin = xml_find_attribute (attributes, "begin")->value;
- end = xml_find_attribute (attributes, "end")->value;
+ begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
+ end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
+ block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
block->begin = *begin;
block->end = *end;
}
+/* Parse a "raw" xml record. */
+
+static void
+parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
+ gdb_byte **pdata, size_t *psize)
+{
+ struct cleanup *cleanup;
+ gdb_byte *data, *bin;
+ size_t len, size;
+
+ len = strlen (body_text);
+ if (len % 2 != 0)
+ gdb_xml_error (parser, _("Bad raw data size."));
+
+ size = len / 2;
+
+ bin = data = (gdb_byte *) xmalloc (size);
+ cleanup = make_cleanup (xfree, data);
+
+ /* We use hex encoding - see common/rsp-low.h. */
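+  /* E.g., the two characters "2a" decode to the single byte 0x2a.  */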
+ while (len > 0)
+ {
+ char hi, lo;
+
+ hi = *body_text++;
+ lo = *body_text++;
+
+ if (hi == 0 || lo == 0)
+ gdb_xml_error (parser, _("Bad hex encoding."));
+
+ *bin++ = fromhex (hi) * 16 + fromhex (lo);
+ len -= 2;
+ }
+
+ discard_cleanups (cleanup);
+
+ *pdata = data;
+ *psize = size;
+}
+
+/* Parse a btrace pt-config "cpu" xml record. */
+
+static void
+parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data,
+ VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_data *btrace;
+ const char *vendor;
+ ULONGEST *family, *model, *stepping;
+
+ vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
+ family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
+ model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
+ stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
+
+ btrace = (struct btrace_data *) user_data;
+
+ if (strcmp (vendor, "GenuineIntel") == 0)
+ btrace->variant.pt.config.cpu.vendor = CV_INTEL;
+
+ btrace->variant.pt.config.cpu.family = *family;
+ btrace->variant.pt.config.cpu.model = *model;
+ btrace->variant.pt.config.cpu.stepping = *stepping;
+}
+
+/* Parse a btrace pt "raw" xml record. */
+
+static void
+parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, const char *body_text)
+{
+ struct btrace_data *btrace;
+
+ btrace = (struct btrace_data *) user_data;
+ parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
+ &btrace->variant.pt.size);
+}
+
+/* Parse a btrace "pt" xml record. */
+
+static void
+parse_xml_btrace_pt (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_data *btrace;
+
+ btrace = (struct btrace_data *) user_data;
+ btrace->format = BTRACE_FORMAT_PT;
+ btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
+ btrace->variant.pt.data = NULL;
+ btrace->variant.pt.size = 0;
+}
+
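+/* A rough sketch of the XML handled by the parsers below (the values shown
+   are illustrative only; the <raw> body carries hex-encoded trace data):
+
+     <btrace version="1.0">
+       <block begin="0x4004d6" end="0x4004da"/>
+     </btrace>
+
+   or, for Intel Processor Trace:
+
+     <btrace version="1.0">
+       <pt>
+         <pt-config>
+           <cpu vendor="GenuineIntel" family="6" model="61" stepping="4"/>
+         </pt-config>
+         <raw>...</raw>
+       </pt>
+     </btrace>  */
+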
static const struct gdb_xml_attribute block_attributes[] = {
{ "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
{ "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
{ NULL, GDB_XML_AF_NONE, NULL, NULL }
};
+static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
+ { "vendor", GDB_XML_AF_NONE, NULL, NULL },
+ { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_pt_config_children[] = {
+ { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_pt_config_cpu, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_pt_children[] = {
+ { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
+ NULL },
+ { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
static const struct gdb_xml_attribute btrace_attributes[] = {
{ "version", GDB_XML_AF_NONE, NULL, NULL },
{ NULL, GDB_XML_AF_NONE, NULL, NULL }
static const struct gdb_xml_element btrace_children[] = {
{ "block", block_attributes, NULL,
GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
+ { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
+ NULL },
{ NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
/* See btrace.h. */
-VEC (btrace_block_s) *
-parse_xml_btrace (const char *buffer)
+void
+parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
- VEC (btrace_block_s) *btrace = NULL;
struct cleanup *cleanup;
int errcode;
#if defined (HAVE_LIBEXPAT)
- cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
+ btrace->format = BTRACE_FORMAT_NONE;
+
+ cleanup = make_cleanup_btrace_data (btrace);
errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
- buffer, &btrace);
+ buffer, btrace);
if (errcode != 0)
error (_("Error parsing branch trace."));
error (_("Cannot process branch trace. XML parsing is not supported."));
#endif /* !defined (HAVE_LIBEXPAT) */
+}
+
+#if defined (HAVE_LIBEXPAT)
+
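+/* A rough sketch of the btrace-conf XML handled below (values are
+   illustrative only; a "pt" element takes the same optional "size"
+   attribute):
+
+     <btrace-conf version="1.0">
+       <bts size="0x10000"/>
+     </btrace-conf>  */
+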
+/* Parse a btrace-conf "bts" xml record. */
+
+static void
+parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_config *conf;
+ struct gdb_xml_value *size;
+
+ conf = (struct btrace_config *) user_data;
+ conf->format = BTRACE_FORMAT_BTS;
+ conf->bts.size = 0;
+
+ size = xml_find_attribute (attributes, "size");
+ if (size != NULL)
+ conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
+}
+
+/* Parse a btrace-conf "pt" xml record. */
- return btrace;
+static void
+parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_config *conf;
+ struct gdb_xml_value *size;
+
+ conf = (struct btrace_config *) user_data;
+ conf->format = BTRACE_FORMAT_PT;
+ conf->pt.size = 0;
+
+ size = xml_find_attribute (attributes, "size");
+ if (size != NULL)
+ conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
+}
+
+static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
+ { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
+ { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_conf_children[] = {
+ { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_conf_bts, NULL },
+ { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_conf_pt, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_attribute btrace_conf_attributes[] = {
+ { "version", GDB_XML_AF_NONE, NULL, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_conf_elements[] = {
+ { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
+ GDB_XML_EF_NONE, NULL, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+#endif /* defined (HAVE_LIBEXPAT) */
+
+/* See btrace.h. */
+
+void
+parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
+{
+ int errcode;
+
+#if defined (HAVE_LIBEXPAT)
+
+ errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
+ btrace_conf_elements, xml, conf);
+ if (errcode != 0)
+ error (_("Error parsing branch trace configuration."));
+
+#else /* !defined (HAVE_LIBEXPAT) */
+
+ error (_("XML parsing is not supported."));
+
+#endif /* !defined (HAVE_LIBEXPAT) */
}
/* See btrace.h. */
index = it->index;
bfun = it->function;
+ /* Check if the iterator points to a gap in the trace. */
+ if (bfun->errcode != 0)
+ return NULL;
+
/* The index is within the bounds of this function's instruction vector. */
end = VEC_length (btrace_insn_s, bfun->insn);
gdb_assert (0 < end);
const struct btrace_function *bfun;
bfun = it->function;
+
+ /* Return zero if the iterator points to a gap in the trace. */
+ if (bfun->errcode != 0)
+ return 0;
+
return bfun->insn_offset + it->index;
}
if (bfun == NULL)
error (_("No trace."));
- /* The last instruction in the last function is the current instruction.
- We point to it - it is one past the end of the execution trace. */
length = VEC_length (btrace_insn_s, bfun->insn);
+  /* The last function is either a gap or contains the current instruction,
+     which is one past the end of the execution trace; ignore it.  */
+ if (length > 0)
+ length -= 1;
+
it->function = bfun;
- it->index = length - 1;
+ it->index = length;
}
/* See btrace.h. */
end = VEC_length (btrace_insn_s, bfun->insn);
+ /* An empty function segment represents a gap in the trace. We count
+ it as one instruction. */
+ if (end == 0)
+ {
+ const struct btrace_function *next;
+
+ next = bfun->flow.next;
+ if (next == NULL)
+ break;
+
+ stride -= 1;
+ steps += 1;
+
+ bfun = next;
+ index = 0;
+
+ continue;
+ }
+
gdb_assert (0 < end);
gdb_assert (index < end);
bfun = prev;
index = VEC_length (btrace_insn_s, bfun->insn);
- /* There is at least one instruction in this function segment. */
- gdb_assert (index > 0);
+ /* An empty function segment represents a gap in the trace. We count
+ it as one instruction. */
+ if (index == 0)
+ {
+ stride -= 1;
+ steps += 1;
+
+ continue;
+ }
}
/* Advance the iterator as far as possible within this segment. */
adv = min (index, stride);
+
stride -= adv;
index -= adv;
steps += adv;
lnum = btrace_insn_number (lhs);
rnum = btrace_insn_number (rhs);
+  /* A gap has an instruction number of zero.  Things get more complicated
+     if gaps are involved.
+
+ We take the instruction number offset from the iterator's function.
+ This is the number of the first instruction after the gap.
+
+ This is OK as long as both lhs and rhs point to gaps. If only one of
+ them does, we need to adjust the number based on the other's regular
+ instruction number. Otherwise, a gap might compare equal to an
+ instruction. */
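+
+  /* For example, if LHS is a gap whose function segment has instruction
+     offset 10 and RHS is the instruction with number 10, LNUM becomes 10
+     and is then decremented to 9, so the gap orders before the instruction
+     that follows it.  */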
+
+ if (lnum == 0 && rnum == 0)
+ {
+ lnum = lhs->function->insn_offset;
+ rnum = rhs->function->insn_offset;
+ }
+ else if (lnum == 0)
+ {
+ lnum = lhs->function->insn_offset;
+
+ if (lnum == rnum)
+ lnum -= 1;
+ }
+ else if (rnum == 0)
+ {
+ rnum = rhs->function->insn_offset;
+
+ if (rnum == lnum)
+ rnum -= 1;
+ }
+
return (int) (lnum - rnum);
}
unsigned int number)
{
const struct btrace_function *bfun;
- unsigned int end;
+ unsigned int end, length;
for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
- if (bfun->insn_offset <= number)
- break;
+ {
+ /* Skip gaps. */
+ if (bfun->errcode != 0)
+ continue;
+
+ if (bfun->insn_offset <= number)
+ break;
+ }
if (bfun == NULL)
return 0;
- end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
+ length = VEC_length (btrace_insn_s, bfun->insn);
+ gdb_assert (length > 0);
+
+ end = bfun->insn_offset + length;
if (end <= number)
return 0;
const struct btrace_insn_iterator *end)
{
if (btinfo->insn_history == NULL)
- btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
+ btinfo->insn_history = XCNEW (struct btrace_insn_history);
btinfo->insn_history->begin = *begin;
btinfo->insn_history->end = *end;
gdb_assert (begin->btinfo == end->btinfo);
if (btinfo->call_history == NULL)
- btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
+ btinfo->call_history = XCNEW (struct btrace_call_history);
btinfo->call_history->begin = *begin;
btinfo->call_history->end = *end;
return btrace_insn_cmp (&begin, &end) == 0;
}
+
+/* Forward the cleanup request. */
+
+static void
+do_btrace_data_cleanup (void *arg)
+{
+ btrace_data_fini ((struct btrace_data *) arg);
+}
+
+/* See btrace.h. */
+
+struct cleanup *
+make_cleanup_btrace_data (struct btrace_data *data)
+{
+ return make_cleanup (do_btrace_data_cleanup, data);
+}
+
+#if defined (HAVE_LIBIPT)
+
+/* Print a single packet. */
+
+static void
+pt_print_packet (const struct pt_packet *packet)
+{
+ switch (packet->type)
+ {
+ default:
+ printf_unfiltered (("[??: %x]"), packet->type);
+ break;
+
+ case ppt_psb:
+ printf_unfiltered (("psb"));
+ break;
+
+ case ppt_psbend:
+ printf_unfiltered (("psbend"));
+ break;
+
+ case ppt_pad:
+ printf_unfiltered (("pad"));
+ break;
+
+ case ppt_tip:
+ printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
+ packet->payload.ip.ipc,
+ packet->payload.ip.ip);
+ break;
+
+ case ppt_tip_pge:
+ printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
+ packet->payload.ip.ipc,
+ packet->payload.ip.ip);
+ break;
+
+ case ppt_tip_pgd:
+ printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
+ packet->payload.ip.ipc,
+ packet->payload.ip.ip);
+ break;
+
+ case ppt_fup:
+ printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
+ packet->payload.ip.ipc,
+ packet->payload.ip.ip);
+ break;
+
+ case ppt_tnt_8:
+ printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
+ packet->payload.tnt.bit_size,
+ packet->payload.tnt.payload);
+ break;
+
+ case ppt_tnt_64:
+ printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
+ packet->payload.tnt.bit_size,
+ packet->payload.tnt.payload);
+ break;
+
+ case ppt_pip:
+ printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
+ packet->payload.pip.nr ? (" nr") : (""));
+ break;
+
+ case ppt_tsc:
+ printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
+ break;
+
+ case ppt_cbr:
+ printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
+ break;
+
+ case ppt_mode:
+ switch (packet->payload.mode.leaf)
+ {
+ default:
+ printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
+ break;
+
+ case pt_mol_exec:
+ printf_unfiltered (("mode.exec%s%s"),
+ packet->payload.mode.bits.exec.csl
+ ? (" cs.l") : (""),
+ packet->payload.mode.bits.exec.csd
+ ? (" cs.d") : (""));
+ break;
+
+ case pt_mol_tsx:
+ printf_unfiltered (("mode.tsx%s%s"),
+ packet->payload.mode.bits.tsx.intx
+ ? (" intx") : (""),
+ packet->payload.mode.bits.tsx.abrt
+ ? (" abrt") : (""));
+ break;
+ }
+ break;
+
+ case ppt_ovf:
+ printf_unfiltered (("ovf"));
+ break;
+
+ case ppt_stop:
+ printf_unfiltered (("stop"));
+ break;
+
+ case ppt_vmcs:
+ printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
+ break;
+
+ case ppt_tma:
+ printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
+ packet->payload.tma.fc);
+ break;
+
+ case ppt_mtc:
+ printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
+ break;
+
+ case ppt_cyc:
+ printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
+ break;
+
+ case ppt_mnt:
+ printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
+ break;
+ }
+}
+
+/* Decode packets into MAINT using DECODER. */
+
+static void
+btrace_maint_decode_pt (struct btrace_maint_info *maint,
+ struct pt_packet_decoder *decoder)
+{
+ int errcode;
+
+ for (;;)
+ {
+ struct btrace_pt_packet packet;
+
+ errcode = pt_pkt_sync_forward (decoder);
+ if (errcode < 0)
+ break;
+
+ for (;;)
+ {
+ pt_pkt_get_offset (decoder, &packet.offset);
+
+ errcode = pt_pkt_next (decoder, &packet.packet,
+ sizeof(packet.packet));
+ if (errcode < 0)
+ break;
+
+ if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
+ {
+ packet.errcode = pt_errcode (errcode);
+ VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
+ &packet);
+ }
+ }
+
+ if (errcode == -pte_eos)
+ break;
+
+ packet.errcode = pt_errcode (errcode);
+ VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
+ &packet);
+
+ warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
+ packet.offset, pt_errstr (packet.errcode));
+ }
+
+ if (errcode != -pte_eos)
+ warning (_("Failed to synchronize onto the Intel Processor Trace "
+ "stream: %s."), pt_errstr (pt_errcode (errcode)));
+}
+
+/* Update the packet history in BTINFO. */
+
+static void
+btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
+{
+ struct pt_packet_decoder *decoder;
+ struct btrace_data_pt *pt;
+ struct pt_config config;
+ int errcode;
+
+ pt = &btinfo->data.variant.pt;
+
+ /* Nothing to do if there is no trace. */
+ if (pt->size == 0)
+ return;
+
+  memset (&config, 0, sizeof (config));
+
+ config.size = sizeof (config);
+ config.begin = pt->data;
+ config.end = pt->data + pt->size;
+
+ config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
+ config.cpu.family = pt->config.cpu.family;
+ config.cpu.model = pt->config.cpu.model;
+ config.cpu.stepping = pt->config.cpu.stepping;
+
+ errcode = pt_cpu_errata (&config.errata, &config.cpu);
+ if (errcode < 0)
+ error (_("Failed to configure the Intel Processor Trace decoder: %s."),
+ pt_errstr (pt_errcode (errcode)));
+
+ decoder = pt_pkt_alloc_decoder (&config);
+ if (decoder == NULL)
+ error (_("Failed to allocate the Intel Processor Trace decoder."));
+
+ TRY
+ {
+ btrace_maint_decode_pt (&btinfo->maint, decoder);
+ }
+ CATCH (except, RETURN_MASK_ALL)
+ {
+ pt_pkt_free_decoder (decoder);
+
+ if (except.reason < 0)
+ throw_exception (except);
+ }
+ END_CATCH
+
+ pt_pkt_free_decoder (decoder);
+}
+
+#endif /* defined (HAVE_LIBIPT) */
+
+/* Update the packet maintenance information for BTINFO and store the
+ low and high bounds into BEGIN and END, respectively.
+ Store the current iterator state into FROM and TO. */
+
+static void
+btrace_maint_update_packets (struct btrace_thread_info *btinfo,
+ unsigned int *begin, unsigned int *end,
+ unsigned int *from, unsigned int *to)
+{
+ switch (btinfo->data.format)
+ {
+ default:
+ *begin = 0;
+ *end = 0;
+ *from = 0;
+ *to = 0;
+ break;
+
+ case BTRACE_FORMAT_BTS:
+ /* Nothing to do - we operate directly on BTINFO->DATA. */
+ *begin = 0;
+ *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
+ *from = btinfo->maint.variant.bts.packet_history.begin;
+ *to = btinfo->maint.variant.bts.packet_history.end;
+ break;
+
+#if defined (HAVE_LIBIPT)
+ case BTRACE_FORMAT_PT:
+ if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
+ btrace_maint_update_pt_packets (btinfo);
+
+ *begin = 0;
+ *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
+ *from = btinfo->maint.variant.pt.packet_history.begin;
+ *to = btinfo->maint.variant.pt.packet_history.end;
+ break;
+#endif /* defined (HAVE_LIBIPT) */
+ }
+}
+
+/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
+ update the current iterator position. */
+
+static void
+btrace_maint_print_packets (struct btrace_thread_info *btinfo,
+ unsigned int begin, unsigned int end)
+{
+ switch (btinfo->data.format)
+ {
+ default:
+ break;
+
+ case BTRACE_FORMAT_BTS:
+ {
+ VEC (btrace_block_s) *blocks;
+ unsigned int blk;
+
+ blocks = btinfo->data.variant.bts.blocks;
+ for (blk = begin; blk < end; ++blk)
+ {
+ const btrace_block_s *block;
+
+ block = VEC_index (btrace_block_s, blocks, blk);
+
+ printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
+ core_addr_to_string_nz (block->begin),
+ core_addr_to_string_nz (block->end));
+ }
+
+ btinfo->maint.variant.bts.packet_history.begin = begin;
+ btinfo->maint.variant.bts.packet_history.end = end;
+ }
+ break;
+
+#if defined (HAVE_LIBIPT)
+ case BTRACE_FORMAT_PT:
+ {
+ VEC (btrace_pt_packet_s) *packets;
+ unsigned int pkt;
+
+ packets = btinfo->maint.variant.pt.packets;
+ for (pkt = begin; pkt < end; ++pkt)
+ {
+ const struct btrace_pt_packet *packet;
+
+ packet = VEC_index (btrace_pt_packet_s, packets, pkt);
+
+ printf_unfiltered ("%u\t", pkt);
+ printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
+
+ if (packet->errcode == pte_ok)
+ pt_print_packet (&packet->packet);
+ else
+ printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
+
+ printf_unfiltered ("\n");
+ }
+
+ btinfo->maint.variant.pt.packet_history.begin = begin;
+ btinfo->maint.variant.pt.packet_history.end = end;
+ }
+ break;
+#endif /* defined (HAVE_LIBIPT) */
+ }
+}
+
+/* Read a number from an argument string. */
+
+static unsigned int
+get_uint (char **arg)
+{
+ char *begin, *end, *pos;
+ unsigned long number;
+
+ begin = *arg;
+ pos = skip_spaces (begin);
+
+ if (!isdigit (*pos))
+ error (_("Expected positive number, got: %s."), pos);
+
+ number = strtoul (pos, &end, 10);
+ if (number > UINT_MAX)
+ error (_("Number too big."));
+
+ *arg += (end - begin);
+
+ return (unsigned int) number;
+}
+
+/* Read a context size from an argument string. */
+
+static int
+get_context_size (char **arg)
+{
+  char *pos;
+
+ pos = skip_spaces (*arg);
+
+ if (!isdigit (*pos))
+ error (_("Expected positive number, got: %s."), pos);
+
+ return strtol (pos, arg, 10);
+}
+
+/* Complain about junk at the end of an argument string. */
+
+static void
+no_chunk (char *arg)
+{
+ if (*arg != 0)
+ error (_("Junk after argument: %s."), arg);
+}
+
+/* The "maintenance btrace packet-history" command. */
+
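+/* For example, with the default context size of ten packets:
+     "maint btrace packet-history 10" prints packets 10 to 19,
+     "maint btrace packet-history 10,20" prints packets 10 to 20,
+     "maint btrace packet-history 10,+5" prints packets 10 to 14,
+     "maint btrace packet-history 10,-5" prints packets 6 to 10.  */
+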
+static void
+maint_btrace_packet_history_cmd (char *arg, int from_tty)
+{
+ struct btrace_thread_info *btinfo;
+ struct thread_info *tp;
+ unsigned int size, begin, end, from, to;
+
+ tp = find_thread_ptid (inferior_ptid);
+ if (tp == NULL)
+ error (_("No thread."));
+
+ size = 10;
+ btinfo = &tp->btrace;
+
+ btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
+ if (begin == end)
+ {
+ printf_unfiltered (_("No trace.\n"));
+ return;
+ }
+
+ if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
+ {
+ from = to;
+
+ if (end - from < size)
+ size = end - from;
+ to = from + size;
+ }
+ else if (strcmp (arg, "-") == 0)
+ {
+ to = from;
+
+ if (to - begin < size)
+ size = to - begin;
+ from = to - size;
+ }
+ else
+ {
+ from = get_uint (&arg);
+ if (end <= from)
+ error (_("'%u' is out of range."), from);
+
+ arg = skip_spaces (arg);
+ if (*arg == ',')
+ {
+ arg = skip_spaces (++arg);
+
+ if (*arg == '+')
+ {
+ arg += 1;
+ size = get_context_size (&arg);
+
+ no_chunk (arg);
+
+ if (end - from < size)
+ size = end - from;
+ to = from + size;
+ }
+ else if (*arg == '-')
+ {
+ arg += 1;
+ size = get_context_size (&arg);
+
+ no_chunk (arg);
+
+ /* Include the packet given as first argument. */
+ from += 1;
+ to = from;
+
+ if (to - begin < size)
+ size = to - begin;
+ from = to - size;
+ }
+ else
+ {
+ to = get_uint (&arg);
+
+ /* Include the packet at the second argument and silently
+ truncate the range. */
+ if (to < end)
+ to += 1;
+ else
+ to = end;
+
+ no_chunk (arg);
+ }
+ }
+ else
+ {
+ no_chunk (arg);
+
+ if (end - from < size)
+ size = end - from;
+ to = from + size;
+ }
+
+ dont_repeat ();
+ }
+
+ btrace_maint_print_packets (btinfo, from, to);
+}
+
+/* The "maintenance btrace clear-packet-history" command. */
+
+static void
+maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
+{
+ struct btrace_thread_info *btinfo;
+ struct thread_info *tp;
+
+ if (args != NULL && *args != 0)
+ error (_("Invalid argument."));
+
+ tp = find_thread_ptid (inferior_ptid);
+ if (tp == NULL)
+ error (_("No thread."));
+
+ btinfo = &tp->btrace;
+
+ /* Must clear the maint data before - it depends on BTINFO->DATA. */
+ btrace_maint_clear (btinfo);
+ btrace_data_clear (&btinfo->data);
+}
+
+/* The "maintenance btrace clear" command. */
+
+static void
+maint_btrace_clear_cmd (char *args, int from_tty)
+{
+ struct btrace_thread_info *btinfo;
+ struct thread_info *tp;
+
+ if (args != NULL && *args != 0)
+ error (_("Invalid argument."));
+
+ tp = find_thread_ptid (inferior_ptid);
+ if (tp == NULL)
+ error (_("No thread."));
+
+ btrace_clear (tp);
+}
+
+/* The "maintenance btrace" command. */
+
+static void
+maint_btrace_cmd (char *args, int from_tty)
+{
+ help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
+ gdb_stdout);
+}
+
+/* The "maintenance set btrace" command. */
+
+static void
+maint_btrace_set_cmd (char *args, int from_tty)
+{
+ help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
+ gdb_stdout);
+}
+
+/* The "maintenance show btrace" command. */
+
+static void
+maint_btrace_show_cmd (char *args, int from_tty)
+{
+ help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
+ all_commands, gdb_stdout);
+}
+
+/* The "maintenance set btrace pt" command. */
+
+static void
+maint_btrace_pt_set_cmd (char *args, int from_tty)
+{
+ help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
+ all_commands, gdb_stdout);
+}
+
+/* The "maintenance show btrace pt" command. */
+
+static void
+maint_btrace_pt_show_cmd (char *args, int from_tty)
+{
+ help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
+ all_commands, gdb_stdout);
+}
+
+/* The "maintenance info btrace" command. */
+
+static void
+maint_info_btrace_cmd (char *args, int from_tty)
+{
+ struct btrace_thread_info *btinfo;
+ struct thread_info *tp;
+ const struct btrace_config *conf;
+
+ if (args != NULL && *args != 0)
+ error (_("Invalid argument."));
+
+ tp = find_thread_ptid (inferior_ptid);
+ if (tp == NULL)
+ error (_("No thread."));
+
+ btinfo = &tp->btrace;
+
+ conf = btrace_conf (btinfo);
+ if (conf == NULL)
+ error (_("No btrace configuration."));
+
+ printf_unfiltered (_("Format: %s.\n"),
+ btrace_format_string (conf->format));
+
+ switch (conf->format)
+ {
+ default:
+ break;
+
+ case BTRACE_FORMAT_BTS:
+ printf_unfiltered (_("Number of packets: %u.\n"),
+ VEC_length (btrace_block_s,
+ btinfo->data.variant.bts.blocks));
+ break;
+
+#if defined (HAVE_LIBIPT)
+ case BTRACE_FORMAT_PT:
+ {
+ struct pt_version version;
+
+ version = pt_library_version ();
+ printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
+ version.minor, version.build,
+ version.ext != NULL ? version.ext : "");
+
+ btrace_maint_update_pt_packets (btinfo);
+ printf_unfiltered (_("Number of packets: %u.\n"),
+ VEC_length (btrace_pt_packet_s,
+ btinfo->maint.variant.pt.packets));
+ }
+ break;
+#endif /* defined (HAVE_LIBIPT) */
+ }
+}
+
+/* The "maint show btrace pt skip-pad" show value function. */
+
+static void
+show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
+ struct cmd_list_element *c,
+ const char *value)
+{
+ fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
+}
+
+
+/* Initialize btrace maintenance commands. */
+
+void _initialize_btrace (void);
+void
+_initialize_btrace (void)
+{
+ add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
+ _("Info about branch tracing data."), &maintenanceinfolist);
+
+ add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
+ _("Branch tracing maintenance commands."),
+ &maint_btrace_cmdlist, "maintenance btrace ",
+ 0, &maintenancelist);
+
+ add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
+Set branch tracing specific variables."),
+ &maint_btrace_set_cmdlist, "maintenance set btrace ",
+ 0, &maintenance_set_cmdlist);
+
+ add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
+Set Intel Processor Trace specific variables."),
+ &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
+ 0, &maint_btrace_set_cmdlist);
+
+ add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
+Show branch tracing specific variables."),
+ &maint_btrace_show_cmdlist, "maintenance show btrace ",
+ 0, &maintenance_show_cmdlist);
+
+ add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
+Show Intel Processor Trace specific variables."),
+ &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
+ 0, &maint_btrace_show_cmdlist);
+
+ add_setshow_boolean_cmd ("skip-pad", class_maintenance,
+ &maint_btrace_pt_skip_pad, _("\
+Set whether PAD packets should be skipped in the btrace packet history."), _("\
+Show whether PAD packets should be skipped in the btrace packet history."),_("\
+When enabled, PAD packets are ignored in the btrace packet history."),
+ NULL, show_maint_btrace_pt_skip_pad,
+ &maint_btrace_pt_set_cmdlist,
+ &maint_btrace_pt_show_cmdlist);
+
+ add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
+ _("Print the raw branch tracing data.\n\
+With no argument, print ten more packets after the previous ten-line print.\n\
+With '-' as argument, print ten packets before the previous ten-line print.\n\
+One argument specifies the starting packet of a ten-line print.\n\
+Two arguments, separated by a comma, specify the starting and ending packets \
+to print.\n\
+If the second argument is preceded by '+' or '-', it specifies the distance \
+from the first.\n"),
+ &maint_btrace_cmdlist);
+
+ add_cmd ("clear-packet-history", class_maintenance,
+ maint_btrace_clear_packet_history_cmd,
+ _("Clears the branch tracing packet history.\n\
+Discards the raw branch tracing data but not the execution history data.\n\
+"),
+ &maint_btrace_cmdlist);
+
+ add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
+ _("Clears the branch tracing data.\n\
+Discards the raw branch tracing data and the execution history data.\n\
+The next 'record' command will fetch the branch tracing data anew.\n\
+"),
+ &maint_btrace_cmdlist);
+
+}