// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt_decoder.c: Intel Processor Trace support
 * Copyright (c) 2013-2014, Intel Corporation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/zalloc.h>

#include "../auxtrace.h"

#include "intel-pt-insn-decoder.h"
#include "intel-pt-pkt-decoder.h"
#include "intel-pt-decoder.h"
#include "intel-pt-log.h"
#define BITULL(x) (1ULL << (x))

/* IA32_RTIT_CTL MSR bits */
#define INTEL_PT_CYC_ENABLE		BITULL(1)
#define INTEL_PT_CYC_THRESHOLD		(BITULL(22) | BITULL(21) | BITULL(20) | BITULL(19))
#define INTEL_PT_CYC_THRESHOLD_SHIFT	19

#define INTEL_PT_BLK_SIZE	1024

#define BIT63			(((uint64_t)1 << 63))

#define SEVEN_BYTES		0xffffffffffffffULL

#define NO_VMCS			0xffffffffffULL

#define INTEL_PT_RETURN		1

/*
 * Default maximum number of loops with no packets consumed i.e. stuck in a
 * loop
 */
#define INTEL_PT_MAX_LOOPS	100000
struct intel_pt_blk {
	struct intel_pt_blk *prev;
	uint64_t ip[INTEL_PT_BLK_SIZE];
};

struct intel_pt_stack {
	struct intel_pt_blk *blk;
	struct intel_pt_blk *spare;
	int pos;
};
enum intel_pt_p_once {
	INTEL_PT_PRT_ONCE_UNK_VMCS,
	INTEL_PT_PRT_ONCE_ERANGE,
};
enum intel_pt_pkt_state {
	INTEL_PT_STATE_NO_PSB,
	INTEL_PT_STATE_NO_IP,
	INTEL_PT_STATE_ERR_RESYNC,
	INTEL_PT_STATE_IN_SYNC,
	INTEL_PT_STATE_TNT_CONT,
	INTEL_PT_STATE_TNT,
	INTEL_PT_STATE_TIP,
	INTEL_PT_STATE_TIP_PGD,
	INTEL_PT_STATE_FUP,
	INTEL_PT_STATE_FUP_NO_TIP,
	INTEL_PT_STATE_FUP_IN_PSB,
	INTEL_PT_STATE_RESAMPLE,
	INTEL_PT_STATE_VM_TIME_CORRELATION,
};
static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
{
	switch (pkt_state) {
	case INTEL_PT_STATE_NO_PSB:
	case INTEL_PT_STATE_NO_IP:
	case INTEL_PT_STATE_ERR_RESYNC:
	case INTEL_PT_STATE_IN_SYNC:
	case INTEL_PT_STATE_TNT_CONT:
	case INTEL_PT_STATE_RESAMPLE:
	case INTEL_PT_STATE_VM_TIME_CORRELATION:
		return true;
	case INTEL_PT_STATE_TNT:
	case INTEL_PT_STATE_TIP:
	case INTEL_PT_STATE_TIP_PGD:
	case INTEL_PT_STATE_FUP:
	case INTEL_PT_STATE_FUP_NO_TIP:
	case INTEL_PT_STATE_FUP_IN_PSB:
		return false;
	default:
		return true;
	}
}
#ifdef INTEL_PT_STRICT
#define INTEL_PT_STATE_ERR1	INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR3	INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR4	INTEL_PT_STATE_NO_PSB
#else
#define INTEL_PT_STATE_ERR1	(decoder->pkt_state)
#define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_IP
#define INTEL_PT_STATE_ERR3	INTEL_PT_STATE_ERR_RESYNC
#define INTEL_PT_STATE_ERR4	INTEL_PT_STATE_IN_SYNC
#endif
117 struct intel_pt_decoder {
118 int (*get_trace)(struct intel_pt_buffer *buffer, void *data);
119 int (*walk_insn)(struct intel_pt_insn *intel_pt_insn,
120 uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip,
121 uint64_t max_insn_cnt, void *data);
122 bool (*pgd_ip)(uint64_t ip, void *data);
123 int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
124 struct intel_pt_vmcs_info *(*findnew_vmcs_info)(void *data, uint64_t vmcs);
126 struct intel_pt_state state;
127 const unsigned char *buf;
129 bool return_compression;
140 bool emulated_ptwrite;
141 bool vm_time_correlation;
142 bool vm_tm_corr_dry_run;
143 bool vm_tm_corr_reliable;
144 bool vm_tm_corr_same_buf;
145 bool vm_tm_corr_continuous;
150 enum intel_pt_param_flags flags;
154 uint64_t pip_payload;
156 uint64_t tsc_timestamp;
157 uint64_t ref_timestamp;
158 uint64_t buf_timestamp;
159 uint64_t sample_timestamp;
161 uint64_t ctc_timestamp;
164 uint64_t cyc_ref_timestamp;
165 uint64_t first_timestamp;
166 uint64_t last_reliable_timestamp;
171 uint32_t tsc_ctc_ratio_n;
172 uint32_t tsc_ctc_ratio_d;
173 uint32_t tsc_ctc_mult;
175 uint32_t ctc_rem_mask;
177 struct intel_pt_stack stack;
178 enum intel_pt_pkt_state pkt_state;
179 enum intel_pt_pkt_ctx pkt_ctx;
180 enum intel_pt_pkt_ctx prev_pkt_ctx;
181 enum intel_pt_blk_type blk_type;
183 struct intel_pt_pkt packet;
184 struct intel_pt_pkt tnt;
187 int last_packet_type;
189 unsigned int cbr_seen;
190 unsigned int max_non_turbo_ratio;
191 double max_non_turbo_ratio_fp;
192 double cbr_cyc_to_tsc;
193 double calc_cyc_to_tsc;
194 bool have_calc_cyc_to_tsc;
196 unsigned int insn_bytes;
198 enum intel_pt_period_type period_type;
199 uint64_t tot_insn_cnt;
200 uint64_t period_insn_cnt;
201 uint64_t period_mask;
202 uint64_t period_ticks;
203 uint64_t last_masked_timestamp;
204 uint64_t tot_cyc_cnt;
205 uint64_t sample_tot_cyc_cnt;
206 uint64_t base_cyc_cnt;
207 uint64_t cyc_cnt_timestamp;
209 uint64_t cyc_threshold;
211 bool continuous_period;
213 bool set_fup_tx_flags;
221 bool set_fup_mode_exec;
223 unsigned int fup_tx_flags;
224 unsigned int tx_flags;
225 uint64_t fup_ptw_payload;
226 uint64_t fup_mwait_payload;
227 uint64_t fup_pwre_payload;
228 uint64_t cbr_payload;
229 uint64_t timestamp_insn_cnt;
230 uint64_t sample_insn_cnt;
232 struct intel_pt_pkt fup_cfe_pkt;
238 const unsigned char *next_buf;
240 unsigned char temp_buf[INTEL_PT_PKT_MAX_SZ];
	struct intel_pt_evd evd[INTEL_PT_MAX_EVDS];
};
static uint64_t intel_pt_lower_power_of_2(uint64_t x)
{
	int i;

	for (i = 0; x != 1; i++)
		x >>= 1;

	return x << i;
}
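/*
 * Example: intel_pt_lower_power_of_2(100000) is 65536 and
 * intel_pt_lower_power_of_2(4096) is 4096, so the "ticks" period set up below
 * can always be applied with a simple mask.
 */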
static void p_log(const char *fmt, ...)
{
	char buf[MAX_PATH];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	fprintf(stderr, "%s\n", buf);
	intel_pt_log("%s\n", buf);
}
static bool intel_pt_print_once(struct intel_pt_decoder *decoder,
				enum intel_pt_p_once id)
{
	uint64_t bit = 1ULL << id;

	if (decoder->print_once & bit)
		return false;
	decoder->print_once |= bit;
	return true;
}
static uint64_t intel_pt_cyc_threshold(uint64_t ctl)
{
	if (!(ctl & INTEL_PT_CYC_ENABLE))
		return 0;

	return (ctl & INTEL_PT_CYC_THRESHOLD) >> INTEL_PT_CYC_THRESHOLD_SHIFT;
}
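/*
 * Example: for ctl == INTEL_PT_CYC_ENABLE | (3 << INTEL_PT_CYC_THRESHOLD_SHIFT)
 * this returns the raw CycThresh field value 3; if CYC packets were not
 * enabled it returns 0.
 */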
static void intel_pt_setup_period(struct intel_pt_decoder *decoder)
{
	if (decoder->period_type == INTEL_PT_PERIOD_TICKS) {
		uint64_t period;

		period = intel_pt_lower_power_of_2(decoder->period);
		decoder->period_mask = ~(period - 1);
		decoder->period_ticks = period;
	}
}
static uint64_t multdiv(uint64_t t, uint32_t n, uint32_t d)
{
	if (!d)
		return 0;
	return (t / d) * n + ((t % d) * n) / d;
}
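/*
 * The split into (t / d) * n plus ((t % d) * n) / d avoids overflowing 64 bits
 * when t is a large timestamp delta. Example: multdiv(1000000, 192, 10) is
 * 100000 * 192 + (0 * 192) / 10 == 19200000, without ever forming t * n.
 */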
struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
{
	struct intel_pt_decoder *decoder;

	if (!params->get_trace || !params->walk_insn)
		return NULL;

	decoder = zalloc(sizeof(struct intel_pt_decoder));
	if (!decoder)
		return NULL;
317 decoder->get_trace = params->get_trace;
318 decoder->walk_insn = params->walk_insn;
319 decoder->pgd_ip = params->pgd_ip;
320 decoder->lookahead = params->lookahead;
321 decoder->findnew_vmcs_info = params->findnew_vmcs_info;
322 decoder->data = params->data;
323 decoder->return_compression = params->return_compression;
324 decoder->branch_enable = params->branch_enable;
325 decoder->hop = params->quick >= 1;
326 decoder->leap = params->quick >= 2;
327 decoder->vm_time_correlation = params->vm_time_correlation;
328 decoder->vm_tm_corr_dry_run = params->vm_tm_corr_dry_run;
329 decoder->first_timestamp = params->first_timestamp;
330 decoder->last_reliable_timestamp = params->first_timestamp;
331 decoder->max_loops = params->max_loops ? params->max_loops : INTEL_PT_MAX_LOOPS;
333 decoder->flags = params->flags;
335 decoder->ctl = params->ctl;
336 decoder->period = params->period;
337 decoder->period_type = params->period_type;
339 decoder->max_non_turbo_ratio = params->max_non_turbo_ratio;
340 decoder->max_non_turbo_ratio_fp = params->max_non_turbo_ratio;
342 decoder->cyc_threshold = intel_pt_cyc_threshold(decoder->ctl);
344 intel_pt_setup_period(decoder);
346 decoder->mtc_shift = params->mtc_period;
347 decoder->ctc_rem_mask = (1 << decoder->mtc_shift) - 1;
349 decoder->tsc_ctc_ratio_n = params->tsc_ctc_ratio_n;
350 decoder->tsc_ctc_ratio_d = params->tsc_ctc_ratio_d;
352 if (!decoder->tsc_ctc_ratio_n)
353 decoder->tsc_ctc_ratio_d = 0;
355 if (decoder->tsc_ctc_ratio_d) {
356 if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
357 decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
358 decoder->tsc_ctc_ratio_d;
	/*
	 * A TSC packet can slip past MTC packets so that the timestamp appears
	 * to go backwards. One estimate is that it can be up to about 40 CPU
	 * cycles, which is certainly less than 0x1000 TSC ticks, but accept
	 * slippage an order of magnitude more to be on the safe side.
	 */
	decoder->tsc_slip = 0x10000;
369 intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
370 intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
371 intel_pt_log("timestamp: tsc_ctc_ratio_d %u\n", decoder->tsc_ctc_ratio_d);
372 intel_pt_log("timestamp: tsc_ctc_mult %u\n", decoder->tsc_ctc_mult);
373 intel_pt_log("timestamp: tsc_slip %#x\n", decoder->tsc_slip);
	if (decoder->hop)
		intel_pt_log("Hop mode: decoding FUP and TIPs, but not TNT\n");

	return decoder;
}
void intel_pt_set_first_timestamp(struct intel_pt_decoder *decoder,
				  uint64_t first_timestamp)
{
	decoder->first_timestamp = first_timestamp;
}
static void intel_pt_pop_blk(struct intel_pt_stack *stack)
{
	struct intel_pt_blk *blk = stack->blk;

	stack->blk = blk->prev;
	if (!stack->spare)
		stack->spare = blk;
	else
		free(blk);
}

static uint64_t intel_pt_pop(struct intel_pt_stack *stack)
{
	if (!stack->pos) {
		if (!stack->blk)
			return 0;
		intel_pt_pop_blk(stack);
		if (!stack->blk)
			return 0;
		stack->pos = INTEL_PT_BLK_SIZE;
	}
	return stack->blk->ip[--stack->pos];
}
411 static int intel_pt_alloc_blk(struct intel_pt_stack *stack)
413 struct intel_pt_blk *blk;
419 blk = malloc(sizeof(struct intel_pt_blk));
424 blk->prev = stack->blk;
static int intel_pt_push(struct intel_pt_stack *stack, uint64_t ip)
{
	int err;

	if (!stack->blk || stack->pos == INTEL_PT_BLK_SIZE) {
		err = intel_pt_alloc_blk(stack);
		if (err)
			return err;
	}

	stack->blk->ip[stack->pos++] = ip;
	return 0;
}
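/*
 * Illustrative note on how this stack is used for RET compression (see
 * intel_pt_walk_insn() and intel_pt_walk_tnt() below): on a CALL the decoder
 * pushes the return address, e.g. intel_pt_push(&decoder->stack, decoder->ip +
 * intel_pt_insn->length), and a compressed RET (a "taken" TNT bit) later pops
 * it with intel_pt_pop() instead of needing an explicit TIP packet.
 */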
static void intel_pt_clear_stack(struct intel_pt_stack *stack)
{
	while (stack->blk)
		intel_pt_pop_blk(stack);
	stack->pos = 0;
}

static void intel_pt_free_stack(struct intel_pt_stack *stack)
{
	intel_pt_clear_stack(stack);
	zfree(&stack->blk);
	zfree(&stack->spare);
}

void intel_pt_decoder_free(struct intel_pt_decoder *decoder)
{
	intel_pt_free_stack(&decoder->stack);
	free(decoder);
}
464 static int intel_pt_ext_err(int code)
468 return INTEL_PT_ERR_NOMEM;
470 return INTEL_PT_ERR_INTERN;
472 return INTEL_PT_ERR_BADPKT;
474 return INTEL_PT_ERR_NODATA;
476 return INTEL_PT_ERR_NOINSN;
478 return INTEL_PT_ERR_MISMAT;
480 return INTEL_PT_ERR_OVR;
482 return INTEL_PT_ERR_LOST;
484 return INTEL_PT_ERR_NELOOP;
486 return INTEL_PT_ERR_EPTW;
488 return INTEL_PT_ERR_UNK;
492 static const char *intel_pt_err_msgs[] = {
493 [INTEL_PT_ERR_NOMEM] = "Memory allocation failed",
494 [INTEL_PT_ERR_INTERN] = "Internal error",
495 [INTEL_PT_ERR_BADPKT] = "Bad packet",
496 [INTEL_PT_ERR_NODATA] = "No more data",
497 [INTEL_PT_ERR_NOINSN] = "Failed to get instruction",
498 [INTEL_PT_ERR_MISMAT] = "Trace doesn't match instruction",
499 [INTEL_PT_ERR_OVR] = "Overflow packet",
500 [INTEL_PT_ERR_LOST] = "Lost trace data",
501 [INTEL_PT_ERR_UNK] = "Unknown error!",
502 [INTEL_PT_ERR_NELOOP] = "Never-ending loop (refer perf config intel-pt.max-loops)",
	[INTEL_PT_ERR_EPTW]  = "Broken emulated ptwrite",
};

int intel_pt__strerror(int code, char *buf, size_t buflen)
{
	if (code < 1 || code >= INTEL_PT_ERR_MAX)
		code = INTEL_PT_ERR_UNK;
	strlcpy(buf, intel_pt_err_msgs[code], buflen);
	return 0;
}
514 static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
519 switch (packet->count) {
521 ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
525 ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
529 ip = packet->payload;
530 /* Sign-extend 6-byte ip */
531 if (ip & (uint64_t)0x800000000000ULL)
532 ip |= (uint64_t)0xffff000000000000ULL;
535 ip = (last_ip & (uint64_t)0xffff000000000000ULL) |
539 ip = packet->payload;
548 static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
550 decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
551 decoder->have_last_ip = true;
554 static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
556 intel_pt_set_last_ip(decoder);
557 decoder->ip = decoder->last_ip;
560 static void intel_pt_decoder_log_packet(struct intel_pt_decoder *decoder)
562 intel_pt_log_packet(&decoder->packet, decoder->pkt_len, decoder->pos,
566 static int intel_pt_bug(struct intel_pt_decoder *decoder)
568 intel_pt_log("ERROR: Internal error\n");
569 decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
573 static inline void intel_pt_clear_tx_flags(struct intel_pt_decoder *decoder)
575 decoder->tx_flags = 0;
578 static inline void intel_pt_update_in_tx(struct intel_pt_decoder *decoder)
580 decoder->tx_flags = decoder->packet.payload & INTEL_PT_IN_TX;
583 static inline void intel_pt_update_pip(struct intel_pt_decoder *decoder)
585 decoder->pip_payload = decoder->packet.payload;
588 static inline void intel_pt_update_nr(struct intel_pt_decoder *decoder)
590 decoder->next_nr = decoder->pip_payload & 1;
593 static inline void intel_pt_set_nr(struct intel_pt_decoder *decoder)
595 decoder->nr = decoder->pip_payload & 1;
596 decoder->next_nr = decoder->nr;
599 static inline void intel_pt_set_pip(struct intel_pt_decoder *decoder)
601 intel_pt_update_pip(decoder);
602 intel_pt_set_nr(decoder);
605 static int intel_pt_bad_packet(struct intel_pt_decoder *decoder)
607 intel_pt_clear_tx_flags(decoder);
608 decoder->have_tma = false;
609 decoder->pkt_len = 1;
610 decoder->pkt_step = 1;
611 intel_pt_decoder_log_packet(decoder);
612 if (decoder->pkt_state != INTEL_PT_STATE_NO_PSB) {
613 intel_pt_log("ERROR: Bad packet\n");
614 decoder->pkt_state = INTEL_PT_STATE_ERR1;
619 static inline void intel_pt_update_sample_time(struct intel_pt_decoder *decoder)
621 decoder->sample_timestamp = decoder->timestamp;
622 decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
623 decoder->state.cycles = decoder->tot_cyc_cnt;
626 static void intel_pt_reposition(struct intel_pt_decoder *decoder)
629 decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
630 decoder->timestamp = 0;
631 decoder->have_tma = false;
634 static int intel_pt_get_data(struct intel_pt_decoder *decoder, bool reposition)
636 struct intel_pt_buffer buffer = { .buf = 0, };
639 decoder->pkt_step = 0;
641 intel_pt_log("Getting more data\n");
642 ret = decoder->get_trace(&buffer, decoder->data);
645 decoder->buf = buffer.buf;
646 decoder->len = buffer.len;
648 intel_pt_log("No more data\n");
651 decoder->buf_timestamp = buffer.ref_timestamp;
652 if (!buffer.consecutive || reposition) {
653 intel_pt_reposition(decoder);
654 decoder->ref_timestamp = buffer.ref_timestamp;
655 decoder->state.trace_nr = buffer.trace_nr;
656 decoder->vm_tm_corr_same_buf = false;
657 intel_pt_log("Reference timestamp 0x%" PRIx64 "\n",
658 decoder->ref_timestamp);
665 static int intel_pt_get_next_data(struct intel_pt_decoder *decoder,
668 if (!decoder->next_buf)
669 return intel_pt_get_data(decoder, reposition);
671 decoder->buf = decoder->next_buf;
672 decoder->len = decoder->next_len;
673 decoder->next_buf = 0;
674 decoder->next_len = 0;
678 static int intel_pt_get_split_packet(struct intel_pt_decoder *decoder)
680 unsigned char *buf = decoder->temp_buf;
681 size_t old_len, len, n;
684 old_len = decoder->len;
686 memcpy(buf, decoder->buf, len);
688 ret = intel_pt_get_data(decoder, false);
690 decoder->pos += old_len;
691 return ret < 0 ? ret : -EINVAL;
694 n = INTEL_PT_PKT_MAX_SZ - len;
695 if (n > decoder->len)
697 memcpy(buf + len, decoder->buf, n);
700 decoder->prev_pkt_ctx = decoder->pkt_ctx;
701 ret = intel_pt_get_packet(buf, len, &decoder->packet, &decoder->pkt_ctx);
702 if (ret < (int)old_len) {
703 decoder->next_buf = decoder->buf;
704 decoder->next_len = decoder->len;
706 decoder->len = old_len;
707 return intel_pt_bad_packet(decoder);
710 decoder->next_buf = decoder->buf + (ret - old_len);
711 decoder->next_len = decoder->len - (ret - old_len);
719 struct intel_pt_pkt_info {
720 struct intel_pt_decoder *decoder;
721 struct intel_pt_pkt packet;
724 int last_packet_type;
728 typedef int (*intel_pt_pkt_cb_t)(struct intel_pt_pkt_info *pkt_info);
730 /* Lookahead packets in current buffer */
731 static int intel_pt_pkt_lookahead(struct intel_pt_decoder *decoder,
732 intel_pt_pkt_cb_t cb, void *data)
734 struct intel_pt_pkt_info pkt_info;
735 const unsigned char *buf = decoder->buf;
736 enum intel_pt_pkt_ctx pkt_ctx = decoder->pkt_ctx;
737 size_t len = decoder->len;
740 pkt_info.decoder = decoder;
741 pkt_info.pos = decoder->pos;
742 pkt_info.pkt_len = decoder->pkt_step;
743 pkt_info.last_packet_type = decoder->last_packet_type;
744 pkt_info.data = data;
748 pkt_info.pos += pkt_info.pkt_len;
749 buf += pkt_info.pkt_len;
750 len -= pkt_info.pkt_len;
753 return INTEL_PT_NEED_MORE_BYTES;
755 ret = intel_pt_get_packet(buf, len, &pkt_info.packet,
758 return INTEL_PT_NEED_MORE_BYTES;
762 pkt_info.pkt_len = ret;
763 } while (pkt_info.packet.type == INTEL_PT_PAD);
769 pkt_info.last_packet_type = pkt_info.packet.type;
773 struct intel_pt_calc_cyc_to_tsc_info {
777 uint64_t ctc_timestamp;
779 uint64_t tsc_timestamp;
784 double cbr_cyc_to_tsc;
/*
 * MTC provides an 8-bit slice of CTC but the TMA packet only provides the lower
 * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC
 * provided by the TMA packet. Fix up the last_mtc calculated from the TMA
 * packet by copying the missing bits from the current MTC, assuming the least
 * difference between the two and that the current MTC comes after last_mtc.
 */
static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
				    uint32_t *last_mtc)
{
	uint32_t first_missing_bit = 1U << (16 - mtc_shift);
	uint32_t mask = ~(first_missing_bit - 1);

	*last_mtc |= mtc & mask;
	if (*last_mtc >= mtc) {
		*last_mtc -= first_missing_bit;
		*last_mtc &= 0xff;
	}
}
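/*
 * Worked example (values chosen for illustration): with mtc_shift == 9,
 * first_missing_bit == 0x80 and mask == ~0x7f, so last_mtc from TMA only has
 * bits 0-6. If last_mtc == 0x05 and the current mtc == 0x03, OR-ing in mtc's
 * upper bits leaves 0x05; since 0x05 >= 0x03, 0x80 is subtracted (modulo 256),
 * giving last_mtc == 0x85, the nearest value that still precedes mtc.
 */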
807 static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
809 struct intel_pt_decoder *decoder = pkt_info->decoder;
810 struct intel_pt_calc_cyc_to_tsc_info *data = pkt_info->data;
814 uint32_t mtc, mtc_delta, ctc, fc, ctc_rem;
816 switch (pkt_info->packet.type) {
818 case INTEL_PT_TIP_PGE:
823 case INTEL_PT_MODE_EXEC:
824 case INTEL_PT_MODE_TSX:
825 case INTEL_PT_PSBEND:
829 case INTEL_PT_PTWRITE:
830 case INTEL_PT_PTWRITE_IP:
834 case INTEL_PT_BEP_IP:
836 case INTEL_PT_CFE_IP:
844 mtc = pkt_info->packet.payload;
845 if (decoder->mtc_shift > 8 && data->fixup_last_mtc) {
846 data->fixup_last_mtc = false;
847 intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
850 if (mtc > data->last_mtc)
			mtc_delta = mtc - data->last_mtc;
		else
			mtc_delta = mtc + 256 - data->last_mtc;
854 data->ctc_delta += mtc_delta << decoder->mtc_shift;
855 data->last_mtc = mtc;
857 if (decoder->tsc_ctc_mult) {
858 timestamp = data->ctc_timestamp +
859 data->ctc_delta * decoder->tsc_ctc_mult;
861 timestamp = data->ctc_timestamp +
862 multdiv(data->ctc_delta,
863 decoder->tsc_ctc_ratio_n,
864 decoder->tsc_ctc_ratio_d);
867 if (timestamp < data->timestamp)
870 if (pkt_info->last_packet_type != INTEL_PT_CYC) {
871 data->timestamp = timestamp;
		/*
		 * For now, do not support using TSC packets - refer
		 * intel_pt_calc_cyc_to_tsc().
		 */
884 timestamp = pkt_info->packet.payload |
885 (data->timestamp & (0xffULL << 56));
886 if (data->from_mtc && timestamp < data->timestamp &&
887 data->timestamp - timestamp < decoder->tsc_slip)
889 if (timestamp < data->timestamp)
890 timestamp += (1ULL << 56);
891 if (pkt_info->last_packet_type != INTEL_PT_CYC) {
894 data->tsc_timestamp = timestamp;
895 data->timestamp = timestamp;
904 if (!decoder->tsc_ctc_ratio_d)
907 ctc = pkt_info->packet.payload;
908 fc = pkt_info->packet.count;
909 ctc_rem = ctc & decoder->ctc_rem_mask;
911 data->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
913 data->ctc_timestamp = data->tsc_timestamp - fc;
914 if (decoder->tsc_ctc_mult) {
915 data->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
917 data->ctc_timestamp -=
918 multdiv(ctc_rem, decoder->tsc_ctc_ratio_n,
919 decoder->tsc_ctc_ratio_d);
923 data->have_tma = true;
924 data->fixup_last_mtc = true;
929 data->cycle_cnt += pkt_info->packet.payload;
933 cbr = pkt_info->packet.payload;
934 if (data->cbr && data->cbr != cbr)
937 data->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
940 case INTEL_PT_TIP_PGD:
941 case INTEL_PT_TRACESTOP:
942 case INTEL_PT_EXSTOP:
943 case INTEL_PT_EXSTOP_IP:
948 case INTEL_PT_BAD: /* Does not happen */
953 if (!data->cbr && decoder->cbr) {
954 data->cbr = decoder->cbr;
955 data->cbr_cyc_to_tsc = decoder->cbr_cyc_to_tsc;
958 if (!data->cycle_cnt)
961 cyc_to_tsc = (double)(timestamp - decoder->timestamp) / data->cycle_cnt;
963 if (data->cbr && cyc_to_tsc > data->cbr_cyc_to_tsc &&
964 cyc_to_tsc / data->cbr_cyc_to_tsc > 1.25) {
965 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle too big (c.f. CBR-based value %g), pos " x64_fmt "\n",
966 cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos);
970 decoder->calc_cyc_to_tsc = cyc_to_tsc;
971 decoder->have_calc_cyc_to_tsc = true;
974 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. CBR-based value %g, pos " x64_fmt "\n",
975 cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos);
977 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. unknown CBR-based value, pos " x64_fmt "\n",
978 cyc_to_tsc, pkt_info->pos);
984 static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
987 struct intel_pt_calc_cyc_to_tsc_info data = {
990 .last_mtc = decoder->last_mtc,
991 .ctc_timestamp = decoder->ctc_timestamp,
992 .ctc_delta = decoder->ctc_delta,
993 .tsc_timestamp = decoder->tsc_timestamp,
994 .timestamp = decoder->timestamp,
995 .have_tma = decoder->have_tma,
996 .fixup_last_mtc = decoder->fixup_last_mtc,
997 .from_mtc = from_mtc,
	/*
	 * For now, do not support using TSC packets for at least the reasons:
	 * 1) timing might have stopped
	 * 2) TSC packets within PSB+ can slip against CYC packets
	 */
1009 intel_pt_pkt_lookahead(decoder, intel_pt_calc_cyc_cb, &data);
1012 static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder)
1016 decoder->last_packet_type = decoder->packet.type;
1019 decoder->pos += decoder->pkt_step;
1020 decoder->buf += decoder->pkt_step;
1021 decoder->len -= decoder->pkt_step;
1023 if (!decoder->len) {
1024 ret = intel_pt_get_next_data(decoder, false);
1029 decoder->prev_pkt_ctx = decoder->pkt_ctx;
1030 ret = intel_pt_get_packet(decoder->buf, decoder->len,
1031 &decoder->packet, &decoder->pkt_ctx);
1032 if (ret == INTEL_PT_NEED_MORE_BYTES && BITS_PER_LONG == 32 &&
1033 decoder->len < INTEL_PT_PKT_MAX_SZ && !decoder->next_buf) {
1034 ret = intel_pt_get_split_packet(decoder);
1039 return intel_pt_bad_packet(decoder);
1041 decoder->pkt_len = ret;
1042 decoder->pkt_step = ret;
1043 intel_pt_decoder_log_packet(decoder);
1044 } while (decoder->packet.type == INTEL_PT_PAD);
1049 static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
1051 uint64_t timestamp, masked_timestamp;
1053 timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
1054 masked_timestamp = timestamp & decoder->period_mask;
1055 if (decoder->continuous_period) {
1056 if (masked_timestamp > decoder->last_masked_timestamp)
1060 masked_timestamp = timestamp & decoder->period_mask;
1061 if (masked_timestamp > decoder->last_masked_timestamp) {
1062 decoder->last_masked_timestamp = masked_timestamp;
1063 decoder->continuous_period = true;
1067 if (masked_timestamp < decoder->last_masked_timestamp)
1068 return decoder->period_ticks;
1070 return decoder->period_ticks - (timestamp - masked_timestamp);
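/*
 * Example: with period_ticks == 0x10000, period_mask is ~0xffff, so a sample
 * becomes due whenever timestamp + instruction count crosses a 0x10000-tick
 * boundary; the value returned here is how many more instructions (used as an
 * approximation of ticks) may be walked before that happens.
 */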
1073 static uint64_t intel_pt_next_sample(struct intel_pt_decoder *decoder)
1075 switch (decoder->period_type) {
1076 case INTEL_PT_PERIOD_INSTRUCTIONS:
1077 return decoder->period - decoder->period_insn_cnt;
1078 case INTEL_PT_PERIOD_TICKS:
1079 return intel_pt_next_period(decoder);
1080 case INTEL_PT_PERIOD_NONE:
1081 case INTEL_PT_PERIOD_MTC:
1087 static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
1089 uint64_t timestamp, masked_timestamp;
1091 switch (decoder->period_type) {
1092 case INTEL_PT_PERIOD_INSTRUCTIONS:
1093 decoder->period_insn_cnt = 0;
1095 case INTEL_PT_PERIOD_TICKS:
1096 timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
1097 masked_timestamp = timestamp & decoder->period_mask;
1098 if (masked_timestamp > decoder->last_masked_timestamp)
1099 decoder->last_masked_timestamp = masked_timestamp;
1101 decoder->last_masked_timestamp += decoder->period_ticks;
1103 case INTEL_PT_PERIOD_NONE:
1104 case INTEL_PT_PERIOD_MTC:
1109 decoder->state.type |= INTEL_PT_INSTRUCTION;
/*
 * Sample FUP instruction at the same time as reporting the FUP event, so the
 * instruction sample gets the same flags as the FUP event.
 */
1116 static void intel_pt_sample_fup_insn(struct intel_pt_decoder *decoder)
1118 struct intel_pt_insn intel_pt_insn;
1119 uint64_t max_insn_cnt, insn_cnt = 0;
1122 decoder->state.insn_op = INTEL_PT_OP_OTHER;
1123 decoder->state.insn_len = 0;
1125 if (!decoder->branch_enable || !decoder->pge || decoder->hop ||
1126 decoder->ip != decoder->last_ip)
1129 if (!decoder->mtc_insn)
1130 decoder->mtc_insn = true;
1132 max_insn_cnt = intel_pt_next_sample(decoder);
1133 if (max_insn_cnt != 1)
1136 err = decoder->walk_insn(&intel_pt_insn, &insn_cnt, &decoder->ip,
1137 0, max_insn_cnt, decoder->data);
1138 /* Ignore error, it will be reported next walk anyway */
1142 if (intel_pt_insn.branch != INTEL_PT_BR_NO_BRANCH) {
1143 intel_pt_log_at("ERROR: Unexpected branch at FUP instruction", decoder->ip);
1147 decoder->tot_insn_cnt += insn_cnt;
1148 decoder->timestamp_insn_cnt += insn_cnt;
1149 decoder->sample_insn_cnt += insn_cnt;
1150 decoder->period_insn_cnt += insn_cnt;
1152 intel_pt_sample_insn(decoder);
1154 decoder->state.type |= INTEL_PT_INSTRUCTION;
1155 decoder->ip += intel_pt_insn.length;
1158 static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
1159 struct intel_pt_insn *intel_pt_insn, uint64_t ip)
1161 uint64_t max_insn_cnt, insn_cnt = 0;
1164 if (!decoder->mtc_insn)
1165 decoder->mtc_insn = true;
1167 max_insn_cnt = intel_pt_next_sample(decoder);
1169 err = decoder->walk_insn(intel_pt_insn, &insn_cnt, &decoder->ip, ip,
1170 max_insn_cnt, decoder->data);
1172 decoder->tot_insn_cnt += insn_cnt;
1173 decoder->timestamp_insn_cnt += insn_cnt;
1174 decoder->sample_insn_cnt += insn_cnt;
1175 decoder->period_insn_cnt += insn_cnt;
1178 decoder->no_progress = 0;
1179 decoder->pkt_state = INTEL_PT_STATE_ERR2;
1180 intel_pt_log_at("ERROR: Failed to get instruction",
1187 if (ip && decoder->ip == ip) {
1192 if (max_insn_cnt && insn_cnt >= max_insn_cnt)
1193 intel_pt_sample_insn(decoder);
1195 if (intel_pt_insn->branch == INTEL_PT_BR_NO_BRANCH) {
1196 decoder->state.type = INTEL_PT_INSTRUCTION;
1197 decoder->state.from_ip = decoder->ip;
1198 decoder->state.to_ip = 0;
1199 decoder->ip += intel_pt_insn->length;
1200 err = INTEL_PT_RETURN;
1204 if (intel_pt_insn->op == INTEL_PT_OP_CALL) {
1205 /* Zero-length calls are excluded */
1206 if (intel_pt_insn->branch != INTEL_PT_BR_UNCONDITIONAL ||
1207 intel_pt_insn->rel) {
1208 err = intel_pt_push(&decoder->stack, decoder->ip +
1209 intel_pt_insn->length);
1213 } else if (intel_pt_insn->op == INTEL_PT_OP_RET) {
1214 decoder->ret_addr = intel_pt_pop(&decoder->stack);
1217 if (intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL) {
1218 int cnt = decoder->no_progress++;
1220 decoder->state.from_ip = decoder->ip;
1221 decoder->ip += intel_pt_insn->length +
1223 decoder->state.to_ip = decoder->ip;
1224 err = INTEL_PT_RETURN;
		/*
		 * Check for being stuck in a loop. This can happen if a
		 * decoder error results in the decoder erroneously setting the
		 * ip to an address that is itself in an infinite loop that
		 * consumes no packets. When that happens, there must be an
		 * unconditional branch.
		 */
1235 decoder->stuck_ip = decoder->state.to_ip;
1236 decoder->stuck_ip_prd = 1;
1237 decoder->stuck_ip_cnt = 1;
1238 } else if (cnt > decoder->max_loops ||
1239 decoder->state.to_ip == decoder->stuck_ip) {
1240 intel_pt_log_at("ERROR: Never-ending loop",
1241 decoder->state.to_ip);
1242 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1245 } else if (!--decoder->stuck_ip_cnt) {
1246 decoder->stuck_ip_prd += 1;
1247 decoder->stuck_ip_cnt = decoder->stuck_ip_prd;
1248 decoder->stuck_ip = decoder->state.to_ip;
1251 goto out_no_progress;
1254 decoder->no_progress = 0;
1256 decoder->state.insn_op = intel_pt_insn->op;
1257 decoder->state.insn_len = intel_pt_insn->length;
1258 memcpy(decoder->state.insn, intel_pt_insn->buf,
1259 INTEL_PT_INSN_BUF_SZ);
1261 if (decoder->tx_flags & INTEL_PT_IN_TX)
1262 decoder->state.flags |= INTEL_PT_IN_TX;
1267 static void intel_pt_mode_exec_status(struct intel_pt_decoder *decoder)
1269 bool iflag = decoder->packet.count & INTEL_PT_IFLAG;
1271 decoder->exec_mode = decoder->packet.payload;
1272 decoder->iflag = iflag;
1273 decoder->next_iflag = iflag;
1274 decoder->state.from_iflag = iflag;
1275 decoder->state.to_iflag = iflag;
1278 static void intel_pt_mode_exec(struct intel_pt_decoder *decoder)
1280 bool iflag = decoder->packet.count & INTEL_PT_IFLAG;
1282 decoder->exec_mode = decoder->packet.payload;
1283 decoder->next_iflag = iflag;
1286 static void intel_pt_sample_iflag(struct intel_pt_decoder *decoder)
1288 decoder->state.type |= INTEL_PT_IFLAG_CHG;
1289 decoder->state.from_iflag = decoder->iflag;
1290 decoder->state.to_iflag = decoder->next_iflag;
1291 decoder->iflag = decoder->next_iflag;
1294 static void intel_pt_sample_iflag_chg(struct intel_pt_decoder *decoder)
1296 if (decoder->iflag != decoder->next_iflag)
1297 intel_pt_sample_iflag(decoder);
1300 static void intel_pt_clear_fup_event(struct intel_pt_decoder *decoder)
1302 decoder->set_fup_tx_flags = false;
1303 decoder->set_fup_ptw = false;
1304 decoder->set_fup_mwait = false;
1305 decoder->set_fup_pwre = false;
1306 decoder->set_fup_exstop = false;
1307 decoder->set_fup_bep = false;
1308 decoder->set_fup_cfe_ip = false;
1309 decoder->set_fup_cfe = false;
1310 decoder->evd_cnt = 0;
1311 decoder->set_fup_mode_exec = false;
1312 decoder->iflag = decoder->next_iflag;
1315 static bool intel_pt_fup_event(struct intel_pt_decoder *decoder, bool no_tip)
1317 enum intel_pt_sample_type type = decoder->state.type;
1318 bool sample_fup_insn = false;
1321 decoder->state.type &= ~INTEL_PT_BRANCH;
1323 if (decoder->set_fup_cfe_ip || decoder->set_fup_cfe) {
1324 bool ip = decoder->set_fup_cfe_ip;
1326 decoder->set_fup_cfe_ip = false;
1327 decoder->set_fup_cfe = false;
1328 decoder->state.type |= INTEL_PT_EVT;
1329 if (!ip && decoder->pge)
1330 decoder->state.type |= INTEL_PT_BRANCH;
1331 decoder->state.cfe_type = decoder->fup_cfe_pkt.count;
1332 decoder->state.cfe_vector = decoder->fup_cfe_pkt.payload;
1333 decoder->state.evd_cnt = decoder->evd_cnt;
1334 decoder->state.evd = decoder->evd;
1335 decoder->evd_cnt = 0;
1336 if (ip || decoder->pge)
1337 decoder->state.flags |= INTEL_PT_FUP_IP;
1340 if (decoder->set_fup_mode_exec) {
1341 decoder->set_fup_mode_exec = false;
1342 intel_pt_sample_iflag(decoder);
1343 sample_fup_insn = no_tip;
1346 if (decoder->set_fup_tx_flags) {
1347 decoder->set_fup_tx_flags = false;
1348 decoder->tx_flags = decoder->fup_tx_flags;
1349 decoder->state.type |= INTEL_PT_TRANSACTION;
1350 if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
1351 decoder->state.type |= INTEL_PT_BRANCH;
1352 decoder->state.flags = decoder->fup_tx_flags;
1355 if (decoder->set_fup_ptw) {
1356 decoder->set_fup_ptw = false;
1357 decoder->state.type |= INTEL_PT_PTW;
1358 decoder->state.flags |= INTEL_PT_FUP_IP;
1359 decoder->state.ptw_payload = decoder->fup_ptw_payload;
1362 if (decoder->set_fup_mwait) {
1363 decoder->set_fup_mwait = false;
1364 decoder->state.type |= INTEL_PT_MWAIT_OP;
1365 decoder->state.mwait_payload = decoder->fup_mwait_payload;
1368 if (decoder->set_fup_pwre) {
1369 decoder->set_fup_pwre = false;
1370 decoder->state.type |= INTEL_PT_PWR_ENTRY;
1371 decoder->state.pwre_payload = decoder->fup_pwre_payload;
1374 if (decoder->set_fup_exstop) {
1375 decoder->set_fup_exstop = false;
1376 decoder->state.type |= INTEL_PT_EX_STOP;
1377 decoder->state.flags |= INTEL_PT_FUP_IP;
1380 if (decoder->set_fup_bep) {
1381 decoder->set_fup_bep = false;
1382 decoder->state.type |= INTEL_PT_BLK_ITEMS;
1385 if (decoder->overflow) {
1386 decoder->overflow = false;
1387 if (!ret && !decoder->pge) {
1389 decoder->state.type = 0;
1390 decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
1392 decoder->pge = true;
1393 decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN;
1394 decoder->state.from_ip = 0;
1395 decoder->state.to_ip = decoder->ip;
1400 decoder->state.from_ip = decoder->ip;
1401 decoder->state.to_ip = 0;
1402 if (sample_fup_insn)
1403 intel_pt_sample_fup_insn(decoder);
1405 decoder->state.type = type;
1410 static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
1411 struct intel_pt_insn *intel_pt_insn,
1412 uint64_t ip, int err)
1414 return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
1415 intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
1416 ip == decoder->ip + intel_pt_insn->length;
1419 static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
1421 struct intel_pt_insn intel_pt_insn;
1425 ip = decoder->last_ip;
1428 err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
1429 if (err == INTEL_PT_RETURN)
1431 if (err == -EAGAIN ||
1432 intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
1433 bool no_tip = decoder->pkt_state != INTEL_PT_STATE_FUP;
1435 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1436 if (intel_pt_fup_event(decoder, no_tip) && no_tip)
1440 decoder->set_fup_tx_flags = false;
1444 if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
1445 intel_pt_log_at("ERROR: Unexpected indirect branch",
1447 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1451 if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
1452 intel_pt_log_at("ERROR: Unexpected conditional branch",
1454 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1458 intel_pt_bug(decoder);
1462 static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
1464 struct intel_pt_insn intel_pt_insn;
1467 err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
1468 if (err == INTEL_PT_RETURN &&
1470 decoder->pkt_state == INTEL_PT_STATE_TIP_PGD &&
1471 (decoder->state.type & INTEL_PT_BRANCH) &&
1472 decoder->pgd_ip(decoder->state.to_ip, decoder->data)) {
1473 /* Unconditional branch leaving filter region */
1474 decoder->no_progress = 0;
1475 decoder->pge = false;
1476 decoder->continuous_period = false;
1477 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1478 decoder->state.type |= INTEL_PT_TRACE_END;
1479 intel_pt_update_nr(decoder);
1482 if (err == INTEL_PT_RETURN)
1487 intel_pt_update_nr(decoder);
1488 intel_pt_sample_iflag_chg(decoder);
1490 if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
1491 if (decoder->pkt_state == INTEL_PT_STATE_TIP_PGD) {
1492 decoder->pge = false;
1493 decoder->continuous_period = false;
1494 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1495 decoder->state.from_ip = decoder->ip;
1496 if (decoder->packet.count == 0) {
1497 decoder->state.to_ip = 0;
1499 decoder->state.to_ip = decoder->last_ip;
1500 decoder->ip = decoder->last_ip;
1502 decoder->state.type |= INTEL_PT_TRACE_END;
1504 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1505 decoder->state.from_ip = decoder->ip;
1506 if (decoder->packet.count == 0) {
1507 decoder->state.to_ip = 0;
1509 decoder->state.to_ip = decoder->last_ip;
1510 decoder->ip = decoder->last_ip;
1516 if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
1517 uint64_t to_ip = decoder->ip + intel_pt_insn.length +
1520 if (decoder->pgd_ip &&
1521 decoder->pkt_state == INTEL_PT_STATE_TIP_PGD &&
1522 decoder->pgd_ip(to_ip, decoder->data)) {
1523 /* Conditional branch leaving filter region */
1524 decoder->pge = false;
1525 decoder->continuous_period = false;
1526 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1527 decoder->ip = to_ip;
1528 decoder->state.from_ip = decoder->ip;
1529 decoder->state.to_ip = to_ip;
1530 decoder->state.type |= INTEL_PT_TRACE_END;
1533 intel_pt_log_at("ERROR: Conditional branch when expecting indirect branch",
1535 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1539 return intel_pt_bug(decoder);
1547 static int intel_pt_eptw_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
1549 struct eptw_data *data = pkt_info->data;
1552 switch (pkt_info->packet.type) {
1555 case INTEL_PT_MODE_EXEC:
1556 case INTEL_PT_MODE_TSX:
1566 case INTEL_PT_PSBEND:
1567 case INTEL_PT_PTWRITE:
1568 case INTEL_PT_PTWRITE_IP:
1569 case INTEL_PT_EXSTOP:
1570 case INTEL_PT_EXSTOP_IP:
1571 case INTEL_PT_MWAIT:
1577 case INTEL_PT_BEP_IP:
1579 case INTEL_PT_CFE_IP:
1584 nr_bits = data->bit_countdown;
1585 if (nr_bits > pkt_info->packet.count)
1586 nr_bits = pkt_info->packet.count;
1587 data->payload <<= nr_bits;
1588 data->payload |= pkt_info->packet.payload >> (64 - nr_bits);
1589 data->bit_countdown -= nr_bits;
1590 return !data->bit_countdown;
1592 case INTEL_PT_TIP_PGE:
1593 case INTEL_PT_TIP_PGD:
1597 case INTEL_PT_TRACESTOP:
1605 static int intel_pt_emulated_ptwrite(struct intel_pt_decoder *decoder)
1607 int n = 64 - decoder->tnt.count;
1608 struct eptw_data data = {
1610 .payload = decoder->tnt.payload >> n,
1613 decoder->emulated_ptwrite = false;
1614 intel_pt_log("Emulated ptwrite detected\n");
1616 intel_pt_pkt_lookahead(decoder, intel_pt_eptw_lookahead_cb, &data);
1617 if (data.bit_countdown)
1620 decoder->state.type = INTEL_PT_PTW;
1621 decoder->state.from_ip = decoder->ip;
1622 decoder->state.to_ip = 0;
1623 decoder->state.ptw_payload = data.payload;
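/*
 * Sketch of the emulated ptwrite encoding handled above (illustrative): the
 * 64-bit payload is carried by branch packets, so the first "tnt.count" bits
 * of the current TNT payload end up as the most significant bits, and
 * intel_pt_eptw_lookahead_cb() keeps shifting in bits from subsequent TNT
 * packets until bit_countdown reaches zero and data.payload is complete.
 */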
1627 static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
1629 struct intel_pt_insn intel_pt_insn;
1633 if (decoder->emulated_ptwrite)
1634 return intel_pt_emulated_ptwrite(decoder);
1635 err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
1636 if (err == INTEL_PT_RETURN) {
1637 decoder->emulated_ptwrite = intel_pt_insn.emulated_ptwrite;
1641 decoder->emulated_ptwrite = false;
1645 if (intel_pt_insn.op == INTEL_PT_OP_RET) {
1646 if (!decoder->return_compression) {
1647 intel_pt_log_at("ERROR: RET when expecting conditional branch",
1649 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1652 if (!decoder->ret_addr) {
1653 intel_pt_log_at("ERROR: Bad RET compression (stack empty)",
1655 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1658 if (!(decoder->tnt.payload & BIT63)) {
1659 intel_pt_log_at("ERROR: Bad RET compression (TNT=N)",
1661 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1664 decoder->tnt.count -= 1;
1665 if (decoder->tnt.count)
1666 decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
1668 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1669 decoder->tnt.payload <<= 1;
1670 decoder->state.from_ip = decoder->ip;
1671 decoder->ip = decoder->ret_addr;
1672 decoder->state.to_ip = decoder->ip;
1676 if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
1677 /* Handle deferred TIPs */
1678 err = intel_pt_get_next_packet(decoder);
1681 if (decoder->packet.type != INTEL_PT_TIP ||
1682 decoder->packet.count == 0) {
1683 intel_pt_log_at("ERROR: Missing deferred TIP for indirect branch",
1685 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1686 decoder->pkt_step = 0;
1689 intel_pt_set_last_ip(decoder);
1690 decoder->state.from_ip = decoder->ip;
1691 decoder->state.to_ip = decoder->last_ip;
1692 decoder->ip = decoder->last_ip;
1693 intel_pt_update_nr(decoder);
1694 intel_pt_sample_iflag_chg(decoder);
1698 if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
1699 decoder->tnt.count -= 1;
1700 if (decoder->tnt.count)
1701 decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
1703 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1704 if (decoder->tnt.payload & BIT63) {
1705 decoder->tnt.payload <<= 1;
1706 decoder->state.from_ip = decoder->ip;
1707 decoder->ip += intel_pt_insn.length +
1709 decoder->state.to_ip = decoder->ip;
1712 /* Instruction sample for a non-taken branch */
1713 if (decoder->state.type & INTEL_PT_INSTRUCTION) {
1714 decoder->tnt.payload <<= 1;
1715 decoder->state.type = INTEL_PT_INSTRUCTION;
1716 decoder->state.from_ip = decoder->ip;
1717 decoder->state.to_ip = 0;
1718 decoder->ip += intel_pt_insn.length;
1721 decoder->sample_cyc = false;
1722 decoder->ip += intel_pt_insn.length;
1723 if (!decoder->tnt.count) {
1724 intel_pt_update_sample_time(decoder);
1727 decoder->tnt.payload <<= 1;
1731 return intel_pt_bug(decoder);
1735 static int intel_pt_mode_tsx(struct intel_pt_decoder *decoder, bool *no_tip)
1737 unsigned int fup_tx_flags;
1740 fup_tx_flags = decoder->packet.payload &
1741 (INTEL_PT_IN_TX | INTEL_PT_ABORT_TX);
1742 err = intel_pt_get_next_packet(decoder);
1745 if (decoder->packet.type == INTEL_PT_FUP) {
1746 decoder->fup_tx_flags = fup_tx_flags;
1747 decoder->set_fup_tx_flags = true;
1748 if (!(decoder->fup_tx_flags & INTEL_PT_ABORT_TX))
1751 intel_pt_log_at("ERROR: Missing FUP after MODE.TSX",
1753 intel_pt_update_in_tx(decoder);
1758 static int intel_pt_evd(struct intel_pt_decoder *decoder)
1760 if (decoder->evd_cnt >= INTEL_PT_MAX_EVDS) {
1761 intel_pt_log_at("ERROR: Too many EVD packets", decoder->pos);
1764 decoder->evd[decoder->evd_cnt++] = (struct intel_pt_evd){
1765 .type = decoder->packet.count,
1766 .payload = decoder->packet.payload,
static uint64_t intel_pt_8b_tsc(uint64_t timestamp, uint64_t ref_timestamp)
{
	timestamp |= (ref_timestamp & (0xffULL << 56));

	if (timestamp < ref_timestamp) {
		if (ref_timestamp - timestamp > (1ULL << 55))
			timestamp += (1ULL << 56);
	} else {
		if (timestamp - ref_timestamp > (1ULL << 55))
			timestamp -= (1ULL << 56);
	}

	return timestamp;
}
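/*
 * Example: with ref_timestamp == 0x0a00000000001000 and a 7-byte payload of
 * 0xffffffffffff00, OR-ing in the reference's top byte gives
 * 0x0affffffffffff00, which is more than 1 << 55 above the reference, so
 * 1 << 56 is subtracted and the result 0x09ffffffffffff00 lands just before
 * the reference instead of nearly a full 56-bit wrap after it.
 */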
1786 /* For use only when decoder->vm_time_correlation is true */
1787 static bool intel_pt_time_in_range(struct intel_pt_decoder *decoder,
1790 uint64_t max_timestamp = decoder->buf_timestamp;
1792 if (!max_timestamp) {
1793 max_timestamp = decoder->last_reliable_timestamp +
	return timestamp >= decoder->last_reliable_timestamp &&
	       timestamp < max_timestamp;
}
1800 static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
1805 decoder->have_tma = false;
1807 if (decoder->ref_timestamp) {
1808 timestamp = intel_pt_8b_tsc(decoder->packet.payload,
1809 decoder->ref_timestamp);
1810 decoder->tsc_timestamp = timestamp;
1811 decoder->timestamp = timestamp;
1812 decoder->ref_timestamp = 0;
1813 decoder->timestamp_insn_cnt = 0;
1814 } else if (decoder->timestamp) {
1815 timestamp = decoder->packet.payload |
1816 (decoder->timestamp & (0xffULL << 56));
1817 decoder->tsc_timestamp = timestamp;
1818 if (timestamp < decoder->timestamp &&
1819 decoder->timestamp - timestamp < decoder->tsc_slip) {
1820 intel_pt_log_to("Suppressing backwards timestamp",
1822 timestamp = decoder->timestamp;
1824 if (timestamp < decoder->timestamp) {
1825 if (!decoder->buf_timestamp ||
1826 (timestamp + (1ULL << 56) < decoder->buf_timestamp)) {
1827 intel_pt_log_to("Wraparound timestamp", timestamp);
1828 timestamp += (1ULL << 56);
1829 decoder->tsc_timestamp = timestamp;
1831 intel_pt_log_to("Suppressing bad timestamp", timestamp);
1832 timestamp = decoder->timestamp;
1836 if (decoder->vm_time_correlation &&
1837 (bad || !intel_pt_time_in_range(decoder, timestamp)) &&
1838 intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
1839 p_log("Timestamp out of range");
1840 decoder->timestamp = timestamp;
1841 decoder->timestamp_insn_cnt = 0;
1844 if (decoder->last_packet_type == INTEL_PT_CYC) {
1845 decoder->cyc_ref_timestamp = decoder->timestamp;
1846 decoder->cycle_cnt = 0;
1847 decoder->have_calc_cyc_to_tsc = false;
1848 intel_pt_calc_cyc_to_tsc(decoder, false);
1851 intel_pt_log_to("Setting timestamp", decoder->timestamp);
1854 static int intel_pt_overflow(struct intel_pt_decoder *decoder)
1856 intel_pt_log("ERROR: Buffer overflow\n");
1857 intel_pt_clear_tx_flags(decoder);
1858 intel_pt_set_nr(decoder);
1859 decoder->timestamp_insn_cnt = 0;
1860 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1861 decoder->state.from_ip = decoder->ip;
1863 decoder->pge = false;
1864 intel_pt_clear_fup_event(decoder);
1865 decoder->overflow = true;
1869 static inline void intel_pt_mtc_cyc_cnt_pge(struct intel_pt_decoder *decoder)
1871 if (decoder->have_cyc)
1874 decoder->cyc_cnt_timestamp = decoder->timestamp;
1875 decoder->base_cyc_cnt = decoder->tot_cyc_cnt;
1878 static inline void intel_pt_mtc_cyc_cnt_cbr(struct intel_pt_decoder *decoder)
1880 decoder->tsc_to_cyc = decoder->cbr / decoder->max_non_turbo_ratio_fp;
1883 intel_pt_mtc_cyc_cnt_pge(decoder);
1886 static inline void intel_pt_mtc_cyc_cnt_upd(struct intel_pt_decoder *decoder)
1888 uint64_t tot_cyc_cnt, tsc_delta;
1890 if (decoder->have_cyc)
1893 decoder->sample_cyc = true;
1895 if (!decoder->pge || decoder->timestamp <= decoder->cyc_cnt_timestamp)
1898 tsc_delta = decoder->timestamp - decoder->cyc_cnt_timestamp;
1899 tot_cyc_cnt = tsc_delta * decoder->tsc_to_cyc + decoder->base_cyc_cnt;
1901 if (tot_cyc_cnt > decoder->tot_cyc_cnt)
1902 decoder->tot_cyc_cnt = tot_cyc_cnt;
1905 static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
1907 uint32_t ctc = decoder->packet.payload;
1908 uint32_t fc = decoder->packet.count;
1909 uint32_t ctc_rem = ctc & decoder->ctc_rem_mask;
1911 if (!decoder->tsc_ctc_ratio_d)
1914 if (decoder->pge && !decoder->in_psb)
1915 intel_pt_mtc_cyc_cnt_pge(decoder);
1917 intel_pt_mtc_cyc_cnt_upd(decoder);
1919 decoder->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
1920 decoder->last_ctc = ctc - ctc_rem;
1921 decoder->ctc_timestamp = decoder->tsc_timestamp - fc;
1922 if (decoder->tsc_ctc_mult) {
1923 decoder->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
1925 decoder->ctc_timestamp -= multdiv(ctc_rem,
1926 decoder->tsc_ctc_ratio_n,
1927 decoder->tsc_ctc_ratio_d);
1929 decoder->ctc_delta = 0;
1930 decoder->have_tma = true;
1931 decoder->fixup_last_mtc = true;
1932 intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n",
1933 decoder->ctc_timestamp, decoder->last_mtc, ctc_rem);
1936 static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
1939 uint32_t mtc, mtc_delta;
1941 if (!decoder->have_tma)
1944 mtc = decoder->packet.payload;
1946 if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) {
1947 decoder->fixup_last_mtc = false;
1948 intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
1949 &decoder->last_mtc);
1952 if (mtc > decoder->last_mtc)
		mtc_delta = mtc - decoder->last_mtc;
	else
		mtc_delta = mtc + 256 - decoder->last_mtc;
1957 decoder->ctc_delta += mtc_delta << decoder->mtc_shift;
1959 if (decoder->tsc_ctc_mult) {
1960 timestamp = decoder->ctc_timestamp +
1961 decoder->ctc_delta * decoder->tsc_ctc_mult;
1963 timestamp = decoder->ctc_timestamp +
1964 multdiv(decoder->ctc_delta,
1965 decoder->tsc_ctc_ratio_n,
1966 decoder->tsc_ctc_ratio_d);
1969 if (timestamp < decoder->timestamp)
1970 intel_pt_log("Suppressing MTC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n",
			     timestamp, decoder->timestamp);
	else
		decoder->timestamp = timestamp;
1975 intel_pt_mtc_cyc_cnt_upd(decoder);
1977 decoder->timestamp_insn_cnt = 0;
1978 decoder->last_mtc = mtc;
1980 if (decoder->last_packet_type == INTEL_PT_CYC) {
1981 decoder->cyc_ref_timestamp = decoder->timestamp;
1982 decoder->cycle_cnt = 0;
1983 decoder->have_calc_cyc_to_tsc = false;
1984 intel_pt_calc_cyc_to_tsc(decoder, true);
1987 intel_pt_log_to("Setting timestamp", decoder->timestamp);
static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
{
	unsigned int cbr = decoder->packet.payload & 0xff;

	decoder->cbr_payload = decoder->packet.payload;

	if (decoder->cbr == cbr)
		return;

	decoder->cbr = cbr;
	decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
	decoder->cyc_ref_timestamp = decoder->timestamp;
	decoder->cycle_cnt = 0;

	intel_pt_mtc_cyc_cnt_cbr(decoder);
}
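/*
 * Example: CBR is the core-to-bus ratio. With, say, a max_non_turbo_ratio of
 * 36 and a CBR payload of 18 (half speed), cbr_cyc_to_tsc is 36.0 / 18 == 2.0,
 * i.e. each CPU cycle counted by CYC packets is worth about two TSC ticks
 * until the next frequency change.
 */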
2007 static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
2009 uint64_t timestamp = decoder->cyc_ref_timestamp;
2011 decoder->have_cyc = true;
2013 decoder->cycle_cnt += decoder->packet.payload;
2015 decoder->tot_cyc_cnt += decoder->packet.payload;
2016 decoder->sample_cyc = true;
2018 if (!decoder->cyc_ref_timestamp)
2021 if (decoder->have_calc_cyc_to_tsc)
2022 timestamp += decoder->cycle_cnt * decoder->calc_cyc_to_tsc;
2023 else if (decoder->cbr)
2024 timestamp += decoder->cycle_cnt * decoder->cbr_cyc_to_tsc;
2028 if (timestamp < decoder->timestamp)
2029 intel_pt_log("Suppressing CYC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n",
			     timestamp, decoder->timestamp);
	else
		decoder->timestamp = timestamp;
2034 decoder->timestamp_insn_cnt = 0;
2036 intel_pt_log_to("Setting timestamp", decoder->timestamp);
2039 static void intel_pt_bbp(struct intel_pt_decoder *decoder)
2041 if (decoder->prev_pkt_ctx == INTEL_PT_NO_CTX) {
2042 memset(decoder->state.items.mask, 0, sizeof(decoder->state.items.mask));
2043 decoder->state.items.is_32_bit = false;
2045 decoder->blk_type = decoder->packet.payload;
2046 decoder->blk_type_pos = intel_pt_blk_type_pos(decoder->blk_type);
2047 if (decoder->blk_type == INTEL_PT_GP_REGS)
2048 decoder->state.items.is_32_bit = decoder->packet.count;
2049 if (decoder->blk_type_pos < 0) {
2050 intel_pt_log("WARNING: Unknown block type %u\n",
2052 } else if (decoder->state.items.mask[decoder->blk_type_pos]) {
2053 intel_pt_log("WARNING: Duplicate block type %u\n",
2058 static void intel_pt_bip(struct intel_pt_decoder *decoder)
2060 uint32_t id = decoder->packet.count;
2061 uint32_t bit = 1 << id;
2062 int pos = decoder->blk_type_pos;
2064 if (pos < 0 || id >= INTEL_PT_BLK_ITEM_ID_CNT) {
2065 intel_pt_log("WARNING: Unknown block item %u type %d\n",
2066 id, decoder->blk_type);
2070 if (decoder->state.items.mask[pos] & bit) {
2071 intel_pt_log("WARNING: Duplicate block item %u type %d\n",
2072 id, decoder->blk_type);
2075 decoder->state.items.mask[pos] |= bit;
2076 decoder->state.items.val[pos][id] = decoder->packet.payload;
2079 /* Walk PSB+ packets when already in sync. */
2080 static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
2084 decoder->in_psb = true;
2087 err = intel_pt_get_next_packet(decoder);
2091 switch (decoder->packet.type) {
2092 case INTEL_PT_PSBEND:
2096 case INTEL_PT_TIP_PGD:
2097 case INTEL_PT_TIP_PGE:
2100 case INTEL_PT_TRACESTOP:
2103 case INTEL_PT_PTWRITE:
2104 case INTEL_PT_PTWRITE_IP:
2105 case INTEL_PT_EXSTOP:
2106 case INTEL_PT_EXSTOP_IP:
2107 case INTEL_PT_MWAIT:
2113 case INTEL_PT_BEP_IP:
2115 case INTEL_PT_CFE_IP:
2117 decoder->have_tma = false;
2118 intel_pt_log("ERROR: Unexpected packet\n");
2123 err = intel_pt_overflow(decoder);
2127 intel_pt_calc_tsc_timestamp(decoder);
2131 intel_pt_calc_tma(decoder);
2135 intel_pt_calc_cbr(decoder);
2138 case INTEL_PT_MODE_EXEC:
2139 intel_pt_mode_exec_status(decoder);
2143 intel_pt_set_pip(decoder);
2147 decoder->pge = true;
2148 if (decoder->packet.count) {
2149 intel_pt_set_last_ip(decoder);
2150 decoder->psb_ip = decoder->last_ip;
2154 case INTEL_PT_MODE_TSX:
2155 intel_pt_update_in_tx(decoder);
2159 intel_pt_calc_mtc_timestamp(decoder);
2160 if (decoder->period_type == INTEL_PT_PERIOD_MTC)
2161 decoder->state.type |= INTEL_PT_INSTRUCTION;
2165 intel_pt_calc_cyc_timestamp(decoder);
2176 decoder->in_psb = false;
2181 static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
2185 if (decoder->tx_flags & INTEL_PT_ABORT_TX) {
2186 decoder->tx_flags = 0;
2187 decoder->state.flags &= ~INTEL_PT_IN_TX;
2188 decoder->state.flags |= INTEL_PT_ABORT_TX;
2190 decoder->state.flags |= INTEL_PT_ASYNC;
2194 err = intel_pt_get_next_packet(decoder);
2198 switch (decoder->packet.type) {
2201 case INTEL_PT_TRACESTOP:
2205 case INTEL_PT_MODE_TSX:
2207 case INTEL_PT_PSBEND:
2208 case INTEL_PT_PTWRITE:
2209 case INTEL_PT_PTWRITE_IP:
2210 case INTEL_PT_EXSTOP:
2211 case INTEL_PT_EXSTOP_IP:
2212 case INTEL_PT_MWAIT:
2218 case INTEL_PT_BEP_IP:
2220 case INTEL_PT_CFE_IP:
2222 intel_pt_log("ERROR: Missing TIP after FUP\n");
2223 decoder->pkt_state = INTEL_PT_STATE_ERR3;
2224 decoder->pkt_step = 0;
2228 intel_pt_calc_cbr(decoder);
2232 return intel_pt_overflow(decoder);
2234 case INTEL_PT_TIP_PGD:
2235 decoder->state.from_ip = decoder->ip;
2236 if (decoder->packet.count == 0) {
2237 decoder->state.to_ip = 0;
2239 intel_pt_set_ip(decoder);
2240 decoder->state.to_ip = decoder->ip;
2242 decoder->pge = false;
2243 decoder->continuous_period = false;
2244 decoder->state.type |= INTEL_PT_TRACE_END;
2245 intel_pt_update_nr(decoder);
2248 case INTEL_PT_TIP_PGE:
2249 decoder->pge = true;
2250 intel_pt_log("Omitting PGE ip " x64_fmt "\n",
2252 decoder->state.from_ip = 0;
2253 if (decoder->packet.count == 0) {
2254 decoder->state.to_ip = 0;
2256 intel_pt_set_ip(decoder);
2257 decoder->state.to_ip = decoder->ip;
2259 decoder->state.type |= INTEL_PT_TRACE_BEGIN;
2260 intel_pt_mtc_cyc_cnt_pge(decoder);
2261 intel_pt_set_nr(decoder);
2265 decoder->state.from_ip = decoder->ip;
2266 if (decoder->packet.count == 0) {
2267 decoder->state.to_ip = 0;
2269 intel_pt_set_ip(decoder);
2270 decoder->state.to_ip = decoder->ip;
2272 intel_pt_update_nr(decoder);
2273 intel_pt_sample_iflag_chg(decoder);
2277 intel_pt_update_pip(decoder);
2281 intel_pt_calc_mtc_timestamp(decoder);
2282 if (decoder->period_type == INTEL_PT_PERIOD_MTC)
2283 decoder->state.type |= INTEL_PT_INSTRUCTION;
2287 intel_pt_calc_cyc_timestamp(decoder);
2290 case INTEL_PT_MODE_EXEC:
2291 intel_pt_mode_exec(decoder);
2300 return intel_pt_bug(decoder);
2305 static int intel_pt_resample(struct intel_pt_decoder *decoder)
2307 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
2308 decoder->state.type = INTEL_PT_INSTRUCTION;
2309 decoder->state.from_ip = decoder->ip;
2310 decoder->state.to_ip = 0;
2314 struct intel_pt_vm_tsc_info {
2315 struct intel_pt_pkt pip_packet;
2316 struct intel_pt_pkt vmcs_packet;
2317 struct intel_pt_pkt tma_packet;
2318 bool tsc, pip, vmcs, tma, psbend;
2324 /* Lookahead and get the PIP, VMCS and TMA packets from PSB+ */
2325 static int intel_pt_vm_psb_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
2327 struct intel_pt_vm_tsc_info *data = pkt_info->data;
2329 switch (pkt_info->packet.type) {
2332 case INTEL_PT_MODE_EXEC:
2333 case INTEL_PT_MODE_TSX:
2345 data->tma_packet = pkt_info->packet;
2350 data->pip_packet = pkt_info->packet;
2355 data->vmcs_packet = pkt_info->packet;
2359 case INTEL_PT_PSBEND:
2360 data->psbend = true;
2363 case INTEL_PT_TIP_PGE:
2364 case INTEL_PT_PTWRITE:
2365 case INTEL_PT_PTWRITE_IP:
2366 case INTEL_PT_EXSTOP:
2367 case INTEL_PT_EXSTOP_IP:
2368 case INTEL_PT_MWAIT:
2374 case INTEL_PT_BEP_IP:
2378 case INTEL_PT_TIP_PGD:
2381 case INTEL_PT_TRACESTOP:
2383 case INTEL_PT_CFE_IP:
2392 struct intel_pt_ovf_fup_info {
2397 /* Lookahead to detect a FUP packet after OVF */
2398 static int intel_pt_ovf_fup_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
2400 struct intel_pt_ovf_fup_info *data = pkt_info->data;
2402 if (pkt_info->packet.type == INTEL_PT_CYC ||
2403 pkt_info->packet.type == INTEL_PT_MTC ||
2404 pkt_info->packet.type == INTEL_PT_TSC)
2405 return !--(data->max_lookahead);
2406 data->found = pkt_info->packet.type == INTEL_PT_FUP;
2410 static bool intel_pt_ovf_fup_lookahead(struct intel_pt_decoder *decoder)
2412 struct intel_pt_ovf_fup_info data = {
2413 .max_lookahead = 16,
2417 intel_pt_pkt_lookahead(decoder, intel_pt_ovf_fup_lookahead_cb, &data);
2421 /* Lookahead and get the TMA packet after TSC */
2422 static int intel_pt_tma_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
2424 struct intel_pt_vm_tsc_info *data = pkt_info->data;
2426 if (pkt_info->packet.type == INTEL_PT_CYC ||
2427 pkt_info->packet.type == INTEL_PT_MTC)
2428 return !--(data->max_lookahead);
2430 if (pkt_info->packet.type == INTEL_PT_TMA) {
2431 data->tma_packet = pkt_info->packet;
static uint64_t intel_pt_ctc_to_tsc(struct intel_pt_decoder *decoder, uint64_t ctc)
{
	if (decoder->tsc_ctc_mult)
		return ctc * decoder->tsc_ctc_mult;
	else
		return multdiv(ctc, decoder->tsc_ctc_ratio_n, decoder->tsc_ctc_ratio_d);
}
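/*
 * Example: the CTC:TSC ratio is typically derived from CPUID leaf 0x15. For
 * ratio_n == 8, ratio_d == 1, tsc_ctc_mult is 8 and 1000 CTC ticks are 8000
 * TSC ticks; for a non-integer ratio such as 192:10, 1000 CTC ticks are
 * multdiv(1000, 192, 10) == 19200 TSC ticks.
 */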
2445 static uint64_t intel_pt_calc_expected_tsc(struct intel_pt_decoder *decoder,
2448 uint64_t last_ctc_timestamp,
2452 /* Number of CTC ticks from last_ctc_timestamp to last_mtc */
2453 uint64_t last_mtc_ctc = last_ctc + ctc_delta;
	/*
	 * Number of CTC ticks from there until current TMA packet. We would
	 * expect last_mtc_ctc to be before ctc, but the TSC packet can slip
	 * past an MTC, so a sign-extended value is used.
	 */
2459 uint64_t delta = (int16_t)((uint16_t)ctc - (uint16_t)last_mtc_ctc);
2460 /* Total CTC ticks from last_ctc_timestamp to current TMA packet */
2461 uint64_t new_ctc_delta = ctc_delta + delta;
2462 uint64_t expected_tsc;
	/*
	 * Convert CTC ticks to TSC ticks, add the starting point
	 * (last_ctc_timestamp) and the fast counter from the TMA packet.
	 */
2468 expected_tsc = last_ctc_timestamp + intel_pt_ctc_to_tsc(decoder, new_ctc_delta) + fc;
2470 if (intel_pt_enable_logging) {
2471 intel_pt_log_x64(last_mtc_ctc);
2472 intel_pt_log_x32(last_ctc);
2473 intel_pt_log_x64(ctc_delta);
2474 intel_pt_log_x64(delta);
2475 intel_pt_log_x32(ctc);
2476 intel_pt_log_x64(new_ctc_delta);
2477 intel_pt_log_x64(last_ctc_timestamp);
2478 intel_pt_log_x32(fc);
2479 intel_pt_log_x64(intel_pt_ctc_to_tsc(decoder, new_ctc_delta));
2480 intel_pt_log_x64(expected_tsc);
2483 return expected_tsc;
2486 static uint64_t intel_pt_expected_tsc(struct intel_pt_decoder *decoder,
2487 struct intel_pt_vm_tsc_info *data)
2489 uint32_t ctc = data->tma_packet.payload;
2490 uint32_t fc = data->tma_packet.count;
2492 return intel_pt_calc_expected_tsc(decoder, ctc, fc,
2493 decoder->ctc_timestamp,
2494 data->ctc_delta, data->last_ctc);
2497 static void intel_pt_translate_vm_tsc(struct intel_pt_decoder *decoder,
2498 struct intel_pt_vmcs_info *vmcs_info)
2500 uint64_t payload = decoder->packet.payload;
2502 /* VMX adds the TSC Offset, so subtract to get host TSC */
2503 decoder->packet.payload -= vmcs_info->tsc_offset;
2504 /* TSC packet has only 7 bytes */
2505 decoder->packet.payload &= SEVEN_BYTES;
2508 * The buffer is mmapped from the data file, so this also updates the data file.
2511 if (!decoder->vm_tm_corr_dry_run)
2512 memcpy((void *)decoder->buf + 1, &decoder->packet.payload, 7);
2514 intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
2515 " VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
2516 payload, decoder->packet.payload, vmcs_info->vmcs,
2517 vmcs_info->tsc_offset);
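/*
 * Editorial note, worked example with hypothetical numbers: VMX computes
 * guest TSC = host TSC + TSC Offset, and the offset is typically a large
 * "negative" two's-complement value. With a 7-byte guest payload of
 * 0x11223344556677 and tsc_offset = 0xfffffffffffff000 (i.e. -0x1000), the
 * subtraction above yields 0x11223344557677 after masking to SEVEN_BYTES.
 * The memcpy() then patches the 7 payload bytes of the TSC packet in place,
 * skipping the packet header byte at decoder->buf[0].
 */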
2520 static void intel_pt_translate_vm_tsc_offset(struct intel_pt_decoder *decoder,
2521 uint64_t tsc_offset)
2523 struct intel_pt_vmcs_info vmcs_info = {
2525 .tsc_offset = tsc_offset
2528 intel_pt_translate_vm_tsc(decoder, &vmcs_info);
2531 static inline bool in_vm(uint64_t pip_payload)
2533 return pip_payload & 1;
2536 static inline bool pip_in_vm(struct intel_pt_pkt *pip_packet)
2538 return pip_packet->payload & 1;
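/*
 * Editorial note: both helpers above test bit 0 of the PIP payload as stored
 * by the packet decoder, which this file treats as the VMX non-root (NR)
 * indicator: a set bit means the traced code was running in a VM guest.
 */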
2541 static void intel_pt_print_vmcs_info(struct intel_pt_vmcs_info *vmcs_info)
2543 p_log("VMCS: %#" PRIx64 " TSC Offset %#" PRIx64,
2544 vmcs_info->vmcs, vmcs_info->tsc_offset);
2547 static void intel_pt_vm_tm_corr_psb(struct intel_pt_decoder *decoder,
2548 struct intel_pt_vm_tsc_info *data)
2550 memset(data, 0, sizeof(*data));
2551 data->ctc_delta = decoder->ctc_delta;
2552 data->last_ctc = decoder->last_ctc;
2553 intel_pt_pkt_lookahead(decoder, intel_pt_vm_psb_lookahead_cb, data);
2554 if (data->tsc && !data->psbend)
2555 p_log("ERROR: PSB without PSBEND");
2556 decoder->in_psb = data->psbend;
2559 static void intel_pt_vm_tm_corr_first_tsc(struct intel_pt_decoder *decoder,
2560 struct intel_pt_vm_tsc_info *data,
2561 struct intel_pt_vmcs_info *vmcs_info,
2564 if (!decoder->in_psb) {
2566 p_log("ERROR: First TSC is not in PSB+");
2570 if (pip_in_vm(&data->pip_packet)) { /* Guest */
2571 if (vmcs_info && vmcs_info->tsc_offset) {
2572 intel_pt_translate_vm_tsc(decoder, vmcs_info);
2573 decoder->vm_tm_corr_reliable = true;
2575 p_log("ERROR: First TSC, unknown TSC Offset");
2578 decoder->vm_tm_corr_reliable = true;
2580 } else { /* Host or Guest */
2581 decoder->vm_tm_corr_reliable = false;
2582 if (intel_pt_time_in_range(decoder, host_tsc)) {
2586 if (vmcs_info && vmcs_info->tsc_offset)
2587 intel_pt_translate_vm_tsc(decoder, vmcs_info);
2589 p_log("ERROR: First TSC, no PIP, unknown TSC Offset");
2594 static void intel_pt_vm_tm_corr_tsc(struct intel_pt_decoder *decoder,
2595 struct intel_pt_vm_tsc_info *data)
2597 struct intel_pt_vmcs_info *vmcs_info;
2598 uint64_t tsc_offset = 0;
2600 bool reliable = true;
2601 uint64_t expected_tsc;
2603 uint64_t ref_timestamp;
2605 bool assign = false;
2606 bool assign_reliable = false;
2608 /* Already have 'data' for the in_psb case */
2609 if (!decoder->in_psb) {
2610 memset(data, 0, sizeof(*data));
2611 data->ctc_delta = decoder->ctc_delta;
2612 data->last_ctc = decoder->last_ctc;
2613 data->max_lookahead = 16;
2614 intel_pt_pkt_lookahead(decoder, intel_pt_tma_lookahead_cb, data);
2617 data->pip_packet.payload = decoder->pip_payload;
2621 /* Calculations depend on having TMA packets */
2623 p_log("ERROR: TSC without TMA");
2627 vmcs = data->vmcs ? data->vmcs_packet.payload : decoder->vmcs;
2628 if (vmcs == NO_VMCS)
2631 vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);
2633 ref_timestamp = decoder->timestamp ? decoder->timestamp : decoder->buf_timestamp;
2634 host_tsc = intel_pt_8b_tsc(decoder->packet.payload, ref_timestamp);
2636 if (!decoder->ctc_timestamp) {
2637 intel_pt_vm_tm_corr_first_tsc(decoder, data, vmcs_info, host_tsc);
2641 expected_tsc = intel_pt_expected_tsc(decoder, data);
2643 tsc_offset = host_tsc - expected_tsc;
2645 /* Determine if TSC is from Host or Guest */
2647 if (pip_in_vm(&data->pip_packet)) { /* Guest */
2649 /* PIP NR=1 without VMCS cannot happen */
2650 p_log("ERROR: Missing VMCS");
2651 intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
2652 decoder->vm_tm_corr_reliable = false;
2656 decoder->last_reliable_timestamp = host_tsc;
2657 decoder->vm_tm_corr_reliable = true;
2660 } else { /* Host or Guest */
2661 reliable = false; /* Host/Guest is a guess, so not reliable */
2662 if (decoder->in_psb) {
2664 return; /* Zero TSC Offset, assume Host */
2666 * TSC packet has only 7 bytes of TSC. We have no
2667 * information about the Guest's 8th byte, but it
2668 * doesn't matter because we only need 7 bytes.
2669 * Here, since the 8th byte is unreliable and
2670 * irrelevant, compare only 7 bytes.
2673 (tsc_offset & SEVEN_BYTES) ==
2674 (vmcs_info->tsc_offset & SEVEN_BYTES)) {
2675 /* Same TSC Offset as last VMCS, assume Guest */
2680 * Check if the host_tsc is within the expected range.
2681 * Note, we could narrow the range more by looking ahead for
2682 * the next host TSC in the same buffer, but we don't bother to
2683 * do that because this is probably good enough.
2685 if (host_tsc >= expected_tsc && intel_pt_time_in_range(decoder, host_tsc)) {
2686 /* Within expected range for Host TSC, assume Host */
2687 decoder->vm_tm_corr_reliable = false;
2692 guest: /* Assuming Guest */
2694 /* Determine whether to assign TSC Offset */
2695 if (vmcs_info && vmcs_info->vmcs) {
2696 if (vmcs_info->tsc_offset && vmcs_info->reliable) {
2698 } else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_reliable &&
2699 decoder->vm_tm_corr_continuous && decoder->vm_tm_corr_same_buf) {
2700 /* Continuous tracing, TSC in a PSB is not a time loss */
2702 assign_reliable = true;
2703 } else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_same_buf) {
2705 * Unlikely to be a time loss TSC in a PSB which is not
2706 * at the start of a buffer.
2709 assign_reliable = false;
2713 /* Record VMCS TSC Offset */
2714 if (assign && (vmcs_info->tsc_offset != tsc_offset ||
2715 vmcs_info->reliable != assign_reliable)) {
2716 bool print = vmcs_info->tsc_offset != tsc_offset;
2718 vmcs_info->tsc_offset = tsc_offset;
2719 vmcs_info->reliable = assign_reliable;
2721 intel_pt_print_vmcs_info(vmcs_info);
2724 /* Determine what TSC Offset to use */
2725 if (vmcs_info && vmcs_info->tsc_offset) {
2726 if (!vmcs_info->reliable)
2728 intel_pt_translate_vm_tsc(decoder, vmcs_info);
2732 if (!vmcs_info->error_printed) {
2733 p_log("ERROR: Unknown TSC Offset for VMCS %#" PRIx64,
2735 vmcs_info->error_printed = true;
2738 if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
2739 p_log("ERROR: Unknown VMCS");
2741 intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
2744 decoder->vm_tm_corr_reliable = reliable;
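/*
 * Editorial note, summarizing the decision above: tsc_offset is derived as
 * host_tsc - expected_tsc, where expected_tsc is what the host TSC should
 * read according to the CTC/TMA timing stream. A preceding PIP with NR=1
 * forces the Guest path and the packet is rewritten using the VMCS TSC
 * Offset. Without that, the code falls back to heuristics (zero offset, an
 * offset matching the last VMCS, or host_tsc landing in the expected host
 * time range) to guess Host versus Guest, and marks the result as unreliable
 * accordingly.
 */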
2747 static void intel_pt_vm_tm_corr_pebs_tsc(struct intel_pt_decoder *decoder)
2749 uint64_t host_tsc = decoder->packet.payload;
2750 uint64_t guest_tsc = decoder->packet.payload;
2751 struct intel_pt_vmcs_info *vmcs_info;
2754 vmcs = decoder->vmcs;
2755 if (vmcs == NO_VMCS)
2758 vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);
2761 if (in_vm(decoder->pip_payload)) { /* Guest */
2763 /* PIP NR=1 without VMCS cannot happen */
2764 p_log("ERROR: Missing VMCS");
2769 } else { /* Host or Guest */
2770 if (intel_pt_time_in_range(decoder, host_tsc)) {
2771 /* Within expected range for Host TSC, assume Host */
2777 /* Translate Guest TSC to Host TSC */
2778 host_tsc = ((guest_tsc & SEVEN_BYTES) - vmcs_info->tsc_offset) & SEVEN_BYTES;
2779 host_tsc = intel_pt_8b_tsc(host_tsc, decoder->timestamp);
2780 intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
2781 " VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
2782 guest_tsc, host_tsc, vmcs_info->vmcs,
2783 vmcs_info->tsc_offset);
2784 if (!intel_pt_time_in_range(decoder, host_tsc) &&
2785 intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
2786 p_log("Timestamp out of range");
2788 if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
2789 p_log("ERROR: Unknown VMCS");
2790 host_tsc = decoder->timestamp;
2793 decoder->packet.payload = host_tsc;
2795 if (!decoder->vm_tm_corr_dry_run)
2796 memcpy((void *)decoder->buf + 1, &host_tsc, 8);
2799 static int intel_pt_vm_time_correlation(struct intel_pt_decoder *decoder)
2801 struct intel_pt_vm_tsc_info data = { .psbend = false };
2805 if (decoder->in_psb)
2806 intel_pt_vm_tm_corr_psb(decoder, &data);
2809 err = intel_pt_get_next_packet(decoder);
2810 if (err == -ENOLINK)
2815 switch (decoder->packet.type) {
2816 case INTEL_PT_TIP_PGD:
2817 decoder->pge = false;
2818 decoder->vm_tm_corr_continuous = false;
2823 case INTEL_PT_TIP_PGE:
2824 decoder->pge = true;
2828 decoder->in_psb = false;
2830 decoder->pge = intel_pt_ovf_fup_lookahead(decoder);
2831 if (pge != decoder->pge)
2832 intel_pt_log("Surprising PGE change in OVF!");
2834 decoder->vm_tm_corr_continuous = false;
2838 if (decoder->in_psb)
2839 decoder->pge = true;
2842 case INTEL_PT_TRACESTOP:
2843 decoder->pge = false;
2844 decoder->vm_tm_corr_continuous = false;
2845 decoder->have_tma = false;
2849 intel_pt_vm_tm_corr_psb(decoder, &data);
2853 decoder->pip_payload = decoder->packet.payload;
2857 intel_pt_calc_mtc_timestamp(decoder);
2861 intel_pt_vm_tm_corr_tsc(decoder, &data);
2862 intel_pt_calc_tsc_timestamp(decoder);
2863 decoder->vm_tm_corr_same_buf = true;
2864 decoder->vm_tm_corr_continuous = decoder->pge;
2868 intel_pt_calc_tma(decoder);
2872 intel_pt_calc_cyc_timestamp(decoder);
2876 intel_pt_calc_cbr(decoder);
2879 case INTEL_PT_PSBEND:
2880 decoder->in_psb = false;
2881 data.psbend = false;
2885 if (decoder->packet.payload != NO_VMCS)
2886 decoder->vmcs = decoder->packet.payload;
2890 decoder->blk_type = decoder->packet.payload;
2894 if (decoder->blk_type == INTEL_PT_PEBS_BASIC &&
2895 decoder->packet.count == 2)
2896 intel_pt_vm_tm_corr_pebs_tsc(decoder);
2900 case INTEL_PT_BEP_IP:
2901 decoder->blk_type = 0;
2905 case INTEL_PT_CFE_IP:
2907 case INTEL_PT_MODE_EXEC:
2908 case INTEL_PT_MODE_TSX:
2911 case INTEL_PT_PTWRITE_IP:
2912 case INTEL_PT_PTWRITE:
2913 case INTEL_PT_MWAIT:
2915 case INTEL_PT_EXSTOP_IP:
2916 case INTEL_PT_EXSTOP:
2918 case INTEL_PT_BAD: /* Does not happen */
2927 #define HOP_PROCESS 0
2928 #define HOP_IGNORE 1
2929 #define HOP_RETURN 2
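/*
 * Editorial note on the codes above, based on how intel_pt_walk_trace() uses
 * intel_pt_hop_trace(): HOP_PROCESS means fall through to normal packet
 * handling, HOP_IGNORE means the packet has been dealt with so fetch the
 * next one, and HOP_RETURN means return to the caller with whatever sample
 * state / *err the hop handler set up.
 */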
2932 static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
2934 /* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
2935 static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
2939 /* Leap from PSB to PSB, getting ip from FUP within PSB+ */
2940 if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
2941 *err = intel_pt_scan_for_psb(decoder);
2946 switch (decoder->packet.type) {
2950 case INTEL_PT_TIP_PGD:
2951 decoder->pge = false;
2952 if (!decoder->packet.count) {
2953 intel_pt_set_nr(decoder);
2956 intel_pt_set_ip(decoder);
2957 decoder->state.type |= INTEL_PT_TRACE_END;
2958 decoder->state.from_ip = 0;
2959 decoder->state.to_ip = decoder->ip;
2960 intel_pt_update_nr(decoder);
2964 if (!decoder->packet.count) {
2965 intel_pt_set_nr(decoder);
2968 intel_pt_set_ip(decoder);
2969 decoder->state.type = INTEL_PT_INSTRUCTION;
2970 decoder->state.from_ip = decoder->ip;
2971 decoder->state.to_ip = 0;
2972 intel_pt_update_nr(decoder);
2973 intel_pt_sample_iflag_chg(decoder);
2977 if (!decoder->packet.count)
2979 intel_pt_set_ip(decoder);
2980 if (decoder->set_fup_mwait || decoder->set_fup_pwre)
2982 if (!decoder->branch_enable || !decoder->pge)
2985 decoder->state.type = INTEL_PT_INSTRUCTION;
2986 decoder->state.from_ip = decoder->ip;
2987 decoder->state.to_ip = 0;
2988 intel_pt_fup_event(decoder, *no_tip);
2991 intel_pt_fup_event(decoder, *no_tip);
2992 decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH;
2993 *err = intel_pt_walk_fup_tip(decoder);
2994 if (!*err && decoder->state.to_ip)
2995 decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
2999 decoder->state.psb_offset = decoder->pos;
3000 decoder->psb_ip = 0;
3001 decoder->last_ip = 0;
3002 decoder->have_last_ip = true;
3003 *err = intel_pt_walk_psbend(decoder);
3004 if (*err == -EAGAIN)
3008 decoder->state.type = INTEL_PT_PSB_EVT;
3009 if (decoder->psb_ip) {
3010 decoder->state.type |= INTEL_PT_INSTRUCTION;
3011 decoder->ip = decoder->psb_ip;
3013 decoder->state.from_ip = decoder->psb_ip;
3014 decoder->state.to_ip = 0;
3019 case INTEL_PT_TIP_PGE:
3022 case INTEL_PT_MODE_EXEC:
3023 case INTEL_PT_MODE_TSX:
3027 case INTEL_PT_PSBEND:
3029 case INTEL_PT_TRACESTOP:
3033 case INTEL_PT_PTWRITE:
3034 case INTEL_PT_PTWRITE_IP:
3035 case INTEL_PT_EXSTOP:
3036 case INTEL_PT_EXSTOP_IP:
3037 case INTEL_PT_MWAIT:
3043 case INTEL_PT_BEP_IP:
3045 case INTEL_PT_CFE_IP:
3052 struct intel_pt_psb_info {
3053 struct intel_pt_pkt fup_packet;
3058 /* Lookahead and get the FUP packet from PSB+ */
3059 static int intel_pt_psb_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
3061 struct intel_pt_psb_info *data = pkt_info->data;
3063 switch (pkt_info->packet.type) {
3068 case INTEL_PT_MODE_EXEC:
3069 case INTEL_PT_MODE_TSX:
3075 if (data->after_psbend) {
3076 data->after_psbend -= 1;
3077 if (!data->after_psbend)
3083 if (data->after_psbend)
3085 if (data->fup || pkt_info->packet.count == 0)
3087 data->fup_packet = pkt_info->packet;
3091 case INTEL_PT_PSBEND:
3094 /* Keep going to check for a TIP.PGE */
3095 data->after_psbend = 6;
3098 case INTEL_PT_TIP_PGE:
3099 /* Ignore FUP in PSB+ if followed by TIP.PGE */
3100 if (data->after_psbend)
3104 case INTEL_PT_PTWRITE:
3105 case INTEL_PT_PTWRITE_IP:
3106 case INTEL_PT_EXSTOP:
3107 case INTEL_PT_EXSTOP_IP:
3108 case INTEL_PT_MWAIT:
3114 case INTEL_PT_BEP_IP:
3116 case INTEL_PT_CFE_IP:
3118 if (data->after_psbend) {
3119 data->after_psbend -= 1;
3120 if (!data->after_psbend)
3129 case INTEL_PT_TIP_PGD:
3132 case INTEL_PT_TRACESTOP:
3140 static int intel_pt_psb(struct intel_pt_decoder *decoder)
3144 decoder->last_ip = 0;
3145 decoder->psb_ip = 0;
3146 decoder->have_last_ip = true;
3147 intel_pt_clear_stack(&decoder->stack);
3148 err = intel_pt_walk_psbend(decoder);
3151 decoder->state.type = INTEL_PT_PSB_EVT;
3152 decoder->state.from_ip = decoder->psb_ip;
3153 decoder->state.to_ip = 0;
3157 static int intel_pt_fup_in_psb(struct intel_pt_decoder *decoder)
3161 if (decoder->ip != decoder->last_ip) {
3162 err = intel_pt_walk_fup(decoder);
3163 if (!err || err != -EAGAIN)
3167 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
3168 err = intel_pt_psb(decoder);
3170 decoder->pkt_state = INTEL_PT_STATE_ERR3;
3177 static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
3179 struct intel_pt_psb_info data = { .fup = false };
3181 if (!decoder->branch_enable)
3184 intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
3188 decoder->packet = data.fup_packet;
3189 intel_pt_set_last_ip(decoder);
3190 decoder->pkt_state = INTEL_PT_STATE_FUP_IN_PSB;
3192 *err = intel_pt_fup_in_psb(decoder);
3197 static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
3199 int last_packet_type = INTEL_PT_PAD;
3200 bool no_tip = false;
3204 err = intel_pt_get_next_packet(decoder);
3209 if (decoder->cyc_threshold) {
3210 if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
3211 decoder->sample_cyc = false;
3212 last_packet_type = decoder->packet.type;
3216 switch (intel_pt_hop_trace(decoder, &no_tip, &err)) {
3228 switch (decoder->packet.type) {
3230 if (!decoder->packet.count)
3232 decoder->tnt = decoder->packet;
3233 decoder->pkt_state = INTEL_PT_STATE_TNT;
3234 err = intel_pt_walk_tnt(decoder);
3239 case INTEL_PT_TIP_PGD:
3240 if (decoder->packet.count != 0)
3241 intel_pt_set_last_ip(decoder);
3242 decoder->pkt_state = INTEL_PT_STATE_TIP_PGD;
3243 return intel_pt_walk_tip(decoder);
3245 case INTEL_PT_TIP_PGE: {
3246 decoder->pge = true;
3247 decoder->overflow = false;
3248 intel_pt_mtc_cyc_cnt_pge(decoder);
3249 intel_pt_set_nr(decoder);
3250 if (decoder->packet.count == 0) {
3251 intel_pt_log_at("Skipping zero TIP.PGE",
3255 intel_pt_sample_iflag_chg(decoder);
3256 intel_pt_set_ip(decoder);
3257 decoder->state.from_ip = 0;
3258 decoder->state.to_ip = decoder->ip;
3259 decoder->state.type |= INTEL_PT_TRACE_BEGIN;
3261 * In hop mode, resample to get the to_ip as an
3262 * "instruction" sample.
3265 decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
3270 return intel_pt_overflow(decoder);
3273 if (decoder->packet.count != 0)
3274 intel_pt_set_last_ip(decoder);
3275 decoder->pkt_state = INTEL_PT_STATE_TIP;
3276 return intel_pt_walk_tip(decoder);
3279 if (decoder->packet.count == 0) {
3280 intel_pt_log_at("Skipping zero FUP",
3285 intel_pt_set_last_ip(decoder);
3286 if (!decoder->branch_enable || !decoder->pge) {
3287 decoder->ip = decoder->last_ip;
3288 if (intel_pt_fup_event(decoder, no_tip))
3293 if (decoder->set_fup_mwait)
3296 decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP;
3298 decoder->pkt_state = INTEL_PT_STATE_FUP;
3299 err = intel_pt_walk_fup(decoder);
3306 return intel_pt_walk_fup_tip(decoder);
3308 case INTEL_PT_TRACESTOP:
3309 decoder->pge = false;
3310 decoder->continuous_period = false;
3311 intel_pt_clear_tx_flags(decoder);
3312 decoder->have_tma = false;
3316 decoder->state.psb_offset = decoder->pos;
3317 decoder->psb_ip = 0;
3318 if (intel_pt_psb_with_fup(decoder, &err))
3320 err = intel_pt_psb(decoder);
3326 intel_pt_update_pip(decoder);
3330 intel_pt_calc_mtc_timestamp(decoder);
3331 if (decoder->period_type != INTEL_PT_PERIOD_MTC)
3334 * Ensure that there has been an instruction since the last MTC.
3337 if (!decoder->mtc_insn)
3339 decoder->mtc_insn = false;
3340 /* Ensure that there is a timestamp */
3341 if (!decoder->timestamp)
3343 decoder->state.type = INTEL_PT_INSTRUCTION;
3344 decoder->state.from_ip = decoder->ip;
3345 decoder->state.to_ip = 0;
3346 decoder->mtc_insn = false;
3350 intel_pt_calc_tsc_timestamp(decoder);
3354 intel_pt_calc_tma(decoder);
3358 intel_pt_calc_cyc_timestamp(decoder);
3362 intel_pt_calc_cbr(decoder);
3363 if (decoder->cbr != decoder->cbr_seen) {
3364 decoder->state.type = 0;
3369 case INTEL_PT_MODE_EXEC:
3370 intel_pt_mode_exec(decoder);
3371 err = intel_pt_get_next_packet(decoder);
3374 if (decoder->packet.type == INTEL_PT_FUP) {
3375 decoder->set_fup_mode_exec = true;
3380 case INTEL_PT_MODE_TSX:
3381 /* MODE_TSX need not be followed by FUP */
3382 if (!decoder->pge || decoder->in_psb) {
3383 intel_pt_update_in_tx(decoder);
3386 err = intel_pt_mode_tsx(decoder, &no_tip);
3391 case INTEL_PT_BAD: /* Does not happen */
3392 return intel_pt_bug(decoder);
3394 case INTEL_PT_PSBEND:
3400 case INTEL_PT_PTWRITE_IP:
3401 decoder->fup_ptw_payload = decoder->packet.payload;
3402 err = intel_pt_get_next_packet(decoder);
3405 if (decoder->packet.type == INTEL_PT_FUP) {
3406 decoder->set_fup_ptw = true;
3409 intel_pt_log_at("ERROR: Missing FUP after PTWRITE",
3414 case INTEL_PT_PTWRITE:
3415 decoder->state.type = INTEL_PT_PTW;
3416 decoder->state.from_ip = decoder->ip;
3417 decoder->state.to_ip = 0;
3418 decoder->state.ptw_payload = decoder->packet.payload;
3421 case INTEL_PT_MWAIT:
3422 decoder->fup_mwait_payload = decoder->packet.payload;
3423 decoder->set_fup_mwait = true;
3427 if (decoder->set_fup_mwait) {
3428 decoder->fup_pwre_payload =
3429 decoder->packet.payload;
3430 decoder->set_fup_pwre = true;
3433 decoder->state.type = INTEL_PT_PWR_ENTRY;
3434 decoder->state.from_ip = decoder->ip;
3435 decoder->state.to_ip = 0;
3436 decoder->state.pwrx_payload = decoder->packet.payload;
3439 case INTEL_PT_EXSTOP_IP:
3440 err = intel_pt_get_next_packet(decoder);
3443 if (decoder->packet.type == INTEL_PT_FUP) {
3444 decoder->set_fup_exstop = true;
3447 intel_pt_log_at("ERROR: Missing FUP after EXSTOP",
3452 case INTEL_PT_EXSTOP:
3453 decoder->state.type = INTEL_PT_EX_STOP;
3454 decoder->state.from_ip = decoder->ip;
3455 decoder->state.to_ip = 0;
3459 decoder->state.type = INTEL_PT_PWR_EXIT;
3460 decoder->state.from_ip = decoder->ip;
3461 decoder->state.to_ip = 0;
3462 decoder->state.pwrx_payload = decoder->packet.payload;
3466 intel_pt_bbp(decoder);
3470 intel_pt_bip(decoder);
3474 decoder->state.type = INTEL_PT_BLK_ITEMS;
3475 decoder->state.from_ip = decoder->ip;
3476 decoder->state.to_ip = 0;
3479 case INTEL_PT_BEP_IP:
3480 err = intel_pt_get_next_packet(decoder);
3483 if (decoder->packet.type == INTEL_PT_FUP) {
3484 decoder->set_fup_bep = true;
3487 intel_pt_log_at("ERROR: Missing FUP after BEP",
3493 decoder->fup_cfe_pkt = decoder->packet;
3494 decoder->set_fup_cfe = true;
3495 if (!decoder->pge) {
3496 intel_pt_fup_event(decoder, true);
3501 case INTEL_PT_CFE_IP:
3502 decoder->fup_cfe_pkt = decoder->packet;
3503 err = intel_pt_get_next_packet(decoder);
3506 if (decoder->packet.type == INTEL_PT_FUP) {
3507 decoder->set_fup_cfe_ip = true;
3510 intel_pt_log_at("ERROR: Missing FUP after CFE",
3516 err = intel_pt_evd(decoder);
3522 return intel_pt_bug(decoder);
3527 static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
3529 return decoder->packet.count &&
3530 (decoder->have_last_ip || decoder->packet.count == 3 ||
3531 decoder->packet.count == 6);
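/*
 * Editorial note: packet.count here is the IP compression "IPBytes" value
 * from the packet decoder. Per the Intel PT IP compression encoding, a value
 * of 3 (6 bytes, sign-extended) or 6 (full 8 bytes) carries a complete IP by
 * itself, while the other non-zero forms only update the low bytes and so
 * also need a previously known IP (have_last_ip).
 */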
3534 /* Walk PSB+ packets to get in sync. */
3535 static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
3539 decoder->in_psb = true;
3542 err = intel_pt_get_next_packet(decoder);
3546 switch (decoder->packet.type) {
3547 case INTEL_PT_TIP_PGD:
3548 decoder->continuous_period = false;
3550 case INTEL_PT_TIP_PGE:
3552 case INTEL_PT_PTWRITE:
3553 case INTEL_PT_PTWRITE_IP:
3554 case INTEL_PT_EXSTOP:
3555 case INTEL_PT_EXSTOP_IP:
3556 case INTEL_PT_MWAIT:
3562 case INTEL_PT_BEP_IP:
3564 case INTEL_PT_CFE_IP:
3566 intel_pt_log("ERROR: Unexpected packet\n");
3571 decoder->pge = true;
3572 if (intel_pt_have_ip(decoder)) {
3573 uint64_t current_ip = decoder->ip;
3575 intel_pt_set_ip(decoder);
3576 decoder->psb_ip = decoder->ip;
3578 intel_pt_log_to("Setting IP",
3584 intel_pt_calc_mtc_timestamp(decoder);
3588 intel_pt_calc_tsc_timestamp(decoder);
3592 intel_pt_calc_tma(decoder);
3596 intel_pt_calc_cyc_timestamp(decoder);
3600 intel_pt_calc_cbr(decoder);
3604 intel_pt_set_pip(decoder);
3607 case INTEL_PT_MODE_EXEC:
3608 intel_pt_mode_exec_status(decoder);
3611 case INTEL_PT_MODE_TSX:
3612 intel_pt_update_in_tx(decoder);
3615 case INTEL_PT_TRACESTOP:
3616 decoder->pge = false;
3617 decoder->continuous_period = false;
3618 intel_pt_clear_tx_flags(decoder);
3622 decoder->have_tma = false;
3623 intel_pt_log("ERROR: Unexpected packet\n");
3625 decoder->pkt_state = INTEL_PT_STATE_ERR4;
3627 decoder->pkt_state = INTEL_PT_STATE_ERR3;
3631 case INTEL_PT_BAD: /* Does not happen */
3632 err = intel_pt_bug(decoder);
3636 err = intel_pt_overflow(decoder);
3639 case INTEL_PT_PSBEND:
3652 decoder->in_psb = false;
3657 static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
3662 err = intel_pt_get_next_packet(decoder);
3666 switch (decoder->packet.type) {
3667 case INTEL_PT_TIP_PGD:
3668 decoder->continuous_period = false;
3669 decoder->pge = false;
3670 if (intel_pt_have_ip(decoder))
3671 intel_pt_set_ip(decoder);
3674 decoder->state.type |= INTEL_PT_TRACE_END;
3677 case INTEL_PT_TIP_PGE:
3678 decoder->pge = true;
3679 intel_pt_mtc_cyc_cnt_pge(decoder);
3680 if (intel_pt_have_ip(decoder))
3681 intel_pt_set_ip(decoder);
3684 decoder->state.type |= INTEL_PT_TRACE_BEGIN;
3688 decoder->pge = true;
3689 if (intel_pt_have_ip(decoder))
3690 intel_pt_set_ip(decoder);
3696 if (intel_pt_have_ip(decoder))
3697 intel_pt_set_ip(decoder);
3703 intel_pt_calc_mtc_timestamp(decoder);
3707 intel_pt_calc_tsc_timestamp(decoder);
3711 intel_pt_calc_tma(decoder);
3715 intel_pt_calc_cyc_timestamp(decoder);
3719 intel_pt_calc_cbr(decoder);
3723 intel_pt_set_pip(decoder);
3726 case INTEL_PT_MODE_EXEC:
3727 intel_pt_mode_exec_status(decoder);
3730 case INTEL_PT_MODE_TSX:
3731 intel_pt_update_in_tx(decoder);
3735 return intel_pt_overflow(decoder);
3737 case INTEL_PT_BAD: /* Does not happen */
3738 return intel_pt_bug(decoder);
3740 case INTEL_PT_TRACESTOP:
3741 decoder->pge = false;
3742 decoder->continuous_period = false;
3743 intel_pt_clear_tx_flags(decoder);
3744 decoder->have_tma = false;
3748 decoder->state.psb_offset = decoder->pos;
3749 decoder->psb_ip = 0;
3750 decoder->last_ip = 0;
3751 decoder->have_last_ip = true;
3752 intel_pt_clear_stack(&decoder->stack);
3753 err = intel_pt_walk_psb(decoder);
3756 decoder->state.type = INTEL_PT_PSB_EVT;
3757 decoder->state.from_ip = decoder->psb_ip;
3758 decoder->state.to_ip = 0;
3762 case INTEL_PT_PSBEND:
3766 case INTEL_PT_PTWRITE:
3767 case INTEL_PT_PTWRITE_IP:
3768 case INTEL_PT_EXSTOP:
3769 case INTEL_PT_EXSTOP_IP:
3770 case INTEL_PT_MWAIT:
3776 case INTEL_PT_BEP_IP:
3778 case INTEL_PT_CFE_IP:
3786 static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
3790 intel_pt_clear_fup_event(decoder);
3791 decoder->overflow = false;
3793 if (!decoder->branch_enable) {
3794 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
3795 decoder->state.type = 0; /* Do not have a sample */
3799 intel_pt_log("Scanning for full IP\n");
3800 err = intel_pt_walk_to_ip(decoder);
3801 if (err || ((decoder->state.type & INTEL_PT_PSB_EVT) && !decoder->ip))
3804 /* In hop mode, resample to get the to_ip as an "instruction" sample */
3806 decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
3808 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
3810 decoder->state.from_ip = 0;
3811 decoder->state.to_ip = decoder->ip;
3812 intel_pt_log_to("Setting IP", decoder->ip);
3817 static int intel_pt_part_psb(struct intel_pt_decoder *decoder)
3819 const unsigned char *end = decoder->buf + decoder->len;
3822 for (i = INTEL_PT_PSB_LEN - 1; i; i--) {
3823 if (i > decoder->len)
3825 if (!memcmp(end - i, INTEL_PT_PSB_STR, i))
3831 static int intel_pt_rest_psb(struct intel_pt_decoder *decoder, int part_psb)
3833 size_t rest_psb = INTEL_PT_PSB_LEN - part_psb;
3834 const char *psb = INTEL_PT_PSB_STR;
3836 if (rest_psb > decoder->len ||
3837 memcmp(decoder->buf, psb + part_psb, rest_psb))
3843 static int intel_pt_get_split_psb(struct intel_pt_decoder *decoder,
3848 decoder->pos += decoder->len;
3851 ret = intel_pt_get_next_data(decoder, false);
3855 rest_psb = intel_pt_rest_psb(decoder, part_psb);
3859 decoder->pos -= part_psb;
3860 decoder->next_buf = decoder->buf + rest_psb;
3861 decoder->next_len = decoder->len - rest_psb;
3862 memcpy(decoder->temp_buf, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
3863 decoder->buf = decoder->temp_buf;
3864 decoder->len = INTEL_PT_PSB_LEN;
3869 static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder)
3871 unsigned char *next;
3874 intel_pt_log("Scanning for PSB\n");
3876 if (!decoder->len) {
3877 ret = intel_pt_get_next_data(decoder, false);
3882 next = memmem(decoder->buf, decoder->len, INTEL_PT_PSB_STR,
3887 part_psb = intel_pt_part_psb(decoder);
3889 ret = intel_pt_get_split_psb(decoder, part_psb);
3893 decoder->pos += decoder->len;
3899 decoder->pkt_step = next - decoder->buf;
3900 return intel_pt_get_next_packet(decoder);
3904 static int intel_pt_sync(struct intel_pt_decoder *decoder)
3908 decoder->pge = false;
3909 decoder->continuous_period = false;
3910 decoder->have_last_ip = false;
3911 decoder->last_ip = 0;
3912 decoder->psb_ip = 0;
3914 intel_pt_clear_stack(&decoder->stack);
3916 err = intel_pt_scan_for_psb(decoder);
3920 if (decoder->vm_time_correlation) {
3921 decoder->in_psb = true;
3922 if (!decoder->timestamp)
3923 decoder->timestamp = 1;
3924 decoder->state.type = 0;
3925 decoder->pkt_state = INTEL_PT_STATE_VM_TIME_CORRELATION;
3929 decoder->have_last_ip = true;
3930 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
3932 err = intel_pt_walk_psb(decoder);
3936 decoder->state.type = INTEL_PT_PSB_EVT; /* Only PSB sample */
3937 decoder->state.from_ip = decoder->psb_ip;
3938 decoder->state.to_ip = 0;
3942 * In hop mode, resample to get the PSB FUP ip as an
3943 * "instruction" sample.
3946 decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
3948 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
3954 static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
3956 uint64_t est = decoder->sample_insn_cnt << 1;
3958 if (!decoder->cbr || !decoder->max_non_turbo_ratio)
3961 est *= decoder->max_non_turbo_ratio;
3962 est /= decoder->cbr;
3964 return decoder->sample_timestamp + est;
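/*
 * Editorial note: the estimate above reads as "assume roughly 2 core cycles
 * per instruction, then convert core cycles to TSC ticks by scaling with
 * max_non_turbo_ratio / cbr". For example (hypothetical values), 1000
 * instructions since the last timestamp with cbr = 20 and
 * max_non_turbo_ratio = 40 gives 2000 * 40 / 20 = 4000 TSC ticks added to
 * sample_timestamp.
 */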
3967 const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
3972 decoder->state.type = INTEL_PT_BRANCH;
3973 decoder->state.flags = 0;
3975 switch (decoder->pkt_state) {
3976 case INTEL_PT_STATE_NO_PSB:
3977 err = intel_pt_sync(decoder);
3979 case INTEL_PT_STATE_NO_IP:
3980 decoder->have_last_ip = false;
3981 decoder->last_ip = 0;
3984 case INTEL_PT_STATE_ERR_RESYNC:
3985 err = intel_pt_sync_ip(decoder);
3987 case INTEL_PT_STATE_IN_SYNC:
3988 err = intel_pt_walk_trace(decoder);
3990 case INTEL_PT_STATE_TNT:
3991 case INTEL_PT_STATE_TNT_CONT:
3992 err = intel_pt_walk_tnt(decoder);
3994 err = intel_pt_walk_trace(decoder);
3996 case INTEL_PT_STATE_TIP:
3997 case INTEL_PT_STATE_TIP_PGD:
3998 err = intel_pt_walk_tip(decoder);
4000 case INTEL_PT_STATE_FUP:
4001 err = intel_pt_walk_fup(decoder);
4003 err = intel_pt_walk_fup_tip(decoder);
4005 case INTEL_PT_STATE_FUP_NO_TIP:
4006 err = intel_pt_walk_fup(decoder);
4008 err = intel_pt_walk_trace(decoder);
4010 case INTEL_PT_STATE_FUP_IN_PSB:
4011 err = intel_pt_fup_in_psb(decoder);
4013 case INTEL_PT_STATE_RESAMPLE:
4014 err = intel_pt_resample(decoder);
4016 case INTEL_PT_STATE_VM_TIME_CORRELATION:
4017 err = intel_pt_vm_time_correlation(decoder);
4020 err = intel_pt_bug(decoder);
4023 } while (err == -ENOLINK);
4026 decoder->state.err = intel_pt_ext_err(err);
4027 if (err != -EOVERFLOW)
4028 decoder->state.from_ip = decoder->ip;
4029 intel_pt_update_sample_time(decoder);
4030 decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
4031 intel_pt_set_nr(decoder);
4033 decoder->state.err = 0;
4034 if (decoder->cbr != decoder->cbr_seen) {
4035 decoder->cbr_seen = decoder->cbr;
4036 if (!decoder->state.type) {
4037 decoder->state.from_ip = decoder->ip;
4038 decoder->state.to_ip = 0;
4040 decoder->state.type |= INTEL_PT_CBR_CHG;
4041 decoder->state.cbr_payload = decoder->cbr_payload;
4042 decoder->state.cbr = decoder->cbr;
4044 if (intel_pt_sample_time(decoder->pkt_state)) {
4045 intel_pt_update_sample_time(decoder);
4046 if (decoder->sample_cyc) {
4047 decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
4048 decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
4049 decoder->sample_cyc = false;
4053 * When using only TSC/MTC to compute cycles, IPC can be
4054 * sampled as soon as the cycle count changes.
4056 if (!decoder->have_cyc)
4057 decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
4060 /* Let PSB event always have TSC timestamp */
4061 if ((decoder->state.type & INTEL_PT_PSB_EVT) && decoder->tsc_timestamp)
4062 decoder->sample_timestamp = decoder->tsc_timestamp;
4064 decoder->state.from_nr = decoder->nr;
4065 decoder->state.to_nr = decoder->next_nr;
4066 decoder->nr = decoder->next_nr;
4068 decoder->state.timestamp = decoder->sample_timestamp;
4069 decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
4070 decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
4071 decoder->state.tot_cyc_cnt = decoder->sample_tot_cyc_cnt;
4073 return &decoder->state;
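/*
 * Editorial sketch (not part of the decoder): a minimal consumer loop for
 * intel_pt_decode() above. How the decoder is created (perf fills in a
 * parameters structure and calls intel_pt_decoder_new()) and what to do on
 * error are assumptions here and are shown only schematically.
 */
#if 0
static void example_decode_loop(struct intel_pt_decoder *decoder)
{
	const struct intel_pt_state *state;

	while (1) {
		state = intel_pt_decode(decoder);
		if (state->err) {
			/* perf reports the error and lets the decoder resync */
			break;
		}
		/* state->type is a bitmask of sample types (branch, PSB event, CBR change, ...) */
	}
}
#endif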
4077 * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
4078 * @buf: pointer to buffer pointer
4079 * @len: size of buffer
4081 * Updates the buffer pointer to point to the start of the next PSB packet if
4082 * there is one, otherwise the buffer pointer is unchanged. If @buf is updated,
4083 * @len is adjusted accordingly.
4085 * Return: %true if a PSB packet is found, %false otherwise.
4087 static bool intel_pt_next_psb(unsigned char **buf, size_t *len)
4089 unsigned char *next;
4091 next = memmem(*buf, *len, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
4093 *len -= next - *buf;
4101 * intel_pt_step_psb - move buffer pointer to the start of the following PSB
4103 * @buf: pointer to buffer pointer
4104 * @len: size of buffer
4106 * Updates the buffer pointer to point to the start of the following PSB packet
4107 * (skipping the PSB at @buf itself) if there is one, otherwise the buffer
4108 * pointer is unchanged. If @buf is updated, @len is adjusted accordingly.
4110 * Return: %true if a PSB packet is found, %false otherwise.
4112 static bool intel_pt_step_psb(unsigned char **buf, size_t *len)
4114 unsigned char *next;
4119 next = memmem(*buf + 1, *len - 1, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
4121 *len -= next - *buf;
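/*
 * Editorial sketch (not part of the decoder): counting PSB packets in a raw
 * trace buffer with the two helpers above, relying only on the behaviour
 * their comments document (advance *buf/*len to a PSB and return false when
 * none is found).
 */
#if 0
static size_t example_count_psbs(unsigned char *buf, size_t len)
{
	size_t cnt = 0;

	if (!intel_pt_next_psb(&buf, &len))
		return 0;

	do {
		cnt += 1;
	} while (intel_pt_step_psb(&buf, &len));

	return cnt;
}
#endif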
4129 * intel_pt_last_psb - find the last PSB packet in a buffer.
4131 * @len: size of buffer
4133 * This function finds the last PSB in a buffer.
4135 * Return: A pointer to the last PSB in @buf if found, %NULL otherwise.
4137 static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
4139 const char *n = INTEL_PT_PSB_STR;
4143 if (len < INTEL_PT_PSB_LEN)
4146 k = len - INTEL_PT_PSB_LEN + 1;
4148 p = memrchr(buf, n[0], k);
4151 if (!memcmp(p + 1, n + 1, INTEL_PT_PSB_LEN - 1))
4160 * intel_pt_next_tsc - find and return next TSC.
4162 * @len: size of buffer
4163 * @tsc: TSC value returned
4164 * @rem: returns remaining size when TSC is found
4166 * Find a TSC packet in @buf and return the TSC value. This function assumes
4167 * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
4168 * PSBEND packet is found.
4170 * Return: %true if TSC is found, %false otherwise.
4172 static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
4175 enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
4176 struct intel_pt_pkt packet;
4180 ret = intel_pt_get_packet(buf, len, &packet, &ctx);
4183 if (packet.type == INTEL_PT_TSC) {
4184 *tsc = packet.payload;
4188 if (packet.type == INTEL_PT_PSBEND)
4197 * intel_pt_tsc_cmp - compare 7-byte TSCs.
4198 * @tsc1: first TSC to compare
4199 * @tsc2: second TSC to compare
4201 * This function compares 7-byte TSC values allowing for the possibility that
4202 * TSC wrapped around. Generally it is not possible to know if TSC has wrapped
4203 * around so for that purpose this function assumes the absolute difference is
4204 * less than half the maximum difference.
4206 * Return: %-1 if @tsc1 is before @tsc2, %0 if @tsc1 == @tsc2, %1 if @tsc1 is after @tsc2.
4209 static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
4211 const uint64_t halfway = (1ULL << 55);
4217 if (tsc2 - tsc1 < halfway)
4222 if (tsc1 - tsc2 < halfway)
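/*
 * Editorial note, worked example of the rule documented above: TSC packets
 * carry 7 bytes, so values lie in [0, 2^56) and halfway is 2^55. With
 * tsc1 = 0x00ffffffffffff00 and tsc2 = 0x0000000000000100, tsc1 - tsc2 is
 * greater than 2^55, so the difference is taken to be a wraparound and tsc1
 * is reported as before tsc2 (-1). With tsc1 = 100 and tsc2 = 200 the result
 * is also -1, but simply because tsc1 really is earlier.
 */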
4229 #define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
4232 * adj_for_padding - adjust overlap to account for padding.
4233 * @buf_b: second buffer
4234 * @buf_a: first buffer
4235 * @len_a: size of first buffer
4237 * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap accordingly.
4240 * Return: A pointer into @buf_b from where non-overlapped data starts
4242 static unsigned char *adj_for_padding(unsigned char *buf_b,
4243 unsigned char *buf_a, size_t len_a)
4245 unsigned char *p = buf_b - MAX_PADDING;
4246 unsigned char *q = buf_a + len_a - MAX_PADDING;
4249 for (i = MAX_PADDING; i; i--, p++, q++) {
4258 * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
4260 * @buf_a: first buffer
4261 * @len_a: size of first buffer
4262 * @buf_b: second buffer
4263 * @len_b: size of second buffer
4264 * @consecutive: returns true if there is data in buf_b that is consecutive to the data in @buf_a
4266 * @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
4268 * If the trace contains TSC we can look at the last TSC of @buf_a and the
4269 * first TSC of @buf_b in order to determine if the buffers overlap, and then
4270 * walk forward in @buf_b until a later TSC is found. A precondition is that
4271 * @buf_a and @buf_b are positioned at a PSB.
4273 * Return: A pointer into @buf_b from where non-overlapped data starts, or
4274 * @buf_b + @len_b if there is no non-overlapped data.
4276 static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
4278 unsigned char *buf_b,
4279 size_t len_b, bool *consecutive,
4282 uint64_t tsc_a, tsc_b;
4284 size_t len, rem_a, rem_b;
4286 p = intel_pt_last_psb(buf_a, len_a);
4288 return buf_b; /* No PSB in buf_a => no overlap */
4290 len = len_a - (p - buf_a);
4291 if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
4292 /* The last PSB+ in buf_a is incomplete, so go back one more */
4294 p = intel_pt_last_psb(buf_a, len_a);
4296 return buf_b; /* No full PSB+ => assume no overlap */
4297 len = len_a - (p - buf_a);
4298 if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
4299 return buf_b; /* No TSC in buf_a => assume no overlap */
4303 /* Ignore PSB+ with no TSC */
4304 if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
4305 int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
4307 /* Same TSC, so buffers are consecutive */
4308 if (!cmp && rem_b >= rem_a) {
4309 unsigned char *start;
4311 *consecutive = true;
4312 start = buf_b + len_b - (rem_b - rem_a);
4313 return adj_for_padding(start, buf_a, len_a);
4315 if (cmp < 0 && !ooo_tsc)
4316 return buf_b; /* tsc_a < tsc_b => no overlap */
4319 if (!intel_pt_step_psb(&buf_b, &len_b))
4320 return buf_b + len_b; /* No PSB in buf_b => no data */
4325 * intel_pt_find_overlap - determine start of non-overlapped trace data.
4326 * @buf_a: first buffer
4327 * @len_a: size of first buffer
4328 * @buf_b: second buffer
4329 * @len_b: size of second buffer
4330 * @have_tsc: can use TSC packets to detect overlap
4331 * @consecutive: returns true if there is data in buf_b that is consecutive to the data in @buf_a
4333 * @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
4335 * When trace samples or snapshots are recorded there is the possibility that
4336 * the data overlaps. Note that, for the purposes of decoding, data is only
4337 * useful if it begins with a PSB packet.
4339 * Return: A pointer into @buf_b from where non-overlapped data starts, or
4340 * @buf_b + @len_b if there is no non-overlapped data.
4342 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
4343 unsigned char *buf_b, size_t len_b,
4344 bool have_tsc, bool *consecutive,
4347 unsigned char *found;
4349 /* Buffer 'b' must start at PSB so throw away everything before that */
4350 if (!intel_pt_next_psb(&buf_b, &len_b))
4351 return buf_b + len_b; /* No PSB */
4353 if (!intel_pt_next_psb(&buf_a, &len_a))
4354 return buf_b; /* No overlap */
4357 found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
4358 consecutive, ooo_tsc);
4364 * Buffer 'b' cannot end within buffer 'a' so, for comparison purposes,
4365 * we can ignore the first part of buffer 'a'.
4367 while (len_b < len_a) {
4368 if (!intel_pt_step_psb(&buf_a, &len_a))
4369 return buf_b; /* No overlap */
4372 /* Now len_b >= len_a */
4374 /* Potential overlap so check the bytes */
4375 found = memmem(buf_a, len_a, buf_b, len_a);
4377 *consecutive = true;
4378 return adj_for_padding(buf_b + len_a, buf_a, len_a);
4381 /* Try again at next PSB in buffer 'a' */
4382 if (!intel_pt_step_psb(&buf_a, &len_a))
4383 return buf_b; /* No overlap */
4388 * struct fast_forward_data - data used by intel_pt_ff_cb().
4389 * @timestamp: timestamp to fast forward towards
4390 * @buf_timestamp: buffer timestamp of last buffer with trace data earlier than
4391 * the fast forward timestamp.
4393 struct fast_forward_data {
4395 uint64_t buf_timestamp;
4399 * intel_pt_ff_cb - fast forward lookahead callback.
4400 * @buffer: Intel PT trace buffer
4401 * @data: opaque pointer to fast forward data (struct fast_forward_data)
4403 * Determine if @buffer trace is past the fast forward timestamp.
4405 * Return: 1 (stop lookahead) if @buffer trace is past the fast forward
4406 * timestamp, and 0 otherwise.
4408 static int intel_pt_ff_cb(struct intel_pt_buffer *buffer, void *data)
4410 struct fast_forward_data *d = data;
4416 buf = (unsigned char *)buffer->buf;
4419 if (!intel_pt_next_psb(&buf, &len) ||
4420 !intel_pt_next_tsc(buf, len, &tsc, &rem))
4423 tsc = intel_pt_8b_tsc(tsc, buffer->ref_timestamp);
4425 intel_pt_log("Buffer 1st timestamp " x64_fmt " ref timestamp " x64_fmt "\n",
4426 tsc, buffer->ref_timestamp);
4429 * If the buffer contains a timestamp earlier than the fast forward
4430 * timestamp, then record it, else stop.
4432 if (tsc < d->timestamp)
4433 d->buf_timestamp = buffer->ref_timestamp;
4441 * intel_pt_fast_forward - reposition decoder forwards.
4442 * @decoder: Intel PT decoder
4443 * @timestamp: timestamp to fast forward towards
4445 * Reposition decoder at the last PSB with a timestamp earlier than @timestamp.
4447 * Return: 0 on success or negative error code on failure.
4449 int intel_pt_fast_forward(struct intel_pt_decoder *decoder, uint64_t timestamp)
4451 struct fast_forward_data d = { .timestamp = timestamp };
4456 intel_pt_log("Fast forward towards timestamp " x64_fmt "\n", timestamp);
4458 /* Find buffer timestamp of buffer to fast forward to */
4459 err = decoder->lookahead(decoder->data, intel_pt_ff_cb, &d);
4463 /* Walk to buffer with same buffer timestamp */
4464 if (d.buf_timestamp) {
4466 decoder->pos += decoder->len;
4468 err = intel_pt_get_next_data(decoder, true);
4469 /* -ENOLINK means non-consecutive trace */
4470 if (err && err != -ENOLINK)
4472 } while (decoder->buf_timestamp != d.buf_timestamp);
4478 buf = (unsigned char *)decoder->buf;
4481 if (!intel_pt_next_psb(&buf, &len))
4485 * Walk PSBs while the PSB timestamp is less than the fast forward timestamp.
4492 if (!intel_pt_next_tsc(buf, len, &tsc, &rem))
4494 tsc = intel_pt_8b_tsc(tsc, decoder->buf_timestamp);
4496 * A TSC packet can slip past MTC packets but, after fast
4497 * forward, decoding starts at the TSC timestamp. That means
4498 * the timestamps may not be exactly the same as the timestamps
4499 * that would have been decoded without fast forward.
4501 if (tsc < timestamp) {
4502 intel_pt_log("Fast forward to next PSB timestamp " x64_fmt "\n", tsc);
4503 decoder->pos += decoder->len - len;
4506 intel_pt_reposition(decoder);
4510 } while (intel_pt_step_psb(&buf, &len));