1 // SPDX-License-Identifier: GPL-2.0
3 * Arm Statistical Profiling Extensions (SPE) support
4 * Copyright (c) 2017-2018, Arm Ltd.
11 #include <linux/bitops.h>
12 #include <linux/kernel.h>
13 #include <linux/log2.h>
14 #include <linux/types.h>
15 #include <linux/zalloc.h>
28 #include "thread-stack.h"
31 #include "util/synthetic-events.h"
34 #include "arm-spe-decoder/arm-spe-decoder.h"
35 #include "arm-spe-decoder/arm-spe-pkt-decoder.h"
37 #include "../../arch/arm64/include/asm/cputype.h"
38 #define MAX_TIMESTAMP (~0ULL)
41 struct auxtrace auxtrace;
42 struct auxtrace_queues queues;
43 struct auxtrace_heap heap;
44 struct itrace_synth_opts synth_opts;
46 struct perf_session *session;
47 struct machine *machine;
50 struct perf_tsc_conversion tc;
60 u8 sample_remote_access;
62 u8 sample_instructions;
63 u64 instructions_sample_period;
78 unsigned long num_events;
79 u8 use_ctx_pkt_for_pid;
87 struct arm_spe_queue {
89 unsigned int queue_nr;
90 struct auxtrace_buffer *buffer;
91 struct auxtrace_buffer *old_buffer;
92 union perf_event *event_buf;
98 struct arm_spe_decoder *decoder;
101 struct thread *thread;
102 u64 period_instructions;
106 struct data_source_handle {
107 const struct midr_range *midr_ranges;
108 void (*ds_synth)(const struct arm_spe_record *record,
109 union perf_mem_data_src *data_src);
112 #define DS(range, func) \
114 .midr_ranges = range, \
115 .ds_synth = arm_spe__synth_##func, \
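/*
 * Illustrative expansion (the macro's closing brace is elided in this
 * listing): DS(common_ds_encoding_cpus, data_source_common) becomes
 *   { .midr_ranges = common_ds_encoding_cpus,
 *     .ds_synth    = arm_spe__synth_data_source_common },
 * i.e. each DS() entry pairs a MIDR range list with the data-source
 * synthesis callback for CPUs in that range.
 */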
118 static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
119 unsigned char *buf, size_t len)
121 struct arm_spe_pkt packet;
124 char desc[ARM_SPE_PKT_DESC_MAX];
125 const char *color = PERF_COLOR_BLUE;
127 color_fprintf(stdout, color,
128 ". ... ARM SPE data: size %#zx bytes\n",
132 ret = arm_spe_get_packet(buf, len, &packet);
138 color_fprintf(stdout, color, " %08zx: ", pos);
139 for (i = 0; i < pkt_len; i++)
140 color_fprintf(stdout, color, " %02x", buf[i]);
142 color_fprintf(stdout, color, " ");
144 ret = arm_spe_pkt_desc(&packet, desc,
145 ARM_SPE_PKT_DESC_MAX);
147 color_fprintf(stdout, color, " %s\n", desc);
149 color_fprintf(stdout, color, " Bad packet!\n");
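/*
 * Rough sketch of the dump output shape produced by the format strings
 * above (offsets, byte values and descriptions are illustrative only):
 *
 *   . ... ARM SPE data: size 0x30 bytes
 *   00000000:  b0 ...   <packet description from arm_spe_pkt_desc()>
 *   00000008:  ...      Bad packet!
 */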
157 static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
161 arm_spe_dump(spe, buf, len);
164 static int arm_spe_get_trace(struct arm_spe_buffer *b, void *data)
166 struct arm_spe_queue *speq = data;
167 struct auxtrace_buffer *buffer = speq->buffer;
168 struct auxtrace_buffer *old_buffer = speq->old_buffer;
169 struct auxtrace_queue *queue;
171 queue = &speq->spe->queues.queue_array[speq->queue_nr];
173 buffer = auxtrace_buffer__next(queue, buffer);
174 /* If no more data, drop the previous auxtrace_buffer and return */
177 auxtrace_buffer__drop_data(old_buffer);
182 speq->buffer = buffer;
184 /* If the aux_buffer doesn't have data associated, try to load it */
186 /* Get the file descriptor associated with the perf data file */

187 int fd = perf_data__fd(speq->spe->session->data);
189 buffer->data = auxtrace_buffer__get_data(buffer, fd);
194 b->len = buffer->size;
195 b->buf = buffer->data;
199 auxtrace_buffer__drop_data(old_buffer);
200 speq->old_buffer = buffer;
202 auxtrace_buffer__drop_data(buffer);
203 return arm_spe_get_trace(b, data);
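/*
 * Note: when the fetched buffer turns out to carry no data, it is dropped
 * and arm_spe_get_trace() recurses to pull the next auxtrace buffer, so
 * the decoder only ever sees buffers with actual trace data.
 */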
209 static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
210 unsigned int queue_nr)
212 struct arm_spe_params params = { .get_trace = 0, };
213 struct arm_spe_queue *speq;
215 speq = zalloc(sizeof(*speq));
219 speq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
220 if (!speq->event_buf)
224 speq->queue_nr = queue_nr;
228 speq->period_instructions = 0;
231 params.get_trace = arm_spe_get_trace;
234 /* create new decoder */
235 speq->decoder = arm_spe_decoder_new(&params);
242 zfree(&speq->event_buf);
248 static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip)
250 return ip >= spe->kernel_start ?
251 PERF_RECORD_MISC_KERNEL :
252 PERF_RECORD_MISC_USER;
255 static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
256 struct auxtrace_queue *queue)
258 struct arm_spe_queue *speq = queue->priv;
261 tid = machine__get_current_tid(spe->machine, speq->cpu);
264 thread__zput(speq->thread);
266 speq->tid = queue->tid;
268 if ((!speq->thread) && (speq->tid != -1)) {
269 speq->thread = machine__find_thread(spe->machine, -1,
274 speq->pid = thread__pid(speq->thread);
275 if (queue->cpu == -1)
276 speq->cpu = thread__cpu(speq->thread);
280 static int arm_spe_set_tid(struct arm_spe_queue *speq, pid_t tid)
282 struct arm_spe *spe = speq->spe;
283 int err = machine__set_current_tid(spe->machine, speq->cpu, -1, tid);
288 arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);
293 static u64 *arm_spe__get_metadata_by_cpu(struct arm_spe *spe, u64 cpu)
300 for (i = 0; i < spe->metadata_nr_cpu; i++)
301 if (spe->metadata[i][ARM_SPE_CPU] == cpu)
302 return spe->metadata[i];
307 static struct simd_flags arm_spe__synth_simd_flags(const struct arm_spe_record *record)
309 struct simd_flags simd_flags = {};
311 if ((record->op & ARM_SPE_OP_LDST) && (record->op & ARM_SPE_OP_SVE_LDST))
312 simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
314 if ((record->op & ARM_SPE_OP_OTHER) && (record->op & ARM_SPE_OP_SVE_OTHER))
315 simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
317 if (record->type & ARM_SPE_SVE_PARTIAL_PRED)
318 simd_flags.pred |= SIMD_OP_FLAGS_PRED_PARTIAL;
320 if (record->type & ARM_SPE_SVE_EMPTY_PRED)
321 simd_flags.pred |= SIMD_OP_FLAGS_PRED_EMPTY;
326 static void arm_spe_prep_sample(struct arm_spe *spe,
327 struct arm_spe_queue *speq,
328 union perf_event *event,
329 struct perf_sample *sample)
331 struct arm_spe_record *record = &speq->decoder->record;
333 if (!spe->timeless_decoding)
334 sample->time = tsc_to_perf_time(record->timestamp, &spe->tc);
336 sample->ip = record->from_ip;
337 sample->cpumode = arm_spe_cpumode(spe, sample->ip);
338 sample->pid = speq->pid;
339 sample->tid = speq->tid;
341 sample->cpu = speq->cpu;
342 sample->simd_flags = arm_spe__synth_simd_flags(record);
344 event->sample.header.type = PERF_RECORD_SAMPLE;
345 event->sample.header.misc = sample->cpumode;
346 event->sample.header.size = sizeof(struct perf_event_header);
349 static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type)
351 event->header.size = perf_event__sample_event_size(sample, type, 0);
352 return perf_event__synthesize_sample(event, type, 0, sample);
356 arm_spe_deliver_synth_event(struct arm_spe *spe,
357 struct arm_spe_queue *speq __maybe_unused,
358 union perf_event *event,
359 struct perf_sample *sample)
363 if (spe->synth_opts.inject) {
364 ret = arm_spe__inject_event(event, sample, spe->sample_type);
369 ret = perf_session__deliver_synth_event(spe->session, event, sample);
371 pr_err("ARM SPE: failed to deliver event, error %d\n", ret);
376 static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
377 u64 spe_events_id, u64 data_src)
379 struct arm_spe *spe = speq->spe;
380 struct arm_spe_record *record = &speq->decoder->record;
381 union perf_event *event = speq->event_buf;
382 struct perf_sample sample = { .ip = 0, };
384 arm_spe_prep_sample(spe, speq, event, &sample);
386 sample.id = spe_events_id;
387 sample.stream_id = spe_events_id;
388 sample.addr = record->virt_addr;
389 sample.phys_addr = record->phys_addr;
390 sample.data_src = data_src;
391 sample.weight = record->latency;
393 return arm_spe_deliver_synth_event(spe, speq, event, &sample);
396 static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
399 struct arm_spe *spe = speq->spe;
400 struct arm_spe_record *record = &speq->decoder->record;
401 union perf_event *event = speq->event_buf;
402 struct perf_sample sample = { .ip = 0, };
404 arm_spe_prep_sample(spe, speq, event, &sample);
406 sample.id = spe_events_id;
407 sample.stream_id = spe_events_id;
408 sample.addr = record->to_ip;
409 sample.weight = record->latency;
410 sample.flags = speq->flags;
412 return arm_spe_deliver_synth_event(spe, speq, event, &sample);
415 static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
416 u64 spe_events_id, u64 data_src)
418 struct arm_spe *spe = speq->spe;
419 struct arm_spe_record *record = &speq->decoder->record;
420 union perf_event *event = speq->event_buf;
421 struct perf_sample sample = { .ip = 0, };
424 * Handles perf instruction sampling period.
426 speq->period_instructions++;
427 if (speq->period_instructions < spe->instructions_sample_period)
429 speq->period_instructions = 0;
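/*
 * Worked example of the period logic above: with an instruction sample
 * period of 100, only every 100th SPE record reaching this function
 * results in a synthesized instruction sample; the per-queue counter is
 * reset once the period has been met.
 */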
431 arm_spe_prep_sample(spe, speq, event, &sample);
433 sample.id = spe_events_id;
434 sample.stream_id = spe_events_id;
435 sample.addr = record->to_ip;
436 sample.phys_addr = record->phys_addr;
437 sample.data_src = data_src;
438 sample.period = spe->instructions_sample_period;
439 sample.weight = record->latency;
440 sample.flags = speq->flags;
442 return arm_spe_deliver_synth_event(spe, speq, event, &sample);
445 static const struct midr_range common_ds_encoding_cpus[] = {
446 MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
447 MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
448 MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
449 MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
450 MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
451 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
452 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
453 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
454 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
458 static const struct midr_range ampereone_ds_encoding_cpus[] = {
459 MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
463 static void arm_spe__sample_flags(struct arm_spe_queue *speq)
465 const struct arm_spe_record *record = &speq->decoder->record;
468 if (record->op & ARM_SPE_OP_BRANCH_ERET) {
469 speq->flags = PERF_IP_FLAG_BRANCH;
471 if (record->type & ARM_SPE_BRANCH_MISS)
472 speq->flags |= PERF_IP_FLAG_BRANCH_MISS;
476 static void arm_spe__synth_data_source_common(const struct arm_spe_record *record,
477 union perf_mem_data_src *data_src)
480 * Even though four levels of cache hierarchy are possible, no known
481 * production Neoverse systems currently include more than three levels
482 * so for the time being we assume three exist. If a production system
483 is built with four, then this function would have to be changed to
484 * detect the number of levels for reporting.
488 * We have no data on the hit level or data source for stores in the
489 * Neoverse SPE records.
491 if (record->op & ARM_SPE_OP_ST) {
492 data_src->mem_lvl = PERF_MEM_LVL_NA;
493 data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
494 data_src->mem_snoop = PERF_MEM_SNOOP_NA;
498 switch (record->source) {
499 case ARM_SPE_COMMON_DS_L1D:
500 data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
501 data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
502 data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
504 case ARM_SPE_COMMON_DS_L2:
505 data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
506 data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
507 data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
509 case ARM_SPE_COMMON_DS_PEER_CORE:
510 data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
511 data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
512 data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
515 * We don't know if this is L1 or L2, but we do know it was a cache-to-cache
516 * transfer, so set SNOOPX_PEER.
518 case ARM_SPE_COMMON_DS_LOCAL_CLUSTER:
519 case ARM_SPE_COMMON_DS_PEER_CLUSTER:
520 data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
521 data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
522 data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
525 * System cache is assumed to be L3
527 case ARM_SPE_COMMON_DS_SYS_CACHE:
528 data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
529 data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
530 data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
533 * We don't know what level it hit in, except that it came from the other socket.
536 case ARM_SPE_COMMON_DS_REMOTE:
537 data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
538 data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
539 data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
540 data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
542 case ARM_SPE_COMMON_DS_DRAM:
543 data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
544 data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
545 data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
553 * Source is IMPDEF. Here we convert the source encoding used on AmpereOne cores
554 * to the common (Neoverse, Cortex) encoding to avoid duplicating the decoding code.
556 static void arm_spe__synth_data_source_ampereone(const struct arm_spe_record *record,
557 union perf_mem_data_src *data_src)
559 struct arm_spe_record common_record;
561 switch (record->source) {
562 case ARM_SPE_AMPEREONE_LOCAL_CHIP_CACHE_OR_DEVICE:
563 common_record.source = ARM_SPE_COMMON_DS_PEER_CORE;
565 case ARM_SPE_AMPEREONE_SLC:
566 common_record.source = ARM_SPE_COMMON_DS_SYS_CACHE;
568 case ARM_SPE_AMPEREONE_REMOTE_CHIP_CACHE:
569 common_record.source = ARM_SPE_COMMON_DS_REMOTE;
571 case ARM_SPE_AMPEREONE_DDR:
572 common_record.source = ARM_SPE_COMMON_DS_DRAM;
574 case ARM_SPE_AMPEREONE_L1D:
575 common_record.source = ARM_SPE_COMMON_DS_L1D;
577 case ARM_SPE_AMPEREONE_L2D:
578 common_record.source = ARM_SPE_COMMON_DS_L2;
581 pr_warning_once("AmpereOne: Unknown data source (0x%x)\n",
586 common_record.op = record->op;
587 arm_spe__synth_data_source_common(&common_record, data_src);
590 static const struct data_source_handle data_source_handles[] = {
591 DS(common_ds_encoding_cpus, data_source_common),
592 DS(ampereone_ds_encoding_cpus, data_source_ampereone),
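/*
 * arm_spe__synth_ds() walks this table and, for the first entry whose MIDR
 * range list matches the recording CPU, invokes the corresponding ds_synth
 * callback; CPUs that match no entry fall back to
 * arm_spe__synth_memory_level().
 */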
595 static void arm_spe__synth_memory_level(const struct arm_spe_record *record,
596 union perf_mem_data_src *data_src)
598 if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
599 data_src->mem_lvl = PERF_MEM_LVL_L3;
601 if (record->type & ARM_SPE_LLC_MISS)
602 data_src->mem_lvl |= PERF_MEM_LVL_MISS;
604 data_src->mem_lvl |= PERF_MEM_LVL_HIT;
605 } else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
606 data_src->mem_lvl = PERF_MEM_LVL_L1;
608 if (record->type & ARM_SPE_L1D_MISS)
609 data_src->mem_lvl |= PERF_MEM_LVL_MISS;
611 data_src->mem_lvl |= PERF_MEM_LVL_HIT;
614 if (record->type & ARM_SPE_REMOTE_ACCESS)
615 data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
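/*
 * This fallback derives a coarse memory level purely from the SPE event
 * type bits (L1D/LLC access and miss, remote access), for CPUs whose
 * data-source packet encoding is not recognised above.
 */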
618 static bool arm_spe__synth_ds(struct arm_spe_queue *speq,
619 const struct arm_spe_record *record,
620 union perf_mem_data_src *data_src)
622 struct arm_spe *spe = speq->spe;
623 u64 *metadata = NULL;
627 /* Metadata version 1 assumes all CPUs are the same (old behavior) */
628 if (spe->metadata_ver == 1) {
631 pr_warning_once("Old SPE metadata, re-record to improve decode accuracy\n");
632 cpuid = perf_env__cpuid(spe->session->evlist->env);
633 midr = strtol(cpuid, NULL, 16);
635 /* CPU ID is -1 for per-thread mode */
638 * On a heterogeneous system, because the CPU ID is -1 we
639 * cannot confirm that the data source packet is supported.
641 if (!spe->is_homogeneous)
644 /* On a homogeneous system, simply use CPU0's metadata */
646 metadata = spe->metadata[0];
648 metadata = arm_spe__get_metadata_by_cpu(spe, speq->cpu);
654 midr = metadata[ARM_SPE_CPU_MIDR];
657 for (i = 0; i < ARRAY_SIZE(data_source_handles); i++) {
658 if (is_midr_in_range_list(midr, data_source_handles[i].midr_ranges)) {
659 data_source_handles[i].ds_synth(record, data_src);
667 static u64 arm_spe__synth_data_source(struct arm_spe_queue *speq,
668 const struct arm_spe_record *record)
670 union perf_mem_data_src data_src = { .mem_op = PERF_MEM_OP_NA };
672 if (record->op & ARM_SPE_OP_LD)
673 data_src.mem_op = PERF_MEM_OP_LOAD;
674 else if (record->op & ARM_SPE_OP_ST)
675 data_src.mem_op = PERF_MEM_OP_STORE;
679 if (!arm_spe__synth_ds(speq, record, &data_src))
680 arm_spe__synth_memory_level(record, &data_src);
682 if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
683 data_src.mem_dtlb = PERF_MEM_TLB_WK;
685 if (record->type & ARM_SPE_TLB_MISS)
686 data_src.mem_dtlb |= PERF_MEM_TLB_MISS;
688 data_src.mem_dtlb |= PERF_MEM_TLB_HIT;
694 static int arm_spe_sample(struct arm_spe_queue *speq)
696 const struct arm_spe_record *record = &speq->decoder->record;
697 struct arm_spe *spe = speq->spe;
701 arm_spe__sample_flags(speq);
702 data_src = arm_spe__synth_data_source(speq, record);
704 if (spe->sample_flc) {
705 if (record->type & ARM_SPE_L1D_MISS) {
706 err = arm_spe__synth_mem_sample(speq, spe->l1d_miss_id,
712 if (record->type & ARM_SPE_L1D_ACCESS) {
713 err = arm_spe__synth_mem_sample(speq, spe->l1d_access_id,
720 if (spe->sample_llc) {
721 if (record->type & ARM_SPE_LLC_MISS) {
722 err = arm_spe__synth_mem_sample(speq, spe->llc_miss_id,
728 if (record->type & ARM_SPE_LLC_ACCESS) {
729 err = arm_spe__synth_mem_sample(speq, spe->llc_access_id,
736 if (spe->sample_tlb) {
737 if (record->type & ARM_SPE_TLB_MISS) {
738 err = arm_spe__synth_mem_sample(speq, spe->tlb_miss_id,
744 if (record->type & ARM_SPE_TLB_ACCESS) {
745 err = arm_spe__synth_mem_sample(speq, spe->tlb_access_id,
752 if (spe->sample_branch && (record->op & ARM_SPE_OP_BRANCH_ERET)) {
753 err = arm_spe__synth_branch_sample(speq, spe->branch_id);
758 if (spe->sample_remote_access &&
759 (record->type & ARM_SPE_REMOTE_ACCESS)) {
760 err = arm_spe__synth_mem_sample(speq, spe->remote_access_id,
767 * When data_src is zero it means the record is not a memory operation,
768 * so skip synthesizing a memory sample in that case.
770 if (spe->sample_memory && data_src) {
771 err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
776 if (spe->sample_instructions) {
777 err = arm_spe__synth_instruction_sample(speq, spe->instructions_id, data_src);
785 static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
787 struct arm_spe *spe = speq->spe;
788 struct arm_spe_record *record;
791 if (!spe->kernel_start)
792 spe->kernel_start = machine__kernel_start(spe->machine);
796 * The usual logic is to decode the packets first and then use the
797 * resulting record to synthesize a sample; here the flow is
798 * reversed: arm_spe_sample() is called to synthesize a sample
799 * before arm_spe_decode().
801 * There are two reasons for this ordering:
802 * 1. When the queue is set up in arm_spe__setup_queue(), the trace
803 * data has already been decoded and a record generated, but no
804 * sample has been synthesized for it yet; that leftover record is
805 * turned into a sample here.
806 * 2. After decoding trace data, the record timestamp must be
807 * compared with the timestamp of the incoming perf event. If the
808 * record is later, this function bails out and pushes the record
809 * onto the auxtrace heap, so the record's sample synthesis is
810 * deferred until the next call; this keeps samples from Arm SPE
811 * trace data and other perf events correlated with correct time
812 * ordering.
816 * Update pid/tid info.
818 record = &speq->decoder->record;
819 if (!spe->timeless_decoding && record->context_id != (u64)-1) {
820 ret = arm_spe_set_tid(speq, record->context_id);
824 spe->use_ctx_pkt_for_pid = true;
827 ret = arm_spe_sample(speq);
831 ret = arm_spe_decode(speq->decoder);
833 pr_debug("No data or all data has been processed.\n");
838 * If an error is detected while decoding SPE trace data, continue
839 * to the next chunk of trace data to find more records.
844 record = &speq->decoder->record;
846 /* Update timestamp for the last record */
847 if (record->timestamp > speq->timestamp)
848 speq->timestamp = record->timestamp;
851 * If the timestamp of the queue is later than the timestamp of the
852 * incoming perf event, bail out so that the perf event can be
853 * processed first.
855 if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
856 *timestamp = speq->timestamp;
864 static int arm_spe__setup_queue(struct arm_spe *spe,
865 struct auxtrace_queue *queue,
866 unsigned int queue_nr)
868 struct arm_spe_queue *speq = queue->priv;
869 struct arm_spe_record *record;
871 if (list_empty(&queue->head) || speq)
874 speq = arm_spe__alloc_queue(spe, queue_nr);
881 if (queue->cpu != -1)
882 speq->cpu = queue->cpu;
884 if (!speq->on_heap) {
887 if (spe->timeless_decoding)
891 ret = arm_spe_decode(speq->decoder);
899 record = &speq->decoder->record;
901 speq->timestamp = record->timestamp;
902 ret = auxtrace_heap__add(&spe->heap, queue_nr, speq->timestamp);
905 speq->on_heap = true;
911 static int arm_spe__setup_queues(struct arm_spe *spe)
916 for (i = 0; i < spe->queues.nr_queues; i++) {
917 ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);
925 static int arm_spe__update_queues(struct arm_spe *spe)
927 if (spe->queues.new_data) {
928 spe->queues.new_data = false;
929 return arm_spe__setup_queues(spe);
935 static bool arm_spe__is_timeless_decoding(struct arm_spe *spe)
938 struct evlist *evlist = spe->session->evlist;
939 bool timeless_decoding = true;
942 * Cycle through the list of events and check whether any has the
943 * time bit set; if so, timeless decoding cannot be used.
945 evlist__for_each_entry(evlist, evsel) {
946 if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
947 timeless_decoding = false;
950 return timeless_decoding;
953 static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
955 unsigned int queue_nr;
960 struct auxtrace_queue *queue;
961 struct arm_spe_queue *speq;
963 if (!spe->heap.heap_cnt)
966 if (spe->heap.heap_array[0].ordinal >= timestamp)
969 queue_nr = spe->heap.heap_array[0].queue_nr;
970 queue = &spe->queues.queue_array[queue_nr];
973 auxtrace_heap__pop(&spe->heap);
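/*
 * A note on the heap handling below (partly elided in this listing): the
 * queue is decoded only up to roughly the ordinal of the next queue still
 * on the heap, which keeps all queues advancing in global timestamp order.
 */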
975 if (spe->heap.heap_cnt) {
976 ts = spe->heap.heap_array[0].ordinal + 1;
984 * A previous context-switch event has set pid/tid in the machine's context, so
985 * here we need to update the pid/tid in the thread and SPE queue.
987 if (!spe->use_ctx_pkt_for_pid)
988 arm_spe_set_pid_tid_cpu(spe, queue);
990 ret = arm_spe_run_decoder(speq, &ts);
992 auxtrace_heap__add(&spe->heap, queue_nr, ts);
997 ret = auxtrace_heap__add(&spe->heap, queue_nr, ts);
1001 speq->on_heap = false;
1008 static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid,
1011 struct auxtrace_queues *queues = &spe->queues;
1015 for (i = 0; i < queues->nr_queues; i++) {
1016 struct auxtrace_queue *queue = &spe->queues.queue_array[i];
1017 struct arm_spe_queue *speq = queue->priv;
1019 if (speq && (tid == -1 || speq->tid == tid)) {
1021 arm_spe_set_pid_tid_cpu(spe, queue);
1022 arm_spe_run_decoder(speq, &ts);
1028 static int arm_spe_context_switch(struct arm_spe *spe, union perf_event *event,
1029 struct perf_sample *sample)
1034 if (!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT))
1037 pid = event->context_switch.next_prev_pid;
1038 tid = event->context_switch.next_prev_tid;
1042 pr_warning("context_switch event has no tid\n");
1044 return machine__set_current_tid(spe->machine, cpu, pid, tid);
1047 static int arm_spe_process_event(struct perf_session *session,
1048 union perf_event *event,
1049 struct perf_sample *sample,
1050 const struct perf_tool *tool)
1054 struct arm_spe *spe = container_of(session->auxtrace,
1055 struct arm_spe, auxtrace);
1060 if (!tool->ordered_events) {
1061 pr_err("SPE trace requires ordered events\n");
1065 if (sample->time && (sample->time != (u64) -1))
1066 timestamp = perf_time_to_tsc(sample->time, &spe->tc);
1070 if (timestamp || spe->timeless_decoding) {
1071 err = arm_spe__update_queues(spe);
1076 if (spe->timeless_decoding) {
1077 if (event->header.type == PERF_RECORD_EXIT) {
1078 err = arm_spe_process_timeless_queues(spe,
1082 } else if (timestamp) {
1083 err = arm_spe_process_queues(spe, timestamp);
1087 if (!spe->use_ctx_pkt_for_pid &&
1088 (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE ||
1089 event->header.type == PERF_RECORD_SWITCH))
1090 err = arm_spe_context_switch(spe, event, sample);
1096 static int arm_spe_process_auxtrace_event(struct perf_session *session,
1097 union perf_event *event,
1098 const struct perf_tool *tool __maybe_unused)
1100 struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
1103 if (!spe->data_queued) {
1104 struct auxtrace_buffer *buffer;
1106 int fd = perf_data__fd(session->data);
1109 if (perf_data__is_pipe(session->data)) {
1112 data_offset = lseek(fd, 0, SEEK_CUR);
1113 if (data_offset == -1)
1117 err = auxtrace_queues__add_event(&spe->queues, session, event,
1118 data_offset, &buffer);
1122 /* Dump here, now that we have copied a piped trace out of the pipe */
1124 if (auxtrace_buffer__get_data(buffer, fd)) {
1125 arm_spe_dump_event(spe, buffer->data,
1127 auxtrace_buffer__put_data(buffer);
1135 static int arm_spe_flush(struct perf_session *session __maybe_unused,
1136 const struct perf_tool *tool __maybe_unused)
1138 struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
1145 if (!tool->ordered_events)
1148 ret = arm_spe__update_queues(spe);
1152 if (spe->timeless_decoding)
1153 return arm_spe_process_timeless_queues(spe, -1,
1156 ret = arm_spe_process_queues(spe, MAX_TIMESTAMP);
1160 if (!spe->use_ctx_pkt_for_pid)
1161 ui__warning("Arm SPE CONTEXT packets not found in the traces.\n"
1162 "Matching of TIDs to SPE events could be inaccurate.\n");
1167 static u64 *arm_spe__alloc_per_cpu_metadata(u64 *buf, int per_cpu_size)
1171 metadata = zalloc(per_cpu_size);
1175 memcpy(metadata, buf, per_cpu_size);
1179 static void arm_spe__free_metadata(u64 **metadata, int nr_cpu)
1183 for (i = 0; i < nr_cpu; i++)
1184 zfree(&metadata[i]);
1188 static u64 **arm_spe__alloc_metadata(struct perf_record_auxtrace_info *info,
1189 u64 *ver, int *nr_cpu)
1191 u64 *ptr = (u64 *)info->priv;
1193 u64 **metadata = NULL;
1194 int hdr_sz, per_cpu_sz, i;
1196 metadata_size = info->header.size -
1197 sizeof(struct perf_record_auxtrace_info);
1199 /* Metadata version 1 */
1200 if (metadata_size == ARM_SPE_AUXTRACE_V1_PRIV_SIZE) {
1203 /* No per CPU metadata */
1207 *ver = ptr[ARM_SPE_HEADER_VERSION];
1208 hdr_sz = ptr[ARM_SPE_HEADER_SIZE];
1209 *nr_cpu = ptr[ARM_SPE_CPUS_NUM];
1211 metadata = calloc(*nr_cpu, sizeof(*metadata));
1215 /* Locate the start address of per CPU metadata */
1217 per_cpu_sz = (metadata_size - (hdr_sz * sizeof(u64))) / (*nr_cpu);
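/*
 * Layout sketch (sizes purely illustrative): the priv area starts with
 * hdr_sz u64 header words, followed by nr_cpu equally sized per-CPU blocks.
 * For example, metadata_size = 160 bytes with hdr_sz = 4 and *nr_cpu = 2
 * gives per_cpu_sz = (160 - 4 * 8) / 2 = 64 bytes (8 u64 words) per CPU.
 */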
1219 for (i = 0; i < *nr_cpu; i++) {
1220 metadata[i] = arm_spe__alloc_per_cpu_metadata(ptr, per_cpu_sz);
1222 goto err_per_cpu_metadata;
1224 ptr += per_cpu_sz / sizeof(u64);
1229 err_per_cpu_metadata:
1230 arm_spe__free_metadata(metadata, *nr_cpu);
1234 static void arm_spe_free_queue(void *priv)
1236 struct arm_spe_queue *speq = priv;
1240 thread__zput(speq->thread);
1241 arm_spe_decoder_free(speq->decoder);
1242 zfree(&speq->event_buf);
1246 static void arm_spe_free_events(struct perf_session *session)
1248 struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
1250 struct auxtrace_queues *queues = &spe->queues;
1253 for (i = 0; i < queues->nr_queues; i++) {
1254 arm_spe_free_queue(queues->queue_array[i].priv);
1255 queues->queue_array[i].priv = NULL;
1257 auxtrace_queues__free(queues);
1260 static void arm_spe_free(struct perf_session *session)
1262 struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
1265 auxtrace_heap__free(&spe->heap);
1266 arm_spe_free_events(session);
1267 session->auxtrace = NULL;
1268 arm_spe__free_metadata(spe->metadata, spe->metadata_nr_cpu);
1272 static bool arm_spe_evsel_is_auxtrace(struct perf_session *session,
1273 struct evsel *evsel)
1275 struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, auxtrace);
1277 return evsel->core.attr.type == spe->pmu_type;
1280 static const char * const metadata_hdr_v1_fmts[] = {
1281 [ARM_SPE_PMU_TYPE] = " PMU Type :%"PRId64"\n",
1282 [ARM_SPE_PER_CPU_MMAPS] = " Per CPU mmaps :%"PRId64"\n",
1285 static const char * const metadata_hdr_fmts[] = {
1286 [ARM_SPE_HEADER_VERSION] = " Header version :%"PRId64"\n",
1287 [ARM_SPE_HEADER_SIZE] = " Header size :%"PRId64"\n",
1288 [ARM_SPE_PMU_TYPE_V2] = " PMU type v2 :%"PRId64"\n",
1289 [ARM_SPE_CPUS_NUM] = " CPU number :%"PRId64"\n",
1292 static const char * const metadata_per_cpu_fmts[] = {
1293 [ARM_SPE_MAGIC] = " Magic :0x%"PRIx64"\n",
1294 [ARM_SPE_CPU] = " CPU # :%"PRId64"\n",
1295 [ARM_SPE_CPU_NR_PARAMS] = " Num of params :%"PRId64"\n",
1296 [ARM_SPE_CPU_MIDR] = " MIDR :0x%"PRIx64"\n",
1297 [ARM_SPE_CPU_PMU_TYPE] = " PMU Type :%"PRId64"\n",
1298 [ARM_SPE_CAP_MIN_IVAL] = " Min Interval :%"PRId64"\n",
1301 static void arm_spe_print_info(struct arm_spe *spe, __u64 *arr)
1303 unsigned int i, cpu, hdr_size, cpu_num, cpu_size;
1304 const char * const *hdr_fmts;
1309 if (spe->metadata_ver == 1) {
1311 hdr_size = ARM_SPE_AUXTRACE_V1_PRIV_MAX;
1312 hdr_fmts = metadata_hdr_v1_fmts;
1314 cpu_num = arr[ARM_SPE_CPUS_NUM];
1315 hdr_size = arr[ARM_SPE_HEADER_SIZE];
1316 hdr_fmts = metadata_hdr_fmts;
1319 for (i = 0; i < hdr_size; i++)
1320 fprintf(stdout, hdr_fmts[i], arr[i]);
1323 for (cpu = 0; cpu < cpu_num; cpu++) {
1325 * The parameters from ARM_SPE_MAGIC to ARM_SPE_CPU_NR_PARAMS
1326 * are fixed; the number of parameters that follow is given by
1327 * the field 'ARM_SPE_CPU_NR_PARAMS'.
1329 cpu_size = (ARM_SPE_CPU_NR_PARAMS + 1) + arr[ARM_SPE_CPU_NR_PARAMS];
1330 for (i = 0; i < cpu_size; i++)
1331 fprintf(stdout, metadata_per_cpu_fmts[i], arr[i]);
1336 static void arm_spe_set_event_name(struct evlist *evlist, u64 id,
1339 struct evsel *evsel;
1341 evlist__for_each_entry(evlist, evsel) {
1342 if (evsel->core.id && evsel->core.id[0] == id) {
1344 zfree(&evsel->name);
1345 evsel->name = strdup(name);
1352 arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
1354 struct evlist *evlist = session->evlist;
1355 struct evsel *evsel;
1356 struct perf_event_attr attr;
1361 evlist__for_each_entry(evlist, evsel) {
1362 if (evsel->core.attr.type == spe->pmu_type) {
1369 pr_debug("No selected events with SPE trace data\n");
1373 memset(&attr, 0, sizeof(struct perf_event_attr));
1374 attr.size = sizeof(struct perf_event_attr);
1375 attr.type = PERF_TYPE_HARDWARE;
1376 attr.sample_type = evsel->core.attr.sample_type &
1377 (PERF_SAMPLE_MASK | PERF_SAMPLE_PHYS_ADDR);
1378 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
1379 PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC |
1380 PERF_SAMPLE_WEIGHT | PERF_SAMPLE_ADDR;
1381 if (spe->timeless_decoding)
1382 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
1384 attr.sample_type |= PERF_SAMPLE_TIME;
1386 spe->sample_type = attr.sample_type;
1388 attr.exclude_user = evsel->core.attr.exclude_user;
1389 attr.exclude_kernel = evsel->core.attr.exclude_kernel;
1390 attr.exclude_hv = evsel->core.attr.exclude_hv;
1391 attr.exclude_host = evsel->core.attr.exclude_host;
1392 attr.exclude_guest = evsel->core.attr.exclude_guest;
1393 attr.sample_id_all = evsel->core.attr.sample_id_all;
1394 attr.read_format = evsel->core.attr.read_format;
1396 /* Create a new id value at a fixed offset from the evsel id */
1397 id = evsel->core.id[0] + 1000000000;
1402 if (spe->synth_opts.flc) {
1403 spe->sample_flc = true;
1405 /* Level 1 data cache miss */
1406 err = perf_session__deliver_synth_attr_event(session, &attr, id);
1409 spe->l1d_miss_id = id;
1410 arm_spe_set_event_name(evlist, id, "l1d-miss");
1413 /* Level 1 data cache access */
1414 err = perf_session__deliver_synth_attr_event(session, &attr, id);
1417 spe->l1d_access_id = id;
1418 arm_spe_set_event_name(evlist, id, "l1d-access");
1422 if (spe->synth_opts.llc) {
1423 spe->sample_llc = true;
1425 /* Last level cache miss */
1426 err = perf_session__deliver_synth_attr_event(session, &attr, id);
1429 spe->llc_miss_id = id;
1430 arm_spe_set_event_name(evlist, id, "llc-miss");
1433 /* Last level cache access */
1434 err = perf_session__deliver_synth_attr_event(session, &attr, id);
1437 spe->llc_access_id = id;
1438 arm_spe_set_event_name(evlist, id, "llc-access");
1442 if (spe->synth_opts.tlb) {
1443 spe->sample_tlb = true;
1446 err = perf_session__deliver_synth_attr_event(session, &attr, id);
1449 spe->tlb_miss_id = id;
1450 arm_spe_set_event_name(evlist, id, "tlb-miss");
1454 err = perf_session__deliver_synth_attr_event(session, &attr, id);
1457 spe->tlb_access_id = id;
1458 arm_spe_set_event_name(evlist, id, "tlb-access");
1462 if (spe->synth_opts.branches) {
1463 spe->sample_branch = true;
1466 err = perf_session__deliver_synth_attr_event(session, &attr, id);
1469 spe->branch_id = id;
1470 arm_spe_set_event_name(evlist, id, "branch");
1474 if (spe->synth_opts.remote_access) {
1475 spe->sample_remote_access = true;
1478 err = perf_session__deliver_synth_attr_event(session, &attr, id);
1481 spe->remote_access_id = id;
1482 arm_spe_set_event_name(evlist, id, "remote-access");
1486 if (spe->synth_opts.mem) {
1487 spe->sample_memory = true;
1489 err = perf_session__deliver_synth_attr_event(session, &attr, id);
1492 spe->memory_id = id;
1493 arm_spe_set_event_name(evlist, id, "memory");
1497 if (spe->synth_opts.instructions) {
1498 if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) {
1499 pr_warning("Only instruction-based sampling period is currently supported by Arm SPE.\n");
1500 goto synth_instructions_out;
1502 if (spe->synth_opts.period > 1)
1503 pr_warning("Arm SPE has a hardware-based sample period.\n"
1504 "Additional instruction events will be discarded by --itrace\n");
1506 spe->sample_instructions = true;
1507 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
1508 attr.sample_period = spe->synth_opts.period;
1509 spe->instructions_sample_period = attr.sample_period;
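/*
 * instructions_sample_period is consumed in
 * arm_spe__synth_instruction_sample(), which emits one synthesized
 * instruction sample per 'period' decoded SPE records.
 */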
1510 err = perf_session__deliver_synth_attr_event(session, &attr, id);
1513 spe->instructions_id = id;
1514 arm_spe_set_event_name(evlist, id, "instructions");
1516 synth_instructions_out:
1521 static bool arm_spe__is_homogeneous(u64 **metadata, int nr_cpu)
1529 for (i = 0; i < nr_cpu; i++) {
1534 midr = metadata[i][ARM_SPE_CPU_MIDR];
1538 if (midr != metadata[i][ARM_SPE_CPU_MIDR])
1545 int arm_spe_process_auxtrace_info(union perf_event *event,
1546 struct perf_session *session)
1548 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
1549 size_t min_sz = ARM_SPE_AUXTRACE_V1_PRIV_SIZE;
1550 struct perf_record_time_conv *tc = &session->time_conv;
1551 struct arm_spe *spe;
1552 u64 **metadata = NULL;
1556 if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
1560 metadata = arm_spe__alloc_metadata(auxtrace_info, &metadata_ver,
1562 if (!metadata && metadata_ver != 1) {
1563 pr_err("Failed to parse Arm SPE metadata.\n");
1567 spe = zalloc(sizeof(struct arm_spe));
1570 goto err_free_metadata;
1573 err = auxtrace_queues__init(&spe->queues);
1577 spe->session = session;
1578 spe->machine = &session->machines.host; /* No kvm support */
1579 spe->auxtrace_type = auxtrace_info->type;
1580 if (metadata_ver == 1)
1581 spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
1583 spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE_V2];
1584 spe->metadata = metadata;
1585 spe->metadata_ver = metadata_ver;
1586 spe->metadata_nr_cpu = nr_cpu;
1587 spe->is_homogeneous = arm_spe__is_homogeneous(metadata, nr_cpu);
1589 spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);
1592 * The synthesized event PERF_RECORD_TIME_CONV has already been handled
1593 * and the hardware clock parameters are stored in the session
1594 * context. Pass these parameters to the struct perf_tsc_conversion
1595 * in "spe->tc", which is later used to convert between the clock
1596 * counter and timestamps.
1598 * For backward compatibility, copy the fields starting from
1599 * "time_cycles" only if they are contained in the event.
1601 spe->tc.time_shift = tc->time_shift;
1602 spe->tc.time_mult = tc->time_mult;
1603 spe->tc.time_zero = tc->time_zero;
1605 if (event_contains(*tc, time_cycles)) {
1606 spe->tc.time_cycles = tc->time_cycles;
1607 spe->tc.time_mask = tc->time_mask;
1608 spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
1609 spe->tc.cap_user_time_short = tc->cap_user_time_short;
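/*
 * These fields feed tsc_to_perf_time() (used in arm_spe_prep_sample());
 * roughly, and assuming the usual perf clock conversion,
 *   time = time_zero + (cyc * time_mult) >> time_shift
 * with time_cycles/time_mask handling the short-counter case.
 */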
1612 spe->auxtrace.process_event = arm_spe_process_event;
1613 spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
1614 spe->auxtrace.flush_events = arm_spe_flush;
1615 spe->auxtrace.free_events = arm_spe_free_events;
1616 spe->auxtrace.free = arm_spe_free;
1617 spe->auxtrace.evsel_is_auxtrace = arm_spe_evsel_is_auxtrace;
1618 session->auxtrace = &spe->auxtrace;
1620 arm_spe_print_info(spe, &auxtrace_info->priv[0]);
1625 if (session->itrace_synth_opts && session->itrace_synth_opts->set)
1626 spe->synth_opts = *session->itrace_synth_opts;
1628 itrace_synth_opts__set_default(&spe->synth_opts, false);
1630 err = arm_spe_synth_events(spe, session);
1632 goto err_free_queues;
1634 err = auxtrace_queues__process_index(&spe->queues, session);
1636 goto err_free_queues;
1638 if (spe->queues.populated)
1639 spe->data_queued = true;
1644 auxtrace_queues__free(&spe->queues);
1645 session->auxtrace = NULL;
1649 arm_spe__free_metadata(metadata, nr_cpu);