// SPDX-License-Identifier: GPL-2.0
 * Arm Statistical Profiling Extensions (SPE) support
 * Copyright (c) 2017-2018, Arm Ltd.
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include "thread-stack.h"
#include "util/synthetic-events.h"
#include "arm-spe-decoder/arm-spe-decoder.h"
#include "arm-spe-decoder/arm-spe-pkt-decoder.h"
#include "../../arch/arm64/include/asm/cputype.h"

#define MAX_TIMESTAMP (~0ULL)
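/* Passed by arm_spe_flush() to arm_spe_process_queues() so every queued record is decoded. */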
	struct auxtrace			auxtrace;
	struct auxtrace_queues		queues;
	struct auxtrace_heap		heap;
	struct itrace_synth_opts	synth_opts;
	struct perf_session		*session;
	struct machine			*machine;
	struct perf_tsc_conversion	tc;
	u8				sample_remote_access;
	u8				sample_instructions;
	u64				instructions_sample_period;
	unsigned long			num_events;
	u8				use_ctx_pkt_for_pid;
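/*
 * Per-queue (per-CPU or per-thread) decode state: the auxtrace buffers being
 * walked, a scratch buffer for synthesized events, the decoder instance and
 * the pid/tid/cpu bookkeeping for the queue.
 */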
struct arm_spe_queue {
	unsigned int			queue_nr;
	struct auxtrace_buffer		*buffer;
	struct auxtrace_buffer		*old_buffer;
	union perf_event		*event_buf;
	struct arm_spe_decoder		*decoder;
	struct thread			*thread;
	u64				period_instructions;
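/*
 * Hex dump a chunk of raw SPE trace: print each packet's bytes followed by
 * its decoded description, or flag it as a bad packet when decoding fails.
 */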
static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
			 unsigned char *buf, size_t len)
	struct arm_spe_pkt packet;
	char desc[ARM_SPE_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... ARM SPE data: size %#zx bytes\n",

		ret = arm_spe_get_packet(buf, len, &packet);

		color_fprintf(stdout, color, " %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
			color_fprintf(stdout, color, " ");

			ret = arm_spe_pkt_desc(&packet, desc,
					       ARM_SPE_PKT_DESC_MAX);
				color_fprintf(stdout, color, " %s\n", desc);

			color_fprintf(stdout, color, " Bad packet!\n");
static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
	arm_spe_dump(spe, buf, len);
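/*
 * Decoder data-fetch callback: hand the decoder the next auxtrace buffer for
 * this queue, mapping its data from the perf.data file on demand and dropping
 * the previously consumed buffer. A zero length tells the decoder the queue
 * is exhausted; empty buffers are skipped by recursing to the next one.
 */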
static int arm_spe_get_trace(struct arm_spe_buffer *b, void *data)
	struct arm_spe_queue *speq = data;
	struct auxtrace_buffer *buffer = speq->buffer;
	struct auxtrace_buffer *old_buffer = speq->old_buffer;
	struct auxtrace_queue *queue;

	queue = &speq->spe->queues.queue_array[speq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	/* If no more data, drop the previous auxtrace_buffer and return */
		auxtrace_buffer__drop_data(old_buffer);

	speq->buffer = buffer;

	/* If the aux_buffer doesn't have data associated, try to load it */
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(speq->spe->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);

	b->len = buffer->size;
	b->buf = buffer->data;

		auxtrace_buffer__drop_data(old_buffer);
		speq->old_buffer = buffer;

		auxtrace_buffer__drop_data(buffer);
		return arm_spe_get_trace(b, data);
static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
						  unsigned int queue_nr)
	struct arm_spe_params params = { .get_trace = 0, };
	struct arm_spe_queue *speq;

	speq = zalloc(sizeof(*speq));

	speq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!speq->event_buf)

	speq->queue_nr = queue_nr;
	speq->period_instructions = 0;

	params.get_trace = arm_spe_get_trace;

	/* create new decoder */
	speq->decoder = arm_spe_decoder_new(&params);

	zfree(&speq->event_buf);
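/*
 * Classify a sampled address as kernel or user space by comparing it against
 * the machine's kernel start address.
 */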
static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip)
	return ip >= spe->kernel_start ?
		PERF_RECORD_MISC_KERNEL :
		PERF_RECORD_MISC_USER;

static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
				    struct auxtrace_queue *queue)
	struct arm_spe_queue *speq = queue->priv;

	tid = machine__get_current_tid(spe->machine, speq->cpu);
		thread__zput(speq->thread);
		speq->tid = queue->tid;

	if ((!speq->thread) && (speq->tid != -1)) {
		speq->thread = machine__find_thread(spe->machine, -1,

		speq->pid = speq->thread->pid_;
		if (queue->cpu == -1)
			speq->cpu = speq->thread->cpu;
static int arm_spe_set_tid(struct arm_spe_queue *speq, pid_t tid)
	struct arm_spe *spe = speq->spe;
	int err = machine__set_current_tid(spe->machine, speq->cpu, -1, tid);

	arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);

static void arm_spe_prep_sample(struct arm_spe *spe,
				struct arm_spe_queue *speq,
				union perf_event *event,
				struct perf_sample *sample)
	struct arm_spe_record *record = &speq->decoder->record;

	if (!spe->timeless_decoding)
		sample->time = tsc_to_perf_time(record->timestamp, &spe->tc);

	sample->ip = record->from_ip;
	sample->cpumode = arm_spe_cpumode(spe, sample->ip);
	sample->pid = speq->pid;
	sample->tid = speq->tid;
	sample->cpu = speq->cpu;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = sample->cpumode;
	event->sample.header.size = sizeof(struct perf_event_header);
static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type)
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);

arm_spe_deliver_synth_event(struct arm_spe *spe,
			    struct arm_spe_queue *speq __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample)
	if (spe->synth_opts.inject) {
		ret = arm_spe__inject_event(event, sample, spe->sample_type);

	ret = perf_session__deliver_synth_event(spe->session, event, sample);
		pr_err("ARM SPE: failed to deliver event, error %d\n", ret);
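/*
 * The synth_*_sample() helpers below share the same shape: prepare a
 * perf_sample from the decoder's current record, tag it with the id of the
 * synthetic event it belongs to, and deliver it to the session.
 */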
static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
				     u64 spe_events_id, u64 data_src)
	struct arm_spe *spe = speq->spe;
	struct arm_spe_record *record = &speq->decoder->record;
	union perf_event *event = speq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	arm_spe_prep_sample(spe, speq, event, &sample);

	sample.id = spe_events_id;
	sample.stream_id = spe_events_id;
	sample.addr = record->virt_addr;
	sample.phys_addr = record->phys_addr;
	sample.data_src = data_src;
	sample.weight = record->latency;

	return arm_spe_deliver_synth_event(spe, speq, event, &sample);

static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
	struct arm_spe *spe = speq->spe;
	struct arm_spe_record *record = &speq->decoder->record;
	union perf_event *event = speq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	arm_spe_prep_sample(spe, speq, event, &sample);

	sample.id = spe_events_id;
	sample.stream_id = spe_events_id;
	sample.addr = record->to_ip;
	sample.weight = record->latency;

	return arm_spe_deliver_synth_event(spe, speq, event, &sample);
static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
					     u64 spe_events_id, u64 data_src)
	struct arm_spe *spe = speq->spe;
	struct arm_spe_record *record = &speq->decoder->record;
	union perf_event *event = speq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	 * Handles perf instruction sampling period.
	speq->period_instructions++;
	if (speq->period_instructions < spe->instructions_sample_period)

	speq->period_instructions = 0;

	arm_spe_prep_sample(spe, speq, event, &sample);

	sample.id = spe_events_id;
	sample.stream_id = spe_events_id;
	sample.addr = record->virt_addr;
	sample.phys_addr = record->phys_addr;
	sample.data_src = data_src;
	sample.period = spe->instructions_sample_period;
	sample.weight = record->latency;

	return arm_spe_deliver_synth_event(spe, speq, event, &sample);
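/*
 * CPUs whose SPE data-source packets are decoded with the Neoverse-specific
 * layout in arm_spe__synth_data_source_neoverse(); all other CPUs fall back
 * to the generic event-type based decoding.
 */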
static const struct midr_range neoverse_spe[] = {
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
						union perf_mem_data_src *data_src)
	 * Even though four levels of cache hierarchy are possible, no known
	 * production Neoverse systems currently include more than three levels,
	 * so for the time being we assume three exist. If a production system
	 * is built with four, then this function would have to be changed to
	 * detect the number of levels for reporting.

	 * We have no data on the hit level or data source for stores in the
	 * Neoverse SPE records.
	if (record->op & ARM_SPE_ST) {
		data_src->mem_lvl = PERF_MEM_LVL_NA;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
		data_src->mem_snoop = PERF_MEM_SNOOP_NA;

	switch (record->source) {
		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;

		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;

	case ARM_SPE_NV_PEER_CORE:
		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
	 * We don't know if this is L1 or L2, but we do know it was a
	 * cache-to-cache transfer, so set SNOOPX_PEER.
	case ARM_SPE_NV_LOCAL_CLUSTER:
	case ARM_SPE_NV_PEER_CLUSTER:
		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;

	 * System cache is assumed to be L3
	case ARM_SPE_NV_SYS_CACHE:
		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
		data_src->mem_snoop = PERF_MEM_SNOOP_HIT;

	 * We don't know what level it hit in, except it came from the other
	case ARM_SPE_NV_REMOTE:
		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;

	case ARM_SPE_NV_DRAM:
		data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
					       union perf_mem_data_src *data_src)
	if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
		data_src->mem_lvl = PERF_MEM_LVL_L3;

		if (record->type & ARM_SPE_LLC_MISS)
			data_src->mem_lvl |= PERF_MEM_LVL_MISS;

			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
	} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
		data_src->mem_lvl = PERF_MEM_LVL_L1;

		if (record->type & ARM_SPE_L1D_MISS)
			data_src->mem_lvl |= PERF_MEM_LVL_MISS;

			data_src->mem_lvl |= PERF_MEM_LVL_HIT;

	if (record->type & ARM_SPE_REMOTE_ACCESS)
		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
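/*
 * Build the PERF_SAMPLE_DATA_SRC encoding for a record: pick load vs. store
 * from the operation type, decode cache level and snoop information with the
 * Neoverse-specific or the generic scheme depending on the MIDR, and fill in
 * the data TLB bits from the TLB event flags.
 */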
static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
	union perf_mem_data_src data_src = { 0 };
	bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);

	if (record->op == ARM_SPE_LD)
		data_src.mem_op = PERF_MEM_OP_LOAD;
	else if (record->op == ARM_SPE_ST)
		data_src.mem_op = PERF_MEM_OP_STORE;

		arm_spe__synth_data_source_neoverse(record, &data_src);

		arm_spe__synth_data_source_generic(record, &data_src);

	if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
		data_src.mem_dtlb = PERF_MEM_TLB_WK;

		if (record->type & ARM_SPE_TLB_MISS)
			data_src.mem_dtlb |= PERF_MEM_TLB_MISS;

			data_src.mem_dtlb |= PERF_MEM_TLB_HIT;
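/*
 * Fan the decoder's current record out into the synthetic events requested
 * via --itrace: cache and TLB access/miss samples, branch-miss samples,
 * remote accesses, generic memory samples and instruction samples.
 */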
static int arm_spe_sample(struct arm_spe_queue *speq)
	const struct arm_spe_record *record = &speq->decoder->record;
	struct arm_spe *spe = speq->spe;

	data_src = arm_spe__synth_data_source(record, spe->midr);

	if (spe->sample_flc) {
		if (record->type & ARM_SPE_L1D_MISS) {
			err = arm_spe__synth_mem_sample(speq, spe->l1d_miss_id,

		if (record->type & ARM_SPE_L1D_ACCESS) {
			err = arm_spe__synth_mem_sample(speq, spe->l1d_access_id,

	if (spe->sample_llc) {
		if (record->type & ARM_SPE_LLC_MISS) {
			err = arm_spe__synth_mem_sample(speq, spe->llc_miss_id,

		if (record->type & ARM_SPE_LLC_ACCESS) {
			err = arm_spe__synth_mem_sample(speq, spe->llc_access_id,

	if (spe->sample_tlb) {
		if (record->type & ARM_SPE_TLB_MISS) {
			err = arm_spe__synth_mem_sample(speq, spe->tlb_miss_id,

		if (record->type & ARM_SPE_TLB_ACCESS) {
			err = arm_spe__synth_mem_sample(speq, spe->tlb_access_id,

	if (spe->sample_branch && (record->type & ARM_SPE_BRANCH_MISS)) {
		err = arm_spe__synth_branch_sample(speq, spe->branch_miss_id);

	if (spe->sample_remote_access &&
	    (record->type & ARM_SPE_REMOTE_ACCESS)) {
		err = arm_spe__synth_mem_sample(speq, spe->remote_access_id,
	 * When data_src is zero it means the record is not a memory operation,
	 * so skip synthesizing a memory sample in that case.
	if (spe->sample_memory && data_src) {
		err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);

	if (spe->sample_instructions) {
		err = arm_spe__synth_instruction_sample(speq, spe->instructions_id, data_src);
static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
	struct arm_spe *spe = speq->spe;
	struct arm_spe_record *record;

	if (!spe->kernel_start)
		spe->kernel_start = machine__kernel_start(spe->machine);
	 * The usual logic would be to decode the packets first and then use
	 * the resulting record to synthesize a sample; here the flow is
	 * reversed: arm_spe_sample() is called to synthesize samples before
	 * arm_spe_decode() is invoked.
	 *
	 * There are two reasons for this ordering:
	 * 1. When the queue was set up in arm_spe__setup_queue(), trace data
	 *    was already decoded and a record generated, but no sample has
	 *    been synthesized for it yet; that leftover record is handled
	 *    here first.
	 * 2. After decoding trace data, the record timestamp must be compared
	 *    with the timestamp of the incoming perf event. If the record is
	 *    later, this function bails out and pushes the record onto the
	 *    auxtrace heap, deferring sample synthesis until the next call.
	 *    This keeps samples from Arm SPE trace data correctly time-ordered
	 *    against other perf events.
	 * Update pid/tid info.
	record = &speq->decoder->record;
	if (!spe->timeless_decoding && record->context_id != (u64)-1) {
		ret = arm_spe_set_tid(speq, record->context_id);

		spe->use_ctx_pkt_for_pid = true;

	ret = arm_spe_sample(speq);

	ret = arm_spe_decode(speq->decoder);
		pr_debug("No data or all data has been processed.\n");

		 * An error was detected while decoding the SPE trace data;
		 * continue with the next trace data and try to find more records.

	record = &speq->decoder->record;

	/* Update timestamp for the last record */
	if (record->timestamp > speq->timestamp)
		speq->timestamp = record->timestamp;

	 * If the timestamp of the queue is later than the timestamp of the
	 * incoming perf event, bail out so the perf event can be processed
	 * first.
	if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
		*timestamp = speq->timestamp;
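/*
 * Lazily allocate the decode state for a queue. For timed decoding, decode up
 * to the first record so the queue can be placed on the auxtrace heap ordered
 * by that record's timestamp.
 */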
static int arm_spe__setup_queue(struct arm_spe *spe,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
	struct arm_spe_queue *speq = queue->priv;
	struct arm_spe_record *record;

	if (list_empty(&queue->head) || speq)

	speq = arm_spe__alloc_queue(spe, queue_nr);

	if (queue->cpu != -1)
		speq->cpu = queue->cpu;

	if (!speq->on_heap) {

		if (spe->timeless_decoding)

		ret = arm_spe_decode(speq->decoder);

		record = &speq->decoder->record;

		speq->timestamp = record->timestamp;
		ret = auxtrace_heap__add(&spe->heap, queue_nr, speq->timestamp);

		speq->on_heap = true;

static int arm_spe__setup_queues(struct arm_spe *spe)
	for (i = 0; i < spe->queues.nr_queues; i++) {
		ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);

static int arm_spe__update_queues(struct arm_spe *spe)
	if (spe->queues.new_data) {
		spe->queues.new_data = false;
		return arm_spe__setup_queues(spe);
static bool arm_spe__is_timeless_decoding(struct arm_spe *spe)
	struct evlist *evlist = spe->session->evlist;
	bool timeless_decoding = true;

	 * Loop through the list of events and check whether any of them has
	 * the time bit set; if none does, decoding is timeless.
	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			timeless_decoding = false;

	return timeless_decoding;
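/*
 * Decode queued trace in timestamp order: repeatedly pop the queue with the
 * oldest pending record off the auxtrace heap, run its decoder up to
 * "timestamp", then re-insert the queue keyed by the point where the decoder
 * stopped.
 */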
static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
	unsigned int queue_nr;

		struct auxtrace_queue *queue;
		struct arm_spe_queue *speq;

		if (!spe->heap.heap_cnt)

		if (spe->heap.heap_array[0].ordinal >= timestamp)

		queue_nr = spe->heap.heap_array[0].queue_nr;
		queue = &spe->queues.queue_array[queue_nr];

		auxtrace_heap__pop(&spe->heap);

		if (spe->heap.heap_cnt) {
			ts = spe->heap.heap_array[0].ordinal + 1;

		 * A previous context-switch event has set pid/tid in the
		 * machine's context, so here we need to update the pid/tid in
		 * the thread and SPE queue.
		if (!spe->use_ctx_pkt_for_pid)
			arm_spe_set_pid_tid_cpu(spe, queue);

		ret = arm_spe_run_decoder(speq, &ts);
			auxtrace_heap__add(&spe->heap, queue_nr, ts);

		ret = auxtrace_heap__add(&spe->heap, queue_nr, ts);

		speq->on_heap = false;
static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid,
	struct auxtrace_queues *queues = &spe->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &spe->queues.queue_array[i];
		struct arm_spe_queue *speq = queue->priv;

		if (speq && (tid == -1 || speq->tid == tid)) {
			arm_spe_set_pid_tid_cpu(spe, queue);
			arm_spe_run_decoder(speq, &ts);

static int arm_spe_context_switch(struct arm_spe *spe, union perf_event *event,
				  struct perf_sample *sample)
	if (!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT))

	pid = event->context_switch.next_prev_pid;
	tid = event->context_switch.next_prev_tid;

		pr_warning("context_switch event has no tid\n");

	return machine__set_current_tid(spe->machine, cpu, pid, tid);
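/*
 * Main event callback: timestamped events advance the SPE queues up to the
 * event's time, PERF_RECORD_EXIT drains the timeless queues, and context
 * switch events are used to track pid/tid when the trace carries no CONTEXT
 * packets.
 */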
static int arm_spe_process_event(struct perf_session *session,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_tool *tool)
	struct arm_spe *spe = container_of(session->auxtrace,
					   struct arm_spe, auxtrace);

	if (!tool->ordered_events) {
		pr_err("SPE trace requires ordered events\n");

	if (sample->time && (sample->time != (u64) -1))
		timestamp = perf_time_to_tsc(sample->time, &spe->tc);

	if (timestamp || spe->timeless_decoding) {
		err = arm_spe__update_queues(spe);

	if (spe->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = arm_spe_process_timeless_queues(spe,

	} else if (timestamp) {
		err = arm_spe_process_queues(spe, timestamp);

		if (!spe->use_ctx_pkt_for_pid &&
		    (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE ||
		     event->header.type == PERF_RECORD_SWITCH))
			err = arm_spe_context_switch(spe, event, sample);
static int arm_spe_process_auxtrace_event(struct perf_session *session,
					  union perf_event *event,
					  struct perf_tool *tool __maybe_unused)
	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,

	if (!spe->data_queued) {
		struct auxtrace_buffer *buffer;
		int fd = perf_data__fd(session->data);

		if (perf_data__is_pipe(session->data)) {

			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)

		err = auxtrace_queues__add_event(&spe->queues, session, event,
						 data_offset, &buffer);
		/* Dump here now that we have copied a piped trace out of the pipe */
		if (auxtrace_buffer__get_data(buffer, fd)) {
			arm_spe_dump_event(spe, buffer->data,
			auxtrace_buffer__put_data(buffer);
static int arm_spe_flush(struct perf_session *session __maybe_unused,
			 struct perf_tool *tool __maybe_unused)
	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,

	if (!tool->ordered_events)

	ret = arm_spe__update_queues(spe);

	if (spe->timeless_decoding)
		return arm_spe_process_timeless_queues(spe, -1,

	ret = arm_spe_process_queues(spe, MAX_TIMESTAMP);

	if (!spe->use_ctx_pkt_for_pid)
		ui__warning("Arm SPE CONTEXT packets not found in the traces.\n"
			    "Matching of TIDs to SPE events could be inaccurate.\n");
static void arm_spe_free_queue(void *priv)
	struct arm_spe_queue *speq = priv;

	thread__zput(speq->thread);
	arm_spe_decoder_free(speq->decoder);
	zfree(&speq->event_buf);

static void arm_spe_free_events(struct perf_session *session)
	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
	struct auxtrace_queues *queues = &spe->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		arm_spe_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;

	auxtrace_queues__free(queues);

static void arm_spe_free(struct perf_session *session)
	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,

	auxtrace_heap__free(&spe->heap);
	arm_spe_free_events(session);
	session->auxtrace = NULL;

static bool arm_spe_evsel_is_auxtrace(struct perf_session *session,
				      struct evsel *evsel)
	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, auxtrace);

	return evsel->core.attr.type == spe->pmu_type;
static const char * const arm_spe_info_fmts[] = {
	[ARM_SPE_PMU_TYPE] = " PMU Type %"PRId64"\n",

static void arm_spe_print_info(__u64 *arr)
	fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);

struct arm_spe_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;

static int arm_spe_event_synth(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine __maybe_unused)
	struct arm_spe_synth *arm_spe_synth =
		container_of(tool, struct arm_spe_synth, dummy_tool);

	return perf_session__deliver_synth_event(arm_spe_synth->session,

static int arm_spe_synth_event(struct perf_session *session,
			       struct perf_event_attr *attr, u64 id)
	struct arm_spe_synth arm_spe_synth;

	memset(&arm_spe_synth, 0, sizeof(struct arm_spe_synth));
	arm_spe_synth.session = session;

	return perf_event__synthesize_attr(&arm_spe_synth.dummy_tool, attr, 1,
					   &id, arm_spe_event_synth);

static void arm_spe_set_event_name(struct evlist *evlist, u64 id,
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.id && evsel->core.id[0] == id) {
			zfree(&evsel->name);
			evsel->name = strdup(name);
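/*
 * Create one synthetic perf event (attribute plus sample id) for each sample
 * type requested via --itrace and give it a readable name; the ids are
 * derived from the SPE evsel's first id plus a fixed offset.
 */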
arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;
	struct perf_event_attr attr;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == spe->pmu_type) {

		pr_debug("No selected events with SPE trace data\n");

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type &
				(PERF_SAMPLE_MASK | PERF_SAMPLE_PHYS_ADDR);
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC |
			    PERF_SAMPLE_WEIGHT | PERF_SAMPLE_ADDR;
	if (spe->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;

		attr.sample_type |= PERF_SAMPLE_TIME;

	spe->sample_type = attr.sample_type;

	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	/* create new id val to be a fixed offset from evsel id */
	id = evsel->core.id[0] + 1000000000;
	if (spe->synth_opts.flc) {
		spe->sample_flc = true;

		/* Level 1 data cache miss */
		err = arm_spe_synth_event(session, &attr, id);
		spe->l1d_miss_id = id;
		arm_spe_set_event_name(evlist, id, "l1d-miss");

		/* Level 1 data cache access */
		err = arm_spe_synth_event(session, &attr, id);
		spe->l1d_access_id = id;
		arm_spe_set_event_name(evlist, id, "l1d-access");

	if (spe->synth_opts.llc) {
		spe->sample_llc = true;

		/* Last level cache miss */
		err = arm_spe_synth_event(session, &attr, id);
		spe->llc_miss_id = id;
		arm_spe_set_event_name(evlist, id, "llc-miss");

		/* Last level cache access */
		err = arm_spe_synth_event(session, &attr, id);
		spe->llc_access_id = id;
		arm_spe_set_event_name(evlist, id, "llc-access");

	if (spe->synth_opts.tlb) {
		spe->sample_tlb = true;

		err = arm_spe_synth_event(session, &attr, id);
		spe->tlb_miss_id = id;
		arm_spe_set_event_name(evlist, id, "tlb-miss");

		err = arm_spe_synth_event(session, &attr, id);
		spe->tlb_access_id = id;
		arm_spe_set_event_name(evlist, id, "tlb-access");

	if (spe->synth_opts.branches) {
		spe->sample_branch = true;

		err = arm_spe_synth_event(session, &attr, id);
		spe->branch_miss_id = id;
		arm_spe_set_event_name(evlist, id, "branch-miss");

	if (spe->synth_opts.remote_access) {
		spe->sample_remote_access = true;

		err = arm_spe_synth_event(session, &attr, id);
		spe->remote_access_id = id;
		arm_spe_set_event_name(evlist, id, "remote-access");

	if (spe->synth_opts.mem) {
		spe->sample_memory = true;

		err = arm_spe_synth_event(session, &attr, id);
		spe->memory_id = id;
		arm_spe_set_event_name(evlist, id, "memory");

	if (spe->synth_opts.instructions) {
		if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) {
			pr_warning("Only instruction-based sampling period is currently supported by Arm SPE.\n");
			goto synth_instructions_out;

		if (spe->synth_opts.period > 1)
			pr_warning("Arm SPE has a hardware-based sample period.\n"
				   "Additional instruction events will be discarded by --itrace\n");

		spe->sample_instructions = true;
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = spe->synth_opts.period;
		spe->instructions_sample_period = attr.sample_period;
		err = arm_spe_synth_event(session, &attr, id);
		spe->instructions_id = id;
		arm_spe_set_event_name(evlist, id, "instructions");

synth_instructions_out:
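/*
 * Entry point for PERF_RECORD_AUXTRACE_INFO: allocate and initialise the
 * struct arm_spe for this session, copy the clock conversion parameters,
 * register the auxtrace callbacks, synthesize the requested events and queue
 * up the recorded trace data for decoding.
 */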
int arm_spe_process_auxtrace_info(union perf_event *event,
				  struct perf_session *session)
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX;
	struct perf_record_time_conv *tc = &session->time_conv;
	const char *cpuid = perf_env__cpuid(session->evlist->env);
	u64 midr = strtol(cpuid, NULL, 16);
	struct arm_spe *spe;

	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +

	spe = zalloc(sizeof(struct arm_spe));

	err = auxtrace_queues__init(&spe->queues);

	spe->session = session;
	spe->machine = &session->machines.host; /* No kvm support */
	spe->auxtrace_type = auxtrace_info->type;
	spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];

	spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);
	 * The synthesized event PERF_RECORD_TIME_CONV has already been handled
	 * ahead of this point and the parameters for the hardware clock are
	 * stored in the session context. Pass these parameters on to the
	 * struct perf_tsc_conversion in "spe->tc", which is used later to
	 * convert between the clock counter and timestamps (see the sketch
	 * below).
	 *
	 * For backward compatibility, copy the fields starting from
	 * "time_cycles" only if they are contained in the event.
	spe->tc.time_shift = tc->time_shift;
	spe->tc.time_mult = tc->time_mult;
	spe->tc.time_zero = tc->time_zero;

	if (event_contains(*tc, time_cycles)) {
		spe->tc.time_cycles = tc->time_cycles;
		spe->tc.time_mask = tc->time_mask;
		spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
		spe->tc.cap_user_time_short = tc->cap_user_time_short;

	spe->auxtrace.process_event = arm_spe_process_event;
	spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
	spe->auxtrace.flush_events = arm_spe_flush;
	spe->auxtrace.free_events = arm_spe_free_events;
	spe->auxtrace.free = arm_spe_free;
	spe->auxtrace.evsel_is_auxtrace = arm_spe_evsel_is_auxtrace;
	session->auxtrace = &spe->auxtrace;

	arm_spe_print_info(&auxtrace_info->priv[0]);

	if (session->itrace_synth_opts && session->itrace_synth_opts->set)
		spe->synth_opts = *session->itrace_synth_opts;

		itrace_synth_opts__set_default(&spe->synth_opts, false);

	err = arm_spe_synth_events(spe, session);
		goto err_free_queues;

	err = auxtrace_queues__process_index(&spe->queues, session);
		goto err_free_queues;

	if (spe->queues.populated)
		spe->data_queued = true;

	auxtrace_queues__free(&spe->queues);
	session->auxtrace = NULL;