tools/perf/util/arm-spe.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Arm Statistical Profiling Extensions (SPE) support
 * Copyright (c) 2017-2018, Arm Ltd.
 */

#include <byteswap.h>
#include <endian.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <unistd.h>

#include "auxtrace.h"
#include "color.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "session.h"
#include "symbol.h"
#include "thread.h"
#include "thread-stack.h"
#include "tsc.h"
#include "tool.h"
#include "util/synthetic-events.h"

#include "arm-spe.h"
#include "arm-spe-decoder/arm-spe-decoder.h"
#include "arm-spe-decoder/arm-spe-pkt-decoder.h"

#include "../../arch/arm64/include/asm/cputype.h"

#define MAX_TIMESTAMP (~0ULL)

struct arm_spe {
        struct auxtrace                 auxtrace;
        struct auxtrace_queues          queues;
        struct auxtrace_heap            heap;
        struct itrace_synth_opts        synth_opts;
        u32                             auxtrace_type;
        struct perf_session             *session;
        struct machine                  *machine;
        u32                             pmu_type;
        u64                             midr;

        struct perf_tsc_conversion      tc;

        u8                              timeless_decoding;
        u8                              data_queued;

        u64                             sample_type;
        u8                              sample_flc;
        u8                              sample_llc;
        u8                              sample_tlb;
        u8                              sample_branch;
        u8                              sample_remote_access;
        u8                              sample_memory;
        u8                              sample_instructions;
        u64                             instructions_sample_period;

        u64                             l1d_miss_id;
        u64                             l1d_access_id;
        u64                             llc_miss_id;
        u64                             llc_access_id;
        u64                             tlb_miss_id;
        u64                             tlb_access_id;
        u64                             branch_miss_id;
        u64                             remote_access_id;
        u64                             memory_id;
        u64                             instructions_id;

        u64                             kernel_start;

        unsigned long                   num_events;
        u8                              use_ctx_pkt_for_pid;
};

struct arm_spe_queue {
        struct arm_spe                  *spe;
        unsigned int                    queue_nr;
        struct auxtrace_buffer          *buffer;
        struct auxtrace_buffer          *old_buffer;
        union perf_event                *event_buf;
        bool                            on_heap;
        bool                            done;
        pid_t                           pid;
        pid_t                           tid;
        int                             cpu;
        struct arm_spe_decoder          *decoder;
        u64                             time;
        u64                             timestamp;
        struct thread                   *thread;
        u64                             period_instructions;
};

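/*
 * Pretty-print a block of raw SPE trace data: each packet is dumped as a
 * hex byte sequence (padded to 16 columns), followed by its decoded
 * description, or "Bad packet!" when the payload cannot be parsed.
 */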
static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
                         unsigned char *buf, size_t len)
{
        struct arm_spe_pkt packet;
        size_t pos = 0;
        int ret, pkt_len, i;
        char desc[ARM_SPE_PKT_DESC_MAX];
        const char *color = PERF_COLOR_BLUE;

        color_fprintf(stdout, color,
                      ". ... ARM SPE data: size %#zx bytes\n",
                      len);

        while (len) {
                ret = arm_spe_get_packet(buf, len, &packet);
                if (ret > 0)
                        pkt_len = ret;
                else
                        pkt_len = 1;
                printf(".");
                color_fprintf(stdout, color, "  %08zx: ", pos);
                for (i = 0; i < pkt_len; i++)
                        color_fprintf(stdout, color, " %02x", buf[i]);
                for (; i < 16; i++)
                        color_fprintf(stdout, color, "   ");
                if (ret > 0) {
                        ret = arm_spe_pkt_desc(&packet, desc,
                                               ARM_SPE_PKT_DESC_MAX);
                        if (!ret)
                                color_fprintf(stdout, color, " %s\n", desc);
                } else {
                        color_fprintf(stdout, color, " Bad packet!\n");
                }
                pos += pkt_len;
                buf += pkt_len;
                len -= pkt_len;
        }
}

static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
                               size_t len)
{
        printf(".\n");
        arm_spe_dump(spe, buf, len);
}

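/*
 * Decoder callback for fetching the next buffer of raw trace data for a
 * queue.  Returns with b->len == 0 when the queue is exhausted; buffers
 * that have been consumed are dropped to release their data.
 */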
static int arm_spe_get_trace(struct arm_spe_buffer *b, void *data)
{
        struct arm_spe_queue *speq = data;
        struct auxtrace_buffer *buffer = speq->buffer;
        struct auxtrace_buffer *old_buffer = speq->old_buffer;
        struct auxtrace_queue *queue;

        queue = &speq->spe->queues.queue_array[speq->queue_nr];

        buffer = auxtrace_buffer__next(queue, buffer);
        /* If no more data, drop the previous auxtrace_buffer and return */
        if (!buffer) {
                if (old_buffer)
                        auxtrace_buffer__drop_data(old_buffer);
                b->len = 0;
                return 0;
        }

        speq->buffer = buffer;

        /* If the aux_buffer doesn't have data associated, try to load it */
        if (!buffer->data) {
                /* get the file descriptor associated with the perf data file */
                int fd = perf_data__fd(speq->spe->session->data);

                buffer->data = auxtrace_buffer__get_data(buffer, fd);
                if (!buffer->data)
                        return -ENOMEM;
        }

        b->len = buffer->size;
        b->buf = buffer->data;

        if (b->len) {
                if (old_buffer)
                        auxtrace_buffer__drop_data(old_buffer);
                speq->old_buffer = buffer;
        } else {
                auxtrace_buffer__drop_data(buffer);
                return arm_spe_get_trace(b, data);
        }

        return 0;
}

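/*
 * Allocate and initialize a per-queue decode context, including the
 * scratch event buffer used for synthesized samples and a new decoder
 * instance wired up to arm_spe_get_trace().
 */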
static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
                                                  unsigned int queue_nr)
{
        struct arm_spe_params params = { .get_trace = 0, };
        struct arm_spe_queue *speq;

        speq = zalloc(sizeof(*speq));
        if (!speq)
                return NULL;

        speq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
        if (!speq->event_buf)
                goto out_free;

        speq->spe = spe;
        speq->queue_nr = queue_nr;
        speq->pid = -1;
        speq->tid = -1;
        speq->cpu = -1;
        speq->period_instructions = 0;

        /* params set */
        params.get_trace = arm_spe_get_trace;
        params.data = speq;

        /* create new decoder */
        speq->decoder = arm_spe_decoder_new(&params);
        if (!speq->decoder)
                goto out_free;

        return speq;

out_free:
        zfree(&speq->event_buf);
        free(speq);

        return NULL;
}

static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip)
{
        return ip >= spe->kernel_start ?
                PERF_RECORD_MISC_KERNEL :
                PERF_RECORD_MISC_USER;
}

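/*
 * Refresh the queue's pid/tid/cpu from the machine state, falling back to
 * the tid recorded for the auxtrace queue when no current tid is known
 * for the CPU.
 */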
static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
                                    struct auxtrace_queue *queue)
{
        struct arm_spe_queue *speq = queue->priv;
        pid_t tid;

        tid = machine__get_current_tid(spe->machine, speq->cpu);
        if (tid != -1) {
                speq->tid = tid;
                thread__zput(speq->thread);
        } else {
                speq->tid = queue->tid;
        }

        if (!speq->thread && speq->tid != -1) {
                speq->thread = machine__find_thread(spe->machine, -1,
                                                    speq->tid);
        }

        if (speq->thread) {
                speq->pid = speq->thread->pid_;
                if (queue->cpu == -1)
                        speq->cpu = speq->thread->cpu;
        }
}

static int arm_spe_set_tid(struct arm_spe_queue *speq, pid_t tid)
{
        struct arm_spe *spe = speq->spe;
        int err = machine__set_current_tid(spe->machine, speq->cpu, -1, tid);

        if (err)
                return err;

        arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);

        return 0;
}

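/*
 * Fill in the fields common to all synthesized samples (timestamp, ip,
 * cpumode, pid/tid, cpu) from the decoded record and the queue state.
 */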
static void arm_spe_prep_sample(struct arm_spe *spe,
                                struct arm_spe_queue *speq,
                                union perf_event *event,
                                struct perf_sample *sample)
{
        struct arm_spe_record *record = &speq->decoder->record;

        if (!spe->timeless_decoding)
                sample->time = tsc_to_perf_time(record->timestamp, &spe->tc);

        sample->ip = record->from_ip;
        sample->cpumode = arm_spe_cpumode(spe, sample->ip);
        sample->pid = speq->pid;
        sample->tid = speq->tid;
        sample->period = 1;
        sample->cpu = speq->cpu;

        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = sample->cpumode;
        event->sample.header.size = sizeof(struct perf_event_header);
}

static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type)
{
        event->header.size = perf_event__sample_event_size(sample, type, 0);
        return perf_event__synthesize_sample(event, type, 0, sample);
}

static inline int
arm_spe_deliver_synth_event(struct arm_spe *spe,
                            struct arm_spe_queue *speq __maybe_unused,
                            union perf_event *event,
                            struct perf_sample *sample)
{
        int ret;

        if (spe->synth_opts.inject) {
                ret = arm_spe__inject_event(event, sample, spe->sample_type);
                if (ret)
                        return ret;
        }

        ret = perf_session__deliver_synth_event(spe->session, event, sample);
        if (ret)
                pr_err("ARM SPE: failed to deliver event, error %d\n", ret);

        return ret;
}

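/*
 * Synthesize a memory sample carrying the virtual and physical address,
 * data source and latency weight of the current decoded record.
 */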
static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
                                     u64 spe_events_id, u64 data_src)
{
        struct arm_spe *spe = speq->spe;
        struct arm_spe_record *record = &speq->decoder->record;
        union perf_event *event = speq->event_buf;
        struct perf_sample sample = { .ip = 0, };

        arm_spe_prep_sample(spe, speq, event, &sample);

        sample.id = spe_events_id;
        sample.stream_id = spe_events_id;
        sample.addr = record->virt_addr;
        sample.phys_addr = record->phys_addr;
        sample.data_src = data_src;
        sample.weight = record->latency;

        return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}

static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
                                        u64 spe_events_id)
{
        struct arm_spe *spe = speq->spe;
        struct arm_spe_record *record = &speq->decoder->record;
        union perf_event *event = speq->event_buf;
        struct perf_sample sample = { .ip = 0, };

        arm_spe_prep_sample(spe, speq, event, &sample);

        sample.id = spe_events_id;
        sample.stream_id = spe_events_id;
        sample.addr = record->to_ip;
        sample.weight = record->latency;

        return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}

static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
                                             u64 spe_events_id, u64 data_src)
{
        struct arm_spe *spe = speq->spe;
        struct arm_spe_record *record = &speq->decoder->record;
        union perf_event *event = speq->event_buf;
        struct perf_sample sample = { .ip = 0, };

        /*
         * Handle the perf instruction sampling period: only synthesize a
         * sample once every instructions_sample_period records.
         */
        speq->period_instructions++;
        if (speq->period_instructions < spe->instructions_sample_period)
                return 0;
        speq->period_instructions = 0;

        arm_spe_prep_sample(spe, speq, event, &sample);

        sample.id = spe_events_id;
        sample.stream_id = spe_events_id;
        sample.addr = record->virt_addr;
        sample.phys_addr = record->phys_addr;
        sample.data_src = data_src;
        sample.period = spe->instructions_sample_period;
        sample.weight = record->latency;

        return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}

static const struct midr_range neoverse_spe[] = {
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
        {},
};

static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
                                                union perf_mem_data_src *data_src)
{
        /*
         * Even though four levels of cache hierarchy are possible, no known
         * production Neoverse systems currently include more than three
         * levels, so for the time being we assume three exist.  If a
         * production system is built with four, then this function would
         * have to be changed to detect the number of levels for reporting.
         */

        /*
         * We have no data on the hit level or data source for stores in the
         * Neoverse SPE records.
         */
        if (record->op & ARM_SPE_ST) {
                data_src->mem_lvl = PERF_MEM_LVL_NA;
                data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
                data_src->mem_snoop = PERF_MEM_SNOOP_NA;
                return;
        }

        switch (record->source) {
        case ARM_SPE_NV_L1D:
                data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
                data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
                data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
                break;
        case ARM_SPE_NV_L2:
                data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
                data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
                data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
                break;
        case ARM_SPE_NV_PEER_CORE:
                data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
                data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
                data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
                break;
        /*
         * We don't know whether this hit in L1 or L2, but we do know it was
         * a cache-to-cache transfer, so set SNOOPX_PEER.
         */
        case ARM_SPE_NV_LOCAL_CLUSTER:
        case ARM_SPE_NV_PEER_CLUSTER:
                data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
                data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
                data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
                break;
        /*
         * The system cache is assumed to be L3.
         */
        case ARM_SPE_NV_SYS_CACHE:
                data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
                data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
                data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
                break;
        /*
         * We don't know what level it hit in, except that it came from the
         * other socket.
         */
        case ARM_SPE_NV_REMOTE:
                data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
                data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
                data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
                data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
                break;
        case ARM_SPE_NV_DRAM:
                data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
                data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
                data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
                break;
        default:
                break;
        }
}

static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
                                               union perf_mem_data_src *data_src)
{
        if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
                data_src->mem_lvl = PERF_MEM_LVL_L3;

                if (record->type & ARM_SPE_LLC_MISS)
                        data_src->mem_lvl |= PERF_MEM_LVL_MISS;
                else
                        data_src->mem_lvl |= PERF_MEM_LVL_HIT;
        } else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
                data_src->mem_lvl = PERF_MEM_LVL_L1;

                if (record->type & ARM_SPE_L1D_MISS)
                        data_src->mem_lvl |= PERF_MEM_LVL_MISS;
                else
                        data_src->mem_lvl |= PERF_MEM_LVL_HIT;
        }

        if (record->type & ARM_SPE_REMOTE_ACCESS)
                data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
}

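/*
 * Encode a decoded record as a perf_mem_data_src value: the memory
 * operation (load/store), the cache level and snoop information (using
 * the Neoverse-specific source field when the MIDR matches, or the
 * generic access/miss events otherwise) and the TLB walk/hit/miss bits.
 * Returns 0 when the record is not a memory operation.
 */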
static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
{
        union perf_mem_data_src data_src = { 0 };
        bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);

        if (record->op == ARM_SPE_LD)
                data_src.mem_op = PERF_MEM_OP_LOAD;
        else if (record->op == ARM_SPE_ST)
                data_src.mem_op = PERF_MEM_OP_STORE;
        else
                return 0;

        if (is_neoverse)
                arm_spe__synth_data_source_neoverse(record, &data_src);
        else
                arm_spe__synth_data_source_generic(record, &data_src);

        if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
                data_src.mem_dtlb = PERF_MEM_TLB_WK;

                if (record->type & ARM_SPE_TLB_MISS)
                        data_src.mem_dtlb |= PERF_MEM_TLB_MISS;
                else
                        data_src.mem_dtlb |= PERF_MEM_TLB_HIT;
        }

        return data_src.val;
}

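/*
 * Fan out one decoded record to every sample type requested via
 * --itrace: cache, TLB, branch, remote access, memory and instruction
 * samples are each gated on the corresponding synth option.
 */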
static int arm_spe_sample(struct arm_spe_queue *speq)
{
        const struct arm_spe_record *record = &speq->decoder->record;
        struct arm_spe *spe = speq->spe;
        u64 data_src;
        int err;

        data_src = arm_spe__synth_data_source(record, spe->midr);

        if (spe->sample_flc) {
                if (record->type & ARM_SPE_L1D_MISS) {
                        err = arm_spe__synth_mem_sample(speq, spe->l1d_miss_id,
                                                        data_src);
                        if (err)
                                return err;
                }

                if (record->type & ARM_SPE_L1D_ACCESS) {
                        err = arm_spe__synth_mem_sample(speq, spe->l1d_access_id,
                                                        data_src);
                        if (err)
                                return err;
                }
        }

        if (spe->sample_llc) {
                if (record->type & ARM_SPE_LLC_MISS) {
                        err = arm_spe__synth_mem_sample(speq, spe->llc_miss_id,
                                                        data_src);
                        if (err)
                                return err;
                }

                if (record->type & ARM_SPE_LLC_ACCESS) {
                        err = arm_spe__synth_mem_sample(speq, spe->llc_access_id,
                                                        data_src);
                        if (err)
                                return err;
                }
        }

        if (spe->sample_tlb) {
                if (record->type & ARM_SPE_TLB_MISS) {
                        err = arm_spe__synth_mem_sample(speq, spe->tlb_miss_id,
                                                        data_src);
                        if (err)
                                return err;
                }

                if (record->type & ARM_SPE_TLB_ACCESS) {
                        err = arm_spe__synth_mem_sample(speq, spe->tlb_access_id,
                                                        data_src);
                        if (err)
                                return err;
                }
        }

        if (spe->sample_branch && (record->type & ARM_SPE_BRANCH_MISS)) {
                err = arm_spe__synth_branch_sample(speq, spe->branch_miss_id);
                if (err)
                        return err;
        }

        if (spe->sample_remote_access &&
            (record->type & ARM_SPE_REMOTE_ACCESS)) {
                err = arm_spe__synth_mem_sample(speq, spe->remote_access_id,
                                                data_src);
                if (err)
                        return err;
        }

        /*
         * When data_src is zero, the record is not a memory operation; skip
         * synthesizing a memory sample in this case.
         */
        if (spe->sample_memory && data_src) {
                err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
                if (err)
                        return err;
        }

        if (spe->sample_instructions) {
                err = arm_spe__synth_instruction_sample(speq, spe->instructions_id, data_src);
                if (err)
                        return err;
        }

        return 0;
}

static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
{
        struct arm_spe *spe = speq->spe;
        struct arm_spe_record *record;
        int ret;

        if (!spe->kernel_start)
                spe->kernel_start = machine__kernel_start(spe->machine);

        while (1) {
                /*
                 * The usual logic would be to decode the packets first and
                 * then synthesize samples based on the record; here the flow
                 * is reversed: arm_spe_sample() is called to synthesize
                 * samples before arm_spe_decode().
                 *
                 * There are two reasons for this:
                 * 1. When the queue is set up in arm_spe__setup_queue(), the
                 * trace data has already been decoded and a record generated,
                 * but no sample has been synthesized for it yet; the sample
                 * for that leftover record is synthesized here.
                 * 2. After decoding trace data, the record timestamp must be
                 * compared with the timestamp of the coming perf event.  If
                 * the record timestamp is later, we bail out and push the
                 * record onto the auxtrace heap, deferring sample synthesis
                 * until the next time we get here; this keeps samples from
                 * Arm SPE trace data and other perf events correlated with
                 * correct time ordering.
                 */

                /*
                 * Update pid/tid info.
                 */
                record = &speq->decoder->record;
                if (!spe->timeless_decoding && record->context_id != (u64)-1) {
                        ret = arm_spe_set_tid(speq, record->context_id);
                        if (ret)
                                return ret;

                        spe->use_ctx_pkt_for_pid = true;
                }

                ret = arm_spe_sample(speq);
                if (ret)
                        return ret;

                ret = arm_spe_decode(speq->decoder);
                if (!ret) {
                        pr_debug("No data or all data has been processed.\n");
                        return 1;
                }

                /*
                 * A decode error was detected in the SPE trace data; continue
                 * with the next chunk of trace data to find more records.
                 */
                if (ret < 0)
                        continue;

                record = &speq->decoder->record;

                /* Update timestamp for the last record */
                if (record->timestamp > speq->timestamp)
                        speq->timestamp = record->timestamp;

                /*
                 * If the timestamp of the queue is later than the timestamp
                 * of the coming perf event, bail out so the perf event can
                 * be processed ahead of the trace data.
                 */
                if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
                        *timestamp = speq->timestamp;
                        return 0;
                }
        }

        return 0;
}

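/*
 * Lazily create the per-queue decode context and, for timed decoding,
 * decode up to the first record so the queue can be placed on the
 * auxtrace heap ordered by its first timestamp.
 */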
static int arm_spe__setup_queue(struct arm_spe *spe,
                                struct auxtrace_queue *queue,
                                unsigned int queue_nr)
{
        struct arm_spe_queue *speq = queue->priv;
        struct arm_spe_record *record;

        if (list_empty(&queue->head) || speq)
                return 0;

        speq = arm_spe__alloc_queue(spe, queue_nr);

        if (!speq)
                return -ENOMEM;

        queue->priv = speq;

        if (queue->cpu != -1)
                speq->cpu = queue->cpu;

        if (!speq->on_heap) {
                int ret;

                if (spe->timeless_decoding)
                        return 0;

retry:
                ret = arm_spe_decode(speq->decoder);

                if (!ret)
                        return 0;

                if (ret < 0)
                        goto retry;

                record = &speq->decoder->record;

                speq->timestamp = record->timestamp;
                ret = auxtrace_heap__add(&spe->heap, queue_nr, speq->timestamp);
                if (ret)
                        return ret;
                speq->on_heap = true;
        }

        return 0;
}

static int arm_spe__setup_queues(struct arm_spe *spe)
{
        unsigned int i;
        int ret;

        for (i = 0; i < spe->queues.nr_queues; i++) {
                ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int arm_spe__update_queues(struct arm_spe *spe)
{
        if (spe->queues.new_data) {
                spe->queues.new_data = false;
                return arm_spe__setup_queues(spe);
        }

        return 0;
}

static bool arm_spe__is_timeless_decoding(struct arm_spe *spe)
{
        struct evsel *evsel;
        struct evlist *evlist = spe->session->evlist;
        bool timeless_decoding = true;

        /*
         * Loop through the list of events and check whether any of them has
         * the time bit set; decoding is timeless only if no event has
         * PERF_SAMPLE_TIME set.
         */
        evlist__for_each_entry(evlist, evsel) {
                if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
                        timeless_decoding = false;
        }

        return timeless_decoding;
}

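/*
 * Process queued trace data in timestamp order up to the given timestamp,
 * using the auxtrace heap to always pick the queue with the oldest
 * pending record.  Queues with data remaining are pushed back onto the
 * heap with their updated timestamps.
 */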
static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
{
        unsigned int queue_nr;
        u64 ts;
        int ret;

        while (1) {
                struct auxtrace_queue *queue;
                struct arm_spe_queue *speq;

                if (!spe->heap.heap_cnt)
                        return 0;

                if (spe->heap.heap_array[0].ordinal >= timestamp)
                        return 0;

                queue_nr = spe->heap.heap_array[0].queue_nr;
                queue = &spe->queues.queue_array[queue_nr];
                speq = queue->priv;

                auxtrace_heap__pop(&spe->heap);

                if (spe->heap.heap_cnt) {
                        ts = spe->heap.heap_array[0].ordinal + 1;
                        if (ts > timestamp)
                                ts = timestamp;
                } else {
                        ts = timestamp;
                }

                /*
                 * A previous context-switch event has set pid/tid in the
                 * machine's context, so here we need to update the pid/tid
                 * in the thread and SPE queue.
                 */
                if (!spe->use_ctx_pkt_for_pid)
                        arm_spe_set_pid_tid_cpu(spe, queue);

                ret = arm_spe_run_decoder(speq, &ts);
                if (ret < 0) {
                        auxtrace_heap__add(&spe->heap, queue_nr, ts);
                        return ret;
                }

                if (!ret) {
                        ret = auxtrace_heap__add(&spe->heap, queue_nr, ts);
                        if (ret < 0)
                                return ret;
                } else {
                        speq->on_heap = false;
                }
        }

        return 0;
}

static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid,
                                           u64 time_)
{
        struct auxtrace_queues *queues = &spe->queues;
        unsigned int i;
        u64 ts = 0;

        for (i = 0; i < queues->nr_queues; i++) {
                struct auxtrace_queue *queue = &spe->queues.queue_array[i];
                struct arm_spe_queue *speq = queue->priv;

                if (speq && (tid == -1 || speq->tid == tid)) {
                        speq->time = time_;
                        arm_spe_set_pid_tid_cpu(spe, queue);
                        arm_spe_run_decoder(speq, &ts);
                }
        }
        return 0;
}

static int arm_spe_context_switch(struct arm_spe *spe, union perf_event *event,
                                  struct perf_sample *sample)
{
        pid_t pid, tid;
        int cpu;

        if (!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT))
                return 0;

        pid = event->context_switch.next_prev_pid;
        tid = event->context_switch.next_prev_tid;
        cpu = sample->cpu;

        if (tid == -1)
                pr_warning("context_switch event has no tid\n");

        return machine__set_current_tid(spe->machine, cpu, pid, tid);
}

static int arm_spe_process_event(struct perf_session *session,
                                 union perf_event *event,
                                 struct perf_sample *sample,
                                 struct perf_tool *tool)
{
        int err = 0;
        u64 timestamp;
        struct arm_spe *spe = container_of(session->auxtrace,
                                           struct arm_spe, auxtrace);

        if (dump_trace)
                return 0;

        if (!tool->ordered_events) {
                pr_err("SPE trace requires ordered events\n");
                return -EINVAL;
        }

        if (sample->time && (sample->time != (u64) -1))
                timestamp = perf_time_to_tsc(sample->time, &spe->tc);
        else
                timestamp = 0;

        if (timestamp || spe->timeless_decoding) {
                err = arm_spe__update_queues(spe);
                if (err)
                        return err;
        }

        if (spe->timeless_decoding) {
                if (event->header.type == PERF_RECORD_EXIT) {
                        err = arm_spe_process_timeless_queues(spe,
                                        event->fork.tid,
                                        sample->time);
                }
        } else if (timestamp) {
                err = arm_spe_process_queues(spe, timestamp);
                if (err)
                        return err;

                if (!spe->use_ctx_pkt_for_pid &&
                    (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE ||
                     event->header.type == PERF_RECORD_SWITCH))
                        err = arm_spe_context_switch(spe, event, sample);
        }

        return err;
}

static int arm_spe_process_auxtrace_event(struct perf_session *session,
                                          union perf_event *event,
                                          struct perf_tool *tool __maybe_unused)
{
        struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
                                           auxtrace);

        if (!spe->data_queued) {
                struct auxtrace_buffer *buffer;
                off_t data_offset;
                int fd = perf_data__fd(session->data);
                int err;

                if (perf_data__is_pipe(session->data)) {
                        data_offset = 0;
                } else {
                        data_offset = lseek(fd, 0, SEEK_CUR);
                        if (data_offset == -1)
                                return -errno;
                }

                err = auxtrace_queues__add_event(&spe->queues, session, event,
                                                 data_offset, &buffer);
                if (err)
                        return err;

                /* Dump here now that we have copied a piped trace out of the pipe */
                if (dump_trace) {
                        if (auxtrace_buffer__get_data(buffer, fd)) {
                                arm_spe_dump_event(spe, buffer->data,
                                                   buffer->size);
                                auxtrace_buffer__put_data(buffer);
                        }
                }
        }

        return 0;
}

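/*
 * Flush at the end of the session: decode whatever trace data remains in
 * the queues, and warn if no CONTEXT packets were seen, since tid
 * matching then relies on context-switch events alone.
 */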
static int arm_spe_flush(struct perf_session *session,
                         struct perf_tool *tool)
{
        struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
                                           auxtrace);
        int ret;

        if (dump_trace)
                return 0;

        if (!tool->ordered_events)
                return -EINVAL;

        ret = arm_spe__update_queues(spe);
        if (ret < 0)
                return ret;

        if (spe->timeless_decoding)
                return arm_spe_process_timeless_queues(spe, -1,
                                                       MAX_TIMESTAMP - 1);

        ret = arm_spe_process_queues(spe, MAX_TIMESTAMP);
        if (ret)
                return ret;

        if (!spe->use_ctx_pkt_for_pid)
                ui__warning("Arm SPE CONTEXT packets not found in the traces.\n"
                            "Matching of TIDs to SPE events could be inaccurate.\n");

        return 0;
}

static void arm_spe_free_queue(void *priv)
{
        struct arm_spe_queue *speq = priv;

        if (!speq)
                return;
        thread__zput(speq->thread);
        arm_spe_decoder_free(speq->decoder);
        zfree(&speq->event_buf);
        free(speq);
}

static void arm_spe_free_events(struct perf_session *session)
{
        struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
                                           auxtrace);
        struct auxtrace_queues *queues = &spe->queues;
        unsigned int i;

        for (i = 0; i < queues->nr_queues; i++) {
                arm_spe_free_queue(queues->queue_array[i].priv);
                queues->queue_array[i].priv = NULL;
        }
        auxtrace_queues__free(queues);
}

static void arm_spe_free(struct perf_session *session)
{
        struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
                                           auxtrace);

        auxtrace_heap__free(&spe->heap);
        arm_spe_free_events(session);
        session->auxtrace = NULL;
        free(spe);
}

static bool arm_spe_evsel_is_auxtrace(struct perf_session *session,
                                      struct evsel *evsel)
{
        struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, auxtrace);

        return evsel->core.attr.type == spe->pmu_type;
}

static const char * const arm_spe_info_fmts[] = {
        [ARM_SPE_PMU_TYPE]              = "  PMU Type           %"PRId64"\n",
};

static void arm_spe_print_info(__u64 *arr)
{
        if (!dump_trace)
                return;

        fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);
}

struct arm_spe_synth {
        struct perf_tool dummy_tool;
        struct perf_session *session;
};

static int arm_spe_event_synth(struct perf_tool *tool,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused,
                               struct machine *machine __maybe_unused)
{
        struct arm_spe_synth *arm_spe_synth =
                      container_of(tool, struct arm_spe_synth, dummy_tool);

        return perf_session__deliver_synth_event(arm_spe_synth->session,
                                                 event, NULL);
}

static int arm_spe_synth_event(struct perf_session *session,
                               struct perf_event_attr *attr, u64 id)
{
        struct arm_spe_synth arm_spe_synth;

        memset(&arm_spe_synth, 0, sizeof(struct arm_spe_synth));
        arm_spe_synth.session = session;

        return perf_event__synthesize_attr(&arm_spe_synth.dummy_tool, attr, 1,
                                           &id, arm_spe_event_synth);
}

static void arm_spe_set_event_name(struct evlist *evlist, u64 id,
                                   const char *name)
{
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.id && evsel->core.id[0] == id) {
                        if (evsel->name)
                                zfree(&evsel->name);
                        evsel->name = strdup(name);
                        break;
                }
        }
}

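/*
 * Create the synthetic perf events that decoded SPE records will be
 * attributed to.  The sample type and exclude bits are derived from the
 * original SPE evsel, and each synthetic event gets an id at a fixed
 * offset from the evsel's id.
 */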
static int
arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
{
        struct evlist *evlist = session->evlist;
        struct evsel *evsel;
        struct perf_event_attr attr;
        bool found = false;
        u64 id;
        int err;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.attr.type == spe->pmu_type) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                pr_debug("No selected events with SPE trace data\n");
                return 0;
        }

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.size = sizeof(struct perf_event_attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.sample_type = evsel->core.attr.sample_type &
                                (PERF_SAMPLE_MASK | PERF_SAMPLE_PHYS_ADDR);
        attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                            PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC |
                            PERF_SAMPLE_WEIGHT | PERF_SAMPLE_ADDR;
        if (spe->timeless_decoding)
                attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
        else
                attr.sample_type |= PERF_SAMPLE_TIME;

        spe->sample_type = attr.sample_type;

        attr.exclude_user = evsel->core.attr.exclude_user;
        attr.exclude_kernel = evsel->core.attr.exclude_kernel;
        attr.exclude_hv = evsel->core.attr.exclude_hv;
        attr.exclude_host = evsel->core.attr.exclude_host;
        attr.exclude_guest = evsel->core.attr.exclude_guest;
        attr.sample_id_all = evsel->core.attr.sample_id_all;
        attr.read_format = evsel->core.attr.read_format;

        /* create new id val to be a fixed offset from evsel id */
        id = evsel->core.id[0] + 1000000000;

        if (!id)
                id = 1;

        if (spe->synth_opts.flc) {
                spe->sample_flc = true;

                /* Level 1 data cache miss */
                err = arm_spe_synth_event(session, &attr, id);
                if (err)
                        return err;
                spe->l1d_miss_id = id;
                arm_spe_set_event_name(evlist, id, "l1d-miss");
                id += 1;

                /* Level 1 data cache access */
                err = arm_spe_synth_event(session, &attr, id);
                if (err)
                        return err;
                spe->l1d_access_id = id;
                arm_spe_set_event_name(evlist, id, "l1d-access");
                id += 1;
        }

        if (spe->synth_opts.llc) {
                spe->sample_llc = true;

                /* Last level cache miss */
                err = arm_spe_synth_event(session, &attr, id);
                if (err)
                        return err;
                spe->llc_miss_id = id;
                arm_spe_set_event_name(evlist, id, "llc-miss");
                id += 1;

                /* Last level cache access */
                err = arm_spe_synth_event(session, &attr, id);
                if (err)
                        return err;
                spe->llc_access_id = id;
                arm_spe_set_event_name(evlist, id, "llc-access");
                id += 1;
        }

        if (spe->synth_opts.tlb) {
                spe->sample_tlb = true;

                /* TLB miss */
                err = arm_spe_synth_event(session, &attr, id);
                if (err)
                        return err;
                spe->tlb_miss_id = id;
                arm_spe_set_event_name(evlist, id, "tlb-miss");
                id += 1;

                /* TLB access */
                err = arm_spe_synth_event(session, &attr, id);
                if (err)
                        return err;
                spe->tlb_access_id = id;
                arm_spe_set_event_name(evlist, id, "tlb-access");
                id += 1;
        }

        if (spe->synth_opts.branches) {
                spe->sample_branch = true;

                /* Branch miss */
                err = arm_spe_synth_event(session, &attr, id);
                if (err)
                        return err;
                spe->branch_miss_id = id;
                arm_spe_set_event_name(evlist, id, "branch-miss");
                id += 1;
        }

        if (spe->synth_opts.remote_access) {
                spe->sample_remote_access = true;

                /* Remote access */
                err = arm_spe_synth_event(session, &attr, id);
                if (err)
                        return err;
                spe->remote_access_id = id;
                arm_spe_set_event_name(evlist, id, "remote-access");
                id += 1;
        }

        if (spe->synth_opts.mem) {
                spe->sample_memory = true;

                err = arm_spe_synth_event(session, &attr, id);
                if (err)
                        return err;
                spe->memory_id = id;
                arm_spe_set_event_name(evlist, id, "memory");
                id += 1;
        }

        if (spe->synth_opts.instructions) {
                if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) {
                        pr_warning("Only instruction-based sampling period is currently supported by Arm SPE.\n");
                        goto synth_instructions_out;
                }
                if (spe->synth_opts.period > 1)
                        pr_warning("Arm SPE has a hardware-based sample period.\n"
                                   "Additional instruction events will be discarded by --itrace\n");

                spe->sample_instructions = true;
                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
                attr.sample_period = spe->synth_opts.period;
                spe->instructions_sample_period = attr.sample_period;
                err = arm_spe_synth_event(session, &attr, id);
                if (err)
                        return err;
                spe->instructions_id = id;
                arm_spe_set_event_name(evlist, id, "instructions");
        }
synth_instructions_out:

        return 0;
}

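/*
 * Entry point for PERF_RECORD_AUXTRACE_INFO: allocate the arm_spe session
 * object, hook up the auxtrace callbacks, set up timestamp conversion and
 * synthetic events, and queue any indexed trace data.
 */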
int arm_spe_process_auxtrace_info(union perf_event *event,
                                  struct perf_session *session)
{
        struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
        size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX;
        struct perf_record_time_conv *tc = &session->time_conv;
        const char *cpuid = perf_env__cpuid(session->evlist->env);
        u64 midr = strtol(cpuid, NULL, 16);
        struct arm_spe *spe;
        int err;

        if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
                                        min_sz)
                return -EINVAL;

        spe = zalloc(sizeof(struct arm_spe));
        if (!spe)
                return -ENOMEM;

        err = auxtrace_queues__init(&spe->queues);
        if (err)
                goto err_free;

        spe->session = session;
        spe->machine = &session->machines.host; /* No kvm support */
        spe->auxtrace_type = auxtrace_info->type;
        spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
        spe->midr = midr;

        spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);

        /*
         * The synthesized event PERF_RECORD_TIME_CONV has already been
         * handled and the hardware clock parameters are stored in the
         * session context.  Pass these parameters to the struct
         * perf_tsc_conversion in "spe->tc", which is used later to convert
         * between the clock counter and timestamps.
         *
         * For backward compatibility, copy the fields starting from
         * "time_cycles" only if they are contained in the event.
         */
        spe->tc.time_shift = tc->time_shift;
        spe->tc.time_mult = tc->time_mult;
        spe->tc.time_zero = tc->time_zero;

        if (event_contains(*tc, time_cycles)) {
                spe->tc.time_cycles = tc->time_cycles;
                spe->tc.time_mask = tc->time_mask;
                spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
                spe->tc.cap_user_time_short = tc->cap_user_time_short;
        }

        spe->auxtrace.process_event = arm_spe_process_event;
        spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
        spe->auxtrace.flush_events = arm_spe_flush;
        spe->auxtrace.free_events = arm_spe_free_events;
        spe->auxtrace.free = arm_spe_free;
        spe->auxtrace.evsel_is_auxtrace = arm_spe_evsel_is_auxtrace;
        session->auxtrace = &spe->auxtrace;

        arm_spe_print_info(&auxtrace_info->priv[0]);

        if (dump_trace)
                return 0;

        if (session->itrace_synth_opts && session->itrace_synth_opts->set)
                spe->synth_opts = *session->itrace_synth_opts;
        else
                itrace_synth_opts__set_default(&spe->synth_opts, false);

        err = arm_spe_synth_events(spe, session);
        if (err)
                goto err_free_queues;

        err = auxtrace_queues__process_index(&spe->queues, session);
        if (err)
                goto err_free_queues;

        if (spe->queues.populated)
                spe->data_queued = true;

        return 0;

err_free_queues:
        auxtrace_queues__free(&spe->queues);
        session->auxtrace = NULL;
err_free:
        free(spe);
        return err;
}