1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
6 * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
7 * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
9 * Data type definitions, declarations, prototypes.
11 * Started by: Thomas Gleixner and Ingo Molnar
13 * For licensing details see kernel-base/COPYING
15 #ifndef _UAPI_LINUX_PERF_EVENT_H
16 #define _UAPI_LINUX_PERF_EVENT_H
18 #include <linux/types.h>
19 #include <linux/ioctl.h>
20 #include <asm/byteorder.h>
23 * User-space ABI bits:
30 PERF_TYPE_HARDWARE = 0,
31 PERF_TYPE_SOFTWARE = 1,
32 PERF_TYPE_TRACEPOINT = 2,
33 PERF_TYPE_HW_CACHE = 3,
35 PERF_TYPE_BREAKPOINT = 5,
37 PERF_TYPE_MAX, /* non-ABI */
41 * Generalized performance event event_id types, used by the
42 * attr.config parameter of the sys_perf_event_open()
47 * Common hardware events, generalized by the kernel:
49 PERF_COUNT_HW_CPU_CYCLES = 0,
50 PERF_COUNT_HW_INSTRUCTIONS = 1,
51 PERF_COUNT_HW_CACHE_REFERENCES = 2,
52 PERF_COUNT_HW_CACHE_MISSES = 3,
53 PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
54 PERF_COUNT_HW_BRANCH_MISSES = 5,
55 PERF_COUNT_HW_BUS_CYCLES = 6,
56 PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
57 PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
58 PERF_COUNT_HW_REF_CPU_CYCLES = 9,
60 PERF_COUNT_HW_MAX, /* non-ABI */
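/*
 * Example (editorial sketch, not part of the ABI header): counting one
 * generalized hardware event for the calling thread. glibc provides no
 * wrapper for sys_perf_event_open(), so a raw syscall(2) is used; the
 * perf_event_open() helper below is a local convenience, and error
 * handling is elided for brevity.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <string.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
 *				    int cpu, int group_fd, unsigned long flags)
 *	{
 *		return syscall(__NR_perf_event_open, attr, pid, cpu,
 *			       group_fd, flags);
 *	}
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr;
 *		uint64_t count;
 *		int fd;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.type	    = PERF_TYPE_HARDWARE;
 *		attr.size	    = sizeof(attr);
 *		attr.config	    = PERF_COUNT_HW_INSTRUCTIONS;
 *		attr.exclude_kernel = 1;
 *		attr.exclude_hv	    = 1;
 *
 *		fd = perf_event_open(&attr, 0, -1, -1, 0); // this thread, any CPU
 *		// ... workload to be measured ...
 *		read(fd, &count, sizeof(count)); // plain u64, read_format == 0
 *		printf("instructions: %llu\n", (unsigned long long)count);
 *		close(fd);
 *		return 0;
 *	}
 */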
64 * Generalized hardware cache events:
66 * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
67 * { read, write, prefetch } x
68 * { accesses, misses }
70 enum perf_hw_cache_id {
71 PERF_COUNT_HW_CACHE_L1D = 0,
72 PERF_COUNT_HW_CACHE_L1I = 1,
73 PERF_COUNT_HW_CACHE_LL = 2,
74 PERF_COUNT_HW_CACHE_DTLB = 3,
75 PERF_COUNT_HW_CACHE_ITLB = 4,
76 PERF_COUNT_HW_CACHE_BPU = 5,
77 PERF_COUNT_HW_CACHE_NODE = 6,
79 PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
82 enum perf_hw_cache_op_id {
83 PERF_COUNT_HW_CACHE_OP_READ = 0,
84 PERF_COUNT_HW_CACHE_OP_WRITE = 1,
85 PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
87 PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
90 enum perf_hw_cache_op_result_id {
91 PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
92 PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
94 PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
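/*
 * Example (editorial sketch): a PERF_TYPE_HW_CACHE event is selected by
 * packing the three enums above into attr.config, using the
 * (id) | (op << 8) | (result << 16) encoding documented in
 * perf_event_open(2). E.g. L1-D read misses:
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */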
98 * Special "software" events provided by the kernel, even if the hardware
99 * does not support performance events. These events measure various
100 * physical and software events of the kernel (and allow profiling them as well):
104 PERF_COUNT_SW_CPU_CLOCK = 0,
105 PERF_COUNT_SW_TASK_CLOCK = 1,
106 PERF_COUNT_SW_PAGE_FAULTS = 2,
107 PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
108 PERF_COUNT_SW_CPU_MIGRATIONS = 4,
109 PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
110 PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
111 PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
112 PERF_COUNT_SW_EMULATION_FAULTS = 8,
113 PERF_COUNT_SW_DUMMY = 9,
114 PERF_COUNT_SW_BPF_OUTPUT = 10,
116 PERF_COUNT_SW_MAX, /* non-ABI */
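/*
 * Example (editorial sketch): software events need no PMU hardware, so they
 * are a useful fallback (e.g. inside virtual machines). Reusing the
 * perf_event_open() wrapper sketched earlier, page faults of the calling
 * thread:
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type   = PERF_TYPE_SOFTWARE;
 *	attr.size   = sizeof(attr);
 *	attr.config = PERF_COUNT_SW_PAGE_FAULTS;
 *	fd = perf_event_open(&attr, 0, -1, -1, 0);
 */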
120 * Bits that can be set in attr.sample_type to request information
121 * in the overflow packets.
123 enum perf_event_sample_format {
124 PERF_SAMPLE_IP = 1U << 0,
125 PERF_SAMPLE_TID = 1U << 1,
126 PERF_SAMPLE_TIME = 1U << 2,
127 PERF_SAMPLE_ADDR = 1U << 3,
128 PERF_SAMPLE_READ = 1U << 4,
129 PERF_SAMPLE_CALLCHAIN = 1U << 5,
130 PERF_SAMPLE_ID = 1U << 6,
131 PERF_SAMPLE_CPU = 1U << 7,
132 PERF_SAMPLE_PERIOD = 1U << 8,
133 PERF_SAMPLE_STREAM_ID = 1U << 9,
134 PERF_SAMPLE_RAW = 1U << 10,
135 PERF_SAMPLE_BRANCH_STACK = 1U << 11,
136 PERF_SAMPLE_REGS_USER = 1U << 12,
137 PERF_SAMPLE_STACK_USER = 1U << 13,
138 PERF_SAMPLE_WEIGHT = 1U << 14,
139 PERF_SAMPLE_DATA_SRC = 1U << 15,
140 PERF_SAMPLE_IDENTIFIER = 1U << 16,
141 PERF_SAMPLE_TRANSACTION = 1U << 17,
142 PERF_SAMPLE_REGS_INTR = 1U << 18,
143 PERF_SAMPLE_PHYS_ADDR = 1U << 19,
145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
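/*
 * Example (editorial sketch): a sampling event. The bits chosen here select
 * which fields appear in each PERF_RECORD_SAMPLE, laid out in the order
 * shown for PERF_RECORD_SAMPLE below:
 *
 *	attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *			     PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD;
 *	attr.freq	   = 1;		// use sample_freq, not sample_period
 *	attr.sample_freq   = 1000;	// aim for ~1000 samples/sec
 *	attr.wakeup_events = 1;		// wake up poll() after every sample
 */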
149 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK is set
151 * If the user does not pass priv level information via branch_sample_type,
152 * the kernel uses the event's priv level. Branch and event priv levels do
153 * not have to match. Branch priv level is checked for permissions.
155 * The branch types can be combined, however BRANCH_ANY covers all types
156 * of branches and therefore it supersedes all the other types.
158 enum perf_branch_sample_type_shift {
159 PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
160 PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
161 PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
163 PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
164 PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
165 PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
166 PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
167 PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
168 PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
169 PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
170 PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
172 PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
173 PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
174 PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */
176 PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */
177 PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */
179 PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */
181 PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
184 enum perf_branch_sample_type {
185 PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
186 PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
187 PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
189 PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
190 PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
191 PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
192 PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
193 PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
194 PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
195 PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
196 PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
198 PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
199 PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
200 PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
202 PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
203 PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
205 PERF_SAMPLE_BRANCH_TYPE_SAVE =
206 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
208 PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
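/*
 * Example (editorial sketch): branch stack sampling. PERF_SAMPLE_BRANCH_STACK
 * must be set in attr.sample_type, and branch_sample_type selects which
 * branches are recorded; if no priv-level bits are given, the event's own
 * priv level is used:
 *
 *	attr.sample_type	|= PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type	 = PERF_SAMPLE_BRANCH_ANY |
 *				   PERF_SAMPLE_BRANCH_USER;
 */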
212 * Common flow change classification
215 PERF_BR_UNKNOWN = 0, /* unknown */
216 PERF_BR_COND = 1, /* conditional */
217 PERF_BR_UNCOND = 2, /* unconditional */
218 PERF_BR_IND = 3, /* indirect */
219 PERF_BR_CALL = 4, /* function call */
220 PERF_BR_IND_CALL = 5, /* indirect function call */
221 PERF_BR_RET = 6, /* function return */
222 PERF_BR_SYSCALL = 7, /* syscall */
223 PERF_BR_SYSRET = 8, /* syscall return */
224 PERF_BR_COND_CALL = 9, /* conditional function call */
225 PERF_BR_COND_RET = 10, /* conditional function return */
229 #define PERF_SAMPLE_BRANCH_PLM_ALL \
230 (PERF_SAMPLE_BRANCH_USER|\
231 PERF_SAMPLE_BRANCH_KERNEL|\
232 PERF_SAMPLE_BRANCH_HV)
235 * Values to determine ABI of the registers dump.
237 enum perf_sample_regs_abi {
238 PERF_SAMPLE_REGS_ABI_NONE = 0,
239 PERF_SAMPLE_REGS_ABI_32 = 1,
240 PERF_SAMPLE_REGS_ABI_64 = 2,
244 * Values for the memory transaction event qualifier, mostly for
245 * abort events. Multiple bits can be set.
248 PERF_TXN_ELISION = (1 << 0), /* From elision */
249 PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */
250 PERF_TXN_SYNC = (1 << 2), /* Instruction is related */
251 PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */
252 PERF_TXN_RETRY = (1 << 4), /* Retry possible */
253 PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */
254 PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
255 PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */
257 PERF_TXN_MAX = (1 << 8), /* non-ABI */
259 /* bits 32..63 are reserved for the abort code */
261 PERF_TXN_ABORT_MASK = (0xffffffffULL << 32),
262 PERF_TXN_ABORT_SHIFT = 32,
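/*
 * Example (editorial sketch): decoding a PERF_SAMPLE_TRANSACTION value, e.g.
 * separating conflict from capacity aborts and extracting the
 * architecture-specific abort code from the upper 32 bits:
 *
 *	static uint32_t txn_abort_code(uint64_t txn)
 *	{
 *		return (txn & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT;
 *	}
 *
 *	// e.g.: if (txn & PERF_TXN_CONFLICT) the abort was due to a conflict;
 *	// if (txn & (PERF_TXN_CAPACITY_READ | PERF_TXN_CAPACITY_WRITE)) it
 *	// was a capacity abort.
 */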
266 * The format of the data returned by read() on a perf event fd,
267 * as specified by attr.read_format:
269 * struct read_format {
271 * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
272 * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
273 * { u64 id; } && PERF_FORMAT_ID
274 * } && !PERF_FORMAT_GROUP
277 * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
278 * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
280 * { u64 id; } && PERF_FORMAT_ID
282 * } && PERF_FORMAT_GROUP
285 enum perf_event_read_format {
286 PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
287 PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
288 PERF_FORMAT_ID = 1U << 2,
289 PERF_FORMAT_GROUP = 1U << 3,
291 PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
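/*
 * Example (editorial sketch): with attr.read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING and no
 * PERF_FORMAT_GROUP, read() fills a buffer matching the layout above. The
 * struct name below is illustrative, not part of the ABI; the two times can
 * be used to scale the count when the event was multiplexed:
 *
 *	struct read_value {
 *		uint64_t value;
 *		uint64_t time_enabled;
 *		uint64_t time_running;
 *	} rv;
 *
 *	read(fd, &rv, sizeof(rv));
 *	double scaled = rv.time_running ?
 *		(double)rv.value * rv.time_enabled / rv.time_running : 0.0;
 */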
294 #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
295 #define PERF_ATTR_SIZE_VER1 72 /* add: config2 */
296 #define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */
297 #define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
298 /* add: sample_stack_user */
299 #define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
300 #define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
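/*
 * Example note (editorial sketch): userspace sets attr.size to the size it
 * was built against. On an older kernel, a larger attr with non-zero trailing
 * bytes fails with E2BIG and, per perf_event_open(2), the kernel writes the
 * size it supports back into attr.size, so the call can be retried with a
 * zero-padded, truncated structure:
 *
 *	attr.size = sizeof(attr);
 *	if (perf_event_open(&attr, 0, -1, -1, 0) < 0 && errno == E2BIG) {
 *		// attr.size now holds the largest size this kernel accepts
 *	}
 */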
303 * Hardware event_id to monitor via a performance monitoring event:
305 * @sample_max_stack: Max number of frame pointers in a callchain,
306 * should be < /proc/sys/kernel/perf_event_max_stack
308 struct perf_event_attr {
311 * Major type: hardware/software/tracepoint/etc.
316 * Size of the attr structure, for fwd/bwd compat.
321 * Type specific configuration information.
333 __u64 disabled : 1, /* off by default */
334 inherit : 1, /* children inherit it */
335 pinned : 1, /* must always be on PMU */
336 exclusive : 1, /* only group on PMU */
337 exclude_user : 1, /* don't count user */
338 exclude_kernel : 1, /* ditto kernel */
339 exclude_hv : 1, /* ditto hypervisor */
340 exclude_idle : 1, /* don't count when idle */
341 mmap : 1, /* include mmap data */
342 comm : 1, /* include comm data */
343 freq : 1, /* use freq, not period */
344 inherit_stat : 1, /* per task counts */
345 enable_on_exec : 1, /* next exec enables */
346 task : 1, /* trace fork/exit */
347 watermark : 1, /* wakeup_watermark */
351 * 0 - SAMPLE_IP can have arbitrary skid
352 * 1 - SAMPLE_IP must have constant skid
353 * 2 - SAMPLE_IP requested to have 0 skid
354 * 3 - SAMPLE_IP must have 0 skid
356 * See also PERF_RECORD_MISC_EXACT_IP
358 precise_ip : 2, /* skid constraint */
359 mmap_data : 1, /* non-exec mmap data */
360 sample_id_all : 1, /* sample_type all events */
362 exclude_host : 1, /* don't count in host */
363 exclude_guest : 1, /* don't count in guest */
365 exclude_callchain_kernel : 1, /* exclude kernel callchains */
366 exclude_callchain_user : 1, /* exclude user callchains */
367 mmap2 : 1, /* include mmap with inode data */
368 comm_exec : 1, /* flag comm events that are due to an exec */
369 use_clockid : 1, /* use @clockid for time fields */
370 context_switch : 1, /* context switch data */
371 write_backward : 1, /* Write ring buffer from end to beginning */
372 namespaces : 1, /* include namespaces data */
376 __u32 wakeup_events; /* wakeup every n events */
377 __u32 wakeup_watermark; /* bytes before wakeup */
383 __u64 config1; /* extension of config */
387 __u64 config2; /* extension of config1 */
389 __u64 branch_sample_type; /* enum perf_branch_sample_type */
392 * Defines set of user regs to dump on samples.
393 * See asm/perf_regs.h for details.
395 __u64 sample_regs_user;
398 * Defines size of the user stack to dump on samples.
400 __u32 sample_stack_user;
404 * Defines set of regs to dump for each sample
406 * - precise = 0: PMU interrupt
407 * - precise > 0: sampled instruction
409 * See asm/perf_regs.h for details.
411 __u64 sample_regs_intr;
414 * Wakeup watermark for AUX area
417 __u16 sample_max_stack;
418 __u16 __reserved_2; /* align to __u64 */
421 #define perf_flags(attr) (*(&(attr)->read_format + 1))
424 * Ioctls that can be done on a perf event fd:
426 #define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
427 #define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
428 #define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
429 #define PERF_EVENT_IOC_RESET _IO ('$', 3)
430 #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
431 #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
432 #define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
433 #define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
434 #define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
435 #define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
437 enum perf_event_ioc_flags {
438 PERF_IOC_FLAG_GROUP = 1U << 0,
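/*
 * Example (editorial sketch): bracketing a region of interest with the
 * ioctls above, assuming the event (or group leader) was created with
 * attr.disabled = 1. Passing PERF_IOC_FLAG_GROUP as the argument applies
 * the operation to all members of the event group, not just this fd:
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	// ... code under measurement ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 *	read(fd, &count, sizeof(count));
 */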
442 * Structure of the page that can be mapped via mmap
444 struct perf_event_mmap_page {
445 __u32 version; /* version number of this structure */
446 __u32 compat_version; /* lowest version this is compat with */
449 * Bits needed to read the hw events in user-space.
451 * u32 seq, time_mult, time_shift, index, width;
452 * u64 count, enabled, running;
453 * u64 cyc, time_offset;
460 * enabled = pc->time_enabled;
461 * running = pc->time_running;
463 * if (pc->cap_user_time && enabled != running) {
465 * time_offset = pc->time_offset;
466 * time_mult = pc->time_mult;
467 * time_shift = pc->time_shift;
471 * count = pc->offset;
472 * if (pc->cap_user_rdpmc && index) {
473 * width = pc->pmc_width;
474 * pmc = rdpmc(index - 1);
478 * } while (pc->lock != seq);
480 * NOTE: for obvious reasons this only works on self-monitoring processes.
483 __u32 lock; /* seqlock for synchronization */
484 __u32 index; /* hardware event identifier */
485 __s64 offset; /* add to hardware event value */
486 __u64 time_enabled; /* time event active */
487 __u64 time_running; /* time event on cpu */
491 __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
492 cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
494 cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
495 cap_user_time : 1, /* The time_* fields are used */
496 cap_user_time_zero : 1, /* The time_zero field is used */
502 * If cap_user_rdpmc this field provides the bit-width of the value
503 * read using the rdpmc() or equivalent instruction. This can be used
504 * to sign extend the result like:
506 * pmc <<= 64 - width;
507 * pmc >>= 64 - width; // signed shift right
513 * If cap_user_time the below fields can be used to compute the time
514 * delta since time_enabled (in ns) using rdtsc or similar.
519 * quot = (cyc >> time_shift);
520 * rem = cyc & (((u64)1 << time_shift) - 1);
521 * delta = time_offset + quot * time_mult +
522 * ((rem * time_mult) >> time_shift);
524 * Where time_offset, time_mult, time_shift and cyc are read in the
525 * seqcount loop described above. This delta can then be added to
526 * enabled and possibly running (if index), improving the scaling:
532 * quot = count / running;
533 * rem = count % running;
534 * count = quot * enabled + (rem * enabled) / running;
540 * If cap_user_time_zero, the hardware clock (e.g. TSC) can be calculated
541 * from sample timestamps.
543 * time = timestamp - time_zero;
544 * quot = time / time_mult;
545 * rem = time % time_mult;
546 * cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
550 * quot = cyc >> time_shift;
551 * rem = cyc & (((u64)1 << time_shift) - 1);
552 * timestamp = time_zero + quot * time_mult +
553 * ((rem * time_mult) >> time_shift);
556 __u32 size; /* Header size up to __reserved[] fields. */
559 * Hole for extension of the self monitor capabilities
562 __u8 __reserved[118*8+4]; /* align to 1k. */
565 * Control data for the mmap() data buffer.
567 * User-space reading the @data_head value should issue an smp_rmb()
568 * after reading this value.
570 * When the mapping is PROT_WRITE the @data_tail value should be
571 * written by userspace to reflect the last read data, after issuing
572 * an smp_mb() to separate the data read from the ->data_tail store.
573 * In this case the kernel will not overwrite unread data.
575 * See perf_output_put_handle() for the data ordering.
577 * data_{offset,size} indicate the location and size of the perf record
578 * buffer within the mmapped area.
580 __u64 data_head; /* head in the data section */
581 __u64 data_tail; /* user-space written tail */
582 __u64 data_offset; /* where the buffer starts */
583 __u64 data_size; /* data buffer size */
586 * The AUX area is defined by the aux_{offset,size} fields that should be set
587 * by userspace, so that
589 * aux_offset >= data_offset + data_size
591 * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
593 * Ring buffer pointers aux_{head,tail} have the same semantics as
594 * data_{head,tail} and same ordering rules apply.
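/*
 * Example (editorial sketch): consuming records from the data buffer. The
 * event fd is mmap()ed with 1 + 2^n pages, the first page being this
 * structure (needs <sys/mman.h> and <unistd.h> in addition to this header).
 * The atomics below stand in for the smp_rmb()/smp_mb() pairing described
 * above; records that wrap at the end of the buffer are not handled here:
 *
 *	size_t page = sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, (1 + (1 << 8)) * page, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct perf_event_mmap_page *mp = base;
 *	char *data = (char *)base + mp->data_offset;
 *
 *	uint64_t head = __atomic_load_n(&mp->data_head, __ATOMIC_ACQUIRE);
 *	uint64_t tail = mp->data_tail;
 *	while (tail < head) {
 *		struct perf_event_header *eh =
 *			(void *)(data + (tail % mp->data_size));
 *		// dispatch on eh->type (PERF_RECORD_SAMPLE, ...)
 *		tail += eh->size;
 *	}
 *	__atomic_store_n(&mp->data_tail, tail, __ATOMIC_RELEASE);
 */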
602 #define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
603 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
604 #define PERF_RECORD_MISC_KERNEL (1 << 0)
605 #define PERF_RECORD_MISC_USER (2 << 0)
606 #define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
607 #define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
608 #define PERF_RECORD_MISC_GUEST_USER (5 << 0)
611 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
613 #define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
615 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
616 * different events, so they can reuse the same bit position.
617 * Ditto PERF_RECORD_MISC_SWITCH_OUT.
619 #define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
620 #define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
621 #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
623 * Indicates that the content of PERF_SAMPLE_IP points to
624 * the actual instruction that triggered the event. See also
625 * perf_event_attr::precise_ip.
627 #define PERF_RECORD_MISC_EXACT_IP (1 << 14)
629 * Reserve the last bit to indicate some extended misc field
631 #define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
633 struct perf_event_header {
639 struct perf_ns_link_info {
653 NR_NAMESPACES, /* number of available namespaces */
656 enum perf_event_type {
659 * If perf_event_attr.sample_id_all is set then all event types will
660 * carry the sample_type selected fields that identify where/when an
661 * event took place (TID, TIME, ID, STREAM_ID, CPU, IDENTIFIER), as
662 * described for PERF_RECORD_SAMPLE below. They are stashed just after
663 * the perf_event_header and any fields already present for the record
664 * type, i.e. at the end of the payload. That way a newer perf.data
665 * file will still be supported by older perf tools, with these new
666 * optional fields being ignored.
669 * { u32 pid, tid; } && PERF_SAMPLE_TID
670 * { u64 time; } && PERF_SAMPLE_TIME
671 * { u64 id; } && PERF_SAMPLE_ID
672 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
673 * { u32 cpu, res; } && PERF_SAMPLE_CPU
674 * { u64 id; } && PERF_SAMPLE_IDENTIFIER
675 * } && perf_event_attr::sample_id_all
677 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
678 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
679 * relative to header.size.
683 * The MMAP events record the PROT_EXEC mappings so that we can
684 * correlate userspace IPs to code. They have the following structure:
687 * struct perf_event_header header;
694 * struct sample_id sample_id;
697 PERF_RECORD_MMAP = 1,
701 * struct perf_event_header header;
704 * struct sample_id sample_id;
707 PERF_RECORD_LOST = 2,
711 * struct perf_event_header header;
715 * struct sample_id sample_id;
718 PERF_RECORD_COMM = 3,
722 * struct perf_event_header header;
726 * struct sample_id sample_id;
729 PERF_RECORD_EXIT = 4,
733 * struct perf_event_header header;
737 * struct sample_id sample_id;
740 PERF_RECORD_THROTTLE = 5,
741 PERF_RECORD_UNTHROTTLE = 6,
745 * struct perf_event_header header;
749 * struct sample_id sample_id;
752 PERF_RECORD_FORK = 7,
756 * struct perf_event_header header;
759 * struct read_format values;
760 * struct sample_id sample_id;
763 PERF_RECORD_READ = 8,
767 * struct perf_event_header header;
770 * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
771 * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
772 * # is fixed relative to header.
775 * { u64 id; } && PERF_SAMPLE_IDENTIFIER
776 * { u64 ip; } && PERF_SAMPLE_IP
777 * { u32 pid, tid; } && PERF_SAMPLE_TID
778 * { u64 time; } && PERF_SAMPLE_TIME
779 * { u64 addr; } && PERF_SAMPLE_ADDR
780 * { u64 id; } && PERF_SAMPLE_ID
781 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
782 * { u32 cpu, res; } && PERF_SAMPLE_CPU
783 * { u64 period; } && PERF_SAMPLE_PERIOD
785 * { struct read_format values; } && PERF_SAMPLE_READ
788 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
791 * # The RAW record below is opaque data wrt the ABI
793 * # That is, the ABI doesn't make any promises wrt the
794 * # stability of its content, it may vary depending
795 * # on event, hardware, kernel version and phase of the moon.
798 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
802 * char data[size];}&& PERF_SAMPLE_RAW
805 * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
807 * { u64 abi; # enum perf_sample_regs_abi
808 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
812 * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
814 * { u64 weight; } && PERF_SAMPLE_WEIGHT
815 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
816 * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
817 * { u64 abi; # enum perf_sample_regs_abi
818 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
819 * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
822 PERF_RECORD_SAMPLE = 9,
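/*
 * Example (editorial sketch): the sample body is not self-describing; its
 * layout is a function of attr.sample_type. For sample_type ==
 * (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME) a record can be
 * overlaid with an ad-hoc struct (illustrative name, not part of the ABI):
 *
 *	struct sample_ip_tid_time {
 *		struct perf_event_header header;
 *		uint64_t ip;		// PERF_SAMPLE_IP
 *		uint32_t pid, tid;	// PERF_SAMPLE_TID
 *		uint64_t time;		// PERF_SAMPLE_TIME
 *	};
 *
 *	// given a record pointer eh with eh->type == PERF_RECORD_SAMPLE:
 *	struct sample_ip_tid_time *s = (void *)eh;
 */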
825 * The MMAP2 records are an augmented version of MMAP; they add
826 * maj, min, ino numbers to be used to uniquely identify each mapping
829 * struct perf_event_header header;
838 * u64 ino_generation;
841 * struct sample_id sample_id;
844 PERF_RECORD_MMAP2 = 10,
847 * Records that new data landed in the AUX buffer part.
850 * struct perf_event_header header;
855 * struct sample_id sample_id;
858 PERF_RECORD_AUX = 11,
861 * Indicates that instruction trace has started
864 * struct perf_event_header header;
869 PERF_RECORD_ITRACE_START = 12,
872 * Records the dropped/lost sample number.
875 * struct perf_event_header header;
878 * struct sample_id sample_id;
881 PERF_RECORD_LOST_SAMPLES = 13,
884 * Records a context switch in or out (flagged by
885 * PERF_RECORD_MISC_SWITCH_OUT). See also
886 * PERF_RECORD_SWITCH_CPU_WIDE.
889 * struct perf_event_header header;
890 * struct sample_id sample_id;
893 PERF_RECORD_SWITCH = 14,
896 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
897 * next_prev_tid that are the next (switching out) or previous
898 * (switching in) pid/tid.
901 * struct perf_event_header header;
904 * struct sample_id sample_id;
907 PERF_RECORD_SWITCH_CPU_WIDE = 15,
911 * struct perf_event_header header;
915 * { u64 dev, inode; } [nr_namespaces];
916 * struct sample_id sample_id;
919 PERF_RECORD_NAMESPACES = 16,
921 PERF_RECORD_MAX, /* non-ABI */
924 #define PERF_MAX_STACK_DEPTH 127
925 #define PERF_MAX_CONTEXTS_PER_STACK 8
927 enum perf_callchain_context {
928 PERF_CONTEXT_HV = (__u64)-32,
929 PERF_CONTEXT_KERNEL = (__u64)-128,
930 PERF_CONTEXT_USER = (__u64)-512,
932 PERF_CONTEXT_GUEST = (__u64)-2048,
933 PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
934 PERF_CONTEXT_GUEST_USER = (__u64)-2560,
936 PERF_CONTEXT_MAX = (__u64)-4095,
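/*
 * Example (editorial sketch): in a PERF_SAMPLE_CALLCHAIN array the context
 * values above are interleaved with real instruction pointers; any entry
 * numerically >= PERF_CONTEXT_MAX is a marker announcing which context the
 * following addresses belong to:
 *
 *	for (uint64_t i = 0; i < nr; i++) {
 *		if (ips[i] >= PERF_CONTEXT_MAX) {
 *			// marker, e.g. PERF_CONTEXT_KERNEL or PERF_CONTEXT_USER
 *			continue;
 *		}
 *		// ips[i] is an instruction pointer in the current context
 *	}
 */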
940 * PERF_RECORD_AUX::flags bits
942 #define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
943 #define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
944 #define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
945 #define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
947 #define PERF_FLAG_FD_NO_GROUP (1UL << 0)
948 #define PERF_FLAG_FD_OUTPUT (1UL << 1)
949 #define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
950 #define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
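/*
 * Example (editorial sketch): the flags above form the last argument of
 * sys_perf_event_open(); e.g. opening with close-on-exec so the fd does not
 * leak into exec'ed children (reusing the wrapper sketched earlier):
 *
 *	fd = perf_event_open(&attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
 */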
952 #if defined(__LITTLE_ENDIAN_BITFIELD)
953 union perf_mem_data_src {
956 __u64 mem_op:5, /* type of opcode */
957 mem_lvl:14, /* memory hierarchy level */
958 mem_snoop:5, /* snoop mode */
959 mem_lock:2, /* lock instr */
960 mem_dtlb:7, /* tlb access */
961 mem_lvl_num:4, /* memory hierarchy level number */
962 mem_remote:1, /* remote */
963 mem_snoopx:2, /* snoop mode, ext */
967 #elif defined(__BIG_ENDIAN_BITFIELD)
968 union perf_mem_data_src {
972 mem_snoopx:2, /* snoop mode, ext */
973 mem_remote:1, /* remote */
974 mem_lvl_num:4, /* memory hierarchy level number */
975 mem_dtlb:7, /* tlb access */
976 mem_lock:2, /* lock instr */
977 mem_snoop:5, /* snoop mode */
978 mem_lvl:14, /* memory hierarchy level */
979 mem_op:5; /* type of opcode */
983 #error "Unknown endianness"
986 /* type of opcode (load/store/prefetch, code) */
987 #define PERF_MEM_OP_NA 0x01 /* not available */
988 #define PERF_MEM_OP_LOAD 0x02 /* load instruction */
989 #define PERF_MEM_OP_STORE 0x04 /* store instruction */
990 #define PERF_MEM_OP_PFETCH 0x08 /* prefetch */
991 #define PERF_MEM_OP_EXEC 0x10 /* code (execution) */
992 #define PERF_MEM_OP_SHIFT 0
994 /* memory hierarchy (memory level, hit or miss) */
995 #define PERF_MEM_LVL_NA 0x01 /* not available */
996 #define PERF_MEM_LVL_HIT 0x02 /* hit level */
997 #define PERF_MEM_LVL_MISS 0x04 /* miss level */
998 #define PERF_MEM_LVL_L1 0x08 /* L1 */
999 #define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */
1000 #define PERF_MEM_LVL_L2 0x20 /* L2 */
1001 #define PERF_MEM_LVL_L3 0x40 /* L3 */
1002 #define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */
1003 #define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */
1004 #define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */
1005 #define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */
1006 #define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */
1007 #define PERF_MEM_LVL_IO 0x1000 /* I/O memory */
1008 #define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */
1009 #define PERF_MEM_LVL_SHIFT 5
1011 #define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */
1012 #define PERF_MEM_REMOTE_SHIFT 37
1014 #define PERF_MEM_LVLNUM_L1 0x01 /* L1 */
1015 #define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
1016 #define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
1017 #define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
1018 /* 5-0xa available */
1019 #define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
1020 #define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
1021 #define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
1022 #define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
1023 #define PERF_MEM_LVLNUM_NA 0x0f /* N/A */
1025 #define PERF_MEM_LVLNUM_SHIFT 33
1028 #define PERF_MEM_SNOOP_NA 0x01 /* not available */
1029 #define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */
1030 #define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */
1031 #define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */
1032 #define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */
1033 #define PERF_MEM_SNOOP_SHIFT 19
1035 #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
1037 #define PERF_MEM_SNOOPX_SHIFT 37
1039 /* locked instruction */
1040 #define PERF_MEM_LOCK_NA 0x01 /* not available */
1041 #define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
1042 #define PERF_MEM_LOCK_SHIFT 24
1045 #define PERF_MEM_TLB_NA 0x01 /* not available */
1046 #define PERF_MEM_TLB_HIT 0x02 /* hit level */
1047 #define PERF_MEM_TLB_MISS 0x04 /* miss level */
1048 #define PERF_MEM_TLB_L1 0x08 /* L1 */
1049 #define PERF_MEM_TLB_L2 0x10 /* L2 */
1050 #define PERF_MEM_TLB_WK 0x20 /* Hardware Walker */
1051 #define PERF_MEM_TLB_OS 0x40 /* OS fault handler */
1052 #define PERF_MEM_TLB_SHIFT 26
1054 #define PERF_MEM_S(a, s) \
1055 (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
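/*
 * Example (editorial sketch): PERF_MEM_S() positions one of the field values
 * above inside a data_src word, which is handy both for testing bits of a
 * PERF_SAMPLE_DATA_SRC value and for synthesizing one:
 *
 *	// was this a load that hit in L1?
 *	static int l1_load_hit(uint64_t dsrc)
 *	{
 *		return (dsrc & PERF_MEM_S(OP, LOAD)) &&
 *		       (dsrc & PERF_MEM_S(LVL, HIT)) &&
 *		       (dsrc & PERF_MEM_S(LVL, L1));
 *	}
 */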
1058 * single taken branch record layout:
1060 * from: source instruction (may not always be a branch insn)
1062 * mispred: branch target was mispredicted
1063 * predicted: branch target was predicted
1065 * support for mispred, predicted is optional. In case it
1066 * is not supported, mispred = predicted = 0.
1068 * in_tx: running in a hardware transaction
1069 * abort: aborting a hardware transaction
1070 * cycles: cycles from last branch (or 0 if not supported)
1073 struct perf_branch_entry {
1076 __u64 mispred:1, /* target mispredicted */
1077 predicted:1,/* target predicted */
1078 in_tx:1, /* in transaction */
1079 abort:1, /* transaction abort */
1080 cycles:16, /* cycle count to last branch */
1081 type:4, /* branch type */
1085 #endif /* _UAPI_LINUX_PERF_EVENT_H */