/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC		32
#define INTEL_PMC_MAX_FIXED		4
#define INTEL_PMC_IDX_FIXED		32

#define X86_PMC_IDX_MAX			64
#define MSR_ARCH_PERFMON_PERFCTR0	0xc1
#define MSR_ARCH_PERFMON_PERFCTR1	0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0	0x186
#define MSR_ARCH_PERFMON_EVENTSEL1	0x187

#define ARCH_PERFMON_EVENTSEL_EVENT		0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK		0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR		(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS		(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE		(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL	(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT		(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY		(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE		(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV		(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK		0xFF000000ULL
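
/*
 * Illustrative sketch (not part of this header's API; the helper name is
 * hypothetical): a raw event-select value is composed by OR-ing the fields
 * above. Event 0x3c with umask 0x00 is the architectural "unhalted core
 * cycles" encoding, counted here in both user and kernel mode.
 */
static inline u64 example_eventsel_core_cycles(void)
{
	return (0x3cULL & ARCH_PERFMON_EVENTSEL_EVENT) |
	       ((0x00ULL << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
	       ARCH_PERFMON_EVENTSEL_USR |
	       ARCH_PERFMON_EVENTSEL_OS |
	       ARCH_PERFMON_EVENTSEL_ENABLE;
}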
#define HSW_IN_TX				(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED			(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE			(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE			(1ULL << 32)

#define AMD64_EVENTSEL_INT_CORE_ENABLE		(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY		(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY			(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT	37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK	\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
#define AMD64_L3_SLICE_SHIFT			48
#define AMD64_L3_SLICE_MASK			\
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK			\
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT			56
#define AMD64_L3_THREAD_MASK			\
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK		\
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES			BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES			BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT			42
#define AMD64_L3_COREID_MASK			\
	(0x7ULL << AMD64_L3_COREID_SHIFT)
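
/*
 * Illustrative sketch (hypothetical helper, mirroring how the amd_uncore
 * driver builds L3 PMC configs): count on all cores and all slices, but
 * restrict to one SMT thread using the Family 19h two-bit thread mask.
 */
static inline u64 example_l3_config_f19h(unsigned int thread)
{
	return AMD64_L3_EN_ALL_CORES | AMD64_L3_EN_ALL_SLICES |
	       (((1ULL << thread) << AMD64_L3_THREAD_SHIFT) &
		AMD64_L3_F19H_THREAD_MASK);
}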
#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |	\
	 ARCH_PERFMON_EVENTSEL_UMASK)
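
/*
 * Illustrative sketch (hypothetical helper): the *_RAW_EVENT_MASK macros
 * describe which bits a user-supplied raw config may set; a config with
 * any bit outside the mask would be rejected by the event setup path.
 */
static inline bool example_raw_config_valid(u64 config)
{
	return (config & ~X86_RAW_EVENT_MASK) == 0;
}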
#define AMD64_NUM_COUNTERS			4
#define AMD64_NUM_COUNTERS_CORE			6
#define AMD64_NUM_COUNTERS_NB			4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED	6
#define ARCH_PERFMON_EVENTS_COUNT		7
#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24
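
/*
 * Illustrative sketch (hypothetical helper): an adaptive PEBS data
 * configuration requesting memory info, GPRs and LBR entries. The LBR
 * field starts at PEBS_DATACFG_LBR_SHIFT and, as the kernel programs it,
 * holds the number of entries minus one.
 */
static inline u64 example_pebs_data_cfg(u64 num_lbr)
{
	return PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP | PEBS_DATACFG_LBRS |
	       ((num_lbr - 1) << PEBS_DATACFG_LBR_SHIFT);
}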

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};
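
/*
 * Illustrative sketch (hypothetical function; assumes cpuid() from
 * <asm/processor.h> and pr_info()): the PMU init code decodes CPUID
 * leaf 0xa through these unions roughly like this.
 */
static inline void example_decode_arch_perfmon(void)
{
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	union cpuid10_edx edx;
	unsigned int unused;

	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);

	pr_info("PMU v%d: %d+%d counters, %d bits, events mask 0x%x\n",
		eax.split.version_id, eax.split.num_counters,
		edx.split.num_counters_fixed, eax.split.bit_width, ebx.full);
}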

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int	lbr_depth_mask:8;
		unsigned int	reserved:22;
		/* Deep C-state Reset */
		unsigned int	lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int	lbr_lip:1;
	} split;
	unsigned int		full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int	lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int	lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int	lbr_call_stack:1;
	} split;
	unsigned int		full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int	lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int	lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int	lbr_br_type:1;
	} split;
	unsigned int		full;
};
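
/*
 * Illustrative sketch (hypothetical helper; assumes fls() from
 * <linux/bitops.h>): bit n of lbr_depth_mask advertises support for an
 * LBR depth of 8 * (n + 1), so the maximum supported depth follows from
 * the highest set bit.
 */
static inline unsigned int example_max_arch_lbr_depth(union cpuid28_eax eax)
{
	return 8 * fls(eax.split.lbr_depth_mask);
}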

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
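
/*
 * Illustrative sketch (hypothetical helper; assumes wrmsrl() from
 * <asm/msr.h> and the usual 4-bits-per-counter control layout, where
 * bit 0 of each field enables kernel counting and bit 1 user counting):
 * enabling fixed counter 0, i.e. instructions retired.
 */
static inline void example_enable_fixed_ctr0(void)
{
	u64 ctrl = 0x3ULL << (4 * 0);	/* OS + USR for fixed counter 0 */

	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
}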

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS		(INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(55)

/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it is used to indicate the LBR stack frozen
 * state for architectural perfmon v4, and we unconditionally mask that
 * bit in handle_pmi_common(), so it will never be set during overflow
 * handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do
 * anything with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};
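
/*
 * Illustrative sketch (hypothetical function): the groups above appear in
 * an adaptive PEBS record in this order, and only the groups requested in
 * the PEBS data configuration are present, so a consumer walks the record
 * incrementally, roughly like the kernel's PEBS parsing does.
 */
static inline void *example_skip_pebs_groups(struct pebs_basic *basic,
					     u64 data_cfg)
{
	void *next = basic + 1;		/* the basic group is always present */

	if (data_cfg & PEBS_DATACFG_MEMINFO)
		next = (struct pebs_meminfo *)next + 1;
	if (data_cfg & PEBS_DATACFG_GP)
		next = (struct pebs_gprs *)next + 1;
	if (data_cfg & PEBS_DATACFG_XMMS)
		next = (struct pebs_xmm *)next + 1;

	return next;			/* LBR entries, if any, start here */
}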

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
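
/*
 * Illustrative sketch (hypothetical helper): IBS users are expected to
 * gate themselves on get_ibs_caps() and build the op-control value from
 * the bits above. The max-count field is programmed in units of 16 ops
 * (the low four bits of the period are dropped), hence the shift.
 */
static inline u64 example_ibs_op_config(u64 period)
{
	if (!(get_ibs_caps() & IBS_CAPS_OPSAM))
		return 0;

	return ((period >> 4) & IBS_OP_MAX_CNT) | IBS_OP_ENABLE;
}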

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *	   event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	regs->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}
static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	return -1;
}
#endif
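
/*
 * Illustrative sketch (hypothetical function; assumes wrmsrl() from
 * <asm/msr.h>): a hypervisor-side consumer such as KVM asks the PMU which
 * MSRs need different host/guest values and loads the guest values around
 * VM entry.
 */
static inline void example_load_guest_pmu_msrs(void)
{
	struct perf_guest_switch_msr *msrs;
	int i, nr;

	msrs = perf_guest_get_msrs(&nr);
	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr, msrs[i].guest);
}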

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{

}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */