/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */
#define INTEL_PMC_MAX_GENERIC 32
#define INTEL_PMC_MAX_FIXED 16
#define INTEL_PMC_IDX_FIXED 32

#define X86_PMC_IDX_MAX 64

#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187

#define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
#define ARCH_PERFMON_EVENTSEL_BR_CNTR (1ULL << 35)
#define ARCH_PERFMON_EVENTSEL_EQ (1ULL << 36)
#define ARCH_PERFMON_EVENTSEL_UMASK2 (0xFFULL << 40)
#define INTEL_FIXED_BITS_MASK 0xFULL
#define INTEL_FIXED_BITS_STRIDE 4
#define INTEL_FIXED_0_KERNEL (1ULL << 0)
#define INTEL_FIXED_0_USER (1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD (1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI (1ULL << 3)
#define INTEL_FIXED_3_METRICS_CLEAR (1ULL << 2)

#define HSW_IN_TX (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE (1ULL << 32)

#define intel_fixed_bits_by_idx(_idx, _bits) \
	((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
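/*
 * Illustrative example (not from the original header): enabling fixed
 * counter 1 to count in both rings with PMI delivery would set, in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL:
 *
 *	intel_fixed_bits_by_idx(1, INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER |
 *				   INTEL_FIXED_0_ENABLE_PMI)
 *	== (0x1 | 0x2 | 0x8) << 4 == 0xb0
 */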

#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT \
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
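/*
 * Illustrative note (not from the original header): AMD event selects are
 * 12 bits wide, split across bits 7:0 and bits 35:32 of the event select
 * MSR, so a hypothetical 12-bit event code 0x1d0 would be programmed as
 *
 *	(0x1d0 & 0xff) | ((0x1d0 & 0xf00) << (32 - 8)) == 0xd0 | (0x1ULL << 32)
 *
 * which is exactly the range covered by AMD64_EVENTSEL_EVENT.
 */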
#define INTEL_ARCH_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT 48
#define AMD64_L3_SLICE_MASK \
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK \
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT 56
#define AMD64_L3_THREAD_MASK \
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK \
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT 42
#define AMD64_L3_COREID_MASK \
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_EVENT | \
	 ARCH_PERFMON_EVENTSEL_UMASK | \
	 ARCH_PERFMON_EVENTSEL_EDGE  | \
	 ARCH_PERFMON_EVENTSEL_INV   | \
	 ARCH_PERFMON_EVENTSEL_CMASK)
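/*
 * Illustrative example (not from the original header): a raw config of
 * event=0x3c, umask=0x00, inv=1, cmask=2 is encoded as
 *
 *	0x3c | (0x00 << 8) | ARCH_PERFMON_EVENTSEL_INV | (2ULL << 24)
 *	== 0x280003cULL
 *
 * and is fully covered by X86_RAW_EVENT_MASK.
 */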
#define X86_ALL_EVENT_FLAGS \
	(ARCH_PERFMON_EVENTSEL_EDGE | \
	 ARCH_PERFMON_EVENTSEL_INV | \
	 ARCH_PERFMON_EVENTSEL_CMASK | \
	 ARCH_PERFMON_EVENTSEL_ANY | \
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL | \
	 HSW_IN_TX | \
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK \
	(X86_RAW_EVENT_MASK | \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB \
	(AMD64_EVENTSEL_EVENT | \
	 ARCH_PERFMON_EVENTSEL_UMASK)

#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB \
	(AMD64_EVENTSEL_EVENT | \
	 GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB \
	(ARCH_PERFMON_EVENTSEL_UMASK | \
	 GENMASK_ULL(23, 16))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB \
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_NB | \
	 AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)

#define AMD64_PERFMON_V2_ENABLE_UMC BIT_ULL(31)
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC GENMASK_ULL(7, 0)
#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC GENMASK_ULL(9, 8)
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC \
	(AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC | \
	 AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)

#define AMD64_NUM_COUNTERS 4
#define AMD64_NUM_COUNTERS_CORE 6
#define AMD64_NUM_COUNTERS_NB 4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
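/*
 * Illustrative note (not from the original header): the architectural
 * UnHalted Core Cycles event is selected by programming
 *
 *	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL |
 *	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
 *
 * into an eventsel MSR; CPUID.0AH:EBX bit 0 being clear indicates the
 * event is available (see the no_unhalted_core_cycles bit below).
 */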

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
#define ARCH_PERFMON_EVENTS_COUNT 7

#define PEBS_DATACFG_MEMINFO BIT_ULL(0)
#define PEBS_DATACFG_GP BIT_ULL(1)
#define PEBS_DATACFG_XMMS BIT_ULL(2)
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW BIT_ULL(63)

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};
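/*
 * Illustrative example (not from the original header), assuming the cpuid()
 * helper from <asm/processor.h>: enumerating the architectural PMU typically
 * looks like
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
 *
 * after which eax.split.version_id, eax.split.num_counters and
 * edx.split.num_counters_fixed describe the available counters.
 */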

/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF 0x00000023
#define ARCH_PERFMON_NUM_COUNTER_LEAF 0x1

union cpuid35_eax {
	struct {
		unsigned int leaf0:1;
		/* Counters Sub-Leaf */
		unsigned int cntr_subleaf:1;
		/* Auto Counter Reload Sub-Leaf */
		unsigned int acr_subleaf:1;
		/* Events Sub-Leaf */
		unsigned int events_subleaf:1;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};

union cpuid35_ebx {
	struct {
		/* UnitMask2 Supported */
		unsigned int umask2:1;
		/* EQ-bit Supported */
		unsigned int eq:1;
		unsigned int reserved:30;
	} split;
	unsigned int full;
};

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int lbr_depth_mask:8;
		unsigned int reserved:22;
		/* Deep C-state Reset */
		unsigned int lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int lbr_lip:1;
	} split;
	unsigned int full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int lbr_call_stack:1;
	} split;
	unsigned int full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int lbr_br_type:1;
		unsigned int reserved:13;
		/* Branch counters (Event Logging) Supported */
		unsigned int lbr_counters:4;
	} split;
	unsigned int full;
};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
	struct {
		/* Number of Core Performance Counters */
		unsigned int num_core_pmc:4;
		/* Number of available LBR Stack Entries */
		unsigned int lbr_v2_stack_sz:6;
		/* Number of Data Fabric Counters */
		unsigned int num_df_pmc:6;
		/* Number of Unified Memory Controller Counters */
		unsigned int num_umc_pmc:6;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int version;
	int num_counters_gp;
	int num_counters_fixed;
	int bit_width_gp;
	int bit_width_fixed;
	unsigned int events_mask;
	int events_mask_len;
	unsigned int pebs_ept:1;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE (1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS (1 << 29)

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC, which has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode PMC,
 * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC, which doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX. The X equals the index of the fixed
 * counter + 1, e.g., the fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3 0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */

static inline bool use_fixed_pseudo_encoding(u64 code)
{
	return !(code & 0xff);
}
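/*
 * Illustrative example (not from the original header): the pseudo-encoding
 * 0x0300 (CPU_CLK_Unhalted.Ref on fixed counter 2) has a zero event-code,
 * so use_fixed_pseudo_encoding(0x0300) is true, while a real architectural
 * encoding such as 0x003c (UnHalted Core Cycles) yields false.
 */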

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE (INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING (INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC (INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND (INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS (INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT (INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT (INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND (INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN ((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
				INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS 0x0400 /* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING 0x8000 /* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC 0x8100 /* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND 0x8200 /* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND 0x8300 /* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS 0x8400 /* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT 0x8500 /* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT 0x8600 /* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND 0x8700 /* Memory bound metric */

#define INTEL_TD_METRIC_MAX INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM 8
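/*
 * Illustrative note (not from the original header): the Nth metric event's
 * pseudo-encoding is INTEL_TD_METRIC_RETIRING + N * 0x100, i.e. event-code
 * 0x00 with umask 0x80 + N; e.g. the Fetch Latency metric (N == 6) is
 * 0x8000 + 6 * 0x100 == 0x8600.
 */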

#define INTEL_TD_CFG_METRIC_CLEAR_BIT 0
#define INTEL_TD_CFG_METRIC_CLEAR BIT_ULL(INTEL_TD_CFG_METRIC_CLEAR_BIT)

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}
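/*
 * Illustrative example (not from the original header): with
 * INTEL_PMC_IDX_METRIC_BASE == 48, is_metric_idx(INTEL_PMC_IDX_TD_RETIRING)
 * and is_topdown_idx(INTEL_PMC_IDX_FIXED_SLOTS) are both true, while
 * is_metric_idx(INTEL_PMC_IDX_FIXED_SLOTS) is false because SLOTS lives on
 * fixed counter 3 (index 35) rather than in the metrics range.
 */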

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit) \
	(~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN)

#define GLOBAL_STATUS_COND_CHG BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT 62
#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61)
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT 58
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT 55
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48

#define GLOBAL_CTRL_EN_PERF_METRICS 48

/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate LBR stack frozen state
 * for architectural perfmon v4, also we unconditionally mask that bit in
 * the handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do anything
 * with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR (GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT 0x1b00
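/*
 * Illustrative note (not from the original header): the 0x1b umask follows
 * the fixed-PMC pseudo-encoding rule above, i.e. index + 1:
 *
 *	INTEL_PMC_IDX_FIXED_VLBR - INTEL_PMC_IDX_FIXED + 1
 *	== 58 - 32 + 1 == 27 == 0x1b
 */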

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_group:32,
	    retire_latency:16,
	    format_size:16;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	union {
		/* pre Alder Lake */
		u64 mem_latency;
		/* Alder Lake and later */
		struct {
			u64 instr_latency:16;
			u64 pad2:16;
			u64 cache_latency:16;
			u64 pad3:16;
		};
	};
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES 0x80000022

/*
 * IBS cpuid feature detection
 */
#define IBS_CPUID_FEATURES 0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL (1U<<0)
#define IBS_CAPS_FETCHSAM (1U<<1)
#define IBS_CAPS_OPSAM (1U<<2)
#define IBS_CAPS_RDWROPCNT (1U<<3)
#define IBS_CAPS_OPCNT (1U<<4)
#define IBS_CAPS_BRNTRGT (1U<<5)
#define IBS_CAPS_OPCNTEXT (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
#define IBS_CAPS_OPBRNFUSE (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
#define IBS_CAPS_OPDATA4 (1U<<10)
#define IBS_CAPS_ZEN4 (1U<<11)

#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
			 | IBS_CAPS_FETCHSAM \
			 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL 0x1cc
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY (1ULL<<59)
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
#define IBS_FETCH_CNT 0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
#define IBS_OP_L3MISSONLY (1ULL<<16)
#define IBS_OP_MAX_CNT 0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20) /* separate upper 7 bits */
#define IBS_RIP_INVALID (1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT (1UL << 3)
#define PERF_EFLAGS_VM (1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs regs;
	u64 *xmm_regs;
};

extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_arch_misc_flags(struct pt_regs *regs);
extern unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs);
#define perf_arch_misc_flags(regs) perf_arch_misc_flags(regs)
#define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see
 * perf_arch_misc_flags() and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip) { \
	(regs)->ip = (__ip); \
	(regs)->sp = (unsigned long)__builtin_frame_address(0); \
	(regs)->cs = __KERNEL_CS; \
	regs->flags = 0; \
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int nr;
	unsigned int from;
	unsigned int to;
	unsigned int info;
	bool has_callstack;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
	return 0;
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{

}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_mod(perf_lopwr_cb)(lopwr_in);
}
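/*
 * Illustrative usage sketch (not from the original header): the ACPI idle
 * paths listed above bracket low power entry roughly as
 *
 *	perf_lopwr_cb(true);	 (about to enter a low power state)
 *	...			 (mwait/halt)
 *	perf_lopwr_cb(false);	 (back from the low power state)
 */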

#endif /* PERF_NEEDS_LOPWR_CB */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */