Git Repo - linux.git/commitdiff
perf/x86/intel: Support Perfmon MSRs aliasing
author: Kan Liang <[email protected]>
Wed, 26 Jun 2024 14:35:40 +0000 (07:35 -0700)
committer: Peter Zijlstra <[email protected]>
Thu, 4 Jul 2024 14:00:40 +0000 (16:00 +0200)
The architectural performance monitoring V6 supports a new range of
counters' MSRs in the 19xxH address range. They include all the GP
counter MSRs, the GP control MSRs, and the fixed counter MSRs.

The step between each sibling counter is 4. Add intel_pmu_addr_offset()
to calculate the correct offset.

Add fixedctr in struct x86_pmu to store the address of the fixed counter
0. It can be used to calculate the rest of the fixed counters.

The MSR address of the fixed counter control is not changed.

Signed-off-by: Kan Liang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Andi Kleen <[email protected]>
Reviewed-by: Ian Rogers <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/perf_event.h
arch/x86/include/asm/msr-index.h

index 8ea1c988e19ba7b3aa6f8a4c3d84b578aa11c4e6..975b0f8a0b00d628fe3056f0e3ae0b83aed73f3c 100644 (file)
@@ -1236,8 +1236,7 @@ static inline void x86_assign_hw_event(struct perf_event *event,
                fallthrough;
        case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS-1:
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
-               hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 +
-                               (idx - INTEL_PMC_IDX_FIXED);
+               hwc->event_base = x86_pmu_fixed_ctr_addr(idx - INTEL_PMC_IDX_FIXED);
                hwc->event_base_rdpmc = (idx - INTEL_PMC_IDX_FIXED) |
                                        INTEL_PMC_FIXED_RDPMC_BASE;
                break;
@@ -1573,7 +1572,7 @@ void perf_event_print_debug(void)
        for_each_set_bit(idx, fixed_cntr_mask, X86_PMC_IDX_MAX) {
                if (fixed_counter_disabled(idx, cpuc->pmu))
                        continue;
-               rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
+               rdmsrl(x86_pmu_fixed_ctr_addr(idx), pmc_count);
 
                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
@@ -2483,7 +2482,7 @@ void perf_clear_dirty_counters(void)
                        if (!test_bit(i - INTEL_PMC_IDX_FIXED, hybrid(cpuc->pmu, fixed_cntr_mask)))
                                continue;
 
-                       wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
+                       wrmsrl(x86_pmu_fixed_ctr_addr(i - INTEL_PMC_IDX_FIXED), 0);
                } else {
                        wrmsrl(x86_pmu_event_addr(i), 0);
                }
index fa0550e5abe6654051b1f43f5b5792ddb7504211..cd8f2db6cdf6f4a1488edbf5fdccc91965830618 100644 (file)
@@ -2953,7 +2953,7 @@ static void intel_pmu_reset(void)
        for_each_set_bit(idx, fixed_cntr_mask, INTEL_PMC_MAX_FIXED) {
                if (fixed_counter_disabled(idx, cpuc->pmu))
                        continue;
-               wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+               wrmsrl_safe(x86_pmu_fixed_ctr_addr(idx), 0ull);
        }
 
        if (ds)
@@ -5188,6 +5188,7 @@ static __initconst const struct x86_pmu core_pmu = {
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
+       .fixedctr               = MSR_ARCH_PERFMON_FIXED_CTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
@@ -5241,6 +5242,7 @@ static __initconst const struct x86_pmu intel_pmu = {
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
+       .fixedctr               = MSR_ARCH_PERFMON_FIXED_CTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
@@ -6176,6 +6178,11 @@ static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
        }
 }
 
+static inline int intel_pmu_v6_addr_offset(int index, bool eventsel)
+{
+       return MSR_IA32_PMC_V6_STEP * index;
+}
+
 static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = {
        { hybrid_small, "cpu_atom" },
        { hybrid_big, "cpu_core" },
@@ -7150,6 +7157,14 @@ __init int intel_pmu_init(void)
                pr_cont("full-width counters, ");
        }
 
+       /* Support V6+ MSR Aliasing */
+       if (x86_pmu.version >= 6) {
+               x86_pmu.perfctr = MSR_IA32_PMC_V6_GP0_CTR;
+               x86_pmu.eventsel = MSR_IA32_PMC_V6_GP0_CFG_A;
+               x86_pmu.fixedctr = MSR_IA32_PMC_V6_FX0_CTR;
+               x86_pmu.addr_offset = intel_pmu_v6_addr_offset;
+       }
+
        if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
                x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
 
index 55468ea89d23dd7618f88b180990020dd306352c..ac1182141bf67fc47b252f0bd53a9d77ba2b396b 100644 (file)
@@ -787,6 +787,7 @@ struct x86_pmu {
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
+       unsigned        fixedctr;
        int             (*addr_offset)(int index, bool eventsel);
        int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
@@ -1144,6 +1145,12 @@ static inline unsigned int x86_pmu_event_addr(int index)
                                  x86_pmu.addr_offset(index, false) : index);
 }
 
+static inline unsigned int x86_pmu_fixed_ctr_addr(int index)
+{
+       return x86_pmu.fixedctr + (x86_pmu.addr_offset ?
+                                  x86_pmu.addr_offset(index, false) : index);
+}
+
 static inline int x86_pmu_rdpmc_index(int index)
 {
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
index e022e6eb766c64050aedcefc5b8962c2049184c6..048081b226d789da55a3757075bb7937723aaa4d 100644 (file)
 #define MSR_RELOAD_PMC0                        0x000014c1
 #define MSR_RELOAD_FIXED_CTR0          0x00001309
 
+/* V6 PMON MSR range */
+#define MSR_IA32_PMC_V6_GP0_CTR                0x1900
+#define MSR_IA32_PMC_V6_GP0_CFG_A      0x1901
+#define MSR_IA32_PMC_V6_FX0_CTR                0x1980
+#define MSR_IA32_PMC_V6_STEP           4
+
 /* KeyID partitioning between MKTME and TDX */
 #define MSR_IA32_MKTME_KEYID_PARTITIONING      0x00000087
 
This page took 0.078431 seconds and 4 git commands to generate.