perf/x86/intel: Fix bitmask of OCR and FRONTEND events for LNC
author    Kan Liang <[email protected]>
          Mon, 16 Dec 2024 16:02:52 +0000 (08:02 -0800)
committer Peter Zijlstra <[email protected]>
          Fri, 20 Dec 2024 14:31:14 +0000 (15:31 +0100)
The released OCR and FRONTEND events utilize more bits on the Lunar
Lake P-core. The corresponding masks in extra_regs have to be extended
to unblock the extra bits.

Add a dedicated intel_lnc_extra_regs.
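
For context, each extra_reg entry carries a valid_mask that gates which
config1 bits user space may program; any bit outside the mask is
rejected, which is why a too-narrow mask blocks the new event bits.
Below is a minimal, self-contained sketch of that gating. The struct,
field names, and helper here are illustrative stand-ins for the kernel's
struct extra_reg check (see x86_pmu_extra_regs() in
arch/x86/events/core.c), not the verbatim upstream code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's struct extra_reg. */
struct extra_reg_sketch {
	uint64_t event;		/* event+umask this entry matches */
	uint64_t config_mask;	/* config bits that select the entry */
	uint64_t valid_mask;	/* config1 bits user space may set */
};

/*
 * Returns 1 if config1 is accepted, -1 if rejected (the kernel
 * returns -EINVAL), 0 if the entry does not match this event.
 */
static int check_config1(const struct extra_reg_sketch *er,
			 uint64_t config, uint64_t config1)
{
	if (er->event != (config & er->config_mask))
		return 0;
	return (config1 & ~er->valid_mask) ? -1 : 1;
}

int main(void)
{
	/* OCR entry with the widened 44-bit LNC mask from this patch. */
	struct extra_reg_sketch ocr = {
		.event		= 0x012a,
		.config_mask	= 0xffff,
		.valid_mask	= 0xfffffffffffull,
	};

	/* A high OFFCORE_RSP bit within the widened mask is accepted. */
	printf("%d\n", check_config1(&ocr, 0x012a, 1ull << 42));
	return 0;
}

With the narrower mask inherited from intel_rwc_extra_regs, the same
high response bit would fall outside valid_mask and the event open
would fail.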

Fixes: a932aa0e868f ("perf/x86: Add Lunar Lake and Arrow Lake support")
Reported-by: Andi Kleen <[email protected]>
Signed-off-by: Kan Liang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
arch/x86/events/intel/core.c

index 2e1e268460500a44aa5f62394c23014ab7a0088d..99c590da0ae241ef248ad8cd522c5d05c7ebb4e1 100644
@@ -429,6 +429,16 @@ static struct event_constraint intel_lnc_event_constraints[] = {
        EVENT_CONSTRAINT_END
 };
 
+static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
+       INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
+       INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
+       INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+       INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
+       INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
+       INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
+       INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
+       EVENT_EXTRA_END
+};
 
 EVENT_ATTR_STR(mem-loads,      mem_ld_nhm,     "event=0x0b,umask=0x10,ldlat=3");
 EVENT_ATTR_STR(mem-loads,      mem_ld_snb,     "event=0xcd,umask=0x1,ldlat=3");
@@ -6422,7 +6432,7 @@ static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
        intel_pmu_init_glc(pmu);
        hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
        hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
-       hybrid(pmu, extra_regs) = intel_rwc_extra_regs;
+       hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
 }
 
 static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
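
For reference on why widening the third macro argument changes what
user space may set: INTEL_UEVENT_EXTRA_REG routes that argument into
the entry's valid_mask field. The definitions below are paraphrased
and abbreviated from arch/x86/events/perf_event.h; consult the tree
for the authoritative versions.

/* Paraphrased from arch/x86/events/perf_event.h (abbreviated). */
#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event		= (e),			\
	.msr		= (ms),			\
	.config_mask	= (m),			\
	.valid_mask	= (vm),			\
	.idx		= EXTRA_REG_##i,	\
	}

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx)		\
	EVENT_EXTRA_REG(event, msr,				\
			ARCH_PERFMON_EVENTSEL_EVENT |		\
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

With the widened masks, e.g. 0xfffffffffffull for the OCR events, perf
accepts the additional MSR_OFFCORE_RSP_* and MSR_PEBS_FRONTEND bits
that Lunar Lake defines.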