perf/x86/rapl: Fix the energy-pkg event for AMD CPUs
author Dhananjay Ugwekar <[email protected]>
Tue, 30 Jul 2024 04:49:18 +0000 (04:49 +0000)
committer Ingo Molnar <[email protected]>
Thu, 5 Sep 2024 10:02:14 +0000 (12:02 +0200)
After commit:

  63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")

... on AMD processors that support extended CPUID leaf 0x80000026, the
topology_die_cpumask() and topology_logical_die_id() macros no longer
return the package cpumask and package ID, instead they return the CCD
(Core Complex Die) mask and ID respectively.

This causes the scope of the energy-pkg event to change from package to CCD.

So, change the PMU scope for AMD and Hygon back to package.
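
The patch below does this in-kernel with a boot_cpu_data.x86_vendor check in
rapl_pmu_is_pkg_scope(). Purely as an illustration, the same vendor test can be
reproduced from user space via the CPUID vendor string; the program below is a
hypothetical sketch, not part of the patch:

  /* Hypothetical sketch: decide the expected RAPL scope from the CPUID vendor string. */
  #include <cpuid.h>
  #include <stdio.h>
  #include <string.h>

  static int rapl_is_pkg_scope(void)
  {
          unsigned int eax, ebx, ecx, edx;
          char vendor[13] = { 0 };

          if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
                  return 0;

          /* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order. */
          memcpy(vendor + 0, &ebx, 4);
          memcpy(vendor + 4, &edx, 4);
          memcpy(vendor + 8, &ecx, 4);

          /* AMD and Hygon have a package-scope energy counter. */
          return !strcmp(vendor, "AuthenticAMD") ||
                 !strcmp(vendor, "HygonGenuine");
  }

  int main(void)
  {
          printf("expected energy-pkg scope: %s\n",
                 rapl_is_pkg_scope() ? "package" : "die");
          return 0;
  }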

On a 12 CCD 1 Package AMD Zen4 Genoa machine:

  Before:

  $ cat /sys/devices/power/cpumask
  0,8,16,24,32,40,48,56,64,72,80,88

The expected cpumask here is just "0": since this is a package-scope event,
only one CPU collects the event for all the CPUs in the package.

  After:

  $ cat /sys/devices/power/cpumask
  0
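
With the corrected cpumask, the package energy counter is read from the single
CPU that represents the package. A minimal user-space sketch using the
perf_event_open() syscall is shown below; it assumes CPU 0 is listed in the PMU
cpumask and that the usual "event=0x.." sysfs encoding is present, with error
handling mostly omitted. It is an illustration, not part of the patch:

  /* Hypothetical sketch: count energy-pkg via perf_event_open() (may require root). */
  #include <linux/perf_event.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static unsigned long read_sysfs_ul(const char *path, const char *fmt)
  {
          unsigned long val = 0;
          FILE *f = fopen(path, "r");

          if (f) {
                  if (fscanf(f, fmt, &val) != 1)
                          val = 0;
                  fclose(f);
          }
          return val;
  }

  int main(void)
  {
          struct perf_event_attr attr;
          uint64_t before = 0, after = 0;
          int fd;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          /* Dynamic PMU type of the RAPL ("power") PMU. */
          attr.type = read_sysfs_ul("/sys/devices/power/type", "%lu");
          /* The event file contains "event=0x..", which becomes attr.config. */
          attr.config = read_sysfs_ul("/sys/devices/power/events/energy-pkg",
                                      "event=%lx");

          /* RAPL events are system-wide (pid == -1); count on CPU 0. */
          fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }

          read(fd, &before, sizeof(before));
          sleep(1);
          read(fd, &after, sizeof(after));

          /* Raw counts; multiply by events/energy-pkg.scale to get Joules. */
          printf("energy-pkg delta over 1s: %llu\n",
                 (unsigned long long)(after - before));
          return 0;
  }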

[ mingo: Cleaned up the changelog ]

Signed-off-by: Dhananjay Ugwekar <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Reviewed-by: Kan Liang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
arch/x86/events/rapl.c

index b985ca79cf97bac7b53e4ccd4707b7c6ac108039..a481a939862e54741106d01f477c2876f9cf0619 100644
@@ -103,6 +103,19 @@ static struct perf_pmu_events_attr event_attr_##v = {                              \
        .event_str      = str,                                                  \
 };
 
+/*
+ * RAPL Package energy counter scope:
+ * 1. AMD/HYGON platforms have a per-PKG package energy counter
+ * 2. For Intel platforms
+ *     2.1. CLX-AP is multi-die and its RAPL MSRs are die-scope
+ *     2.2. Other Intel platforms are single die systems so the scope can be
+ *          considered as either pkg-scope or die-scope, and we are considering
+ *          them as die-scope.
+ */
+#define rapl_pmu_is_pkg_scope()                                \
+       (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||  \
+        boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+
 struct rapl_pmu {
        raw_spinlock_t          lock;
        int                     n_active;
@@ -140,9 +153,25 @@ static unsigned int rapl_cntr_mask;
 static u64 rapl_timer_ms;
 static struct perf_msr *rapl_msrs;
 
+/*
+ * Helper functions to get the correct topology macros according to the
+ * RAPL PMU scope.
+ */
+static inline unsigned int get_rapl_pmu_idx(int cpu)
+{
+       return rapl_pmu_is_pkg_scope() ? topology_logical_package_id(cpu) :
+                                        topology_logical_die_id(cpu);
+}
+
+static inline const struct cpumask *get_rapl_pmu_cpumask(int cpu)
+{
+       return rapl_pmu_is_pkg_scope() ? topology_core_cpumask(cpu) :
+                                        topology_die_cpumask(cpu);
+}
+
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-       unsigned int rapl_pmu_idx = topology_logical_die_id(cpu);
+       unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
 
        /*
         * The unsigned check also catches the '-1' return value for non
@@ -552,7 +581,7 @@ static int rapl_cpu_offline(unsigned int cpu)
 
        pmu->cpu = -1;
        /* Find a new cpu to collect rapl events */
-       target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+       target = cpumask_any_but(get_rapl_pmu_cpumask(cpu), cpu);
 
        /* Migrate rapl events to the new target */
        if (target < nr_cpu_ids) {
@@ -565,6 +594,11 @@ static int rapl_cpu_offline(unsigned int cpu)
 
 static int rapl_cpu_online(unsigned int cpu)
 {
+       s32 rapl_pmu_idx = get_rapl_pmu_idx(cpu);
+       if (rapl_pmu_idx < 0) {
+               pr_err("topology_logical_(package/die)_id() returned a negative value");
+               return -EINVAL;
+       }
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;
 
@@ -579,14 +613,14 @@ static int rapl_cpu_online(unsigned int cpu)
                pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
                rapl_hrtimer_init(pmu);
 
-               rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
+               rapl_pmus->pmus[rapl_pmu_idx] = pmu;
        }
 
        /*
         * Check if there is an online cpu in the package which collects rapl
         * events already.
         */
-       target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
+       target = cpumask_any_and(&rapl_cpu_mask, get_rapl_pmu_cpumask(cpu));
        if (target < nr_cpu_ids)
                return 0;
 
@@ -675,7 +709,10 @@ static const struct attribute_group *rapl_attr_update[] = {
 
 static int __init init_rapl_pmus(void)
 {
-       int nr_rapl_pmu = topology_max_packages() * topology_max_dies_per_package();
+       int nr_rapl_pmu = topology_max_packages();
+
+       if (!rapl_pmu_is_pkg_scope())
+               nr_rapl_pmu *= topology_max_dies_per_package();
 
        rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
        if (!rapl_pmus)
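
To double-check the new scope on a real system, the PMU cpumask can be
cross-referenced with each listed CPU's physical_package_id: after the fix,
every package should contribute exactly one CPU. The helper below is a
hypothetical verification sketch (it assumes a simple comma-separated cpumask),
not part of the patch:

  /* Hypothetical sketch: map each CPU in the RAPL cpumask to its package id. */
  #include <stdio.h>

  int main(void)
  {
          FILE *mask = fopen("/sys/devices/power/cpumask", "r");
          int cpu;

          if (!mask) {
                  perror("open cpumask");
                  return 1;
          }

          /* The cpumask is a comma-separated CPU list, e.g. "0" or "0,8,16". */
          while (fscanf(mask, "%d", &cpu) == 1) {
                  char path[128];
                  int pkg = -1;
                  FILE *f;

                  snprintf(path, sizeof(path),
                           "/sys/devices/system/cpu/cpu%d/topology/physical_package_id",
                           cpu);
                  f = fopen(path, "r");
                  if (f) {
                          if (fscanf(f, "%d", &pkg) != 1)
                                  pkg = -1;
                          fclose(f);
                  }
                  printf("cpu %d -> package %d\n", cpu, pkg);

                  fgetc(mask);    /* skip the ',' separator, if any */
          }

          fclose(mask);
          return 0;
  }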