Git Repo - linux.git/commitdiff
sched/topology: Consolidate and clean up access to a CPU's max compute capacity
author    Vincent Guittot <[email protected]>
          Mon, 9 Oct 2023 10:36:16 +0000 (12:36 +0200)
committer Ingo Molnar <[email protected]>
          Mon, 9 Oct 2023 10:59:48 +0000 (12:59 +0200)
Remove the rq::cpu_capacity_orig field and use arch_scale_cpu_capacity()
instead.

The scheduler uses three methods to access a CPU's max compute capacity
(the latter two are sketched below):

 - arch_scale_cpu_capacity(cpu), which is the default way to get a CPU's capacity.

 - the cpu_capacity_orig field, which is periodically updated with
   arch_scale_cpu_capacity().

 - capacity_orig_of(cpu), which encapsulates rq->cpu_capacity_orig.

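For reference, the latter two methods boil down to a cached copy plus a
trivial accessor. A condensed sketch of the pre-patch code, matching the
hunks removed below:

    /* kernel/sched/sched.h (pre-patch) */
    static inline unsigned long capacity_orig_of(int cpu)
    {
            return cpu_rq(cpu)->cpu_capacity_orig;
    }

    /* kernel/sched/fair.c: update_cpu_capacity() refreshed the cached copy */
    cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
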
There is no real need to save the value returned by arch_scale_cpu_capacity()
in struct rq, because arch_scale_cpu_capacity() returns (see the sketch below):

 - either a per_cpu variable.

 - or a const value for systems which have only one capacity.

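As a sketch of those two flavors (based on the generic fallback in
<linux/sched/topology.h> and the arch_topology override used by e.g. arm64;
exact details vary by architecture):

    /* Generic fallback: a constant for systems with a single capacity */
    #ifndef arch_scale_cpu_capacity
    static __always_inline
    unsigned long arch_scale_cpu_capacity(int cpu)
    {
            return SCHED_CAPACITY_SCALE;
    }
    #endif

    /* Arch override: a plain per_cpu read (cf. drivers/base/arch_topology.c) */
    DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

    static inline unsigned long topology_get_cpu_scale(int cpu)
    {
            return per_cpu(cpu_scale, cpu);
    }
    #define arch_scale_cpu_capacity topology_get_cpu_scale

Either way the call is a cheap read, so caching its result in struct rq
buys nothing.
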
Remove rq::cpu_capacity_orig and use arch_scale_cpu_capacity() everywhere.

No functional changes.

Some performance tests on Arm64:

  - small SMP device (hikey): no noticeable changes
  - HMP device (RB5):         hackbench shows a minor improvement (1-2%)
  - large SMP (thx2):         hackbench and tbench show a minor improvement (1%)

Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Reviewed-by: Dietmar Eggemann <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Documentation/scheduler/sched-capacity.rst
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/topology.c

diff --git a/Documentation/scheduler/sched-capacity.rst b/Documentation/scheduler/sched-capacity.rst
index e2c1cf7431588e6bba3bf5b6fd489c85f652d7ec..de414b33dd2abd574c0ece056e085f3d885c9d7e 100644
--- a/Documentation/scheduler/sched-capacity.rst
+++ b/Documentation/scheduler/sched-capacity.rst
@@ -39,14 +39,15 @@ per Hz, leading to::
 -------------------
 
 Two different capacity values are used within the scheduler. A CPU's
-``capacity_orig`` is its maximum attainable capacity, i.e. its maximum
-attainable performance level. A CPU's ``capacity`` is its ``capacity_orig`` to
-which some loss of available performance (e.g. time spent handling IRQs) is
-subtracted.
+``original capacity`` is its maximum attainable capacity, i.e. its maximum
+attainable performance level. This original capacity is returned by
+the function arch_scale_cpu_capacity(). A CPU's ``capacity`` is its ``original
+capacity`` to which some loss of available performance (e.g. time spent
+handling IRQs) is subtracted.
 
 Note that a CPU's ``capacity`` is solely intended to be used by the CFS class,
-while ``capacity_orig`` is class-agnostic. The rest of this document will use
-the term ``capacity`` interchangeably with ``capacity_orig`` for the sake of
+while ``original capacity`` is class-agnostic. The rest of this document will use
+the term ``capacity`` interchangeably with ``original capacity`` for the sake of
 brevity.
 
 1.3 Platform examples
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cf6d3fdd4eb5a6c1eb609377cee7d9a29d37ea08..a3f9cd52eec5c57ff67b6339e02c6d887aaa35a2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9929,7 +9929,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
                rq->sd = NULL;
                rq->rd = NULL;
-               rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
+               rq->cpu_capacity = SCHED_CAPACITY_SCALE;
                rq->balance_callback = &balance_push_callback;
                rq->active_balance = 0;
                rq->next_balance = jiffies;
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 57c92d751bcd733053f171bbeee2cd514647e8d8..95baa12a10293e0e92b7a65dbb63add7c23caa12 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -131,7 +131,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
                        if (!dl_task_fits_capacity(p, cpu)) {
                                cpumask_clear_cpu(cpu, later_mask);
 
-                               cap = capacity_orig_of(cpu);
+                               cap = arch_scale_cpu_capacity(cpu);
 
                                if (cap > max_cap ||
                                    (cpu == task_cpu(p) && cap == max_cap)) {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d98408a274e52ad3d7f6be2c910234c3201a8534..7039a8d5ae9bccaf56daa60b15a243aca9ac8955 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -132,7 +132,7 @@ static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
        int i;
 
        for_each_cpu_and(i, mask, cpu_active_mask)
-               cap += capacity_orig_of(i);
+               cap += arch_scale_cpu_capacity(i);
 
        return cap;
 }
@@ -144,7 +144,7 @@ static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
 static inline unsigned long dl_bw_capacity(int i)
 {
        if (!sched_asym_cpucap_active() &&
-           capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
+           arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
                return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
        } else {
                RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 19bb4ac94146571a9d53ef71fe099261bfc64d90..e7c1bafc0460a70e34c2daea451dd17fca073f08 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4669,7 +4669,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
         * To avoid overestimation of actual task utilization, skip updates if
         * we cannot grant there is idle time in this CPU.
         */
-       if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
+       if (task_util(p) > arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))))
                return;
 
        /*
@@ -4717,14 +4717,14 @@ static inline int util_fits_cpu(unsigned long util,
                return fits;
 
        /*
-        * We must use capacity_orig_of() for comparing against uclamp_min and
+        * We must use arch_scale_cpu_capacity() for comparing against uclamp_min and
         * uclamp_max. We only care about capacity pressure (by using
         * capacity_of()) for comparing against the real util.
         *
         * If a task is boosted to 1024 for example, we don't want a tiny
         * pressure to skew the check whether it fits a CPU or not.
         *
-        * Similarly if a task is capped to capacity_orig_of(little_cpu), it
+        * Similarly if a task is capped to arch_scale_cpu_capacity(little_cpu), it
         * should fit a little cpu even if there's some pressure.
         *
         * Only exception is for thermal pressure since it has a direct impact
@@ -4736,7 +4736,7 @@ static inline int util_fits_cpu(unsigned long util,
         * For uclamp_max, we can tolerate a drop in performance level as the
         * goal is to cap the task. So it's okay if it's getting less.
         */
-       capacity_orig = capacity_orig_of(cpu);
+       capacity_orig = arch_scale_cpu_capacity(cpu);
        capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
 
        /*
@@ -7217,7 +7217,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
                 * Look for the CPU with best capacity.
                 */
                else if (fits < 0)
-                       cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
+                       cpu_cap = arch_scale_cpu_capacity(cpu) - thermal_load_avg(cpu_rq(cpu));
 
                /*
                 * First, select CPU which fits better (-1 being better than 0).
@@ -7459,7 +7459,7 @@ cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
                util = max(util, util_est);
        }
 
-       return min(util, capacity_orig_of(cpu));
+       return min(util, arch_scale_cpu_capacity(cpu));
 }
 
 unsigned long cpu_util_cfs(int cpu)
@@ -9250,8 +9250,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
        unsigned long capacity = scale_rt_capacity(cpu);
        struct sched_group *sdg = sd->groups;
 
-       cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
-
        if (!capacity)
                capacity = 1;
 
@@ -9327,7 +9325,7 @@ static inline int
 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 {
        return ((rq->cpu_capacity * sd->imbalance_pct) <
-                               (rq->cpu_capacity_orig * 100));
+                               (arch_scale_cpu_capacity(cpu_of(rq)) * 100));
 }
 
 /*
@@ -9338,7 +9336,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
 {
        return rq->misfit_task_load &&
-               (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
+               (arch_scale_cpu_capacity(rq->cpu) < rq->rd->max_cpu_capacity ||
                 check_cpu_capacity(rq, sd));
 }
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 76d82a096e038b17dad0408508d2a1023cbcad39..e93b69ef919b3c1d64aadc92315b60d8b7fbf8ec 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -471,7 +471,7 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
        min_cap = uclamp_eff_value(p, UCLAMP_MIN);
        max_cap = uclamp_eff_value(p, UCLAMP_MAX);
 
-       cpu_cap = capacity_orig_of(cpu);
+       cpu_cap = arch_scale_cpu_capacity(cpu);
 
        return cpu_cap >= min(min_cap, max_cap);
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 515eb4cffd5e2a962e0e35b6aaaab9c4fbb2018a..7e7fedcfc580bbe14fec8d330156aa54c805ab45 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1033,7 +1033,6 @@ struct rq {
        struct sched_domain __rcu       *sd;
 
        unsigned long           cpu_capacity;
-       unsigned long           cpu_capacity_orig;
 
        struct balance_callback *balance_callback;
 
@@ -2967,11 +2966,6 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif
 
 #ifdef CONFIG_SMP
-static inline unsigned long capacity_orig_of(int cpu)
-{
-       return cpu_rq(cpu)->cpu_capacity_orig;
-}
-
 /**
  * enum cpu_util_type - CPU utilization type
  * @FREQUENCY_UTIL:    Utilization used to select frequency
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index a7b50bba7829dd4d49f336e0f0357fdf8b240acb..1cc595907363bd0966e6270ab86a6fca3a0b20d3 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2488,12 +2488,15 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        /* Attach the domains */
        rcu_read_lock();
        for_each_cpu(i, cpu_map) {
+               unsigned long capacity;
+
                rq = cpu_rq(i);
                sd = *per_cpu_ptr(d.sd, i);
 
+               capacity = arch_scale_cpu_capacity(i);
                /* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
-               if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
-                       WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
+               if (capacity > READ_ONCE(d.rd->max_cpu_capacity))
+                       WRITE_ONCE(d.rd->max_cpu_capacity, capacity);
 
                cpu_attach_domain(sd, d.rd, i);
        }