sched/fair: Add asymmetric CPU capacity wakeup scan
Author:     Morten Rasmussen <[email protected]>
AuthorDate: Thu, 6 Feb 2020 19:19:54 +0000 (19:19 +0000)
Commit:     Thomas Gleixner <[email protected]>
CommitDate: Thu, 20 Feb 2020 20:03:14 +0000 (21:03 +0100)
Issue
=====

On asymmetric CPU capacity topologies, we currently rely on wake_cap() to
drive select_task_rq_fair() towards either:

- its slow-path (find_idlest_cpu()) if either the previous or
  current (waking) CPU has too little capacity for the waking task
- its fast-path (select_idle_sibling()) otherwise (see the sketch below)
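
For reference, the dispatch looks roughly like the following in
select_task_rq_fair() - a condensed, paraphrased sketch rather than a
verbatim copy of the kernel code:

  /* Sketch: wake_cap() gating the affine fast path at wake-up */
  if (sd_flag & SD_BALANCE_WAKE) {
          record_wakee(p);
          want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
                        cpumask_test_cpu(cpu, p->cpus_ptr);
  }

  /* ... a domain walk then leaves 'sd' set only when an affine wakeup
   * is not appropriate ... */

  if (unlikely(sd)) {
          /* Slow path: balance across the whole domain at wake-up */
          new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
  } else if (sd_flag & SD_BALANCE_WAKE) {
          /* Fast path: scan for an idle sibling near prev/target */
          new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
  }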

Commit:

  3273163c6775 ("sched/fair: Let asymmetric CPU configurations balance at wake-up")

points out that this relies on the assumption that "[...]the CPU capacities
within an SD_SHARE_PKG_RESOURCES domain (sd_llc) are homogeneous".

This assumption no longer holds on newer generations of big.LITTLE
systems (DynamIQ), which can accommodate CPUs of different compute capacity
within a single LLC domain. To hopefully paint a better picture, a regular
big.LITTLE topology would look like this:

  +---------+ +---------+
  |   L2    | |   L2    |
  +----+----+ +----+----+
  |CPU0|CPU1| |CPU2|CPU3|
  +----+----+ +----+----+
      ^^^         ^^^
    LITTLEs      bigs

which would result in the following scheduler topology:

  DIE [         ] <- sd_asym_cpucapacity
  MC  [   ] [   ] <- sd_llc
       0 1   2 3

Conversely, a DynamIQ topology could look like:

  +-------------------+
  |        L3         |
  +----+----+----+----+
  | L2 | L2 | L2 | L2 |
  +----+----+----+----+
  |CPU0|CPU1|CPU2|CPU3|
  +----+----+----+----+
     ^^^^^     ^^^^^
    LITTLEs    bigs

which would result in the following scheduler topology:

  MC [       ] <- sd_llc, sd_asym_cpucapacity
      0 1 2 3

What this means is that, on DynamIQ systems, we could pass the wake_cap()
test (IOW presume the waking task fits the CPU capacities of some LLC
domain) and thus go through select_idle_sibling().
This function operates on an LLC domain, which here spans both bigs and
LITTLEs, so it could very well pick a CPU of too small capacity for the
task, despite there being fitting idle CPUs - it very much depends on the
CPU iteration order, on which we have absolutely no guarantees
capacity-wise.

Implementation
==============

Introduce yet another select_idle_sibling() helper function that takes CPU
capacity into account. The policy is to pick the first idle CPU which is
big enough for the task (task_util * margin < cpu_capacity). If no
idle CPU is big enough, we pick the idle one with the highest capacity.
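
The fit check referred to above is task_fits_capacity(). As an approximation
of how the margin works (paraphrased from fair.c around this kernel version,
not verbatim - the exact utilization signal upstream may differ slightly,
e.g. with utilization clamping applied):

  /*
   * Approximate form of the fit check: utilization is compared against
   * capacity with a 1280/1024 (~25%) margin. For example, util 300 fits
   * a capacity-446 LITTLE (300 * 1280 = 384000 < 446 * 1024 = 456704),
   * while util 400 does not (512000 > 456704) and would be steered
   * towards a bigger CPU.
   */
  #define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

  static inline int task_fits_capacity(struct task_struct *p, long capacity)
  {
  	return fits_capacity(task_util_est(p), capacity);
  }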

Unlike other select_idle_sibling() helpers, this one operates on the
sd_asym_cpucapacity sched_domain pointer, which is guaranteed to span all
known CPU capacities in the system. As such, this will work for both
"legacy" big.LITTLE (LITTLEs & bigs split at MC, joined at DIE) and for
newer DynamIQ systems (e.g. LITTLEs and bigs in the same MC domain).
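
For context, this per-CPU pointer is cached when the sched domains are
built; roughly (paraphrased from kernel/sched/topology.c around this
version), it points at the lowest domain level carrying
SD_ASYM_CPUCAPACITY, i.e. the lowest level whose span covers the full range
of CPU capacities visible to that CPU:

  /* Paraphrased from kernel/sched/topology.c (not a verbatim copy) */
  DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);

  /* In update_top_cache_domain(), at domain (re)build time: */
  sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
  rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);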

Note that this limits the scope of select_idle_sibling() to
select_idle_capacity() for asymmetric CPU capacity systems - the LLC domain
will not be scanned, and no further heuristic will be applied.

Signed-off-by: Morten Rasmussen <[email protected]>
Signed-off-by: Valentin Schneider <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Quentin Perret <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1a0ce83e835a9aac3528a88ca7063a23c329d8ba..6fb47a2f73830de9c7f38e4acdf735874e40d642 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5896,6 +5896,40 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        return cpu;
 }
 
+/*
+ * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
+ * the task fits. If no CPU is big enough, but there are idle ones, try to
+ * maximize capacity.
+ */
+static int
+select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
+{
+       unsigned long best_cap = 0;
+       int cpu, best_cpu = -1;
+       struct cpumask *cpus;
+
+       sync_entity_load_avg(&p->se);
+
+       cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+       cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
+
+       for_each_cpu_wrap(cpu, cpus, target) {
+               unsigned long cpu_cap = capacity_of(cpu);
+
+               if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
+                       continue;
+               if (task_fits_capacity(p, cpu_cap))
+                       return cpu;
+
+               if (cpu_cap > best_cap) {
+                       best_cap = cpu_cap;
+                       best_cpu = cpu;
+               }
+       }
+
+       return best_cpu;
+}
+
 /*
  * Try and locate an idle core/thread in the LLC cache domain.
  */
@@ -5904,6 +5938,28 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
        struct sched_domain *sd;
        int i, recent_used_cpu;
 
+       /*
+        * For asymmetric CPU capacity systems, our domain of interest is
+        * sd_asym_cpucapacity rather than sd_llc.
+        */
+       if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+               sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
+               /*
+                * On an asymmetric CPU capacity system where an exclusive
+                * cpuset defines a symmetric island (i.e. one unique
+                * capacity_orig value through the cpuset), the key will be set
+                * but the CPUs within that cpuset will not have a domain with
+                * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
+                * capacity path.
+                */
+               if (!sd)
+                       goto symmetric;
+
+               i = select_idle_capacity(p, sd, target);
+               return ((unsigned)i < nr_cpumask_bits) ? i : target;
+       }
+
+symmetric:
        if (available_idle_cpu(target) || sched_idle_cpu(target))
                return target;
 