sched/eevdf: More PELT vs DELAYED_DEQUEUE
author		Peter Zijlstra <[email protected]>
		Mon, 2 Dec 2024 17:45:57 +0000 (18:45 +0100)
committer	Peter Zijlstra <[email protected]>
		Mon, 9 Dec 2024 10:48:09 +0000 (11:48 +0100)
Vincent and Dietmar noted that while
commit fc1892becd56 ("sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE") fixes
the entity runnable stats, it does not adjust the cfs_rq runnable stats,
which are based on h_nr_running.

Track h_nr_delayed such that we can discount those and adjust the
signal.
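
For illustration, a minimal user-space sketch (not kernel code) of the
accounting being added: a delayed-dequeue entity is still queued, so it
is counted in h_nr_running, but it should no longer contribute to the
runnable signal; h_nr_delayed is what lets PELT discount it. The flat,
single-level cfs_rq below is a simplification of the real per-hierarchy
walk, which also stops at throttled runqueues.

/*
 * Hypothetical stand-alone model: the names mirror the patch, but this
 * is not the kernel implementation.
 */
#include <stdio.h>

struct cfs_rq {
	unsigned int h_nr_running;	/* all queued entities */
	unsigned int h_nr_delayed;	/* of those, delayed-dequeue */
};

/* The runnable contribution PELT should see (cf. the pelt.c hunk). */
static unsigned int runnable(const struct cfs_rq *cfs_rq)
{
	return cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
}

int main(void)
{
	struct cfs_rq rq = { .h_nr_running = 3, .h_nr_delayed = 0 };

	/* An ineligible entity is delay-dequeued: set_delayed() */
	rq.h_nr_delayed++;
	printf("queued=%u runnable=%u\n", rq.h_nr_running, runnable(&rq));

	/* Later it finishes dequeueing: clear_delayed() + real dequeue */
	rq.h_nr_delayed--;
	rq.h_nr_running--;
	printf("queued=%u runnable=%u\n", rq.h_nr_running, runnable(&rq));
	return 0;
}

Built with a stock C compiler this prints queued=3 runnable=2, then
queued=2 runnable=2: the delayed entity stays queued but drops out of
the runnable count, which is exactly the discount the pelt.c hunk below
applies.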

Fixes: fc1892becd56 ("sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE")
Closes: https://lore.kernel.org/lkml/[email protected]/
Closes: https://lore.kernel.org/lkml/CAKfTPtCNUvWE_GX5LyvTF-WdxUT=ZgvZZv-4t=eWntg5uOFqiQ@mail.gmail.com/
Reported-by: Dietmar Eggemann <[email protected]>
Reported-by: Vincent Guittot <[email protected]>
Signed-off-by: Vincent Guittot <[email protected]>
Signed-off-by: "Peter Zijlstra (Intel)" <[email protected]>
[ Fixes checkpatch warnings and rebased ]
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Dietmar Eggemann <[email protected]>
Tested-by: K Prateek Nayak <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/pelt.c
kernel/sched/sched.h

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index a48b2a701ec2792d32e99d259dc69fdbd2724403..a1be00a988bf6f15351f6749d9159813c57604a8 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -845,6 +845,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+       SEQ_printf(m, "  .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
        SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
                        cfs_rq->idle_nr_running);
        SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9d7a2dd2c2606868311dadd3ea25a5b0dd90e0f9..97ee48c8bf5ed3fef303ae3f86ebc78fa872af55 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5465,9 +5465,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
-static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+static void set_delayed(struct sched_entity *se)
+{
+       se->sched_delayed = 1;
+       for_each_sched_entity(se) {
+               struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+               cfs_rq->h_nr_delayed++;
+               if (cfs_rq_throttled(cfs_rq))
+                       break;
+       }
+}
+
+static void clear_delayed(struct sched_entity *se)
 {
        se->sched_delayed = 0;
+       for_each_sched_entity(se) {
+               struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+               cfs_rq->h_nr_delayed--;
+               if (cfs_rq_throttled(cfs_rq))
+                       break;
+       }
+}
+
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+       clear_delayed(se);
        if (sched_feat(DELAY_ZERO) && se->vlag > 0)
                se->vlag = 0;
 }
@@ -5496,7 +5520,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                if (sched_feat(DELAY_DEQUEUE) && delay &&
                    !entity_eligible(cfs_rq, se)) {
                        update_load_avg(cfs_rq, se, 0);
-                       se->sched_delayed = 1;
+                       set_delayed(se);
                        return false;
                }
        }
@@ -5908,7 +5932,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
        struct rq *rq = rq_of(cfs_rq);
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
-       long task_delta, idle_task_delta, dequeue = 1;
+       long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
        long rq_h_nr_running = rq->cfs.h_nr_running;
 
        raw_spin_lock(&cfs_b->lock);
@@ -5941,6 +5965,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
        task_delta = cfs_rq->h_nr_running;
        idle_task_delta = cfs_rq->idle_h_nr_running;
+       delayed_delta = cfs_rq->h_nr_delayed;
        for_each_sched_entity(se) {
                struct cfs_rq *qcfs_rq = cfs_rq_of(se);
                int flags;
@@ -5964,6 +5989,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
                qcfs_rq->h_nr_running -= task_delta;
                qcfs_rq->idle_h_nr_running -= idle_task_delta;
+               qcfs_rq->h_nr_delayed -= delayed_delta;
 
                if (qcfs_rq->load.weight) {
                        /* Avoid re-evaluating load for this entity: */
@@ -5986,6 +6012,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
                qcfs_rq->h_nr_running -= task_delta;
                qcfs_rq->idle_h_nr_running -= idle_task_delta;
+               qcfs_rq->h_nr_delayed -= delayed_delta;
        }
 
        /* At this point se is NULL and we are at root level*/
@@ -6011,7 +6038,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        struct rq *rq = rq_of(cfs_rq);
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
-       long task_delta, idle_task_delta;
+       long task_delta, idle_task_delta, delayed_delta;
        long rq_h_nr_running = rq->cfs.h_nr_running;
 
        se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6047,6 +6074,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
        task_delta = cfs_rq->h_nr_running;
        idle_task_delta = cfs_rq->idle_h_nr_running;
+       delayed_delta = cfs_rq->h_nr_delayed;
        for_each_sched_entity(se) {
                struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6064,6 +6092,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
                qcfs_rq->h_nr_running += task_delta;
                qcfs_rq->idle_h_nr_running += idle_task_delta;
+               qcfs_rq->h_nr_delayed += delayed_delta;
 
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(qcfs_rq))
@@ -6081,6 +6110,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
                qcfs_rq->h_nr_running += task_delta;
                qcfs_rq->idle_h_nr_running += idle_task_delta;
+               qcfs_rq->h_nr_delayed += delayed_delta;
 
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(qcfs_rq))
@@ -6934,7 +6964,7 @@ requeue_delayed_entity(struct sched_entity *se)
        }
 
        update_load_avg(cfs_rq, se, 0);
-       se->sched_delayed = 0;
+       clear_delayed(se);
 }
 
 /*
@@ -6948,6 +6978,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
        int idle_h_nr_running = task_has_idle_policy(p);
+       int h_nr_delayed = 0;
        int task_new = !(flags & ENQUEUE_WAKEUP);
        int rq_h_nr_running = rq->cfs.h_nr_running;
        u64 slice = 0;
@@ -6974,6 +7005,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        if (p->in_iowait)
                cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
+       if (task_new)
+               h_nr_delayed = !!se->sched_delayed;
+
        for_each_sched_entity(se) {
                if (se->on_rq) {
                        if (se->sched_delayed)
@@ -6996,6 +7030,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;
+               cfs_rq->h_nr_delayed += h_nr_delayed;
 
                if (cfs_rq_is_idle(cfs_rq))
                        idle_h_nr_running = 1;
@@ -7019,6 +7054,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;
+               cfs_rq->h_nr_delayed += h_nr_delayed;
 
                if (cfs_rq_is_idle(cfs_rq))
                        idle_h_nr_running = 1;
@@ -7081,6 +7117,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
        struct task_struct *p = NULL;
        int idle_h_nr_running = 0;
        int h_nr_running = 0;
+       int h_nr_delayed = 0;
        struct cfs_rq *cfs_rq;
        u64 slice = 0;
 
@@ -7088,6 +7125,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
                p = task_of(se);
                h_nr_running = 1;
                idle_h_nr_running = task_has_idle_policy(p);
+               if (!task_sleep && !task_delayed)
+                       h_nr_delayed = !!se->sched_delayed;
        } else {
                cfs_rq = group_cfs_rq(se);
                slice = cfs_rq_min_slice(cfs_rq);
@@ -7105,6 +7144,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
                cfs_rq->h_nr_running -= h_nr_running;
                cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+               cfs_rq->h_nr_delayed -= h_nr_delayed;
 
                if (cfs_rq_is_idle(cfs_rq))
                        idle_h_nr_running = h_nr_running;
@@ -7143,6 +7183,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
                cfs_rq->h_nr_running -= h_nr_running;
                cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+               cfs_rq->h_nr_delayed -= h_nr_delayed;
 
                if (cfs_rq_is_idle(cfs_rq))
                        idle_h_nr_running = h_nr_running;
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index fc07382361a88b5e90802191221ab67bc4db71b0..fee75cc2c47b64af70cf319dceb41721d6c48dd7 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
 {
        if (___update_load_sum(now, &cfs_rq->avg,
                                scale_load_down(cfs_rq->load.weight),
-                               cfs_rq->h_nr_running,
+                               cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
                                cfs_rq->curr != NULL)) {
 
                ___update_load_avg(&cfs_rq->avg, 1);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 76f5f53a645fcdc3de25c224813c7ac3b6aefa1a..1e494af2cd23d1fa86a6cccb727c337661056e22 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -649,6 +649,7 @@ struct cfs_rq {
        unsigned int            h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
        unsigned int            idle_nr_running;   /* SCHED_IDLE */
        unsigned int            idle_h_nr_running; /* SCHED_IDLE */
+       unsigned int            h_nr_delayed;
 
        s64                     avg_vruntime;
        u64                     avg_load;
@@ -898,8 +899,11 @@ struct dl_rq {
 
 static inline void se_update_runnable(struct sched_entity *se)
 {
-       if (!entity_is_task(se))
-               se->runnable_weight = se->my_q->h_nr_running;
+       if (!entity_is_task(se)) {
+               struct cfs_rq *cfs_rq = se->my_q;
+
+               se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
+       }
 }
 
 static inline long se_runnable(struct sched_entity *se)