sched/nohz: Fix nohz cpu idle load balancing state with cpu hotplug
author Suresh Siddha <[email protected]>
       Fri, 20 Jan 2012 02:28:57 +0000 (18:28 -0800)
committer Ingo Molnar <[email protected]>
       Thu, 26 Jan 2012 18:38:13 +0000 (19:38 +0100)
With the recent nohz scheduler changes, the rq's nohz flag
'NOHZ_TICK_STOPPED' and its associated state are not cleared
immediately after the cpu exits idle; they get cleared as part
of the next tick seen on that cpu.

For cpu offline support, this state needs to be cleared
explicitly. Fix it by registering a cpu notifier which clears the
nohz idle load balance state for this rq during the CPU_DYING
notification.

There won't be any nohz updates for that cpu after the
CPU_DYING notification. But let's be extra paranoid and skip
updating the nohz state in select_nohz_load_balancer() if the
cpu is no longer in the active state.

Reported-by: Srivatsa S. Bhat <[email protected]>
Reviewed-and-tested-by: Srivatsa S. Bhat <[email protected]>
Tested-by: Sergey Senozhatsky <[email protected]>
Signed-off-by: Suresh Siddha <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
kernel/sched/fair.c

index 84adb2d66cbd3dc15e653532462203f8c73bba18..7c6414fc669de4f09dc03fc763aaddd8040f98bf 100644 (file)
@@ -4866,6 +4866,15 @@ static void nohz_balancer_kick(int cpu)
        return;
 }
 
+static inline void clear_nohz_tick_stopped(int cpu)
+{
+       if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+               cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+               atomic_dec(&nohz.nr_cpus);
+               clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+       }
+}
+
 static inline void set_cpu_sd_state_busy(void)
 {
        struct sched_domain *sd;
@@ -4904,6 +4913,12 @@ void select_nohz_load_balancer(int stop_tick)
 {
        int cpu = smp_processor_id();
 
+       /*
+        * If this cpu is going down, then nothing needs to be done.
+        */
+       if (!cpu_active(cpu))
+               return;
+
        if (stop_tick) {
                if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
                        return;
@@ -4914,6 +4929,18 @@ void select_nohz_load_balancer(int stop_tick)
        }
        return;
 }
+
+static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
+                                       unsigned long action, void *hcpu)
+{
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_DYING:
+               clear_nohz_tick_stopped(smp_processor_id());
+               return NOTIFY_OK;
+       default:
+               return NOTIFY_DONE;
+       }
+}
 #endif
 
 static DEFINE_SPINLOCK(balancing);
@@ -5070,11 +5097,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
        * busy tick after returning from idle, we will update the busy stats.
        */
        set_cpu_sd_state_busy();
-       if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
-               clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
-               cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-               atomic_dec(&nohz.nr_cpus);
-       }
+       clear_nohz_tick_stopped(cpu);
 
        /*
         * None are in tickless mode and hence no need for NOHZ idle load
@@ -5590,6 +5613,7 @@ __init void init_sched_fair_class(void)
 
 #ifdef CONFIG_NO_HZ
        zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
+       cpu_notifier(sched_ilb_notifier, 0);
 #endif
 #endif /* SMP */
 
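For readers unfamiliar with the (pre-state-machine) cpu hotplug notifier pattern used in the diff above, the following is a minimal userspace C sketch of the same idea: a callback keyed off CPU_DYING clears per-cpu nohz-like bookkeeping, and the CPU_TASKS_FROZEN bit is masked off so the suspend/resume variants of the event are handled the same way. The constants, struct-less callback signature, and dispatch in main() are simplified stand-ins for illustration only, not the kernel's actual notifier API.

/*
 * Simplified userspace model of the CPU_DYING notifier pattern used in
 * this commit.  Names and values are illustrative stand-ins; the real
 * kernel API lives in <linux/notifier.h> and <linux/cpu.h>.
 */
#include <stdio.h>
#include <stdbool.h>

#define CPU_DYING          0x0008   /* illustrative event value */
#define CPU_TASKS_FROZEN   0x0010   /* "frozen" (suspend/resume) variant bit */
#define NOTIFY_DONE        0        /* event not of interest */
#define NOTIFY_OK          1        /* event handled */

#define NR_CPUS 4

/* Per-cpu stand-in for the rq nohz state that the real patch clears. */
static bool nohz_tick_stopped[NR_CPUS];
static int  nr_idle_cpus;

static void clear_nohz_tick_stopped(int cpu)
{
	if (nohz_tick_stopped[cpu]) {
		nohz_tick_stopped[cpu] = false;
		nr_idle_cpus--;
	}
}

/* Callback in the shape of a hotplug notifier: (event, cpu) -> status. */
static int sched_ilb_notifier(unsigned long action, int cpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DYING:
		clear_nohz_tick_stopped(cpu);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

int main(void)
{
	/* cpu 2 stopped its tick while idle ... */
	nohz_tick_stopped[2] = true;
	nr_idle_cpus = 1;

	/* ... and is now being offlined: the notifier cleans up its state. */
	sched_ilb_notifier(CPU_DYING, 2);

	printf("cpu2 tick stopped: %d, idle cpus: %d\n",
	       nohz_tick_stopped[2], nr_idle_cpus);
	return 0;
}

The key point mirrored from the patch is the mask in the switch statement: handling CPU_DYING and CPU_DYING_FROZEN identically means the cleanup also runs when a cpu is torn down during suspend.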