Git Repo - linux.git/commitdiff
Merge tag 'sched_urgent_for_v6.13_rc3' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <[email protected]>
Mon, 9 Dec 2024 18:28:55 +0000 (10:28 -0800)
committerLinus Torvalds <[email protected]>
Mon, 9 Dec 2024 18:28:55 +0000 (10:28 -0800)
Pull scheduler fixes from Borislav Petkov:

 - Remove wrong enqueueing of a task for a later wakeup when a task
   blocks on a RT mutex

 - Do not setup a new deadline entity on a boosted task as that has
   happened already

 - Update preempt= kernel command line param

 - Prevent needless ksoftirqd wakeups in the idle task's context

 - Detect the case where the idle load balancer CPU becomes busy and
   avoid unnecessary load balancing invocation

 - Remove an unnecessary load balancing need_resched() call in
   nohz_csd_func()

 - Allow for raising of SCHED_SOFTIRQ softirq type on RT but retain the
   warning to catch any other cases

 - Remove a wrong warning when a cpuset update makes the task affinity
   no longer a subset of the cpuset

* tag 'sched_urgent_for_v6.13_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking: rtmutex: Fix wake_q logic in task_blocks_on_rt_mutex
  sched/deadline: Fix warning in migrate_enable for boosted tasks
  sched/core: Update kernel boot parameters for LAZY preempt.
  sched/core: Prevent wakeup of ksoftirqd during idle load balance
  sched/fair: Check idle_cpu() before need_resched() to detect ilb CPU turning busy
  sched/core: Remove the unnecessary need_resched() check in nohz_csd_func()
  softirq: Allow raising SCHED_SOFTIRQ from SMP-call-function on RT kernel
  sched: fix warning in sched_setaffinity
  sched/deadline: Fix replenish_dl_new_period dl_server condition

1  2 
kernel/sched/fair.c

diff --combined kernel/sched/fair.c
index a59ae2e23dafaa91f3c11b2cc27ac447e0bcbc58,05b8f1eb2c149b50df4d3eb81e199c7b199214ab..aa0238ee485724af807797be2647eb6270161116
@@@ -3399,16 -3399,10 +3399,16 @@@ retry_pids
  
                /* Initialise new per-VMA NUMAB state. */
                if (!vma->numab_state) {
 -                      vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
 -                              GFP_KERNEL);
 -                      if (!vma->numab_state)
 +                      struct vma_numab_state *ptr;
 +
 +                      ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
 +                      if (!ptr)
 +                              continue;
 +
 +                      if (cmpxchg(&vma->numab_state, NULL, ptr)) {
 +                              kfree(ptr);
                                continue;
 +                      }
  
                        vma->numab_state->start_scan_seq = mm->numa_scan_seq;
  
@@@ -12574,7 -12568,7 +12574,7 @@@ static void _nohz_idle_balance(struct r
                 * work being done for other CPUs. Next load
                 * balancing owner will pick it up.
                 */
-               if (need_resched()) {
+               if (!idle_cpu(this_cpu) && need_resched()) {
                        if (flags & NOHZ_STATS_KICK)
                                has_blocked_load = true;
                        if (flags & NOHZ_NEXT_KICK)
This page took 0.076324 seconds and 4 git commands to generate.