Merge branch 'for-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
author     Linus Torvalds <[email protected]>
           Wed, 23 Mar 2022 19:40:51 +0000 (12:40 -0700)
committer  Linus Torvalds <[email protected]>
           Wed, 23 Mar 2022 19:40:51 +0000 (12:40 -0700)
Pull workqueue updates from Tejun Heo:
 "Nothing major. Just follow-up cleanups from Lai after the earlier
  synchronization simplification"

* 'for-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Convert the type of pool->nr_running to int
  workqueue: Use wake_up_worker() in wq_worker_sleeping() instead of open code
  workqueue: Change the comments of the synchronization about the idle_list
  workqueue: Remove the mb() pair between wq_worker_sleeping() and insert_work()

kernel/workqueue.c

diff --combined kernel/workqueue.c
index 52e9abbb77591917d2cb17041543ec37836a3588,835d25e65bb246811f4f0e8834db7ca24b658512..0d2514b4ff0d2baf5020d0bd71fec91ed1f283fa
@@@ -154,15 -154,20 +154,20 @@@ struct worker_pool 
  
        unsigned long           watchdog_ts;    /* L: watchdog timestamp */
  
-       /* The current concurrency level. */
-       atomic_t                nr_running;
+       /*
+        * The counter is incremented in a process context on the associated CPU
+        * w/ preemption disabled, and decremented or reset in the same context
+        * but w/ pool->lock held. The readers grab pool->lock and are
+        * guaranteed to see if the counter reached zero.
+        */
+       int                     nr_running;
  
        struct list_head        worklist;       /* L: list of pending works */
  
        int                     nr_workers;     /* L: total number of workers */
        int                     nr_idle;        /* L: currently idle workers */
  
-       struct list_head        idle_list;      /* X: list of idle workers */
+       struct list_head        idle_list;      /* L: list of idle workers */
        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
  
@@@ -777,7 -782,7 +782,7 @@@ static bool work_is_canceling(struct wo
  
  static bool __need_more_worker(struct worker_pool *pool)
  {
-       return !atomic_read(&pool->nr_running);
+       return !pool->nr_running;
  }
  
  /*
@@@ -802,8 -807,7 +807,7 @@@ static bool may_start_working(struct wo
  /* Do I need to keep working?  Called from currently running workers. */
  static bool keep_working(struct worker_pool *pool)
  {
-       return !list_empty(&pool->worklist) &&
-               atomic_read(&pool->nr_running) <= 1;
+       return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
  }
  
  /* Do we need a new worker?  Called from manager. */
@@@ -826,7 -830,7 +830,7 @@@ static bool too_many_workers(struct wor
   * Wake up functions.
   */
  
- /* Return the first idle worker.  Safe with preemption disabled */
+ /* Return the first idle worker.  Called with pool->lock held. */
  static struct worker *first_idle_worker(struct worker_pool *pool)
  {
        if (unlikely(list_empty(&pool->idle_list)))
@@@ -873,7 -877,7 +877,7 @@@ void wq_worker_running(struct task_stru
         */
        preempt_disable();
        if (!(worker->flags & WORKER_NOT_RUNNING))
-               atomic_inc(&worker->pool->nr_running);
+               worker->pool->nr_running++;
        preempt_enable();
        worker->sleeping = 0;
  }
   */
  void wq_worker_sleeping(struct task_struct *task)
  {
-       struct worker *next, *worker = kthread_data(task);
+       struct worker *worker = kthread_data(task);
        struct worker_pool *pool;
  
        /*
                return;
        }
  
-       /*
-        * The counterpart of the following dec_and_test, implied mb,
-        * worklist not empty test sequence is in insert_work().
-        * Please read comment there.
-        *
-        * NOT_RUNNING is clear.  This means that we're bound to and
-        * running on the local cpu w/ rq lock held and preemption
-        * disabled, which in turn means that none else could be
-        * manipulating idle_list, so dereferencing idle_list without pool
-        * lock is safe.
-        */
-       if (atomic_dec_and_test(&pool->nr_running) &&
-           !list_empty(&pool->worklist)) {
-               next = first_idle_worker(pool);
-               if (next)
-                       wake_up_process(next->task);
-       }
+       pool->nr_running--;
+       if (need_more_worker(pool))
+               wake_up_worker(pool);
        raw_spin_unlock_irq(&pool->lock);
  }
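
The hunk above replaces the open-coded "find the first idle worker and wake it" sequence with the need_more_worker() and wake_up_worker() helpers used elsewhere in kernel/workqueue.c, which is safe now that the decrement happens under pool->lock. A minimal standalone model of the logic wq_worker_sleeping() now reuses, as a sketch only (nr_pending and the idle[] array are simplified stand-ins for pool->worklist and pool->idle_list; the real helpers walk those lists):

#include <stdbool.h>
#include <stdio.h>

struct worker { const char *name; };

struct pool {
        int nr_running;         /* concurrency level */
        int nr_pending;         /* stands in for pool->worklist */
        struct worker *idle[4]; /* stands in for pool->idle_list */
        int nr_idle;
};

/* need_more_worker(): work is queued and nobody is running it. */
static bool need_more_worker(struct pool *pool)
{
        return pool->nr_pending > 0 && pool->nr_running == 0;
}

/* wake_up_worker(): kick the first idle worker, if there is one. */
static void wake_up_worker(struct pool *pool)
{
        if (pool->nr_idle > 0)
                printf("waking %s\n", pool->idle[0]->name);
}

/* The new wq_worker_sleeping() body, minus locking and flag checks. */
static void worker_sleeping(struct pool *pool)
{
        pool->nr_running--;
        if (need_more_worker(pool))
                wake_up_worker(pool);
}

int main(void)
{
        struct worker idle_worker = { "kworker/0:1" };
        struct pool pool = { .nr_running = 1, .nr_pending = 1,
                             .idle = { &idle_worker }, .nr_idle = 1 };

        worker_sleeping(&pool);         /* prints: waking kworker/0:1 */
        return 0;
}
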
  
@@@ -987,7 -977,7 +977,7 @@@ static inline void worker_set_flags(str
        /* If transitioning into NOT_RUNNING, adjust nr_running. */
        if ((flags & WORKER_NOT_RUNNING) &&
            !(worker->flags & WORKER_NOT_RUNNING)) {
-               atomic_dec(&pool->nr_running);
+               pool->nr_running--;
        }
  
        worker->flags |= flags;
@@@ -1019,7 -1009,7 +1009,7 @@@ static inline void worker_clr_flags(str
         */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
-                       atomic_inc(&pool->nr_running);
+                       pool->nr_running++;
  }
  
  /**
@@@ -1372,13 -1362,6 +1362,6 @@@ static void insert_work(struct pool_wor
        list_add_tail(&work->entry, head);
        get_pwq(pwq);
  
-       /*
-        * Ensure either wq_worker_sleeping() sees the above
-        * list_add_tail() or we see zero nr_running to avoid workers lying
-        * around lazily while there are works to be processed.
-        */
-       smp_mb();
        if (__need_more_worker(pool))
                wake_up_worker(pool);
  }
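
The smp_mb() deleted above used to pair an unlocked atomic_dec_and_test() in wq_worker_sleeping() against the list_add_tail() here; with both paths now running under pool->lock, the lock itself provides the ordering, so in either interleaving at least one side observes the other's update and issues the wake-up. A small two-thread sketch of that argument, using plain pthreads and illustrative variable names rather than the kernel's code paths:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_running = 1;      /* one worker currently running */
static int nr_pending;          /* work items waiting on the list */

/* Side A: the running worker goes to sleep (wq_worker_sleeping()). */
static void *worker_sleeps(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        nr_running--;
        if (nr_pending > 0 && nr_running == 0)
                printf("sleeper wakes an idle worker\n");
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* Side B: new work is queued (insert_work()). */
static void *work_is_queued(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        nr_pending++;
        if (nr_running == 0)
                printf("queuer wakes an idle worker\n");
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        /* Whichever side runs second sees the other's update, so exactly
         * one of them prints the wake-up; work is never left unattended. */
        pthread_create(&a, NULL, worker_sleeps, NULL);
        pthread_create(&b, NULL, work_is_queued, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}
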
@@@ -1827,8 -1810,7 +1810,7 @@@ static void worker_enter_idle(struct wo
                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
  
        /* Sanity check nr_running. */
-       WARN_ON_ONCE(pool->nr_workers == pool->nr_idle &&
-                    atomic_read(&pool->nr_running));
+       WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
  }
  
  /**
@@@ -5006,7 -4988,7 +4988,7 @@@ static void unbind_workers(int cpu
                 * an unbound (in terms of concurrency management) pool which
                 * are served by workers tied to the pool.
                 */
-               atomic_set(&pool->nr_running, 0);
+               pool->nr_running = 0;
  
                /*
                 * With concurrency management just turned off, a busy
@@@ -6006,13 -5988,13 +5988,13 @@@ static void __init wq_numa_init(void
  void __init workqueue_init_early(void)
  {
        int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
 -      int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
        int i, cpu;
  
        BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
  
        BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
 -      cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
 +      cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
 +      cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
  
        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
  