Git Repo - linux.git/commitdiff
Merge branch 'for-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
authorLinus Torvalds <[email protected]>
Tue, 23 Feb 2021 01:06:54 +0000 (17:06 -0800)
committerLinus Torvalds <[email protected]>
Tue, 23 Feb 2021 01:06:54 +0000 (17:06 -0800)
Pull workqueue updates from Tejun Heo:
 "Tracepoint and comment updates only"

* 'for-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Use %s instead of function name
  workqueue: tracing the name of the workqueue instead of it's address
  workqueue: fix annotation for WQ_SYSFS

1  2 
kernel/workqueue.c

diff --combined kernel/workqueue.c
index 894bb885b40b146ef5c13d007427bb0156490bf5,8b1b6160eab696fd2551388a460a78d7c950a52b..0d150da252e81d2781fd7bebc6ad3654222a586d
@@@ -1848,6 -1848,12 +1848,6 @@@ static void worker_attach_to_pool(struc
  {
        mutex_lock(&wq_pool_attach_mutex);
  
 -      /*
 -       * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
 -       * online CPUs.  It'll be re-applied when any of the CPUs come up.
 -       */
 -      set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 -
        /*
         * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
         * stable across this function.  See the comments above the flag
         */
        if (pool->flags & POOL_DISASSOCIATED)
                worker->flags |= WORKER_UNBOUND;
 +      else
 +              kthread_set_per_cpu(worker->task, pool->cpu);
 +
 +      if (worker->rescue_wq)
 +              set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
  
        list_add_tail(&worker->node, &pool->workers);
        worker->pool = pool;
@@@ -1882,7 -1883,6 +1882,7 @@@ static void worker_detach_from_pool(str
  
        mutex_lock(&wq_pool_attach_mutex);
  
 +      kthread_set_per_cpu(worker->task, -1);
        list_del(&worker->node);
        worker->pool = NULL;
  
@@@ -2964,8 -2964,8 +2964,8 @@@ reflush
  
                if (++flush_cnt == 10 ||
                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
-                       pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
-                               wq->name, flush_cnt);
+                       pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
+                               wq->name, __func__, flush_cnt);
  
                mutex_unlock(&wq->mutex);
                goto reflush;
@@@ -4919,10 -4919,8 +4919,10 @@@ static void unbind_workers(int cpu
  
                raw_spin_unlock_irq(&pool->lock);
  
 -              for_each_pool_worker(worker, pool)
 -                      WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
 +              for_each_pool_worker(worker, pool) {
 +                      kthread_set_per_cpu(worker->task, -1);
 +                      WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
 +              }
  
                mutex_unlock(&wq_pool_attach_mutex);
  
@@@ -4974,11 -4972,9 +4974,11 @@@ static void rebind_workers(struct worke
         * of all workers first and then clear UNBOUND.  As we're called
         * from CPU_ONLINE, the following shouldn't fail.
         */
 -      for_each_pool_worker(worker, pool)
 +      for_each_pool_worker(worker, pool) {
 +              kthread_set_per_cpu(worker->task, pool->cpu);
                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
                                                  pool->attrs->cpumask) < 0);
 +      }
  
        raw_spin_lock_irq(&pool->lock);
  
This page took 0.08252 seconds and 4 git commands to generate.