Merge branch 'for-6.10' into test-merge-for-6.10
author	Tejun Heo <[email protected]>
Wed, 15 May 2024 21:40:33 +0000 (11:40 -1000)
committer	Tejun Heo <[email protected]>
Wed, 15 May 2024 21:40:33 +0000 (11:40 -1000)
kernel/workqueue.c

diff --combined kernel/workqueue.c
index 80882ae432617365a393f8d2aa2a29f10930441a,3c3154b406984de997cde343e75b3d1970014ae1..003474c9a77d09d3d025b4ceed42c64179551323
@@@ -99,6 -99,7 +99,7 @@@ enum worker_flags 
  
  enum work_cancel_flags {
        WORK_CANCEL_DELAYED     = 1 << 0,       /* canceling a delayed_work */
+       WORK_CANCEL_DISABLE     = 1 << 1,       /* canceling to disable */
  };
  
  enum wq_internal_consts {
@@@ -392,6 -393,12 +393,12 @@@ struct wq_pod_type 
        int                     *cpu_pod;       /* cpu -> pod */
  };
  
+ struct work_offq_data {
+       u32                     pool_id;
+       u32                     disable;
+       u32                     flags;
+ };
+ 
  static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
        [WQ_AFFN_DFL]           = "default",
        [WQ_AFFN_CPU]           = "cpu",
@@@ -489,12 -496,6 +496,6 @@@ static struct workqueue_attrs *unbound_
  /* I: attributes used when instantiating ordered pools on demand */
  static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
  
- /*
-  * Used to synchronize multiple cancel_sync attempts on the same work item. See
-  * work_grab_pending() and __cancel_work_sync().
-  */
- static DECLARE_WAIT_QUEUE_HEAD(wq_cancel_waitq);
  /*
   * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
   * process context while holding a pool lock. Bounce to a dedicated kthread
@@@ -763,6 -764,11 +764,11 @@@ static int work_next_color(int color
        return (color + 1) % WORK_NR_COLORS;
  }
  
+ static unsigned long pool_offq_flags(struct worker_pool *pool)
+ {
+       return (pool->flags & POOL_BH) ? WORK_OFFQ_BH : 0;
+ }
+ 
  /*
   * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
   * contain the pointer to the queued pwq.  Once execution starts, the flag
   * corresponding to a work.  Pool is available once the work has been
   * queued anywhere after initialization until it is sync canceled.  pwq is
   * available only while the work item is queued.
-  *
-  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
-  * canceled.  While being canceled, a work item may have its PENDING set
-  * but stay off timer and worklist for arbitrarily long and nobody should
-  * try to steal the PENDING bit.
   */
  static inline void set_work_data(struct work_struct *work, unsigned long data)
  {
@@@ -892,36 -893,26 +893,26 @@@ static struct worker_pool *get_work_poo
        return idr_find(&worker_pool_idr, pool_id);
  }
  
- /**
-  * get_work_pool_id - return the worker pool ID a given work is associated with
-  * @work: the work item of interest
-  *
-  * Return: The worker_pool ID @work was last associated with.
-  * %WORK_OFFQ_POOL_NONE if none.
-  */
- static int get_work_pool_id(struct work_struct *work)
+ static unsigned long shift_and_mask(unsigned long v, u32 shift, u32 bits)
  {
-       unsigned long data = atomic_long_read(&work->data);
-       if (data & WORK_STRUCT_PWQ)
-               return work_struct_pwq(data)->pool->id;
-       return data >> WORK_OFFQ_POOL_SHIFT;
+       return (v >> shift) & ((1 << bits) - 1);
  }
  
- static void mark_work_canceling(struct work_struct *work)
+ static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data)
  {
-       unsigned long pool_id = get_work_pool_id(work);
+       WARN_ON_ONCE(data & WORK_STRUCT_PWQ);
  
-       pool_id <<= WORK_OFFQ_POOL_SHIFT;
-       set_work_data(work, pool_id | WORK_STRUCT_PENDING | WORK_OFFQ_CANCELING);
+       offqd->pool_id = shift_and_mask(data, WORK_OFFQ_POOL_SHIFT,
+                                       WORK_OFFQ_POOL_BITS);
+       offqd->disable = shift_and_mask(data, WORK_OFFQ_DISABLE_SHIFT,
+                                       WORK_OFFQ_DISABLE_BITS);
+       offqd->flags = data & WORK_OFFQ_FLAG_MASK;
  }
  
- static bool work_is_canceling(struct work_struct *work)
+ static unsigned long work_offqd_pack_flags(struct work_offq_data *offqd)
  {
-       unsigned long data = atomic_long_read(&work->data);
-       return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
+       return ((unsigned long)offqd->disable << WORK_OFFQ_DISABLE_SHIFT) |
+               ((unsigned long)offqd->flags);
  }
  
  /*
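
A minimal userspace sketch of the pack/unpack scheme used by work_offqd_unpack() and work_offqd_pack_flags() above. The shift and width constants below are illustrative placeholders, not the real WORK_OFFQ_* values, which live in the workqueue headers and are not part of this diff.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only; the kernel's WORK_OFFQ_* constants differ. */
#define OFFQ_FLAG_BITS          4
#define OFFQ_FLAG_MASK          ((1ul << OFFQ_FLAG_BITS) - 1)
#define OFFQ_DISABLE_SHIFT      OFFQ_FLAG_BITS
#define OFFQ_DISABLE_BITS       16
#define OFFQ_POOL_SHIFT         (OFFQ_DISABLE_SHIFT + OFFQ_DISABLE_BITS)
#define OFFQ_POOL_BITS          12

static unsigned long shift_and_mask(unsigned long v, uint32_t shift, uint32_t bits)
{
        return (v >> shift) & ((1ul << bits) - 1);
}

int main(void)
{
        /* pool_id=5, disable=2, flags=0x1 packed into a single word */
        unsigned long data = (5ul << OFFQ_POOL_SHIFT) |
                             (2ul << OFFQ_DISABLE_SHIFT) | 0x1;

        printf("pool_id=%lu disable=%lu flags=%#lx\n",
               shift_and_mask(data, OFFQ_POOL_SHIFT, OFFQ_POOL_BITS),
               shift_and_mask(data, OFFQ_DISABLE_SHIFT, OFFQ_DISABLE_BITS),
               data & OFFQ_FLAG_MASK);
        return 0;
}
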
@@@ -1277,12 -1268,8 +1268,12 @@@ static bool kick_pool(struct worker_poo
            !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
                struct work_struct *work = list_first_entry(&pool->worklist,
                                                struct work_struct, entry);
 -              p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask);
 -              get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
 +              int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
 +                                                        cpu_online_mask);
 +              if (wake_cpu < nr_cpu_ids) {
 +                      p->wake_cpu = wake_cpu;
 +                      get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
 +              }
        }
  #endif
        wake_up_process(p);
@@@ -1468,7 -1455,7 +1459,7 @@@ void wq_worker_sleeping(struct task_str
   * wq_worker_tick - a scheduler tick occurred while a kworker is running
   * @task: task currently running
   *
 - * Called from scheduler_tick(). We're in the IRQ context and the current
 + * Called from sched_tick(). We're in the IRQ context and the current
   * worker's fields which follow the 'K' locking rule can be accessed safely.
   */
  void wq_worker_tick(struct task_struct *task)
@@@ -1598,15 -1585,6 +1589,15 @@@ static void wq_update_node_max_active(s
        if (off_cpu >= 0)
                total_cpus--;
  
 +      /* If all CPUs of the wq go offline, use the default values */
 +      if (unlikely(!total_cpus)) {
 +              for_each_node(node)
 +                      wq_node_nr_active(wq, node)->max = min_active;
 +
 +              wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
 +              return;
 +      }
 +
        for_each_node(node) {
                int node_cpus;
  
                              min_active, max_active);
        }
  
 -      wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active;
 +      wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
  }
  
  /**
@@@ -2067,8 -2045,6 +2058,6 @@@ out_put
   *  1         if @work was pending and we successfully stole PENDING
   *  0         if @work was idle and we claimed PENDING
   *  -EAGAIN   if PENDING couldn't be grabbed at the moment, safe to busy-retry
-  *  -ENOENT   if someone else is canceling @work, this state may persist
-  *            for arbitrarily long
   *  ========  ================================================================
   *
   * Note:
@@@ -2151,7 -2127,8 +2140,8 @@@ static int try_to_grab_pending(struct w
                 * this destroys work->data needed by the next step, stash it.
                 */
                work_data = *work_data_bits(work);
-               set_work_pool_and_keep_pending(work, pool->id, 0);
+               set_work_pool_and_keep_pending(work, pool->id,
+                                              pool_offq_flags(pool));
  
                /* must be the last step, see the function comment */
                pwq_dec_nr_in_flight(pwq, work_data);
  fail:
        rcu_read_unlock();
        local_irq_restore(*irq_flags);
-       if (work_is_canceling(work))
-               return -ENOENT;
-       cpu_relax();
        return -EAGAIN;
  }
  
- struct cwt_wait {
-       wait_queue_entry_t      wait;
-       struct work_struct      *work;
- };
- static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
- {
-       struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
-       if (cwait->work != key)
-               return 0;
-       return autoremove_wake_function(wait, mode, sync, key);
- }
  /**
   * work_grab_pending - steal work item from worklist and disable irq
   * @work: work item to steal
   * Grab PENDING bit of @work. @work can be in any stable state - idle, on timer
   * or on worklist.
   *
-  * Must be called in process context. IRQ is disabled on return with IRQ state
+  * Can be called from any context. IRQ is disabled on return with IRQ state
   * stored in *@irq_flags. The caller is responsible for re-enabling it using
   * local_irq_restore().
   *
  static bool work_grab_pending(struct work_struct *work, u32 cflags,
                              unsigned long *irq_flags)
  {
-       struct cwt_wait cwait;
        int ret;
  
-       might_sleep();
- repeat:
-       ret = try_to_grab_pending(work, cflags, irq_flags);
-       if (likely(ret >= 0))
-               return ret;
-       if (ret != -ENOENT)
-               goto repeat;
-       /*
-        * Someone is already canceling. Wait for it to finish. flush_work()
-        * doesn't work for PREEMPT_NONE because we may get woken up between
-        * @work's completion and the other canceling task resuming and clearing
-        * CANCELING - flush_work() will return false immediately as @work is no
-        * longer busy, try_to_grab_pending() will return -ENOENT as @work is
-        * still being canceled and the other canceling task won't be able to
-        * clear CANCELING as we're hogging the CPU.
-        *
-        * Let's wait for completion using a waitqueue. As this may lead to the
-        * thundering herd problem, use a custom wake function which matches
-        * @work along with exclusive wait and wakeup.
-        */
-       init_wait(&cwait.wait);
-       cwait.wait.func = cwt_wakefn;
-       cwait.work = work;
-       prepare_to_wait_exclusive(&wq_cancel_waitq, &cwait.wait,
-                                 TASK_UNINTERRUPTIBLE);
-       if (work_is_canceling(work))
-               schedule();
-       finish_wait(&wq_cancel_waitq, &cwait.wait);
-       goto repeat;
+       while (true) {
+               ret = try_to_grab_pending(work, cflags, irq_flags);
+               if (ret >= 0)
+                       return ret;
+               cpu_relax();
+       }
  }
  
  /**
        rcu_read_unlock();
  }
  
+ static bool clear_pending_if_disabled(struct work_struct *work)
+ {
+       unsigned long data = *work_data_bits(work);
+       struct work_offq_data offqd;
+       if (likely((data & WORK_STRUCT_PWQ) ||
+                  !(data & WORK_OFFQ_DISABLE_MASK)))
+               return false;
+       work_offqd_unpack(&offqd, data);
+       set_work_pool_and_clear_pending(work, offqd.pool_id,
+                                       work_offqd_pack_flags(&offqd));
+       return true;
+ }
+ 
  /**
   * queue_work_on - queue work on specific cpu
   * @cpu: CPU number to execute work on
@@@ -2444,7 -2392,8 +2405,8 @@@ bool queue_work_on(int cpu, struct work
  
        local_irq_save(irq_flags);
  
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
+           !clear_pending_if_disabled(work)) {
                __queue_work(cpu, wq, work);
                ret = true;
        }
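
clear_pending_if_disabled() above is what makes queueing fail while a work item is disabled. A hypothetical, self-contained illustration (the names are made up, not taken from the patch):

#include <linux/printk.h>
#include <linux/workqueue.h>

static void demo_workfn(struct work_struct *work) { }
static DECLARE_WORK(demo_work, demo_workfn);

static void demo_disable_blocks_queueing(void)
{
        disable_work(&demo_work);

        /* PENDING is cleared again by clear_pending_if_disabled(); nothing is queued. */
        if (!queue_work(system_wq, &demo_work))
                pr_info("demo_work is disabled, queue_work() returned false\n");

        enable_work(&demo_work);
}
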
@@@ -2522,7 -2471,8 +2484,8 @@@ bool queue_work_node(int node, struct w
  
        local_irq_save(irq_flags);
  
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
+           !clear_pending_if_disabled(work)) {
                int cpu = select_numa_node_cpu(node);
  
                __queue_work(cpu, wq, work);
@@@ -2604,7 -2554,8 +2567,8 @@@ bool queue_delayed_work_on(int cpu, str
        /* read the comment in __queue_work() */
        local_irq_save(irq_flags);
  
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
+           !clear_pending_if_disabled(work)) {
                __queue_delayed_work(cpu, wq, dwork, delay);
                ret = true;
        }
@@@ -2636,19 -2587,14 +2600,14 @@@ bool mod_delayed_work_on(int cpu, struc
                         struct delayed_work *dwork, unsigned long delay)
  {
        unsigned long irq_flags;
-       int ret;
+       bool ret;
  
-       do {
-               ret = try_to_grab_pending(&dwork->work, WORK_CANCEL_DELAYED,
-                                         &irq_flags);
-       } while (unlikely(ret == -EAGAIN));
+       ret = work_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, &irq_flags);
  
-       if (likely(ret >= 0)) {
+       if (!clear_pending_if_disabled(&dwork->work))
                __queue_delayed_work(cpu, wq, dwork, delay);
-               local_irq_restore(irq_flags);
-       }
  
-       /* -ENOENT from try_to_grab_pending() becomes %true */
+       local_irq_restore(irq_flags);
        return ret;
  }
  EXPORT_SYMBOL_GPL(mod_delayed_work_on);
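
As a usage note, mod_delayed_work_on() is commonly used for debouncing; a hypothetical sketch (not from the patch) that also reflects the new behavior of bailing out when the work has been disabled:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void debounce_workfn(struct work_struct *work) { /* ... */ }
static DECLARE_DELAYED_WORK(debounce_work, debounce_workfn);

/* Hypothetical event handler: restart a 50ms debounce timer on every event. */
static void on_input_event(void)
{
        /*
         * Grabs PENDING and re-queues with a fresh delay. With the change
         * above, it returns without queueing if the work has been disabled
         * via disable_delayed_work().
         */
        mod_delayed_work(system_wq, &debounce_work, msecs_to_jiffies(50));
}
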
@@@ -2677,7 -2623,12 +2636,12 @@@ bool queue_rcu_work(struct workqueue_st
  {
        struct work_struct *work = &rwork->work;
  
-       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+       /*
+        * rcu_work can't be canceled or disabled. Warn if the user reached
+        * inside @rwork and disabled the inner work.
+        */
+       if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
+           !WARN_ON_ONCE(clear_pending_if_disabled(work))) {
                rwork->wq = wq;
                call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
                return true;
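
A typical queue_rcu_work() pattern, freeing an object only after an RCU grace period; the structure and function names below are hypothetical:

#include <linux/container_of.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
        struct rcu_work rwork;
        /* ... payload ... */
};

static void my_obj_free_workfn(struct work_struct *work)
{
        struct my_obj *obj = container_of(to_rcu_work(work), struct my_obj, rwork);

        kfree(obj);
}

/* Free @obj once an RCU grace period has elapsed. */
static void my_obj_queue_free(struct my_obj *obj)
{
        INIT_RCU_WORK(&obj->rwork, my_obj_free_workfn);
        queue_rcu_work(system_wq, &obj->rwork);
}
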
@@@ -2953,7 -2904,7 +2917,7 @@@ static void idle_worker_timeout(struct 
                unsigned long expires;
  
                /* idle_list is kept in LIFO order, check the last one */
-               worker = list_entry(pool->idle_list.prev, struct worker, entry);
+               worker = list_last_entry(&pool->idle_list, struct worker, entry);
                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
                do_cull = !time_before(jiffies, expires);
  
@@@ -2995,7 -2946,7 +2959,7 @@@ static void idle_cull_fn(struct work_st
                struct worker *worker;
                unsigned long expires;
  
-               worker = list_entry(pool->idle_list.prev, struct worker, entry);
+               worker = list_last_entry(&pool->idle_list, struct worker, entry);
                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
  
                if (time_before(jiffies, expires)) {
@@@ -3230,7 -3181,7 +3194,7 @@@ __acquires(&pool->lock
         * PENDING and queued state changes happen together while IRQ is
         * disabled.
         */
-       set_work_pool_and_clear_pending(work, pool->id, 0);
+       set_work_pool_and_clear_pending(work, pool->id, pool_offq_flags(pool));
  
        pwq->stats[PWQ_STAT_STARTED]++;
        raw_spin_unlock_irq(&pool->lock);
@@@ -3700,7 -3651,7 +3664,7 @@@ void workqueue_softirq_dead(unsigned in
                if (!need_more_worker(pool))
                        continue;
  
-               INIT_WORK(&dead_work.work, drain_dead_softirq_workfn);
+               INIT_WORK_ONSTACK(&dead_work.work, drain_dead_softirq_workfn);
                dead_work.pool = pool;
                init_completion(&dead_work.done);
  
                        queue_work(system_bh_wq, &dead_work.work);
  
                wait_for_completion(&dead_work.done);
+               destroy_work_on_stack(&dead_work.work);
        }
  }
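
The INIT_WORK_ONSTACK()/destroy_work_on_stack() pairing added above follows the usual on-stack work pattern; a generic sketch with hypothetical names:

#include <linux/completion.h>
#include <linux/container_of.h>
#include <linux/workqueue.h>

struct onstack_ctx {
        struct work_struct work;
        struct completion done;
};

static void onstack_workfn(struct work_struct *work)
{
        struct onstack_ctx *ctx = container_of(work, struct onstack_ctx, work);

        /* ... do the deferred work ... */
        complete(&ctx->done);
}

static void run_onstack_work(void)
{
        struct onstack_ctx ctx;

        INIT_WORK_ONSTACK(&ctx.work, onstack_workfn);
        init_completion(&ctx.done);

        schedule_work(&ctx.work);
        wait_for_completion(&ctx.done);
        destroy_work_on_stack(&ctx.work);
}
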
  
@@@ -4154,8 -4106,6 +4119,6 @@@ static bool start_flush_work(struct wor
        struct pool_workqueue *pwq;
        struct workqueue_struct *wq;
  
-       might_sleep();
        rcu_read_lock();
        pool = get_work_pool(work);
        if (!pool) {
@@@ -4207,6 -4157,7 +4170,7 @@@ already_gone
  static bool __flush_work(struct work_struct *work, bool from_cancel)
  {
        struct wq_barrier barr;
+       unsigned long data;
  
        if (WARN_ON(!wq_online))
                return false;
        if (WARN_ON(!work->func))
                return false;
  
-       if (start_flush_work(work, &barr, from_cancel)) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
-               return true;
-       } else {
+       if (!start_flush_work(work, &barr, from_cancel))
                return false;
+       /*
+        * start_flush_work() returned %true. If @from_cancel is set, we know
+        * that @work must have been executing during start_flush_work() and
+        * can't currently be queued. Its data must contain OFFQ bits. If @work
+        * was queued on a BH workqueue, we also know that it was running in the
+        * BH context and thus can be busy-waited.
+        */
+       data = *work_data_bits(work);
+       if (from_cancel &&
+           !WARN_ON_ONCE(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_BH)) {
+               /*
+                * On RT, prevent a live lock in case %current preempted soft
+                * interrupt processing or is preventing ksoftirqd from running
+                * by repeatedly flipping BH. If the BH work item runs on a
+                * different CPU, this has no effect other than doing the BH
+                * disable/enable dance for nothing. This is copied from
+                * kernel/softirq.c::tasklet_unlock_spin_wait().
+                */
+               while (!try_wait_for_completion(&barr.done)) {
+                       if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+                               local_bh_disable();
+                               local_bh_enable();
+                       } else {
+                               cpu_relax();
+                       }
+               }
+       } else {
+               wait_for_completion(&barr.done);
        }
+       destroy_work_on_stack(&barr.work);
+       return true;
  }
  
  /**
   */
  bool flush_work(struct work_struct *work)
  {
+       might_sleep();
        return __flush_work(work, false);
  }
  EXPORT_SYMBOL_GPL(flush_work);
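
With the might_sleep() moved into flush_work() above, its callers must be in process context; a hypothetical teardown sketch:

#include <linux/workqueue.h>

static void stats_workfn(struct work_struct *work) { /* ... */ }
static DECLARE_WORK(stats_work, stats_workfn);

/* Hypothetical teardown path; must run in a sleepable (process) context. */
static void demo_teardown(void)
{
        /* Wait for any queued or running instance of stats_work to finish. */
        flush_work(&stats_work);
}
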
@@@ -4282,32 -4262,53 +4275,53 @@@ bool flush_rcu_work(struct rcu_work *rw
  }
  EXPORT_SYMBOL(flush_rcu_work);
  
+ static void work_offqd_disable(struct work_offq_data *offqd)
+ {
+       const unsigned long max = (1lu << WORK_OFFQ_DISABLE_BITS) - 1;
+       if (likely(offqd->disable < max))
+               offqd->disable++;
+       else
+               WARN_ONCE(true, "workqueue: work disable count overflowed\n");
+ }
+ 
+ static void work_offqd_enable(struct work_offq_data *offqd)
+ {
+       if (likely(offqd->disable > 0))
+               offqd->disable--;
+       else
+               WARN_ONCE(true, "workqueue: work disable count underflowed\n");
+ }
+ 
  static bool __cancel_work(struct work_struct *work, u32 cflags)
  {
+       struct work_offq_data offqd;
        unsigned long irq_flags;
        int ret;
  
-       do {
-               ret = try_to_grab_pending(work, cflags, &irq_flags);
-       } while (unlikely(ret == -EAGAIN));
+       ret = work_grab_pending(work, cflags, &irq_flags);
  
-       if (unlikely(ret < 0))
-               return false;
+       work_offqd_unpack(&offqd, *work_data_bits(work));
  
-       set_work_pool_and_clear_pending(work, get_work_pool_id(work), 0);
+       if (cflags & WORK_CANCEL_DISABLE)
+               work_offqd_disable(&offqd);
+       set_work_pool_and_clear_pending(work, offqd.pool_id,
+                                       work_offqd_pack_flags(&offqd));
        local_irq_restore(irq_flags);
        return ret;
  }
  
  static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
  {
-       unsigned long irq_flags;
        bool ret;
  
-       /* claim @work and tell other tasks trying to grab @work to back off */
-       ret = work_grab_pending(work, cflags, &irq_flags);
-       mark_work_canceling(work);
-       local_irq_restore(irq_flags);
+       ret = __cancel_work(work, cflags | WORK_CANCEL_DISABLE);
+       if (*work_data_bits(work) & WORK_OFFQ_BH)
+               WARN_ON_ONCE(in_hardirq());
+       else
+               might_sleep();
  
        /*
         * Skip __flush_work() during early boot when we know that @work isn't
        if (wq_online)
                __flush_work(work, true);
  
-       /*
-        * smp_mb() at the end of set_work_pool_and_clear_pending() is paired
-        * with prepare_to_wait() above so that either waitqueue_active() is
-        * visible here or !work_is_canceling() is visible there.
-        */
-       set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, 0);
-       if (waitqueue_active(&wq_cancel_waitq))
-               __wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);
+       if (!(cflags & WORK_CANCEL_DISABLE))
+               enable_work(work);
  
        return ret;
  }
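
The BH handling above relaxes the context rules for the sync cancel/disable paths. A hypothetical sketch of canceling a BH work item from an atomic (non-hardirq) context; system_bh_wq is the BH workqueue already used elsewhere in this file:

#include <linux/workqueue.h>

static void my_bh_workfn(struct work_struct *work)
{
        /* runs in BH context when queued on system_bh_wq */
}

static DECLARE_WORK(my_bh_work, my_bh_workfn);

static void my_bh_kick(void)
{
        queue_work(system_bh_wq, &my_bh_work);
}

/* May be called from softirq context; must not be called from hardirq. */
static void my_bh_teardown(void)
{
        /*
         * Because my_bh_work was last queued on a BH workqueue, the sync
         * cancel busy-waits for it instead of sleeping.
         */
        cancel_work_sync(&my_bh_work);
}
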
@@@ -4342,19 -4336,19 +4349,19 @@@ EXPORT_SYMBOL(cancel_work)
   * cancel_work_sync - cancel a work and wait for it to finish
   * @work: the work to cancel
   *
-  * Cancel @work and wait for its execution to finish.  This function
-  * can be used even if the work re-queues itself or migrates to
-  * another workqueue.  On return from this function, @work is
-  * guaranteed to be not pending or executing on any CPU.
+  * Cancel @work and wait for its execution to finish. This function can be used
+  * even if the work re-queues itself or migrates to another workqueue. On return
+  * from this function, @work is guaranteed to be not pending or executing on any
+  * CPU as long as there aren't racing enqueues.
   *
-  * cancel_work_sync(&delayed_work->work) must not be used for
-  * delayed_work's.  Use cancel_delayed_work_sync() instead.
+  * cancel_work_sync(&delayed_work->work) must not be used for delayed_work's.
+  * Use cancel_delayed_work_sync() instead.
   *
-  * The caller must ensure that the workqueue on which @work was last
-  * queued can't be destroyed before this function returns.
+  * Must be called from a sleepable context if @work was last queued on a non-BH
+  * workqueue. Can also be called from non-hardirq atomic contexts including BH
+  * if @work was last queued on a BH workqueue.
   *
-  * Return:
-  * %true if @work was pending, %false otherwise.
+  * Returns %true if @work was pending, %false otherwise.
   */
  bool cancel_work_sync(struct work_struct *work)
  {
@@@ -4399,6 -4393,108 +4406,108 @@@ bool cancel_delayed_work_sync(struct de
  }
  EXPORT_SYMBOL(cancel_delayed_work_sync);
  
+ /**
+  * disable_work - Disable and cancel a work item
+  * @work: work item to disable
+  *
+  * Disable @work by incrementing its disable count and cancel it if currently
+  * pending. As long as the disable count is non-zero, any attempt to queue @work
+  * will fail and return %false. The maximum supported disable depth is 2 to the
+  * power of %WORK_OFFQ_DISABLE_BITS, currently 65536.
+  *
+  * Can be called from any context. Returns %true if @work was pending, %false
+  * otherwise.
+  */
+ bool disable_work(struct work_struct *work)
+ {
+       return __cancel_work(work, WORK_CANCEL_DISABLE);
+ }
+ EXPORT_SYMBOL_GPL(disable_work);
+ 
+ /**
+  * disable_work_sync - Disable, cancel and drain a work item
+  * @work: work item to disable
+  *
+  * Similar to disable_work() but also wait for @work to finish if currently
+  * executing.
+  *
+  * Must be called from a sleepable context if @work was last queued on a non-BH
+  * workqueue. Can also be called from non-hardirq atomic contexts including BH
+  * if @work was last queued on a BH workqueue.
+  *
+  * Returns %true if @work was pending, %false otherwise.
+  */
+ bool disable_work_sync(struct work_struct *work)
+ {
+       return __cancel_work_sync(work, WORK_CANCEL_DISABLE);
+ }
+ EXPORT_SYMBOL_GPL(disable_work_sync);
+ 
+ /**
+  * enable_work - Enable a work item
+  * @work: work item to enable
+  *
+  * Undo disable_work[_sync]() by decrementing @work's disable count. @work can
+  * only be queued if its disable count is 0.
+  *
+  * Can be called from any context. Returns %true if the disable count reached 0.
+  * Otherwise, %false.
+  */
+ bool enable_work(struct work_struct *work)
+ {
+       struct work_offq_data offqd;
+       unsigned long irq_flags;
+       work_grab_pending(work, 0, &irq_flags);
+       work_offqd_unpack(&offqd, *work_data_bits(work));
+       work_offqd_enable(&offqd);
+       set_work_pool_and_clear_pending(work, offqd.pool_id,
+                                       work_offqd_pack_flags(&offqd));
+       local_irq_restore(irq_flags);
+       return !offqd.disable;
+ }
+ EXPORT_SYMBOL_GPL(enable_work);
+ 
+ /**
+  * disable_delayed_work - Disable and cancel a delayed work item
+  * @dwork: delayed work item to disable
+  *
+  * disable_work() for delayed work items.
+  */
+ bool disable_delayed_work(struct delayed_work *dwork)
+ {
+       return __cancel_work(&dwork->work,
+                            WORK_CANCEL_DELAYED | WORK_CANCEL_DISABLE);
+ }
+ EXPORT_SYMBOL_GPL(disable_delayed_work);
+ 
+ /**
+  * disable_delayed_work_sync - Disable, cancel and drain a delayed work item
+  * @dwork: delayed work item to disable
+  *
+  * disable_work_sync() for delayed work items.
+  */
+ bool disable_delayed_work_sync(struct delayed_work *dwork)
+ {
+       return __cancel_work_sync(&dwork->work,
+                                 WORK_CANCEL_DELAYED | WORK_CANCEL_DISABLE);
+ }
+ EXPORT_SYMBOL_GPL(disable_delayed_work_sync);
+ 
+ /**
+  * enable_delayed_work - Enable a delayed work item
+  * @dwork: delayed work item to enable
+  *
+  * enable_work() for delayed work items.
+  */
+ bool enable_delayed_work(struct delayed_work *dwork)
+ {
+       return enable_work(&dwork->work);
+ }
+ EXPORT_SYMBOL_GPL(enable_delayed_work);
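
Putting the new API together, a hypothetical driver-style sketch of the disable/enable pattern (none of these names come from the patch):

#include <linux/workqueue.h>

struct my_dev {
        struct work_struct event_work;
};

static void my_event_workfn(struct work_struct *work)
{
        /* ... process deferred events ... */
}

/* Shut the work out across a reconfiguration window, then allow it again. */
static void my_dev_reconfigure(struct my_dev *md)
{
        /*
         * Increment the disable count, cancel a pending instance and wait
         * for a running one. Until enable_work(), queue_work() on
         * event_work fails and returns false.
         */
        disable_work_sync(&md->event_work);

        /* ... update state shared with my_event_workfn() ... */

        enable_work(&md->event_work);
}
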
  /**
   * schedule_on_each_cpu - execute a function synchronously on each online CPU
   * @func: the function to call
@@@ -4530,6 -4626,8 +4639,8 @@@ static void wqattrs_clear_for_pool(stru
  {
        attrs->affn_scope = WQ_AFFN_NR_TYPES;
        attrs->ordered = false;
+       if (attrs->affn_strict)
+               cpumask_copy(attrs->cpumask, cpu_possible_mask);
  }
  
  /* hash value of the content of @attr */
@@@ -4538,11 -4636,12 +4649,12 @@@ static u32 wqattrs_hash(const struct wo
        u32 hash = 0;
  
        hash = jhash_1word(attrs->nice, hash);
-       hash = jhash(cpumask_bits(attrs->cpumask),
-                    BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
+       hash = jhash_1word(attrs->affn_strict, hash);
        hash = jhash(cpumask_bits(attrs->__pod_cpumask),
                     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
-       hash = jhash_1word(attrs->affn_strict, hash);
+       if (!attrs->affn_strict)
+               hash = jhash(cpumask_bits(attrs->cpumask),
+                            BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
        return hash;
  }
  
@@@ -4552,11 -4651,11 +4664,11 @@@ static bool wqattrs_equal(const struct 
  {
        if (a->nice != b->nice)
                return false;
-       if (!cpumask_equal(a->cpumask, b->cpumask))
+       if (a->affn_strict != b->affn_strict)
                return false;
        if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
                return false;
-       if (a->affn_strict != b->affn_strict)
+       if (!a->affn_strict && !cpumask_equal(a->cpumask, b->cpumask))
                return false;
        return true;
  }
@@@ -7148,25 -7247,27 +7260,27 @@@ static ssize_t __wq_cpumask_show(struc
        return written;
  }
  
- static ssize_t wq_unbound_cpumask_show(struct device *dev,
+ static ssize_t cpumask_requested_show(struct device *dev,
                struct device_attribute *attr, char *buf)
  {
-       return __wq_cpumask_show(dev, attr, buf, wq_unbound_cpumask);
+       return __wq_cpumask_show(dev, attr, buf, wq_requested_unbound_cpumask);
  }
+ static DEVICE_ATTR_RO(cpumask_requested);
  
- static ssize_t wq_requested_cpumask_show(struct device *dev,
+ static ssize_t cpumask_isolated_show(struct device *dev,
                struct device_attribute *attr, char *buf)
  {
-       return __wq_cpumask_show(dev, attr, buf, wq_requested_unbound_cpumask);
+       return __wq_cpumask_show(dev, attr, buf, wq_isolated_cpumask);
  }
+ static DEVICE_ATTR_RO(cpumask_isolated);
  
- static ssize_t wq_isolated_cpumask_show(struct device *dev,
+ static ssize_t cpumask_show(struct device *dev,
                struct device_attribute *attr, char *buf)
  {
-       return __wq_cpumask_show(dev, attr, buf, wq_isolated_cpumask);
+       return __wq_cpumask_show(dev, attr, buf, wq_unbound_cpumask);
  }
  
- static ssize_t wq_unbound_cpumask_store(struct device *dev,
+ static ssize_t cpumask_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
  {
        cpumask_var_t cpumask;
        free_cpumask_var(cpumask);
        return ret ? ret : count;
  }
+ static DEVICE_ATTR_RW(cpumask);
  
- static struct device_attribute wq_sysfs_cpumask_attrs[] = {
-       __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
-              wq_unbound_cpumask_store),
-       __ATTR(cpumask_requested, 0444, wq_requested_cpumask_show, NULL),
-       __ATTR(cpumask_isolated, 0444, wq_isolated_cpumask_show, NULL),
-       __ATTR_NULL,
+ static struct attribute *wq_sysfs_cpumask_attrs[] = {
+       &dev_attr_cpumask.attr,
+       &dev_attr_cpumask_requested.attr,
+       &dev_attr_cpumask_isolated.attr,
+       NULL,
  };
+ ATTRIBUTE_GROUPS(wq_sysfs_cpumask);
  
  static int __init wq_sysfs_init(void)
  {
-       struct device *dev_root;
-       int err;
-       err = subsys_virtual_register(&wq_subsys, NULL);
-       if (err)
-               return err;
-       dev_root = bus_get_dev_root(&wq_subsys);
-       if (dev_root) {
-               struct device_attribute *attr;
-               for (attr = wq_sysfs_cpumask_attrs; attr->attr.name; attr++) {
-                       err = device_create_file(dev_root, attr);
-                       if (err)
-                               break;
-               }
-               put_device(dev_root);
-       }
-       return err;
+       return subsys_virtual_register(&wq_subsys, wq_sysfs_cpumask_groups);
  }
  core_initcall(wq_sysfs_init);
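
The sysfs conversion above is the standard DEVICE_ATTR_*()/ATTRIBUTE_GROUPS() idiom, letting subsys_virtual_register() create the files instead of open-coding device_create_file() calls. A generic sketch with hypothetical names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);

static struct attribute *demo_attrs[] = {
        &dev_attr_example.attr,
        NULL,
};
ATTRIBUTE_GROUPS(demo);

/* demo_groups can now be handed to registration helpers that accept attribute groups. */
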
  