/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask_types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum work_bits {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT,	/* work item is inactive */
	WORK_STRUCT_PWQ_BIT,		/* data points to pwq */
	WORK_STRUCT_LINKED_BIT,		/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT,		/* static initializer (debugobjects) */
#endif
	WORK_STRUCT_FLAG_BITS,

	/* color for workqueue flushing */
	WORK_STRUCT_COLOR_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_STRUCT_COLOR_BITS	= 4,

	/*
	 * When WORK_STRUCT_PWQ is set, reserve 8 bits off of pwq pointer w/
	 * debugobjects turned off. This makes pwqs aligned to 256 bytes (512
	 * bytes w/ DEBUG_OBJECTS_WORK) and allows 16 workqueue flush colors.
	 *
	 * MSB
	 * [ pwq pointer ] [ flush color ] [ STRUCT flags ]
	 *                     4 bits        4 or 5 bits
	 */
	WORK_STRUCT_PWQ_SHIFT	= WORK_STRUCT_COLOR_SHIFT + WORK_STRUCT_COLOR_BITS,

	/*
	 * data contains off-queue information when !WORK_STRUCT_PWQ.
	 *
	 * MSB
	 * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
	 *                  16 bits          1 bit        4 or 5 bits
	 */
	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
	WORK_OFFQ_BH_BIT	= WORK_OFFQ_FLAG_SHIFT,
	WORK_OFFQ_FLAG_END,
	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,

	WORK_OFFQ_DISABLE_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_DISABLE_BITS	= 16,

	/*
	 * When a work item is off queue, the high bits encode off-queue flags
	 * and the last pool it was on. Cap pool ID to 31 bits and use the
	 * highest number to indicate that no pool is associated.
	 */
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
};
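
/*
 * Worked example of the layout above (illustrative arithmetic only, assuming
 * a 64-bit kernel with CONFIG_DEBUG_OBJECTS_WORK disabled):
 * WORK_STRUCT_FLAG_BITS is 4, so the flush color occupies bits 4-7 and
 * WORK_STRUCT_PWQ_SHIFT is 8, which is why pool_workqueues end up aligned to
 * 1 << 8 == 256 bytes. The off-queue word then decomposes as:
 *
 *	OFFQ flags    : bit  4      (WORK_OFFQ_BH)
 *	disable depth : bits 5-20   (16 bits)
 *	pool ID       : bits 21-51  (capped at 31 bits, all-ones == no pool)
 */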

enum work_flags {
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_INACTIVE	= 1 << WORK_STRUCT_INACTIVE_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
};

enum wq_misc_consts {
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS),

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 32,
};

/* Convenience constants - of type 'unsigned long', not 'enum'! */
#define WORK_OFFQ_BH		(1ul << WORK_OFFQ_BH_BIT)
#define WORK_OFFQ_FLAG_MASK	(((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
#define WORK_OFFQ_DISABLE_MASK	(((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
#define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
#define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
#define WORK_STRUCT_PWQ_MASK	(~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

enum wq_affn_scope {
	WQ_AFFN_DFL,		/* use system default */
	WQ_AFFN_CPU,		/* one pod per CPU */
	WQ_AFFN_SMT,		/* one pod per SMT */
	WQ_AFFN_CACHE,		/* one pod per LLC */
	WQ_AFFN_NUMA,		/* one pod per NUMA node */
	WQ_AFFN_SYSTEM,		/* one pod across the whole system */

	WQ_AFFN_NR_TYPES,
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 *
	 * Work items in this workqueue are affine to these CPUs and not allowed
	 * to execute on other CPUs. A pool serving a workqueue must have the
	 * same @cpumask.
	 */
	cpumask_var_t cpumask;

	/**
	 * @__pod_cpumask: internal attribute used to create per-pod pools
	 *
	 * Internal use only.
	 *
	 * Per-pod unbound worker pools are used to improve locality. Always a
	 * subset of ->cpumask. A workqueue can be associated with multiple
	 * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
	 * of a pool's @__pod_cpumask is strict depends on @affn_strict.
	 */
	cpumask_var_t __pod_cpumask;

	/**
	 * @affn_strict: affinity scope is strict
	 *
	 * If clear, workqueue will make a best-effort attempt at starting the
	 * worker inside @__pod_cpumask but the scheduler is free to migrate it
	 * outside.
	 *
	 * If set, workers are only allowed to run inside @__pod_cpumask.
	 */
	bool affn_strict;

	/*
	 * The fields below aren't properties of a worker_pool. They only modify
	 * how :c:func:`apply_workqueue_attrs` selects pools and thus don't
	 * participate in pool hash calculations or equality comparisons.
	 *
	 * If @affn_strict is set, @cpumask isn't a property of a worker_pool
	 * either.
	 */

	/**
	 * @affn_scope: unbound CPU affinity scope
	 *
	 * CPU pods are used to improve execution locality of unbound work
	 * items. There are multiple pod types, one for each wq_affn_scope, and
	 * every CPU in the system belongs to one pod in every pod type. CPUs
	 * that belong to the same pod share the worker pool. For example,
	 * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
	 * pool for each NUMA node.
	 */
	enum wq_affn_scope affn_scope;

	/**
	 * @ordered: work items must be executed one by one in queueing order
	 */
	bool ordered;
};

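/*
 * Minimal usage sketch for the attributes API above (illustrative only; the
 * unbound workqueue "stats_wq", the nice value and the CPU choice are
 * hypothetical):
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *	int ret;
 *
 *	if (!attrs)
 *		return -ENOMEM;
 *	attrs->nice = -5;
 *	cpumask_copy(attrs->cpumask, cpumask_of(2));
 *	ret = apply_workqueue_attrs(stats_wq, attrs);
 *	free_workqueue_attrs(attrs);
 */
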
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}

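/*
 * Typical use of the helpers above inside a work handler (sketch; struct
 * my_device, its poll_work member and my_dev_poll() are hypothetical names):
 *
 *	static void my_dev_poll(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_device *dev = container_of(dwork, struct my_device,
 *						     poll_work);
 *
 *		// recover the owning object, then operate on dev
 *	}
 */
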
struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

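/*
 * Example of a statically declared work item using the macros above (sketch;
 * flush_logs() and log_flush_work are hypothetical names):
 *
 *	static void flush_logs(struct work_struct *work);
 *	static DECLARE_WORK(log_flush_work, flush_logs);
 *
 *	// later, from interrupt or process context:
 *	schedule_work(&log_flush_work);
 */
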
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK_KEY(_work, _func, _onstack, _key)			\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static __maybe_unused struct lock_class_key __key;	\
									\
		__INIT_WORK_KEY(_work, _func, _onstack, &__key);	\
	} while (0)

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define INIT_WORK_ONSTACK_KEY(_work, _func, _key)			\
	__INIT_WORK_KEY((_work), (_func), 1, _key)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

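/*
 * Typical dynamic initialization pattern for the INIT_WORK() family (sketch;
 * struct my_ctx, its reset_work member, my_ctx_reset() and my_ctx_probe()
 * are hypothetical):
 *
 *	struct my_ctx {
 *		struct work_struct reset_work;
 *	};
 *
 *	static void my_ctx_reset(struct work_struct *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx,
 *						  reset_work);
 *		// reset the device owned by ctx
 *	}
 *
 *	// in my_ctx_probe():
 *	INIT_WORK(&ctx->reset_work, my_ctx_reset);
 */
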
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants. For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum wq_flags {
	WQ_BH			= 1 << 0, /* execute in bottom half (softirq) context */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality. Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param is
	 * specified. Per-cpu workqueues which are known to contribute
	 * significantly to power consumption are marked with this flag;
	 * enabling the power_efficient mode leads to noticeable power saving
	 * at the cost of a small performance penalty.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DESTROYING		= 1 << 15, /* internal: workqueue is destroying */
	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */

	/* BH wq only allows the following flags */
	__WQ_BH_ALLOWS		= WQ_BH | WQ_HIGHPRI,
};

enum wq_consts {
	WQ_MAX_ACTIVE		= 2048,	  /* I like 2048, better ideas? */
	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,

	/*
	 * Per-node default cap on min_active. Unless explicitly set, min_active
	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
	 * workqueue_struct->min_active definition.
	 */
	WQ_DFL_MIN_ACTIVE	= 8,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded. There are users which expect relatively
 * short queue flush time. Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works. Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue. Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
 *
 * system_bh[_highpri]_wq are convenience interfaces to softirq. BH work items
 * are executed in the queueing CPU's BH context in the queueing order.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
extern struct workqueue_struct *system_bh_wq;
extern struct workqueue_struct *system_bh_highpri_wq;

void workqueue_softirq_action(bool highpri);
void workqueue_softirq_dead(unsigned int cpu);

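/*
 * BH work sketch using system_bh_wq declared above (illustrative only;
 * my_napi_kick() and the embedding device structure are hypothetical). The
 * handler runs in the queueing CPU's softirq context, so it must not sleep:
 *
 *	INIT_WORK(&dev->kick_work, my_napi_kick);
 *	queue_work(system_bh_wq, &dev->kick_work);
 */
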
/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @...: args for @fmt
 *
 * For a per-cpu workqueue, @max_active limits the number of in-flight work
 * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
 * executing at most one work item for the workqueue.
 *
 * For unbound workqueues, @max_active limits the number of in-flight work items
 * for the whole system. e.g. @max_active of 16 indicates that there can be
 * at most 16 work items executing for the workqueue in the whole system.
 *
 * As sharing the same active counter for an unbound workqueue across multiple
 * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
 * according to the proportion of the number of online CPUs and enforced
 * independently.
 *
 * Depending on online CPU distribution, a node may end up with per-node
 * max_active which is significantly lower than @max_active, which can lead to
 * deadlocks if the per-node concurrency limit is lower than the maximum number
 * of interdependent work items for the workqueue.
 *
 * To guarantee forward progress regardless of online CPU distribution, the
 * concurrency limit on every node is guaranteed to be equal to or greater than
 * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
 * that the sum of per-node max_active's may be larger than @max_active.
 *
 * For detailed information on %WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);

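/*
 * Typical allocation sketch (illustrative only; the name, flag combination
 * and default max_active of 0 are hypothetical choices, not recommendations):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_driver_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	// ... queue work on wq ...
 *	destroy_workqueue(wq);
 */
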
#ifdef CONFIG_LOCKDEP
/**
 * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @lockdep_map: user-defined lockdep_map
 * @...: args for @fmt
 *
 * Same as alloc_workqueue() but with a user-defined lockdep_map. Useful for
 * workqueues created with the same purpose and to avoid leaking a lockdep_map
 * on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
__printf(1, 5) struct workqueue_struct *
alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
			    struct lockdep_map *lockdep_map, ...);

/**
 * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
 * user-defined lockdep_map
 *
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @lockdep_map: user-defined lockdep_map
 * @args: args for @fmt
 *
 * Same as alloc_ordered_workqueue() but with a user-defined lockdep_map.
 * Useful for workqueues created with the same purpose and to avoid leaking a
 * lockdep_map on each workqueue creation.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
	alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),	\
				    1, lockdep_map, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue. An ordered workqueue executes at
 * most one work item at any given time in the queued order. They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

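/*
 * Ordered workqueue sketch (illustrative; "cmd_wq" and the two command work
 * items are hypothetical, for cases where items must not run concurrently or
 * out of order):
 *
 *	cmd_wq = alloc_ordered_workqueue("cmd_wq", WQ_MEM_RECLAIM);
 *	if (!cmd_wq)
 *		return -ENOMEM;
 *	queue_work(cmd_wq, &first_cmd->work);
 *	queue_work(cmd_wq, &second_cmd->work);	// runs only after the first
 */
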
#define from_work(var, callback_work, work_fieldname)	\
	container_of(callback_work, typeof(*var), work_fieldname)

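/*
 * from_work() usage sketch in a handler (illustrative; struct my_ctx and the
 * reset_work field name are hypothetical). It is shorthand for the
 * container_of() pattern shown earlier:
 *
 *	static void my_ctx_handler(struct work_struct *work)
 *	{
 *		struct my_ctx *ctx = from_work(ctx, work, reset_work);
 *
 *		// operate on ctx
 *	}
 */
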
extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			  struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				  struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
				struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);

extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool disable_work(struct work_struct *work);
extern bool disable_work_sync(struct work_struct *work);
extern bool enable_work(struct work_struct *work);

extern bool disable_delayed_work(struct delayed_work *dwork);
extern bool disable_delayed_work_sync(struct delayed_work *dwork);
extern bool enable_delayed_work(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);

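/*
 * Common teardown ordering sketch for the cancel/disable helpers above
 * (illustrative; my_dev_remove(), the work members and dev->wq are
 * hypothetical):
 *
 *	static void my_dev_remove(struct my_device *dev)
 *	{
 *		// stop new executions and wait for a running one to finish
 *		disable_work_sync(&dev->reset_work);
 *		cancel_delayed_work_sync(&dev->poll_work);
 *		destroy_workqueue(dev->wq);
 *	}
 */
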
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern void workqueue_set_min_active(struct workqueue_struct *wq,
				     int min_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_all_workqueues(void);
extern void show_freezable_workqueues(void);
extern void show_one_workqueue(struct workqueue_struct *wq);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties: If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

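/*
 * Debounce sketch for mod_delayed_work() (illustrative; the debounce_work
 * member and the 100ms window are hypothetical). Each call pushes execution
 * back, so the handler runs once, 100ms after the last burst of events:
 *
 *	mod_delayed_work(system_wq, &dev->debounce_work, msecs_to_jiffies(100));
 */
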
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties of queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * enable_and_queue_work - Enable and queue a work item on a specific workqueue
 * @wq: The target workqueue
 * @work: The work item to be enabled and queued
 *
 * This function combines the operations of enable_work() and queue_work(),
 * providing a convenient way to enable and queue a work item in a single call.
 * It invokes enable_work() on @work and then queues it if the disable depth
 * reached 0. Returns %true if the disable depth reached 0 and @work is queued,
 * and %false otherwise.
 *
 * Note that @work is always queued when disable depth reaches zero. If the
 * desired behavior is queueing only if certain events took place while @work is
 * disabled, the user should implement the necessary state tracking and perform
 * explicit conditional queueing after enable_work().
 */
static inline bool enable_and_queue_work(struct workqueue_struct *wq,
					 struct work_struct *work)
{
	if (enable_work(work)) {
		queue_work(wq, work);
		return true;
	}
	return false;
}

/*
 * Detect attempts to flush system-wide workqueues at compile time when possible.
 * Warn on attempts to flush system-wide workqueues at runtime.
 *
 * See https://lkml.kernel.org/r/[email protected]
 * for reasons and steps for converting system-wide workqueues into local workqueues.
 */
extern void __warn_flushing_systemwide_wq(void)
	__compiletime_warning("Please avoid flushing system-wide workqueues.");

/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work()						\
({									\
	__warn_flushing_systemwide_wq();				\
	__flush_workqueue(system_wq);					\
})

#define flush_workqueue(wq)						\
({									\
	struct workqueue_struct *_wq = (wq);				\
									\
	if ((__builtin_constant_p(_wq == system_wq) &&			\
	     _wq == system_wq) ||					\
	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
	     _wq == system_highpri_wq) ||				\
	    (__builtin_constant_p(_wq == system_long_wq) &&		\
	     _wq == system_long_wq) ||					\
	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
	     _wq == system_unbound_wq) ||				\
	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
	     _wq == system_freezable_wq) ||				\
	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
	     _wq == system_power_efficient_wq) ||			\
	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
	     _wq == system_freezable_power_efficient_wq))		\
		__warn_flushing_systemwide_wq();			\
	__flush_workqueue(_wq);						\
})

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

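/*
 * Periodic-work sketch built on schedule_delayed_work() (illustrative; the
 * poll interval and my_poll() are hypothetical). The handler re-arms itself,
 * which is the usual way to get a repeating work item:
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		// sample hardware state here, then re-arm
 *		schedule_delayed_work(dwork, HZ);
 *	}
 */
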
#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu_key(int cpu, long (*fn)(void *),
		     void *arg, struct lock_class_key *key);
/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu(_cpu, _fn, _arg)			\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_key(_cpu, _fn, _arg, &__key);	\
})

long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
			  void *arg, struct lock_class_key *key);

/*
 * A new key is defined for each caller to make sure the work
 * associated with the function doesn't share its locking class.
 */
#define work_on_cpu_safe(_cpu, _fn, _arg)		\
({							\
	static struct lock_class_key __key;		\
							\
	work_on_cpu_safe_key(_cpu, _fn, _arg, &__key);	\
})
#endif /* CONFIG_SMP */
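
/*
 * work_on_cpu() usage sketch (illustrative; read_counter_on_cpu(), the CPU
 * number and the request structure are hypothetical). The callback runs
 * synchronously in a workqueue worker bound to @cpu and its return value is
 * handed back to the caller:
 *
 *	static long read_counter_on_cpu(void *arg)
 *	{
 *		struct my_request *req = arg;
 *
 *		return do_cpu_local_read(req);
 *	}
 *
 *	long ret = work_on_cpu(2, read_counter_on_cpu, &req);
 */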

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else /* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else /* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif /* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);
void __init workqueue_init_topology(void);

#endif