// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

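/*
 * One underlying kernel workqueue plus the state btrfs layers on top of it:
 * an ordered-completion list and an adaptive concurrency threshold.
 */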
struct __btrfs_workqueue {
        struct workqueue_struct *normal_wq;

        /* File system this workqueue services */
        struct btrfs_fs_info *fs_info;

        /* List head pointing to ordered work list */
        struct list_head ordered_list;

        /* Spinlock for ordered_list */
        spinlock_t list_lock;

        /* Thresholding related variables */
        atomic_t pending;

        /* Upper limit of concurrent workers */
        int limit_active;

        /* Current number of concurrent workers */
        int current_active;

        /* Threshold to change current_active */
        int thresh;
        unsigned int count;
        spinlock_t thres_lock;
};

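/*
 * The exported workqueue type: a normal-priority queue plus an optional
 * high-priority twin that only exists when WQ_HIGHPRI was requested.
 */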
struct btrfs_workqueue {
        struct __btrfs_workqueue *normal;
        struct __btrfs_workqueue *high;
};

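/*
 * Stamp out a uniquely named entry point per workqueue type so that every
 * queue has a distinct work function address. The workqueue core matches
 * in-flight work items by both their address and their function; a single
 * shared helper could make a freed-and-recycled work item look like one
 * that is still executing (believed to be the hang these helpers avoid).
 */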
static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)                                         \
noinline_for_stack void btrfs_##name(struct work_struct *arg)           \
{                                                                       \
        struct btrfs_work *work = container_of(arg, struct btrfs_work, \
                                               normal_work);            \
        normal_work_helper(work);                                       \
}

struct btrfs_fs_info *
btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
        return wq->fs_info;
}

struct btrfs_fs_info *
btrfs_work_owner(const struct btrfs_work *work)
{
        return work->wq->fs_info;
}

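/*
 * Report congestion on the normal queue: true once the backlog grows past
 * twice the configured threshold.
 */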
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
        /*
         * We could compare wq->normal->pending with num_online_cpus()
         * to support the "thresh == NO_THRESHOLD" case, but that requires
         * moving up the atomic_inc/dec in thresh_queue/exec_hook. Let's
         * postpone it until someone needs support for that case.
         */
        if (wq->normal->thresh == NO_THRESHOLD)
                return false;

        return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);

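/*
 * Allocate one underlying queue. A thresh of 0 selects DFT_THRESHOLD; any
 * value below the default disables thresholding and runs with limit_active
 * workers from the start; otherwise concurrency begins at 1 and grows on
 * demand from thresh_exec_hook().
 */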
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
                        unsigned int flags, int limit_active, int thresh)
{
        struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

        if (!ret)
                return NULL;

        ret->fs_info = fs_info;
        ret->limit_active = limit_active;
        atomic_set(&ret->pending, 0);
        if (thresh == 0)
                thresh = DFT_THRESHOLD;
        /* For low thresholds, disabling thresholding altogether is better */
        if (thresh < DFT_THRESHOLD) {
                ret->current_active = limit_active;
                ret->thresh = NO_THRESHOLD;
        } else {
                /*
                 * For a thresholdable wq, let its concurrency grow on
                 * demand. Use a minimal max_active at alloc time to
                 * reduce resource usage.
                 */
                ret->current_active = 1;
                ret->thresh = thresh;
        }

        if (flags & WQ_HIGHPRI)
                ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
                                                 ret->current_active, name);
        else
                ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
                                                 ret->current_active, name);
        if (!ret->normal_wq) {
                kfree(ret);
                return NULL;
        }

        INIT_LIST_HEAD(&ret->ordered_list);
        spin_lock_init(&ret->list_lock);
        spin_lock_init(&ret->thres_lock);
        trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
        return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

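/*
 * Allocate the exported queue pair. The normal queue is always created
 * (with WQ_HIGHPRI masked off); the high-priority twin is created only
 * when the caller passed WQ_HIGHPRI.
 */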
struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
                                              const char *name,
                                              unsigned int flags,
                                              int limit_active,
                                              int thresh)
{
        struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

        if (!ret)
                return NULL;

        ret->normal = __btrfs_alloc_workqueue(fs_info, name,
                                              flags & ~WQ_HIGHPRI,
                                              limit_active, thresh);
        if (!ret->normal) {
                kfree(ret);
                return NULL;
        }

        if (flags & WQ_HIGHPRI) {
                ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
                                                    limit_active, thresh);
                if (!ret->high) {
                        __btrfs_destroy_workqueue(ret->normal);
                        kfree(ret);
                        return NULL;
                }
        }
        return ret;
}

/*
 * Hook for the threshold mechanism, called from btrfs_queue_work().
 * This hook WILL be called from IRQ handler context, so
 * workqueue_set_max_active() MUST NOT be called from it.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
        if (wq->thresh == NO_THRESHOLD)
                return;
        atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold mechanism, called before executing the work.
 * This hook runs in kthread context, so workqueue_set_max_active() may
 * be called from here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
        int new_current_active;
        long pending;
        int need_change = 0;

        if (wq->thresh == NO_THRESHOLD)
                return;

        atomic_dec(&wq->pending);
        spin_lock(&wq->thres_lock);
        /*
         * Use wq->count to limit the calling frequency of
         * workqueue_set_max_active().
         */
        wq->count++;
        wq->count %= (wq->thresh / 4);
        if (!wq->count)
                goto out;
        new_current_active = wq->current_active;

        /*
         * pending may change later, but that's OK since we don't need it
         * to be that accurate to calculate the new max_active.
         */
        pending = atomic_read(&wq->pending);
        if (pending > wq->thresh)
                new_current_active++;
        if (pending < wq->thresh / 2)
                new_current_active--;
        new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
        if (new_current_active != wq->current_active) {
                need_change = 1;
                wq->current_active = new_current_active;
        }
out:
        spin_unlock(&wq->thres_lock);

        if (need_change) {
                workqueue_set_max_active(wq->normal_wq, wq->current_active);
        }
}

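/*
 * Run ordered completions for every finished work item at the head of the
 * list. ordered_func()/ordered_free() are thus called in queueing order,
 * and an unfinished item at the head acts as a barrier for those behind it.
 */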
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
        struct list_head *list = &wq->ordered_list;
        struct btrfs_work *work;
        spinlock_t *lock = &wq->list_lock;
        unsigned long flags;

        while (1) {
                void *wtag;

                spin_lock_irqsave(lock, flags);
                if (list_empty(list))
                        break;
                work = list_entry(list->next, struct btrfs_work,
                                  ordered_list);
                if (!test_bit(WORK_DONE_BIT, &work->flags))
                        break;

                /*
                 * We are going to call the ordered done function, but
                 * we leave the work item on the list as a barrier so
                 * that later work items that are done don't have their
                 * functions called before this one returns.
                 */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;
                trace_btrfs_ordered_sched(work);
                spin_unlock_irqrestore(lock, flags);
                work->ordered_func(work);

                /* Now take the lock again and drop our item from the list */
                spin_lock_irqsave(lock, flags);
                list_del(&work->ordered_list);
                spin_unlock_irqrestore(lock, flags);

                /*
                 * We don't want to call the ordered free functions with
                 * the lock held though. Save the work as a tag for the
                 * trace event, because the callback could free the
                 * structure.
                 */
                wtag = work;
                work->ordered_free(work);
                trace_btrfs_all_work_done(wq->fs_info, wtag);
        }
        spin_unlock_irqrestore(lock, flags);
}

static void normal_work_helper(struct btrfs_work *work)
{
        struct __btrfs_workqueue *wq;
        void *wtag;
        int need_order = 0;

        /*
         * We should not touch things inside work in the following cases:
         * 1) after work->func() if it has no ordered_free
         *    Since the struct is freed in work->func().
         * 2) after setting WORK_DONE_BIT
         *    The work may be freed in other threads almost instantly.
         * So we save the needed things here.
         */
        if (work->ordered_func)
                need_order = 1;
        wq = work->wq;
        /* Safe for tracepoints in case work gets freed by the callback */
        wtag = work;

        trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
        work->func(work);
        if (need_order) {
                set_bit(WORK_DONE_BIT, &work->flags);
                run_ordered_work(wq);
        }
        if (!need_order)
                trace_btrfs_all_work_done(wq->fs_info, wtag);
}

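/*
 * Initialize a work item. uniq_func must be one of the BTRFS_WORK_HELPER()
 * entry points above (by convention, the one named for the queue the work
 * will be submitted to); func runs concurrently, while ordered_func and
 * ordered_free, when set, run in queueing order via run_ordered_work().
 */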
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
                     btrfs_func_t func,
                     btrfs_func_t ordered_func,
                     btrfs_func_t ordered_free)
{
        work->func = func;
        work->ordered_func = ordered_func;
        work->ordered_free = ordered_free;
        INIT_WORK(&work->normal_work, uniq_func);
        INIT_LIST_HEAD(&work->ordered_list);
        work->flags = 0;
}

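/*
 * Typical caller flow, as an illustrative sketch only (my_work, my_func,
 * my_ordered_func and my_ordered_free are hypothetical names, not part of
 * this file):
 *
 *      btrfs_init_work(&my_work, btrfs_worker_helper, my_func,
 *                      my_ordered_func, my_ordered_free);
 *      btrfs_queue_work(fs_info->workers, &my_work);
 *
 * my_func() may run concurrently with other items on the queue;
 * my_ordered_func() and my_ordered_free() run in queueing order.
 */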
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
                                      struct btrfs_work *work)
{
        unsigned long flags;

        work->wq = wq;
        thresh_queue_hook(wq);
        if (work->ordered_func) {
                spin_lock_irqsave(&wq->list_lock, flags);
                list_add_tail(&work->ordered_list, &wq->ordered_list);
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
        trace_btrfs_work_queued(work);
        queue_work(wq->normal_wq, &work->normal_work);
}

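/*
 * Queue a work item, routing it to the high-priority queue when the item
 * has WORK_HIGH_PRIO_BIT set and this workqueue actually has one.
 */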
void btrfs_queue_work(struct btrfs_workqueue *wq,
                      struct btrfs_work *work)
{
        struct __btrfs_workqueue *dest_wq;

        if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
                dest_wq = wq->high;
        else
                dest_wq = wq->normal;
        __btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
        destroy_workqueue(wq->normal_wq);
        trace_btrfs_workqueue_destroy(wq);
        kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
        if (!wq)
                return;
        if (wq->high)
                __btrfs_destroy_workqueue(wq->high);
        __btrfs_destroy_workqueue(wq->normal);
        kfree(wq);
}

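/*
 * Update the concurrency cap. Only limit_active changes here; for
 * thresholded queues the live max_active catches up the next time
 * thresh_exec_hook() re-clamps current_active.
 */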
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
        if (!wq)
                return;
        wq->normal->limit_active = limit_active;
        if (wq->high)
                wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
        set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}