// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2019 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_sysctl.h"
#include "xfs_pwork.h"
#include <linux/nmi.h>

/*
 * Parallel Work Queue
 * ===================
 *
 * Abstract away the details of running a large and "obviously" parallelizable
 * task across multiple CPUs.  Callers initialize the pwork control object with
 * a work function and a name tag.  Next, they embed struct xfs_pwork in
 * whatever structure they use to pass work context to a worker thread and
 * queue that pwork.  The work function will be passed the pwork item when it
 * is run (from process context) and any returned error will be recorded in
 * xfs_pwork_ctl.error.  Work functions should check for errors and abort if
 * necessary; the non-zeroness of xfs_pwork_ctl.error does not stop workqueue
 * item processing.
 *
 * This is the rough equivalent of the xfsprogs workqueue code, though we can't
 * reuse that name here.
 */

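/*
 * Illustrative usage sketch: a hypothetical caller might drive this API
 * roughly as below.  Because an error only gets recorded and does not stop
 * queued items from running, the work function checks for an abort before
 * doing real work.  The xchk_example_* structure, work function, driver, and
 * xchk_example_scan_ag() helper are invented for illustration only;
 * xfs_pwork_want_abort() is the abort-check helper declared in xfs_pwork.h.
 *
 *	struct xchk_example_work {
 *		struct xfs_pwork	pwork;
 *		xfs_agnumber_t		agno;
 *	};
 *
 *	static int
 *	xchk_example_work_fn(
 *		struct xfs_mount	*mp,
 *		struct xfs_pwork	*pwork)
 *	{
 *		struct xchk_example_work *ew;
 *		int			error = 0;
 *
 *		ew = container_of(pwork, struct xchk_example_work, pwork);
 *		if (!xfs_pwork_want_abort(pwork))
 *			error = xchk_example_scan_ag(mp, ew->agno);
 *		kfree(ew);
 *		return error;
 *	}
 *
 *	static int
 *	xchk_example_scan_all_ags(
 *		struct xfs_mount	*mp)
 *	{
 *		struct xfs_pwork_ctl	pctl;
 *		xfs_agnumber_t		agno;
 *		int			error, err2;
 *
 *		error = xfs_pwork_init(mp, &pctl, xchk_example_work_fn,
 *				"example_scan");
 *		if (error)
 *			return error;
 *		for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 *			struct xchk_example_work *ew;
 *
 *			ew = kzalloc(sizeof(*ew), GFP_KERNEL);
 *			if (!ew) {
 *				error = -ENOMEM;
 *				break;
 *			}
 *			ew->agno = agno;
 *			xfs_pwork_queue(&pctl, &ew->pwork);
 *		}
 *		err2 = xfs_pwork_destroy(&pctl);
 *		return error ? error : err2;
 *	}
 */
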
/* Invoke our caller's function. */
static void
xfs_pwork_work(
	struct work_struct	*work)
{
	struct xfs_pwork	*pwork;
	struct xfs_pwork_ctl	*pctl;
	int			error;

	pwork = container_of(work, struct xfs_pwork, work);
	pctl = pwork->pctl;
	error = pctl->work_fn(pctl->mp, pwork);
	if (error && !pctl->error)
		pctl->error = error;
	if (atomic_dec_and_test(&pctl->nr_work))
		wake_up(&pctl->poll_wait);
}

/*
 * Set up control data for parallel work.  @work_fn is the function that will
 * be called.  @tag will be written into the kernel threads.  The level of
 * parallelism is left to the workqueue defaults; in DEBUG builds the
 * xfs_globals.pwork_threads knob can override it.
 */
int
xfs_pwork_init(
	struct xfs_mount	*mp,
	struct xfs_pwork_ctl	*pctl,
	xfs_pwork_work_fn	work_fn,
	const char		*tag)
{
	unsigned int		nr_threads = 0;

#ifdef DEBUG
	if (xfs_globals.pwork_threads >= 0)
		nr_threads = xfs_globals.pwork_threads;
#endif
	trace_xfs_pwork_init(mp, nr_threads, current->pid);

	pctl->wq = alloc_workqueue("%s-%d",
			WQ_UNBOUND | WQ_SYSFS | WQ_FREEZABLE, nr_threads, tag,
			current->pid);
	if (!pctl->wq)
		return -ENOMEM;
	pctl->work_fn = work_fn;
	pctl->error = 0;
	pctl->mp = mp;
	atomic_set(&pctl->nr_work, 0);
	init_waitqueue_head(&pctl->poll_wait);

	return 0;
}

/* Queue some parallel work. */
void
xfs_pwork_queue(
	struct xfs_pwork_ctl	*pctl,
	struct xfs_pwork	*pwork)
{
	INIT_WORK(&pwork->work, xfs_pwork_work);
	pwork->pctl = pctl;
	atomic_inc(&pctl->nr_work);
	queue_work(pctl->wq, &pwork->work);
}

/* Wait for the work to finish and tear down the control structure. */
int
xfs_pwork_destroy(
	struct xfs_pwork_ctl	*pctl)
{
	destroy_workqueue(pctl->wq);
	pctl->wq = NULL;
	return pctl->error;
}

/*
 * Wait for the work to finish by polling completion status and touch the soft
 * lockup watchdog.  This is for callers such as mount which hold locks.
 */
void
xfs_pwork_poll(
	struct xfs_pwork_ctl	*pctl)
{
	while (wait_event_timeout(pctl->poll_wait,
				atomic_read(&pctl->nr_work) == 0, HZ) == 0)
		touch_softlockup_watchdog();
}
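
/*
 * Illustrative sketch of the polling variant (hypothetical caller): a
 * lock-holding caller such as the mount-time case mentioned above would poll
 * for completion before tearing down the control structure, so the soft
 * lockup watchdog keeps being touched while it waits:
 *
 *	xfs_pwork_poll(&pctl);
 *	error = xfs_pwork_destroy(&pctl);
 */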