#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>

/* Opaque worker-pool handle; the definition lives in io-wq.c. */
struct io_wq;

/*
 * Per-work flag bits kept in io_wq_work.flags.  The upper bits of the same
 * word double as the hash key (see IO_WQ_HASH_SHIFT).
 */
enum {
	IO_WQ_WORK_CANCEL	= 1,	/* work is targeted for cancellation */
	IO_WQ_WORK_HASHED	= 2,	/* serialize by hash key, see io_wq_hash_work() */
	IO_WQ_WORK_UNBOUND	= 4,	/* NOTE(review): presumably "no bounded-worker limit" — confirm in io-wq.c */
	/* bit 8 unused — NOTE(review): looks like a retired flag; confirm before reuse */
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

/* Outcome of a cancellation attempt (returned by io_wq_cancel_cb()). */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

/* Intrusive singly-linked node embedded in each work item (io_wq_work.list). */
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

/*
 * Singly-linked FIFO with a tail pointer for O(1) append.
 * 'first' is read locklessly via READ_ONCE() in wq_list_empty(), so writers
 * publish it with WRITE_ONCE() (see wq_list_add_tail/head and wq_list_cut).
 */
struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

/* Walk the list; 'prv' tracks the predecessor of 'pos' (NULL at the head). */
#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

/* Resume a walk from an existing 'pos'/'prv' pair. */
#define wq_list_for_each_resume(pos, prv) \
	for (; pos; prv = pos, pos = (pos)->next)

/* Lockless emptiness check; pairs with WRITE_ONCE() of 'first' by writers. */
#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)

/*
 * Only 'first' is reset; 'last' is deliberately left stale — the code only
 * consults 'last' after seeing a non-NULL 'first' (see wq_list_add_tail()).
 */
#define INIT_WQ_LIST(list) do {				\
	(list)->first = NULL;				\
} while (0)

86f3cd1b PB |
43 | static inline void wq_list_add_after(struct io_wq_work_node *node, |
44 | struct io_wq_work_node *pos, | |
45 | struct io_wq_work_list *list) | |
46 | { | |
47 | struct io_wq_work_node *next = pos->next; | |
48 | ||
49 | pos->next = node; | |
50 | node->next = next; | |
51 | if (!next) | |
52 | list->last = node; | |
53 | } | |
54 | ||
/*
 * Append 'node' to the tail of 'list'.
 * When the list was empty, 'last' is set up *before* 'first' is published
 * with WRITE_ONCE(), so a lockless wq_list_empty() reader that observes a
 * non-NULL 'first' sees a consistent tail pointer.
 */
static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

/*
 * Push 'node' onto the head of 'list'; it becomes the tail too when the
 * list was empty.  'first' is published with WRITE_ONCE() for lockless
 * wq_list_empty() readers.
 */
static inline void wq_list_add_head(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = list->first;
	if (!node->next)
		list->last = node;
	WRITE_ONCE(list->first, node);
}

/*
 * Detach the span running from the node after 'prev' (or the head when
 * prev == NULL) up to and including 'last'.  The detached span keeps its
 * internal links; only its terminating 'next' is cleared.
 */
static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	/* cut reaches the tail: the predecessor becomes the new tail */
	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}

/*
 * Splice the whole of 'list' in after node 'to', leaving 'list' empty.
 * Caller must guarantee 'list' is non-empty ('list->last' is dereferenced
 * unconditionally); wq_list_splice() below performs that check.
 */
static inline void __wq_list_splice(struct io_wq_work_list *list,
				    struct io_wq_work_node *to)
{
	/* old successor of 'to' is hooked onto the spliced tail first */
	list->last->next = to->next;
	to->next = list->first;
	INIT_WQ_LIST(list);
}

100 | static inline bool wq_list_splice(struct io_wq_work_list *list, | |
101 | struct io_wq_work_node *to) | |
102 | { | |
103 | if (!wq_list_empty(list)) { | |
104 | __wq_list_splice(list, to); | |
105 | return true; | |
106 | } | |
107 | return false; | |
108 | } | |
109 | ||
110 | static inline void wq_stack_add_head(struct io_wq_work_node *node, | |
111 | struct io_wq_work_node *stack) | |
112 | { | |
113 | node->next = stack->next; | |
114 | stack->next = node; | |
115 | } | |
116 | ||
/*
 * Remove a single 'node' from 'list'; 'prev' is its predecessor
 * (NULL when 'node' is the head).
 */
static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	/* deleting one node == cutting the one-element span [node, node] */
	wq_list_cut(list, node, prev);
}

0d9521b9 PB |
124 | static inline |
125 | struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack) | |
126 | { | |
127 | struct io_wq_work_node *node = stack->next; | |
6206f0e1 | 128 | |
0d9521b9 PB |
129 | stack->next = node->next; |
130 | return node; | |
131 | } | |
6206f0e1 | 132 | |
/*
 * A unit of work executed by io-wq.  Embedded inside a larger request
 * structure; the work item is recovered from its list node with
 * container_of() (see wq_next_work()).
 */
struct io_wq_work {
	struct io_wq_work_node list;	/* linkage on a work list/stack */
	unsigned flags;			/* IO_WQ_WORK_* bits; upper bits hold the hash key */
};

86f3cd1b PB |
138 | static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) |
139 | { | |
140 | if (!work->list.next) | |
141 | return NULL; | |
142 | ||
143 | return container_of(work->list.next, struct io_wq_work, list); | |
144 | } | |
145 | ||
/* Releases a finished work item; the return value looks like follow-up work
 * to process next — NOTE(review): confirm the exact contract in io-wq.c. */
typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
/* Executes one work item. */
typedef void (io_wq_work_fn)(struct io_wq_work *);

/*
 * Refcounted state shared by workers for hashed-work serialization.
 * 'map' is presumably one bit per in-flight hash bucket and 'wait' is woken
 * when a bucket frees up — NOTE(review): verify against io-wq.c usage.
 */
struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

155 | static inline void io_wq_put_hash(struct io_wq_hash *hash) | |
156 | { | |
157 | if (refcount_dec_and_test(&hash->refs)) | |
158 | kfree(hash); | |
159 | } | |
160 | ||
/* Creation parameters for io_wq_create(). */
struct io_wq_data {
	struct io_wq_hash *hash;	/* shared hashed-work state */
	struct task_struct *task;	/* NOTE(review): presumably the owning task — confirm */
	io_wq_work_fn *do_work;		/* executes one work item */
	free_work_fn *free_work;	/* releases a completed work item */
};

/* Create a worker pool; 'bounded' limits the bounded-worker count. */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
/* Two-phase teardown: start shutdown, then drop the ref and wait for exit. */
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

/* Queue 'work' for asynchronous execution by the pool. */
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
/* Mark 'work' as hashed on key 'val' (sets IO_WQ_WORK_HASHED + hash bits). */
void io_wq_hash_work(struct io_wq_work *work, void *val);

/* Restrict workers to 'mask' CPUs. */
int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
/* Adjust per-type worker limits via 'new_count'. */
int io_wq_max_workers(struct io_wq *wq, int *new_count);

8766dd51 PB |
178 | static inline bool io_wq_is_hashed(struct io_wq_work *work) |
179 | { | |
180 | return work->flags & IO_WQ_WORK_HASHED; | |
181 | } | |
771b53d0 | 182 | |
/* Predicate used by io_wq_cancel_cb() to match work items for cancellation. */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

/* Cancel matching work; when 'cancel_all' is set, cancel every match. */
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

/*
 * Worker sleep/wake notifications — NOTE(review): presumably invoked from
 * the scheduler when an io-wq worker blocks or resumes; confirm call sites.
 * No-op stubs when io-wq is not compiled in.
 */
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

525b305d JA |
200 | static inline bool io_wq_current_is_worker(void) |
201 | { | |
3bfe6106 JA |
202 | return in_task() && (current->flags & PF_IO_WORKER) && |
203 | current->pf_io_worker; | |
525b305d JA |
204 | } |
205 | #endif |