#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK = 0,
	IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
	 * to the poll runner that multishot should be removed and that the
	 * result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT = -ECANCELED,
};
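
/*
 * Example (illustrative only, not part of this header): opcode ->issue()
 * handlers typically set the result with io_req_set_res() and return IOU_OK
 * so the core posts the completion, or return IOU_ISSUE_SKIP_COMPLETE when
 * the completion is posted elsewhere (e.g. after the request was armed for
 * poll). The handler and helper names below are hypothetical.
 *
 *	static int io_frobnicate(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int ret = frobnicate(req);	// hypothetical operation
 *
 *		io_req_set_res(req, ret, 0);
 *		return IOU_OK;
 *	}
 */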

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
		bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

static inline void io_req_complete_post_tw(struct io_kiocb *req, bool *locked)
{
	unsigned flags = *locked ? 0 : IO_URING_F_UNLOCKED;

	io_req_complete_post(req, flags);
}

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, true);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)
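
/*
 * Example use of io_for_each_link() (illustrative only): walk a request and
 * everything linked behind it through ->link, e.g. when matching a chain
 * against a cancellation target. The predicate below is hypothetical.
 *
 *	struct io_kiocb *cur;
 *
 *	io_for_each_link(cur, head) {
 *		if (req_matches(cur))		// hypothetical predicate
 *			return true;
 *	}
 */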

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

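/*
 * Fast path for CQE allocation: ctx->cqe_cached points at the next unused
 * entry in a previously cached run of the CQ ring and ctx->cqe_sentinel marks
 * the end of that run; CQE32 rings consume two entries per completion. Once
 * the cached run is exhausted, fall back to __io_get_cqe().
 */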
static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
							bool overflow)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx, overflow);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	return io_get_cqe_overflow(ctx, false);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. The same is true for the SQPOLL
	 * offload. The only exception is when we've detached the request and
	 * issue it from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
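
/*
 * Example pattern (illustrative only): code that can run either inline with
 * the uring_lock already held or from an unlocked async context brackets its
 * critical section with the pair above and passes issue_flags through, so the
 * mutex is only taken when IO_URING_F_UNLOCKED is set. The helper name is
 * hypothetical.
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	update_ctx_state(ctx);			// hypothetical critical section
 *	io_ring_submit_unlock(ctx, issue_flags);
 */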

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

/* requires smp_mb() prior, see wq_has_sleeper() */
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (waitqueue_active(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

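/*
 * As __io_cqring_wake(), but issues the full memory barrier itself, so the
 * caller does not need a prior smp_mb().
 */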
static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	smp_mb();
	__io_cqring_wake(ctx);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline int io_run_task_work(void)
{
	if (task_work_pending(current)) {
		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
			clear_notify_signal();
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
		!wq_list_empty(&ctx->work_llist);
}

static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
	int ret = 0;
	int ret2;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		ret = io_run_local_work(ctx);

	/* want to run this after in case more is added */
	ret2 = io_run_task_work();

	/*
	 * Prefer propagating an error over reporting how many items ran,
	 * but still make sure the task work itself is run if requested.
	 */
	if (ret >= 0)
		ret += ret2;

	return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
{
	bool locked;
	int ret;

	if (llist_empty(&ctx->work_llist))
		return 0;

	locked = true;
	ret = __io_run_local_work(ctx, &locked);
	/* shouldn't happen! */
	if (WARN_ON_ONCE(!locked))
		mutex_lock(&ctx->uring_lock);
	return ret;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

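/*
 * io_alloc_req() expects the request cache to be non-empty: the free list is
 * popped without checking, so callers must have refilled it beforehand via
 * io_alloc_req_refill().
 */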
static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node;

	node = wq_stack_extract(&ctx->submit_state.free_list);
	return container_of(node, struct io_kiocb, comp_list);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

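/*
 * Set the final result on the request and punt its completion to task_work
 * via io_req_task_complete() rather than completing it inline.
 */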
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

#endif