#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important, it should just not be an otherwise
	 * valid error code, yet less than -MAX_ERRNO and valid internally.
	 */
	IOU_REQUEUE		= -3072,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
	 * to the poll runner that multishot should be removed and the
	 * result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned nr_timeouts;
	ktime_t timeout;

#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_busy_poll_to;
	bool napi_prefer_busy_poll;
#endif
};

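/*
 * Check whether a waiter on the CQ ring should be woken. The tail delta is
 * computed as a signed int so that CQ tail wrap-around is handled correctly.
 */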
static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

enum {
	IO_EVENTFD_OP_SIGNAL_BIT,
	IO_EVENTFD_OP_FREE_BIT,
};

void io_eventfd_ops(struct rcu_head *rcu);
void io_activate_pollwq(struct io_ring_ctx *ctx);

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been setup with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (ctx->submitter_task->flags & PF_EXITING)
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
#endif
}

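/* Queue task_work for this request with no special flags. */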
static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cq_flush)
		__io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

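/*
 * Grab the next free CQE from the internal CQE cache, refilling the cache if
 * it has run out. CQE32 rings consume two cache slots per completion, hence
 * the extra increment of cqe_cached for IORING_SETUP_CQE32.
 */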
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

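/*
 * Mark the request as failed. A failed request always posts a CQE, so if
 * CQE skipping was requested, drop REQ_F_CQE_SKIP for this request and set
 * REQ_F_SKIP_LINK_CQES so the remainder of the link still skips its CQEs.
 */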
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

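/*
 * Run any task_work pending for the current task. Returns true if any work
 * was executed, false otherwise.
 */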
static inline int io_run_task_work(void)
{
	bool ret = false;

	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER) {
		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			__set_current_state(TASK_RUNNING);
			resume_user_mode_work(NULL);
		}
		if (current->io_uring) {
			unsigned int count = 0;

			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
			if (count)
				ret = true;
		}
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		ret = true;
	}

	return ret;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

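/*
 * Task_work handlers are invoked with the ring's uring_lock already held,
 * so this reduces to an assertion that the caller indeed holds the mutex.
 */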
static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

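/*
 * Consume nr cached task references. The per-task cache avoids an atomic
 * refcount operation per request and is refilled in batches when exhausted.
 */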
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

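/*
 * Pop a request off the submission-side free list. The caller must have
 * checked that the cache isn't empty, see io_alloc_req() below.
 */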
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

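/*
 * With IORING_SETUP_DEFER_TASKRUN, only the submitter task may run the
 * ring's task_work; the helpers below gate that.
 */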
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}

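/*
 * Check whether the request's file supports poll, caching the result in
 * REQ_F_CAN_POLL so file_can_poll() is only called once per request.
 */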
static inline bool io_file_can_poll(struct io_kiocb *req)
{
	if (req->flags & REQ_F_CAN_POLL)
		return true;
	if (req->file && file_can_poll(req->file)) {
		req->flags |= REQ_F_CAN_POLL;
		return true;
	}
	return false;
}

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
		!llist_empty(&ctx->work_llist);
}
#endif