#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "alloc_cache.h"
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
        IOU_OK                  = 0,
        IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

        /*
         * Requeue the task_work to restart operations on this request. The
         * actual value isn't important; it just needs to not be an otherwise
         * valid error code, yet less than -MAX_ERRNO and valid internally.
         */
        IOU_REQUEUE             = -3072,

        /*
         * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
         * the poll runner that multishot should be removed and the result
         * set on req->cqe.res.
         */
        IOU_STOP_MULTISHOT      = -ECANCELED,
};

struct io_wait_queue {
        struct wait_queue_entry wq;
        struct io_ring_ctx *ctx;
        unsigned cq_tail;
        unsigned cq_min_tail;
        unsigned nr_timeouts;
        int hit_timeout;
        ktime_t min_timeout;
        ktime_t timeout;
        struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
        ktime_t napi_busy_poll_dt;
        bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
        struct io_ring_ctx *ctx = iowq->ctx;
        int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

        /*
         * Wake up if we have enough events, or if a timeout occurred since we
         * started waiting. For timeouts, we always want to return to userspace,
         * regardless of event count.
         */
        return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

#define IORING_MAX_ENTRIES      32768
#define IORING_MAX_CQ_ENTRIES   (2 * IORING_MAX_ENTRIES)

unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
                         unsigned int cq_entries, size_t *sq_offset);
int io_uring_fill_params(unsigned entries, struct io_uring_params *p);
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
                               unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
                                 unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
                                struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
                                int start, int end);
void io_req_queue_iowq(struct io_kiocb *req);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
                        bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

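/*
 * Assert that the CQ side of the ring is locked for posting. Which lock that
 * is depends on how the ring was set up: the uring_lock for IOPOLL and
 * DEFER_TASKRUN rings, the completion_lock when completions are not
 * task-private, and otherwise the submitter task itself.
 */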
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
        lockdep_assert(in_task());

        if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
                lockdep_assert_held(&ctx->uring_lock);

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                lockdep_assert_held(&ctx->uring_lock);
        } else if (!ctx->task_complete) {
                lockdep_assert_held(&ctx->completion_lock);
        } else if (ctx->submitter_task) {
                /*
                 * ->submitter_task may be NULL and we can still post a CQE,
                 * if the ring has been setup with IORING_SETUP_R_DISABLED.
                 * Not from an SQE, as those cannot be submitted, but via
                 * updating tagged resources.
                 */
                if (!percpu_ref_is_dying(&ctx->refs))
                        lockdep_assert(current == ctx->submitter_task);
        }
#endif
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
        __io_req_task_work_add(req, 0);
}

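/*
 * Flush deferred completions if any requests are sitting on the completion
 * list, or if a flush of the CQ side has been requested.
 */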
static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
        if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
            ctx->submit_state.cq_flush)
                __io_submit_flush_completions(ctx);
}

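/* Iterate a request and every request linked behind it. */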
#define io_for_each_link(pos, head) \
        for (pos = (head); pos; pos = pos->link)

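/*
 * Reserve the next CQE slot from the cached range, refilling the cache from
 * the CQ ring when it runs dry. Returns false if no slot could be obtained.
 */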
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
                                       struct io_uring_cqe **ret,
                                       bool overflow)
{
        io_lockdep_assert_cq_locked(ctx);

        if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
                if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
                        return false;
        }
        *ret = ctx->cqe_cached;
        ctx->cached_cq_tail++;
        ctx->cqe_cached++;
        if (ctx->flags & IORING_SETUP_CQE32)
                ctx->cqe_cached++;
        return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
        return io_get_cqe_overflow(ctx, ret, false);
}

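/*
 * Copy a request's stashed CQE (and the big CQE halves on CQE32 rings) into
 * the next CQ ring slot.
 */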
static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
                                            struct io_kiocb *req)
{
        struct io_uring_cqe *cqe;

        /*
         * If we can't get a cq entry, userspace overflowed the
         * submission (by quite a lot). Increment the overflow count in
         * the ring.
         */
        if (unlikely(!io_get_cqe(ctx, &cqe)))
                return false;

        memcpy(cqe, &req->cqe, sizeof(*cqe));
        if (ctx->flags & IORING_SETUP_CQE32) {
                memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
                memset(&req->big_cqe, 0, sizeof(req->big_cqe));
        }

        if (trace_io_uring_complete_enabled())
                trace_io_uring_complete(req->ctx, req, cqe);
        return true;
}

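/*
 * Mark the request as failed. A failed request always posts a CQE, so if CQE
 * skipping was requested, drop it and skip the link CQEs instead.
 */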
static inline void req_set_fail(struct io_kiocb *req)
{
        req->flags |= REQ_F_FAIL;
        if (req->flags & REQ_F_CQE_SKIP) {
                req->flags &= ~REQ_F_CQE_SKIP;
                req->flags |= REQ_F_SKIP_LINK_CQES;
        }
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
        req->cqe.res = res;
        req->cqe.flags = cflags;
}

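/*
 * Allocate async data for a request, either from the provided cache or via
 * kmalloc() using the opcode's async_size.
 */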
static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
                                              struct io_kiocb *req)
{
        if (cache) {
                req->async_data = io_cache_alloc(cache, GFP_KERNEL);
        } else {
                const struct io_issue_def *def = &io_issue_defs[req->opcode];

                WARN_ON_ONCE(!def->async_size);
                req->async_data = kmalloc(def->async_size, GFP_KERNEL);
        }
        if (req->async_data)
                req->flags |= REQ_F_ASYNC_DATA;
        return req->async_data;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
        return req->flags & REQ_F_ASYNC_DATA;
}

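/* Drop the file reference, unless the request uses a fixed (registered) file. */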
static inline void io_put_file(struct io_kiocb *req)
{
        if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
                fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
                                         unsigned issue_flags)
{
        lockdep_assert_held(&ctx->uring_lock);
        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
                                       unsigned issue_flags)
{
        /*
         * "Normal" inline submissions always hold the uring_lock, since we
         * grab it from the system call. Same is true for the SQPOLL offload.
         * The only exception is when we've detached the request and issue it
         * from an async worker thread; grab the lock for that case.
         */
        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                mutex_lock(&ctx->uring_lock);
        lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
        /* order cqe stores with ring update */
        smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

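/* Wake anyone polling the ring fd itself (e.g. via epoll or select). */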
static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
        if (wq_has_sleeper(&ctx->poll_wq))
                __wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
                                poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
        /*
         * Trigger waitqueue handler on all waiters on our waitqueue. This
         * won't necessarily wake up all the tasks, io_should_wake() will make
         * that decision.
         *
         * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
         * is set in the mask so that if we recurse back into our own poll
         * waitqueue handlers, we know we have a dependency between eventfd or
         * epoll and should terminate multishot poll at that point.
         */
        if (wq_has_sleeper(&ctx->cq_wait))
                __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
                                poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
        struct io_rings *r = ctx->rings;

        /*
         * SQPOLL must use the actual sqring head, as using the cached_sq_head
         * is race prone if the SQPOLL thread has grabbed entries but not yet
         * committed them to the ring. For !SQPOLL, this doesn't matter, but
         * since this helper is only used for SQPOLL sqring waits (or POLLOUT),
         * just read the actual sqring head unconditionally.
         */
        return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

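/* Number of SQEs the application has made available, capped to the SQ ring size. */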
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
        struct io_rings *rings = ctx->rings;
        unsigned int entries;

        /* make sure SQ entry isn't read before tail */
        entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
        return min(entries, ctx->sq_entries);
}

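/*
 * Run any pending task_work for the current task. Returns true if any work
 * was run.
 */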
static inline int io_run_task_work(void)
{
        bool ret = false;

        /*
         * Always check-and-clear the task_work notification signal. With how
         * signaling works for task_work, we can find it set with nothing to
         * run. We need to clear it for that case, like get_signal() does.
         */
        if (test_thread_flag(TIF_NOTIFY_SIGNAL))
                clear_notify_signal();
        /*
         * PF_IO_WORKER never returns to userspace, so check here if we have
         * notify work that needs processing.
         */
        if (current->flags & PF_IO_WORKER) {
                if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                        __set_current_state(TASK_RUNNING);
                        resume_user_mode_work(NULL);
                }
                if (current->io_uring) {
                        unsigned int count = 0;

                        __set_current_state(TASK_RUNNING);
                        tctx_task_work_run(current->io_uring, UINT_MAX, &count);
                        if (count)
                                ret = true;
                }
        }
        if (task_work_pending(current)) {
                __set_current_state(TASK_RUNNING);
                task_work_run();
                ret = true;
        }

        return ret;
}

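/*
 * True if the ring has deferred (DEFER_TASKRUN) local work queued on either
 * the normal or the retry list.
 */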
static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
        return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
        return task_work_pending(current) || io_local_work_pending(ctx);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
        lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use the deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
        __must_hold(&req->ctx->uring_lock)
{
        struct io_submit_state *state = &req->ctx->submit_state;

        lockdep_assert_held(&req->ctx->uring_lock);

        wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
        if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
                     ctx->has_evfd || ctx->poll_activated))
                __io_commit_cqring_flush(ctx);
}

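/*
 * Charge @nr request references against the current task's cached refcount,
 * refilling the cache when it runs out.
 */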
static inline void io_get_task_refs(int nr)
{
        struct io_uring_task *tctx = current->io_uring;

        tctx->cached_refs -= nr;
        if (unlikely(tctx->cached_refs < 0))
                io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
        return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

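/* Pop a request from the free-list request cache; the cache must not be empty. */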
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
        wq_stack_extract(&ctx->submit_state.free_list);
        return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
        if (unlikely(io_req_cache_empty(ctx))) {
                if (!__io_alloc_req_refill(ctx))
                        return false;
        }
        *req = io_extract_req(ctx);
        return true;
}

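/*
 * With IORING_SETUP_DEFER_TASKRUN, only the ring's submitter task is allowed
 * to run the deferred task_work.
 */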
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
        return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
        return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
                      ctx->submitter_task == current);
}

/*
 * Terminate the request if either of these conditions is true:
 *
 * 1) It's being executed by the original task, but that task is marked
 *    with PF_EXITING as it's exiting.
 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
 *    our fallback task_work.
 */
static inline bool io_should_terminate_tw(void)
{
        return current->flags & (PF_KTHREAD | PF_EXITING);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
        io_req_set_res(req, res, 0);
        req->io_task_work.func = io_req_task_complete;
        io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
        if (ctx->flags & IORING_SETUP_SQE128)
                return 2 * sizeof(struct io_uring_sqe);
        return sizeof(struct io_uring_sqe);
}

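/*
 * Check whether the request's file supports poll, caching the result in
 * REQ_F_CAN_POLL so the file_operations check is only done once.
 */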
static inline bool io_file_can_poll(struct io_kiocb *req)
{
        if (req->flags & REQ_F_CAN_POLL)
                return true;
        if (req->file && file_can_poll(req->file)) {
                req->flags |= REQ_F_CAN_POLL;
                return true;
        }
        return false;
}

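/* Read the current time using the clock this ring was configured with. */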
static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
        if (ctx->clockid == CLOCK_MONOTONIC)
                return ktime_get();

        return ktime_get_with_offset(ctx->clock_offset);
}

enum {
        IO_CHECK_CQ_OVERFLOW_BIT,
        IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
        return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
                io_local_work_pending(ctx);
}