#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

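/*
 * Return values for opcode issue handlers: IOU_OK means the core
 * completion path posts the CQE, IOU_ISSUE_SKIP_COMPLETE means the
 * handler owns the completion and posts it itself.
 */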
enum {
	IOU_OK = 0,
	IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
};

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(void);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_task_prio_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

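/* iterate over a request and every request linked behind it */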
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

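/*
 * Completion-side locking: CQEs are posted under ->completion_lock. The
 * unlock counterpart, io_cq_unlock_post() (io_uring.c), also commits the
 * ring and wakes CQ waiters.
 */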
static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

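/*
 * Hand out the next CQE from the cached range if one is left; a 32-byte
 * CQE (IORING_SETUP_CQE32) occupies two slots, hence the extra increment.
 * Otherwise fall back to the __io_get_cqe() slow path.
 */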
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx);
}

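/*
 * Copy the completion staged in req->cqe into the next CQE slot. If no
 * slot is available, hand the request to io_req_cqe_overflow() instead.
 */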
static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);
	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);
	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

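/*
 * Mark a request as failed. A failed request must post a CQE even if
 * CQE skipping was requested, so move REQ_F_CQE_SKIP over to
 * REQ_F_SKIP_LINK_CQES for the rest of the link.
 */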
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

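/* stage the result and flags in the request; posted later as its CQE */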
static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

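/*
 * Counterpart to io_ring_submit_lock(): drop ->uring_lock only if this
 * issue path had to take it (IO_URING_F_UNLOCKED).
 */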
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

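/*
 * SQ accounting: compare the tail written by userspace in the shared
 * ring against the SQ head the kernel has consumed so far.
 */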
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

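/*
 * Run any task_work pending for the current task. Returns true if the
 * notify flag was set or work was pending, false otherwise.
 */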
static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

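/*
 * Task-work handlers are passed a "locked" hint: take ->uring_lock
 * lazily the first time it is needed and record that we now hold it.
 */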
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

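/* add the request to the deferred completion batch for this ring */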
static inline void io_req_add_compl_list(struct io_kiocb *req)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

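/*
 * Only take the __io_commit_cqring_flush() slow path when one of the
 * rarely used features (timeouts, drain, registered eventfd) is active.
 */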
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

#endif