// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "napi.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file *file;
	u64 old_user_data;
	u64 new_user_data;
	__poll_t events;
	bool update_events;
	bool update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

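/*
 * Layout of req->poll_refs: bit 31 flags the request as cancelled, bit 30
 * asks the task_work handler to re-check for events, and the low 30 bits
 * hold the actual reference/ownership count.
 */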
#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128

#define IO_WQE_F_DOUBLE		1

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key);

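/*
 * The wait queue entry's ->private field carries the io_kiocb pointer, with
 * IO_WQE_F_DOUBLE stashed in the bottom bit to mark entries queued via the
 * second (double) poll entry.
 */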
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
	int v;

	/*
	 * poll_refs are already elevated and we don't have much hope for
	 * grabbing the ownership. Instead of incrementing, set a retry flag
	 * to notify the loop that there might have been some change.
	 */
	v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
	if (v & IO_POLL_REF_MASK)
		return false;
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We
 * can bump it and acquire ownership. It's disallowed to modify requests while
 * not owning them, which prevents races when enqueueing task_work and between
 * arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
		return io_poll_get_ownership_slowpath(req);
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

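/*
 * Poll requests are hashed by their CQE user_data so that cancelation can
 * find them later: ->cancel_table uses per-bucket spinlocks, while
 * ->cancel_table_locked relies on ->uring_lock instead.
 */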
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, ts);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

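/*
 * Initialise a poll entry: remember the requested events (plus the
 * error/hangup bits we always want) and hook up io_poll_wake() as the
 * waitqueue callback.
 */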
static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free. If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
	IOU_POLL_REISSUE = 3,
	IOU_POLL_REQUEUE = 4,
};

233 | static void __io_poll_execute(struct io_kiocb *req, int mask) |
234 | { | |
235 | unsigned flags = 0; | |
236 | ||
237 | io_req_set_res(req, mask, 0); | |
238 | req->io_task_work.func = io_poll_task_func; | |
239 | ||
240 | trace_io_uring_task_add(req, mask); | |
241 | ||
242 | if (!(req->flags & REQ_F_POLL_NO_LAZY)) | |
243 | flags = IOU_F_TWQ_LAZY_WAKE; | |
244 | __io_req_task_work_add(req, flags); | |
245 | } | |
246 | ||
247 | static inline void io_poll_execute(struct io_kiocb *req, int res) | |
248 | { | |
249 | if (io_poll_get_ownership(req)) | |
250 | __io_poll_execute(req, res); | |
251 | } | |
252 | ||
329061d3 JA |
253 | /* |
254 | * All poll tw should go through this. Checks for poll events, manages | |
255 | * references, does rewait, etc. | |
256 | * | |
6e5aedb9 JA |
257 | * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action |
258 | * require, which is either spurious wakeup or multishot CQE is served. | |
259 | * IOU_POLL_DONE when it's done with the request, then the mask is stored in | |
260 | * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot | |
261 | * poll and that the result is stored in req->cqe. | |
329061d3 | 262 | */ |
a282967c | 263 | static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts) |
329061d3 | 264 | { |
6e5aedb9 | 265 | int v; |
329061d3 JA |
266 | |
267 | /* req->task == current here, checking PF_EXITING is safe */ | |
268 | if (unlikely(req->task->flags & PF_EXITING)) | |
269 | return -ECANCELED; | |
270 | ||
271 | do { | |
272 | v = atomic_read(&req->poll_refs); | |
273 | ||
9805fa2d PB |
274 | if (unlikely(v != 1)) { |
275 | /* tw should be the owner and so have some refs */ | |
276 | if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK))) | |
c3bfb57e | 277 | return IOU_POLL_NO_ACTION; |
9805fa2d PB |
278 | if (v & IO_POLL_CANCEL_FLAG) |
279 | return -ECANCELED; | |
a26a35e9 | 280 | /* |
9805fa2d PB |
281 | * cqe.res contains only events of the first wake up |
282 | * and all others are to be lost. Redo vfs_poll() to get | |
283 | * up to date state. | |
a26a35e9 | 284 | */ |
9805fa2d PB |
285 | if ((v & IO_POLL_REF_MASK) != 1) |
286 | req->cqe.res = 0; | |
287 | ||
288 | if (v & IO_POLL_RETRY_FLAG) { | |
289 | req->cqe.res = 0; | |
290 | /* | |
291 | * We won't find new events that came in between | |
292 | * vfs_poll and the ref put unless we clear the | |
293 | * flag in advance. | |
294 | */ | |
295 | atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); | |
296 | v &= ~IO_POLL_RETRY_FLAG; | |
297 | } | |
a26a35e9 | 298 | } |
329061d3 | 299 | |
2ba69707 | 300 | /* the mask was stashed in __io_poll_execute */ |
329061d3 JA |
301 | if (!req->cqe.res) { |
302 | struct poll_table_struct pt = { ._key = req->apoll_events }; | |
303 | req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events; | |
6e5aedb9 JA |
304 | /* |
305 | * We got woken with a mask, but someone else got to | |
306 | * it first. The above vfs_poll() doesn't add us back | |
307 | * to the waitqueue, so if we get nothing back, we | |
308 | * should be safe and attempt a reissue. | |
309 | */ | |
8caa03f1 JA |
310 | if (unlikely(!req->cqe.res)) { |
311 | /* Multishot armed need not reissue */ | |
312 | if (!(req->apoll_events & EPOLLONESHOT)) | |
313 | continue; | |
6e5aedb9 | 314 | return IOU_POLL_REISSUE; |
8caa03f1 | 315 | } |
329061d3 | 316 | } |
329061d3 | 317 | if (req->apoll_events & EPOLLONESHOT) |
2ba69707 | 318 | return IOU_POLL_DONE; |
329061d3 JA |
319 | |
320 | /* multishot, just fill a CQE and proceed */ | |
321 | if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { | |
322 | __poll_t mask = mangle_poll(req->cqe.res & | |
323 | req->apoll_events); | |
329061d3 | 324 | |
b6b2bb58 PB |
325 | if (!io_fill_cqe_req_aux(req, ts->locked, mask, |
326 | IORING_CQE_F_MORE)) { | |
a2da6763 DY |
327 | io_req_set_res(req, mask, 0); |
328 | return IOU_POLL_REMOVE_POLL_USE_RES; | |
329 | } | |
d245bca6 | 330 | } else { |
a282967c | 331 | int ret = io_poll_issue(req, ts); |
114eccdf DY |
332 | if (ret == IOU_STOP_MULTISHOT) |
333 | return IOU_POLL_REMOVE_POLL_USE_RES; | |
704ea888 JA |
334 | else if (ret == IOU_REQUEUE) |
335 | return IOU_POLL_REQUEUE; | |
2ba69707 | 336 | if (ret < 0) |
d245bca6 PB |
337 | return ret; |
338 | } | |
329061d3 | 339 | |
b98186ae PB |
340 | /* force the next iteration to vfs_poll() */ |
341 | req->cqe.res = 0; | |
342 | ||
329061d3 JA |
343 | /* |
344 | * Release all references, retry if someone tried to restart | |
345 | * task_work while we were executing it. | |
346 | */ | |
3cdc4be1 JA |
347 | v &= IO_POLL_REF_MASK; |
348 | } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK); | |
329061d3 | 349 | |
2ba69707 | 350 | return IOU_POLL_NO_ACTION; |
329061d3 JA |
351 | } |
352 | ||
void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
{
	int ret;

	ret = io_poll_check_events(req, ts);
	if (ret == IOU_POLL_NO_ACTION) {
		return;
	} else if (ret == IOU_POLL_REQUEUE) {
		__io_poll_execute(req, 0);
		return;
	}
	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, ts);

	if (req->opcode == IORING_OP_POLL_ADD) {
		if (ret == IOU_POLL_DONE) {
			struct io_poll *poll;

			poll = io_kiocb_to_cmd(req, struct io_poll);
			req->cqe.res = mangle_poll(req->cqe.res & poll->events);
		} else if (ret == IOU_POLL_REISSUE) {
			io_req_task_submit(req, ts);
			return;
		} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
			req->cqe.res = ret;
			req_set_fail(req);
		}

		io_req_set_res(req, req->cqe.res, 0);
		io_req_task_complete(req, ts);
	} else {
		io_tw_lock(req->ctx, ts);

		if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
			io_req_task_complete(req, ts);
		else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
			io_req_task_submit(req, ts);
		else
			io_req_defer_failed(req, ret);
	}
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over it, we have to tear down the request as best we
	 * can. That means immediately removing the request from its
	 * waitqueue and preventing all further accesses to the waitqueue
	 * via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as req->head is NULL'ed out, the request can be
	 * completed and freed, since aio_poll_complete_work()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

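/*
 * Waitqueue callback: filters out events we're not interested in, takes poll
 * ownership if possible and punts the request to task_work. POLLFREE (the
 * waitqueue going away) gets special teardown treatment.
 */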
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

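/*
 * vfs_poll() queueing callback: registers our wait entry on the file's
 * waitqueue, allocating a second io_poll entry if the file uses more than
 * one waitqueue.
 */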
static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE) {
		/*
		 * Exclusive waits may only wake a limited amount of entries
		 * rather than all of them, this may interfere with lazy
		 * wake if someone does wait(events > 1). Ensure we don't do
		 * lazy wake for those, as we need to process each one as they
		 * come in.
		 */
		req->flags |= REQ_F_POLL_NO_LAZY;
		add_wait_queue_exclusive(head, &poll->wait);
	} else {
		add_wait_queue(head, &poll->wait);
	}
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

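/*
 * True if this context may complete the request inline: either we already
 * took ownership when arming (io-wq) or we can grab it now.
 */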
static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req)
{
	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	INIT_HLIST_NODE(&req->hash_node);
	io_init_poll_iocb(poll, mask);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_add_hash(req);
			return 0;
		}
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	io_poll_add_hash(req);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}
	io_napi_add(req);

	if (ipt->owning) {
		/*
		 * Try to release ownership. If we see a change of state, e.g.
		 * poll was woken up, queue up a tw, it'll deal with it.
		 */
		if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops where a poll trigger is repeatedly followed
 * by a failing issue attempt. But rather than fail these immediately, allow a
 * certain number of retries before we give up. Given that this condition
 * should _rarely_ trigger even once, we should be fine with a larger value.
 */
#define APOLL_MAX_RETRY	128

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->apoll_cache);
		if (entry == NULL)
			goto alloc_apoll;
		apoll = container_of(entry, struct async_poll, cache);
		apoll->poll.retries = APOLL_MAX_RETRY;
	} else {
alloc_apoll:
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
		apoll->poll.retries = APOLL_MAX_RETRY;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	if (unlikely(!--apoll->poll.retries))
		return NULL;
	return apoll;
}

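/*
 * Arm poll-driven retry (async poll) for a request: build the event mask from
 * the opcode definition, allocate an async_poll entry and hand the request to
 * __io_arm_poll_handler(). Returns IO_APOLL_OK when armed, IO_APOLL_READY
 * when the file is already ready, or IO_APOLL_ABORTED when polling isn't
 * possible.
 */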
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!io_file_can_poll(req))
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

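/*
 * Lookup helpers for cancelation: io_poll_find() checks the bucket that
 * cd->data hashes to, io_poll_file_find() scans every bucket using the
 * generic cancel matching. On success the bucket is returned via *out_bucket
 * with its lock still held, and the caller must drop it.
 */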
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (io_cancel_match_sequence(req, cd->seq))
				continue;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (io_cancel_req_match(req, cd)) {
				*out_bucket = hb;
				return req;
			}
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

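/*
 * Take ownership of a found poll request and tear down its waitqueue entries
 * and hash linkage. Returns -ENOENT if there was no request and -EALREADY if
 * someone else already owns it.
 */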
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
			 IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

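/*
 * Translate the userspace poll mask from the SQE into kernel __poll_t form,
 * defaulting to one-shot and edge-triggered behaviour unless the SQE flags
 * ask for multishot or level-triggered operation.
 */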
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
	       (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

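/*
 * IORING_OP_POLL_ADD: arm the poll handler for the request. If the file is
 * already ready the result mask is posted inline, otherwise completion
 * happens later from the wakeup path via task_work.
 */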
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

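/*
 * IORING_OP_POLL_REMOVE: find the target poll request in either cancel table,
 * disarm it, and then either update its events/user_data and re-arm it via
 * io_poll_add(), or complete it with -ECANCELED.
 */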
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	struct io_tw_state ts = { .locked = true };

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only mask one event flags, keep behavior flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	io_req_task_complete(preq, &ts);
out:
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}