// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "waitid.h"
#include "futex.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
	u8				opcode;
};
#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
			 IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)
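
/*
 * Illustrative note (not part of the original file): the cancel flags
 * compose. IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_ALL matches every
 * in-flight request on one file, IORING_ASYNC_CANCEL_OP matches by opcode,
 * and IORING_ASYNC_CANCEL_ANY matches any request on the ring regardless
 * of key. When neither FD nor OP is given, matching falls back to the
 * user_data key, as io_cancel_req_match() below implements.
 */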
/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
	bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;

	if (req->ctx != cd->ctx)
		return false;

	if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
		match_user_data = true;

	if (cd->flags & IORING_ASYNC_CANCEL_ANY)
		goto check_seq;
	if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	}
	if (cd->flags & IORING_ASYNC_CANCEL_OP) {
		if (req->opcode != cd->opcode)
			return false;
	}
	if (match_user_data && req->cqe.user_data != cd->data)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
check_seq:
		if (io_cancel_match_sequence(req, cd->seq))
			return false;
	}

	return true;
}
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	return io_cancel_req_match(req, cd);
}
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll request
	 * armed that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_waitid_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_futex_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}
	if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->opcode = READ_ONCE(sqe->len);
	}

	return 0;
}
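
/*
 * For illustration only (not part of this file): from userspace, liburing's
 * prep helpers fill the SQE fields that io_async_cancel_prep() parses above.
 * Matching by user_data versus by file descriptor looks roughly like:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// cancel the one request submitted with user_data == tag
 *	io_uring_prep_cancel64(sqe, tag, 0);
 *
 *	// or: cancel all requests targeting a given file descriptor
 *	io_uring_prep_cancel_fd(sqe, fd, IORING_ASYNC_CANCEL_ALL);
 */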
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		ret = io_async_cancel_one(node->task->io_uring, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.opcode	= cancel->opcode,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->tctx;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
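
/*
 * Illustrative note (not in the original): the CQE res posted for
 * IORING_OP_ASYNC_CANCEL is 0 if a matching request was found and
 * cancelled, the number of cancelled requests when IORING_ASYNC_CANCEL_ALL
 * is set, or a -errno such as -ENOENT (nothing matched) or -EALREADY (the
 * match was already executing and may still run to completion).
 */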
static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		struct io_rsrc_node *node;

		node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
		if (unlikely(!node))
			return -EBADF;
		cd->file = io_slot_file(node);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct file *file = NULL;
	DEFINE_WAIT(wait);
	int ret, i;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
		if (sc.pad[i])
			return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
		if (sc.pad2[i])
			return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;
	cd.opcode = sc.opcode;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		file = fget(sc.fd);
		if (!file)
			return -EBADF;
		cd.file = file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time a
	 * request completes and will retry the cancellation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	if (file)
		fput(file);
	return ret;
}
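
/*
 * For illustration only (not part of this file): userspace reaches
 * io_sync_cancel() through io_uring_register(2) with
 * IORING_REGISTER_SYNC_CANCEL, e.g. via liburing. A rough sketch that
 * cancels by user_data and waits indefinitely (-1/-1 disables the timeout,
 * matching the tv_sec/tv_nsec check above):
 *
 *	struct io_uring_sync_cancel_reg reg = {
 *		.addr		= tag,	// user_data key to match
 *		.timeout	= { .tv_sec = -1, .tv_nsec = -1 },
 *	};
 *	int ret = io_uring_register_sync_cancel(&ring, &reg);
 *	// 0: cancelled; -ENOENT: nothing matched; -ETIME: timed out
 */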