// SPDX-License-Identifier: GPL-2.0
/*
 * Code related to the io_uring_register() syscall
 *
 * Copyright (C) 2023 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/refcount.h>
#include <linux/bits.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>

#include "io_uring.h"
#include "opdef.h"
#include "tctx.h"
#include "rsrc.h"
#include "sqpoll.h"
#include "register.h"
#include "cancel.h"
#include "kbuf.h"
#include "napi.h"
#include "eventfd.h"
#include "msg_ring.h"
#include "memmap.h"

#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)
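
/*
 * IORING_REGISTER_PROBE: report which opcodes this kernel supports. The
 * caller passes in a zeroed struct io_uring_probe with room for nr_args
 * ops entries; last_op is set to the highest opcode and every implemented
 * opcode gets IO_URING_OP_SUPPORTED before the result is copied back.
 */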
static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
			   unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	size = struct_size(p, ops, nr_args);
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (io_uring_op_supported(i))
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
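
/*
 * Drop a previously registered personality: remove the credentials stored
 * under @id and put the reference held on them.
 */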
int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
	const struct cred *creds;

	creds = xa_erase(&ctx->personalities, id);
	if (creds) {
		put_cred(creds);
		return 0;
	}

	return -EINVAL;
}
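
/*
 * IORING_REGISTER_PERSONALITY: stash a reference to the caller's current
 * credentials and return an id that requests can later name through
 * sqe->personality to be issued with those credentials.
 */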
static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds;
	u32 id;
	int ret;

	creds = get_current_cred();

	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
	if (ret < 0) {
		put_cred(creds);
		return ret;
	}
	return id;
}
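
/*
 * Parse an array of struct io_uring_restriction from userspace and fold it
 * into @restrictions: allowed register opcodes, allowed SQE opcodes, and
 * the allowed/required SQE flags. An unknown restriction opcode fails the
 * whole operation.
 */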
static __cold int io_parse_restrictions(void __user *arg, unsigned int nr_args,
					struct io_restriction *restrictions)
{
	struct io_uring_restriction *res;
	size_t size;
	int i, ret;

	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
		return -EINVAL;

	size = array_size(nr_args, sizeof(*res));
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	res = memdup_user(arg, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = -EINVAL;

	for (i = 0; i < nr_args; i++) {
		switch (res[i].opcode) {
		case IORING_RESTRICTION_REGISTER_OP:
			if (res[i].register_op >= IORING_REGISTER_LAST)
				goto err;
			__set_bit(res[i].register_op, restrictions->register_op);
			break;
		case IORING_RESTRICTION_SQE_OP:
			if (res[i].sqe_op >= IORING_OP_LAST)
				goto err;
			__set_bit(res[i].sqe_op, restrictions->sqe_op);
			break;
		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
			restrictions->sqe_flags_allowed = res[i].sqe_flags;
			break;
		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
			restrictions->sqe_flags_required = res[i].sqe_flags;
			break;
		default:
			goto err;
		}
	}

	ret = 0;
err:
	kfree(res);
	return ret;
}
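
/*
 * IORING_REGISTER_RESTRICTIONS: only valid while the ring is still disabled
 * (IORING_SETUP_R_DISABLED), and only a single registration is allowed. If
 * parsing fails, any partially applied restrictions are wiped again.
 */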
static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
					   void __user *arg, unsigned int nr_args)
{
	int ret;

	/* Restrictions allowed only if rings started disabled */
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	/* We allow only a single restrictions registration */
	if (ctx->restrictions.registered)
		return -EBUSY;

	ret = io_parse_restrictions(arg, nr_args, &ctx->restrictions);
	/* Reset all restrictions if an error happened */
	if (ret != 0)
		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
	else
		ctx->restrictions.registered = true;
	return ret;
}
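
/*
 * IORING_REGISTER_ENABLE_RINGS: bring a ring created with
 * IORING_SETUP_R_DISABLED live. This is also where a SINGLE_ISSUER ring
 * without a submitter adopts the current task, where registered
 * restrictions start being enforced, and where a waiting SQPOLL thread
 * gets woken.
 */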
static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
		/*
		 * Lazy activation attempts would fail if it was polled before
		 * submitter_task is set.
		 */
		if (wq_has_sleeper(&ctx->poll_wq))
			io_activate_pollwq(ctx);
	}

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}
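
/*
 * Apply (or clear, when @new_mask is NULL) the io-wq CPU affinity. For
 * SQPOLL rings the mask is applied to the SQPOLL thread's io-wq instead,
 * which requires dropping uring_lock around the call.
 */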
static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
					 cpumask_var_t new_mask)
{
	int ret;

	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
		ret = io_wq_cpu_affinity(current->io_uring, new_mask);
	} else {
		mutex_unlock(&ctx->uring_lock);
		ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
		mutex_lock(&ctx->uring_lock);
	}

	return ret;
}

static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
				       void __user *arg, unsigned len)
{
	cpumask_var_t new_mask;
	int ret;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();

#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		ret = compat_get_bitmap(cpumask_bits(new_mask),
					(const compat_ulong_t __user *)arg,
					len * 8 /* CHAR_BIT */);
	else
#endif
		ret = copy_from_user(new_mask, arg, len);

	if (ret) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}

	ret = __io_register_iowq_aff(ctx, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}

static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
	return __io_register_iowq_aff(ctx, NULL);
}
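
/*
 * IORING_REGISTER_IOWQ_MAX_WORKERS: cap the number of bounded/unbounded
 * io-wq workers. nr_args must be 2; a zero entry leaves that limit alone.
 * The previous limits are copied back to userspace, and for non-SQPOLL
 * rings the new limits are propagated to every task attached to the ring.
 */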
static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
					       void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_tctx_node *node;
	struct io_uring_task *tctx = NULL;
	struct io_sq_data *sqd = NULL;
	__u32 new_count[2];
	int i, ret;

	if (copy_from_user(new_count, arg, sizeof(new_count)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i] > INT_MAX)
			return -EINVAL;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		sqd = ctx->sq_data;
		if (sqd) {
			/*
			 * Observe the correct sqd->lock -> ctx->uring_lock
			 * ordering. Fine to drop uring_lock here, we hold
			 * a ref to the ctx.
			 */
			refcount_inc(&sqd->refs);
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&sqd->lock);
			mutex_lock(&ctx->uring_lock);
			if (sqd->thread)
				tctx = sqd->thread->io_uring;
		}
	} else {
		tctx = current->io_uring;
	}

	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i])
			ctx->iowq_limits[i] = new_count[i];
	ctx->iowq_limits_set = true;

	if (tctx && tctx->io_wq) {
		ret = io_wq_max_workers(tctx->io_wq, new_count);
		if (ret)
			goto err;
	} else {
		memset(new_count, 0, sizeof(new_count));
	}

	if (sqd) {
		mutex_unlock(&ctx->uring_lock);
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
		mutex_lock(&ctx->uring_lock);
	}

	if (copy_to_user(arg, new_count, sizeof(new_count)))
		return -EFAULT;

	/* that's it for SQPOLL, only the SQPOLL task creates requests */
	if (sqd)
		return 0;

	/* now propagate the restriction to all registered users */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		tctx = node->task->io_uring;
		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;

		for (i = 0; i < ARRAY_SIZE(new_count); i++)
			new_count[i] = ctx->iowq_limits[i];
		/* ignore errors, it always returns zero anyway */
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
	return 0;
err:
	if (sqd) {
		mutex_unlock(&ctx->uring_lock);
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
		mutex_lock(&ctx->uring_lock);
	}
	return ret;
}
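
/*
 * IORING_REGISTER_CLOCK: select the clock used for CQ wait timeouts; only
 * CLOCK_MONOTONIC and CLOCK_BOOTTIME are accepted. Rough userspace usage
 * through the io_uring_register(2) syscall (nr_args must be 0):
 *
 *	struct io_uring_clock_register reg = { .clockid = CLOCK_BOOTTIME };
 *	io_uring_register(ring_fd, IORING_REGISTER_CLOCK, &reg, 0);
 */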
static int io_register_clock(struct io_ring_ctx *ctx,
			     struct io_uring_clock_register __user *arg)
{
	struct io_uring_clock_register reg;

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (memchr_inv(&reg.__resv, 0, sizeof(reg.__resv)))
		return -EINVAL;

	switch (reg.clockid) {
	case CLOCK_MONOTONIC:
		ctx->clock_offset = 0;
		break;
	case CLOCK_BOOTTIME:
		ctx->clock_offset = TK_OFFS_BOOT;
		break;
	default:
		return -EINVAL;
	}

	ctx->clockid = reg.clockid;
	return 0;
}

/*
 * State to maintain until we can swap. Both new and old state, used for
 * either mapping or freeing.
 */
struct io_ring_ctx_rings {
	struct io_rings *rings;
	struct io_uring_sqe *sq_sqes;

	struct io_mapped_region sq_region;
	struct io_mapped_region ring_region;
};

static void io_register_free_rings(struct io_ring_ctx *ctx,
				   struct io_uring_params *p,
				   struct io_ring_ctx_rings *r)
{
	io_free_region(ctx, &r->sq_region);
	io_free_region(ctx, &r->ring_region);
}
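
/*
 * swap_old() stashes the ctx's current value of @field in the old state and
 * installs the value from the new state in its place.
 */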
#define swap_old(ctx, o, n, field)		\
	do {					\
		(o).field = (ctx)->field;	\
		(ctx)->field = (n).field;	\
	} while (0)

#define RESIZE_FLAGS	(IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP)
#define COPY_FLAGS	(IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQE128 | \
			 IORING_SETUP_CQE32 | IORING_SETUP_NO_MMAP)
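
/*
 * IORING_REGISTER_RESIZE_RINGS: allocate new SQ/CQ rings according to the
 * passed-in io_uring_params, carry over any pending SQEs and CQEs, and swap
 * the new rings in under ctx->mmap_lock and the completion lock. Limited to
 * DEFER_TASKRUN rings, and for SINGLE_ISSUER rings only the submitter task
 * may resize.
 */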
static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_region_desc rd;
	struct io_ring_ctx_rings o = { }, n = { }, *to_free = NULL;
	size_t size, sq_array_offset;
	unsigned i, tail, old_head;
	struct io_uring_params p;
	int ret;

	/* for single issuer, must be owner resizing */
	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER &&
	    current != ctx->submitter_task)
		return -EEXIST;
	/* limited to DEFER_TASKRUN for now */
	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
		return -EINVAL;
	if (copy_from_user(&p, arg, sizeof(p)))
		return -EFAULT;
	if (p.flags & ~RESIZE_FLAGS)
		return -EINVAL;

	/* properties that are always inherited */
	p.flags |= (ctx->flags & COPY_FLAGS);

	ret = io_uring_fill_params(p.sq_entries, &p);
	if (unlikely(ret))
		return ret;

	/* nothing to do, but copy params back */
	if (p.sq_entries == ctx->sq_entries && p.cq_entries == ctx->cq_entries) {
		if (copy_to_user(arg, &p, sizeof(p)))
			return -EFAULT;
		return 0;
	}

	size = rings_size(p.flags, p.sq_entries, p.cq_entries,
			  &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	memset(&rd, 0, sizeof(rd));
	rd.size = PAGE_ALIGN(size);
	if (p.flags & IORING_SETUP_NO_MMAP) {
		rd.user_addr = p.cq_off.user_addr;
		rd.flags |= IORING_MEM_REGION_TYPE_USER;
	}
	ret = io_create_region_mmap_safe(ctx, &n.ring_region, &rd, IORING_OFF_CQ_RING);
	if (ret) {
		io_register_free_rings(ctx, &p, &n);
		return ret;
	}
	n.rings = io_region_get_ptr(&n.ring_region);

	/*
	 * At this point n.rings is shared with userspace, just like o.rings
	 * is as well. While we don't expect userspace to modify it while
	 * a resize is in progress, and it's most likely that userspace will
	 * shoot itself in the foot if it does, we can't always assume good
	 * intent... Use read/write once helpers from here on to indicate the
	 * shared nature of it.
	 */
	WRITE_ONCE(n.rings->sq_ring_mask, p.sq_entries - 1);
	WRITE_ONCE(n.rings->cq_ring_mask, p.cq_entries - 1);
	WRITE_ONCE(n.rings->sq_ring_entries, p.sq_entries);
	WRITE_ONCE(n.rings->cq_ring_entries, p.cq_entries);

	if (copy_to_user(arg, &p, sizeof(p))) {
		io_register_free_rings(ctx, &p, &n);
		return -EFAULT;
	}

	if (p.flags & IORING_SETUP_SQE128)
		size = array_size(2 * sizeof(struct io_uring_sqe), p.sq_entries);
	else
		size = array_size(sizeof(struct io_uring_sqe), p.sq_entries);
	if (size == SIZE_MAX) {
		io_register_free_rings(ctx, &p, &n);
		return -EOVERFLOW;
	}

	memset(&rd, 0, sizeof(rd));
	rd.size = PAGE_ALIGN(size);
	if (p.flags & IORING_SETUP_NO_MMAP) {
		rd.user_addr = p.sq_off.user_addr;
		rd.flags |= IORING_MEM_REGION_TYPE_USER;
	}
	ret = io_create_region_mmap_safe(ctx, &n.sq_region, &rd, IORING_OFF_SQES);
	if (ret) {
		io_register_free_rings(ctx, &p, &n);
		return ret;
	}
	n.sq_sqes = io_region_get_ptr(&n.sq_region);

	/*
	 * If using SQPOLL, park the thread
	 */
	if (ctx->sq_data) {
		mutex_unlock(&ctx->uring_lock);
		io_sq_thread_park(ctx->sq_data);
		mutex_lock(&ctx->uring_lock);
	}

	/*
	 * We'll do the swap. Grab the ctx->mmap_lock, which will exclude
	 * any new mmap's on the ring fd. Clear out existing mappings to prevent
	 * mmap from seeing them, as we'll unmap them. Any attempt to mmap
	 * existing rings beyond this point will fail. Not that it could proceed
	 * at this point anyway, as the io_uring mmap side needs to grab the
	 * ctx->mmap_lock as well. Likewise, hold the completion lock over the
	 * duration of the actual swap.
	 */
	mutex_lock(&ctx->mmap_lock);
	spin_lock(&ctx->completion_lock);
	o.rings = ctx->rings;
	ctx->rings = NULL;
	o.sq_sqes = ctx->sq_sqes;
	ctx->sq_sqes = NULL;

	/*
	 * Now copy SQ and CQ entries, if any. If either of the destination
	 * rings can't hold what is already there, then fail the operation.
	 */
	tail = READ_ONCE(o.rings->sq.tail);
	old_head = READ_ONCE(o.rings->sq.head);
	if (tail - old_head > p.sq_entries)
		goto overflow;
	for (i = old_head; i < tail; i++) {
		unsigned src_head = i & (ctx->sq_entries - 1);
		unsigned dst_head = i & (p.sq_entries - 1);

		n.sq_sqes[dst_head] = o.sq_sqes[src_head];
	}
	WRITE_ONCE(n.rings->sq.head, old_head);
	WRITE_ONCE(n.rings->sq.tail, tail);

	tail = READ_ONCE(o.rings->cq.tail);
	old_head = READ_ONCE(o.rings->cq.head);
	if (tail - old_head > p.cq_entries) {
overflow:
		/* restore old rings, and return -EOVERFLOW via cleanup path */
		ctx->rings = o.rings;
		ctx->sq_sqes = o.sq_sqes;
		to_free = &n;
		ret = -EOVERFLOW;
		goto out;
	}
	for (i = old_head; i < tail; i++) {
		unsigned src_head = i & (ctx->cq_entries - 1);
		unsigned dst_head = i & (p.cq_entries - 1);

		n.rings->cqes[dst_head] = o.rings->cqes[src_head];
	}
	WRITE_ONCE(n.rings->cq.head, old_head);
	WRITE_ONCE(n.rings->cq.tail, tail);
	/* invalidate cached cqe refill */
	ctx->cqe_cached = ctx->cqe_sentinel = NULL;

	WRITE_ONCE(n.rings->sq_dropped, READ_ONCE(o.rings->sq_dropped));
	atomic_set(&n.rings->sq_flags, atomic_read(&o.rings->sq_flags));
	WRITE_ONCE(n.rings->cq_flags, READ_ONCE(o.rings->cq_flags));
	WRITE_ONCE(n.rings->cq_overflow, READ_ONCE(o.rings->cq_overflow));

	/* all done, store old pointers and assign new ones */
	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
		ctx->sq_array = (u32 *)((char *)n.rings + sq_array_offset);

	ctx->sq_entries = p.sq_entries;
	ctx->cq_entries = p.cq_entries;

	ctx->rings = n.rings;
	ctx->sq_sqes = n.sq_sqes;
	swap_old(ctx, o, n, ring_region);
	swap_old(ctx, o, n, sq_region);
	to_free = &o;
	ret = 0;
out:
	spin_unlock(&ctx->completion_lock);
	mutex_unlock(&ctx->mmap_lock);
	io_register_free_rings(ctx, &p, to_free);

	if (ctx->sq_data)
		io_sq_thread_unpark(ctx->sq_data);

	return ret;
}
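
/*
 * IORING_REGISTER_MEM_REGION: map a caller-described memory region for the
 * ctx. With IORING_MEM_REGION_REG_WAIT_ARG the region also backs extended
 * CQ wait arguments, which is only allowed while the ring is still
 * disabled.
 */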
static int io_register_mem_region(struct io_ring_ctx *ctx, void __user *uarg)
{
	struct io_uring_mem_region_reg __user *reg_uptr = uarg;
	struct io_uring_mem_region_reg reg;
	struct io_uring_region_desc __user *rd_uptr;
	struct io_uring_region_desc rd;
	int ret;

	if (io_region_is_set(&ctx->param_region))
		return -EBUSY;
	if (copy_from_user(&reg, reg_uptr, sizeof(reg)))
		return -EFAULT;
	rd_uptr = u64_to_user_ptr(reg.region_uptr);
	if (copy_from_user(&rd, rd_uptr, sizeof(rd)))
		return -EFAULT;
	if (memchr_inv(&reg.__resv, 0, sizeof(reg.__resv)))
		return -EINVAL;
	if (reg.flags & ~IORING_MEM_REGION_REG_WAIT_ARG)
		return -EINVAL;

	/*
	 * This ensures there are no waiters. Waiters are unlocked and it's
	 * hard to synchronise with them, especially if we need to initialise
	 * the region.
	 */
	if ((reg.flags & IORING_MEM_REGION_REG_WAIT_ARG) &&
	    !(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EINVAL;

	ret = io_create_region_mmap_safe(ctx, &ctx->param_region, &rd,
					 IORING_MAP_OFF_PARAM_REGION);
	if (ret)
		return ret;
	if (copy_to_user(rd_uptr, &rd, sizeof(rd))) {
		io_free_region(ctx, &ctx->param_region);
		return -EFAULT;
	}

	if (reg.flags & IORING_MEM_REGION_REG_WAIT_ARG) {
		ctx->cq_wait_arg = io_region_get_ptr(&ctx->param_region);
		ctx->cq_wait_size = rd.size;
	}
	return 0;
}
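
/*
 * Core of io_uring_register(): validate restrictions and the submitter
 * task, then dispatch the register opcode. Called with ctx->uring_lock
 * held; individual handlers may temporarily drop it.
 */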
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We don't quiesce the refs for register anymore and so it can't be
	 * dying as we're holding a file ref here.
	 */
	if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
		return -ENXIO;

	if (ctx->submitter_task && ctx->submitter_task != current)
		return -EEXIST;

	if (ctx->restricted) {
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 0);
		break;
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 1);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
	case IORING_REGISTER_RING_FDS:
		ret = io_ringfd_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_RING_FDS:
		ret = io_ringfd_unregister(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_pbuf_ring(ctx, arg);
		break;
	case IORING_UNREGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_unregister_pbuf_ring(ctx, arg);
		break;
	case IORING_REGISTER_SYNC_CANCEL:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_sync_cancel(ctx, arg);
		break;
	case IORING_REGISTER_FILE_ALLOC_RANGE:
		ret = -EINVAL;
		if (!arg || nr_args)
			break;
		ret = io_register_file_alloc_range(ctx, arg);
		break;
	case IORING_REGISTER_PBUF_STATUS:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_pbuf_status(ctx, arg);
		break;
	case IORING_REGISTER_NAPI:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_napi(ctx, arg);
		break;
	case IORING_UNREGISTER_NAPI:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_unregister_napi(ctx, arg);
		break;
	case IORING_REGISTER_CLOCK:
		ret = -EINVAL;
		if (!arg || nr_args)
			break;
		ret = io_register_clock(ctx, arg);
		break;
	case IORING_REGISTER_CLONE_BUFFERS:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_clone_buffers(ctx, arg);
		break;
	case IORING_REGISTER_RESIZE_RINGS:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_resize_rings(ctx, arg);
		break;
	case IORING_REGISTER_MEM_REGION:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_mem_region(ctx, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * Given an 'fd' value, return the ctx associated with it. If 'registered' is
 * true, then the registered index is used. Otherwise, the normal fd table.
 * Caller must call fput() on the returned file, unless it's an ERR_PTR.
 */
struct file *io_uring_register_get_file(unsigned int fd, bool registered)
{
	struct file *file;

	if (registered) {
		/*
		 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
		 * need only dereference our task private array to find it.
		 */
		struct io_uring_task *tctx = current->io_uring;

		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
			return ERR_PTR(-EINVAL);
		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
		file = tctx->registered_rings[fd];
		if (file)
			get_file(file);
	} else {
		file = fget(fd);
	}

	if (!file)
		return ERR_PTR(-EBADF);
	if (io_is_uring_fops(file))
		return file;
	fput(file);
	return ERR_PTR(-EOPNOTSUPP);
}

/*
 * "blind" registration opcodes are ones where there's no ring given, and
 * hence the source fd must be -1.
 */
static int io_uring_register_blind(unsigned int opcode, void __user *arg,
				   unsigned int nr_args)
{
	switch (opcode) {
	case IORING_REGISTER_SEND_MSG_RING: {
		struct io_uring_sqe sqe;

		if (!arg || nr_args != 1)
			return -EINVAL;
		if (copy_from_user(&sqe, arg, sizeof(sqe)))
			return -EFAULT;
		/* no flags supported */
		if (sqe.flags)
			return -EINVAL;
		if (sqe.opcode == IORING_OP_MSG_RING)
			return io_uring_sync_msg_ring(&sqe);
		}
	}

	return -EINVAL;
}
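
/*
 * io_uring_register() syscall entry. IORING_REGISTER_USE_REGISTERED_RING in
 * the opcode means @fd indexes the task's registered ring fds rather than
 * the normal fd table, and fd == -1 selects the "blind" opcodes above.
 */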
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct file *file;
	bool use_registered_ring;

	use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
	opcode &= ~IORING_REGISTER_USE_REGISTERED_RING;

	if (opcode >= IORING_REGISTER_LAST)
		return -EINVAL;

	if (fd == -1)
		return io_uring_register_blind(opcode, arg, nr_args);

	file = io_uring_register_get_file(fd, use_registered_ring);
	if (IS_ERR(file))
		return PTR_ERR(file);
	ctx = file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);

	trace_io_uring_register(ctx, opcode, ctx->file_table.data.nr,
				ctx->buf_table.nr, ret);
	mutex_unlock(&ctx->uring_lock);

	fput(file);
	return ret;
}