// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"
#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	int				iou_flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
	bool				seen_econnaborted;
};

struct io_bind {
	struct file			*file;
	int				addr_len;
};

struct io_listen {
	struct file			*file;
	int				backlog;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	unsigned			nr_multishot_loops;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	void __user			*msg_control;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

/*
 * Number of times we'll try and do receives if there's more data. If we
 * exceed this limit, then add us to the back of the queue and retry from
 * there. This helps fairness between flooding clients.
 */
#define MULTISHOT_MAX_RETRY	32
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
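/*
 * Userspace sketch (not part of this file): driving IORING_OP_SHUTDOWN
 * through liburing. Assumes a liburing version providing
 * io_uring_prep_shutdown(); "sockfd" is a hypothetical connected socket.
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	// cqe->res mirrors shutdown(2): 0 on success, -errno on failure
 *	io_uring_cqe_seen(&ring, cqe);
 */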
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
{
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov_nr = 0;
		kmsg->free_iov = NULL;
	}
}
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;
	struct iovec *iov;

	/* can't recycle, ensure we free the iovec if we have one */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
		io_netmsg_iovec_free(hdr);
		return;
	}

	/* Let normal cleanup path reap it if we fail adding to the cache */
	iov = hdr->free_iov;
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
		if (iov)
			kasan_mempool_poison_object(iov);
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_msghdr *hdr;

	hdr = io_alloc_cache_get(&ctx->netmsg_cache);
	if (hdr) {
		if (hdr->free_iov) {
			kasan_mempool_unpoison_object(hdr->free_iov,
				hdr->free_iov_nr * sizeof(struct iovec));
			req->flags |= REQ_F_NEED_CLEANUP;
		}
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov_nr = 0;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}
/* assign new iovec to kmsg, if we need to */
static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
			     struct iovec *iov)
{
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		kmsg->free_iov = iov;
	}
	return 0;
}
static inline void io_mshot_prep_retry(struct io_kiocb *req,
				       struct io_async_msghdr *kmsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	req->flags &= ~REQ_F_BL_EMPTY;
	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}
#ifdef CONFIG_COMPAT
static int io_compat_msg_copy_hdr(struct io_kiocb *req,
				  struct io_async_msghdr *iomsg,
				  struct compat_msghdr *msg, int ddir)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_iovec __user *uiov;
	struct iovec *iov;
	int ret, nr_segs;

	if (iomsg->free_iov) {
		nr_segs = iomsg->free_iov_nr;
		iov = iomsg->free_iov;
	} else {
		iov = &iomsg->fast_iov;
		nr_segs = 1;
	}

	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
		return -EFAULT;

	uiov = compat_ptr(msg->msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg->msg_iovlen == 0) {
			sr->len = iov->iov_len = 0;
			iov->iov_base = NULL;
		} else if (msg->msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		return 0;
	}

	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
				nr_segs, &iov, &iomsg->msg.msg_iter, true);
	if (unlikely(ret < 0))
		return ret;

	return io_net_vec_assign(req, iomsg, iov);
}
#endif
static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
			   struct user_msghdr *msg, int ddir)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct iovec *iov;
	int ret, nr_segs;

	if (iomsg->free_iov) {
		nr_segs = iomsg->free_iov_nr;
		iov = iomsg->free_iov;
	} else {
		iov = &iomsg->fast_iov;
		nr_segs = 1;
	}

	if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = -EFAULT;
	unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end);
	unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end);
	unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end);
	unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end);
	unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end);
	unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end);
	msg->msg_flags = 0;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg->msg_iovlen == 0) {
			sr->len = iov->iov_len = 0;
			iov->iov_base = NULL;
		} else if (msg->msg_iovlen > 1) {
			ret = -EINVAL;
			goto ua_end;
		} else {
			/* we only need the length for provided buffers */
			if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
				goto ua_end;
			unsafe_get_user(iov->iov_len, &msg->msg_iov[0].iov_len,
					ua_end);
			sr->len = iov->iov_len;
		}
		ret = 0;
ua_end:
		user_access_end();
		return ret;
	}

	user_access_end();
	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,
				&iov, &iomsg->msg.msg_iter, false);
	if (unlikely(ret < 0))
		return ret;

	return io_net_vec_assign(req, iomsg, iov);
}
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (unlikely(req->ctx->compat)) {
		struct compat_msghdr cmsg;

		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
		if (unlikely(ret))
			return ret;

		return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
	}
#endif

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
	if (unlikely(ret))
		return ret;

	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);

	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	io_netmsg_iovec_free(io);
}
static int io_send_setup(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	int ret;

	kmsg->msg.msg_name = NULL;
	kmsg->msg.msg_namelen = 0;
	kmsg->msg.msg_control = NULL;
	kmsg->msg.msg_controllen = 0;
	kmsg->msg.msg_ubuf = NULL;

	if (sr->addr) {
		ret = move_addr_to_kernel(sr->addr, sr->addr_len, &kmsg->addr);
		if (unlikely(ret < 0))
			return ret;
		kmsg->msg.msg_name = &kmsg->addr;
		kmsg->msg.msg_namelen = sr->addr_len;
	}
	if (!io_do_buffer_select(req)) {
		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret < 0))
			return ret;
	}
	return 0;
}
static int io_sendmsg_prep_setup(struct io_kiocb *req, int is_msg)
{
	struct io_async_msghdr *kmsg;
	int ret;

	kmsg = io_msg_alloc_async(req);
	if (unlikely(!kmsg))
		return -ENOMEM;
	if (!is_msg)
		return io_send_setup(req);
	ret = io_sendmsg_copy_hdr(req, kmsg);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}
#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~SENDMSG_FLAGS)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		if (req->opcode == IORING_OP_SENDMSG)
			return -EINVAL;
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		sr->msg_flags |= MSG_WAITALL;
		sr->buf_group = req->buf_index;
		req->buf_list = NULL;
	}
	if (req->flags & REQ_F_BUFFER_SELECT && sr->len)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG);
}
static void io_req_msg_cleanup(struct io_kiocb *req,
			       unsigned int issue_flags)
{
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
}
/*
 * For bundle completions, we need to figure out how many segments we consumed.
 * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
 * could be using an ITER_IOVEC. If the latter, then if we consumed all of
 * the segments, then it's a trivial question to answer. If we have residual
 * data in the iter, then loop the segments to figure out how much we
 * transferred.
 */
static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
{
	struct iovec *iov;
	int nbufs;

	/* no data is always zero segments, and a ubuf is always 1 segment */
	if (ret <= 0)
		return 0;
	if (iter_is_ubuf(&kmsg->msg.msg_iter))
		return 1;

	iov = kmsg->free_iov;
	if (!iov)
		iov = &kmsg->fast_iov;

	/* if all data was transferred, it's basic pointer math */
	if (!iov_iter_count(&kmsg->msg.msg_iter))
		return iter_iov(&kmsg->msg.msg_iter) - iov;

	/* short transfer, count segments */
	nbufs = 0;
	do {
		int this_len = min_t(int, iov[nbufs].iov_len, ret);

		nbufs++;
		ret -= this_len;
	} while (ret);

	return nbufs;
}
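/*
 * Worked example for the short-transfer path above, illustrative only:
 * with three selected buffers of 4k each and ret == 6k, the loop counts
 * buffer 0 (4k, ret drops to 2k) and buffer 1 (2k, ret drops to 0) and
 * returns 2, so io_put_kbufs() releases exactly the two buffers that
 * carried data while the third stays available for reuse.
 */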
static inline bool io_send_finish(struct io_kiocb *req, int *ret,
				  struct io_async_msghdr *kmsg,
				  unsigned issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	bool bundle_finished = *ret <= 0;
	unsigned int cflags;

	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
		cflags = io_put_kbuf(req, issue_flags);
		goto finish;
	}

	cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret), issue_flags);

	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
		goto finish;

	/*
	 * Fill CQE for this send and see if we should keep trying to
	 * send from this socket.
	 */
	if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
		io_mshot_prep_retry(req, kmsg);
		return false;
	}

	/* Otherwise stop bundle and use the current result. */
finish:
	io_req_set_res(req, *ret, cflags);
	*ret = IOU_OK;
	return true;
}
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_control_user = sr->msg_control;

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	io_req_msg_cleanup(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;

retry_bundle:
	if (io_do_buffer_select(req)) {
		struct buf_sel_arg arg = {
			.iovs = &kmsg->fast_iov,
			.max_len = min_not_zero(sr->len, INT_MAX),
			.nr_iovs = 1,
		};

		if (kmsg->free_iov) {
			arg.nr_iovs = kmsg->free_iov_nr;
			arg.iovs = kmsg->free_iov;
			arg.mode = KBUF_MODE_FREE;
		}

		if (!(sr->flags & IORING_RECVSEND_BUNDLE))
			arg.nr_iovs = 1;
		else
			arg.mode |= KBUF_MODE_EXPAND;

		ret = io_buffers_select(req, &arg, issue_flags);
		if (unlikely(ret < 0))
			return ret;

		sr->len = arg.out_len;
		iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE, arg.iovs, ret,
				arg.out_len);
		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
			kmsg->free_iov_nr = ret;
			kmsg->free_iov = arg.iovs;
			req->flags |= REQ_F_NEED_CLEANUP;
		}
	}

	/*
	 * If MSG_WAITALL is set, or this is a bundle send, then we need
	 * the full amount. If just bundle is set, if we do a short send
	 * then we complete the bundle sequence rather than continue on.
	 */
	if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	kmsg->msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &kmsg->msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	if (!io_send_finish(req, &ret, kmsg, issue_flags))
		goto retry_bundle;

	io_req_msg_cleanup(req, issue_flags);
	return ret;
}
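/*
 * Userspace sketch (not part of this file): a bundle send with provided
 * buffers via liburing. Hedged: io_uring_prep_send_bundle() exists only
 * in recent liburing, and "sockfd" plus a registered buffer ring in
 * group 0 are assumed to be set up already.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send_bundle(sqe, sockfd, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 0;
 *	io_uring_submit(&ring);
 *	// one CQE covers the bundle; cqe->res is the total bytes sent
 */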
static int io_recvmsg_mshot_prep(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg,
				 int namelen, size_t controllen)
{
	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
			  (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
		int hdr;

		if (unlikely(namelen < 0))
			return -EOVERFLOW;
		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
					namelen, &hdr))
			return -EOVERFLOW;
		if (check_add_overflow(hdr, controllen, &hdr))
			return -EOVERFLOW;

		iomsg->namelen = namelen;
		iomsg->controllen = controllen;
		return 0;
	}

	return 0;
}
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct user_msghdr msg;
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (unlikely(req->ctx->compat)) {
		struct compat_msghdr cmsg;

		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
		if (unlikely(ret))
			return ret;

		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
		if (unlikely(ret))
			return ret;

		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
						cmsg.msg_controllen);
	}
#endif

	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
	if (unlikely(ret))
		return ret;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (unlikely(ret))
		return ret;

	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
					msg.msg_controllen);
}
static int io_recvmsg_prep_setup(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg;
	int ret;

	kmsg = io_msg_alloc_async(req);
	if (unlikely(!kmsg))
		return -ENOMEM;

	if (req->opcode == IORING_OP_RECV) {
		kmsg->msg.msg_name = NULL;
		kmsg->msg.msg_namelen = 0;
		kmsg->msg.msg_control = NULL;
		kmsg->msg.msg_get_inq = 1;
		kmsg->msg.msg_controllen = 0;
		kmsg->msg.msg_iocb = NULL;
		kmsg->msg.msg_ubuf = NULL;

		if (!io_do_buffer_select(req)) {
			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
					  &kmsg->msg.msg_iter);
			if (unlikely(ret))
				return ret;
		}
		return 0;
	}

	ret = io_recvmsg_copy_hdr(req, kmsg);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}
#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
			IORING_RECVSEND_BUNDLE)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~RECVMSG_FLAGS)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (req->flags & REQ_F_BUFFER_SELECT) {
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * deduce what buffer group was used.
		 */
		sr->buf_group = req->buf_index;
		req->buf_list = NULL;
	}
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}
	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		if (req->opcode == IORING_OP_RECVMSG)
			return -EINVAL;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->nr_multishot_loops = 0;
	return io_recvmsg_prep_setup(req);
}
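/*
 * Userspace sketch (not part of this file): a multishot receive with a
 * provided-buffer ring, assuming liburing with io_uring_prep_recv_multishot()
 * and a buffer ring already registered as group 0. Illustrative only.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 0;
 *	io_uring_submit(&ring);
 *	// each arriving chunk posts a CQE; IORING_CQE_F_MORE stays set while
 *	// the request is armed, and the buffer id sits in
 *	// cqe->flags >> IORING_CQE_BUFFER_SHIFT
 */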
/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct io_async_msghdr *kmsg,
				  bool mshot_finished, unsigned issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	unsigned int cflags = 0;

	if (kmsg->msg.msg_inq > 0)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (sr->flags & IORING_RECVSEND_BUNDLE) {
		cflags |= io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
				      issue_flags);
		/* bundle with no more immediate buffers, we're done */
		if (req->flags & REQ_F_BL_EMPTY)
			goto finish;
	} else {
		cflags |= io_put_kbuf(req, issue_flags);
	}

	/*
	 * Fill CQE for this receive and see if we should keep trying to
	 * receive from this socket.
	 */
	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;

		io_mshot_prep_retry(req, kmsg);
		/* Known not-empty or unknown state, retry */
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
				return false;
			/* mshot retries exceeded, force a requeue */
			sr->nr_multishot_loops = 0;
			mshot_retry_ret = IOU_REQUEUE;
		}
		if (issue_flags & IO_URING_F_MULTISHOT)
			*ret = mshot_retry_ret;
		else
			*ret = -EAGAIN;
		return true;
	}

	/* Finish the request / stop multishot. */
finish:
	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	io_req_msg_cleanup(req, issue_flags);
	return true;
}
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}
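/*
 * Resulting layout in the selected buffer, as consumed by userspace
 * (namelen/controllen/payloadlen report the pre-truncation sizes):
 *
 *	struct io_uring_recvmsg_out | name (namelen) | control (controllen) | payload
 *
 * liburing's io_uring_recvmsg_validate() and related accessors parse
 * this format on the completion side.
 */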
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
		goto retry_multishot;

	return ret;
}
static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
			      size_t *len, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	/*
	 * If the ring isn't locked, then don't use the peek interface
	 * to grab multiple buffers as we will lock/unlock between
	 * this selection and posting the buffers.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    sr->flags & IORING_RECVSEND_BUNDLE) {
		struct buf_sel_arg arg = {
			.iovs = &kmsg->fast_iov,
			.nr_iovs = 1,
			.mode = KBUF_MODE_EXPAND,
		};

		if (kmsg->free_iov) {
			arg.nr_iovs = kmsg->free_iov_nr;
			arg.iovs = kmsg->free_iov;
			arg.mode |= KBUF_MODE_FREE;
		}

		if (kmsg->msg.msg_inq > 0)
			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);

		ret = io_buffers_peek(req, &arg);
		if (unlikely(ret < 0))
			return ret;

		/* special case 1 vec, can be a fast path */
		if (ret == 1) {
			sr->buf = arg.iovs[0].iov_base;
			sr->len = arg.iovs[0].iov_len;
			goto map_ubuf;
		}
		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
				arg.out_len);
		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
			kmsg->free_iov_nr = ret;
			kmsg->free_iov = arg.iovs;
			req->flags |= REQ_F_NEED_CLEANUP;
		}
	} else {
		void __user *buf;

		*len = sr->len;
		buf = io_buffer_select(req, len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
		sr->len = *len;
map_ubuf:
		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
				  &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

retry_multishot:
	if (io_do_buffer_select(req)) {
		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
		if (unlikely(ret)) {
			kmsg->msg.msg_inq = -1;
			goto out_free;
		}
		sr->buf = NULL;
	}

	kmsg->msg.msg_flags = 0;
	kmsg->msg.msg_inq = -1;

	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = sock_recvmsg(sock, &kmsg->msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, kmsg, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}
void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io = req->async_data;

	if (req_has_async_data(req))
		io_netmsg_iovec_free(io);
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}
#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	zc->done_io = 0;
	req->flags |= REQ_F_POLL_NO_LAZY;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			struct io_notif_data *nd = io_notif_to_data(notif);

			nd->zc_report = true;
			nd->zc_used = false;
			nd->zc_copied = false;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG_ZC);
}
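/*
 * Userspace sketch (not part of this file): zerocopy send via liburing,
 * assuming io_uring_prep_send_zc() is available. Two CQEs arrive for the
 * same user_data: the result (IORING_CQE_F_MORE set) and, once the kernel
 * is done with the pages, a notification carrying IORING_CQE_F_NOTIF.
 * "buf" must stay stable until the notification lands.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);		// result, F_MORE set
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_wait_cqe(&ring, &cqe);		// F_NOTIF: buffer reusable
 *	io_uring_cqe_seen(&ring, cqe);
 */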
static int io_sg_from_iter_iovec(struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return zerocopy_fill_skb_from_iter(skb, from, length);
}
static int io_sg_from_iter(struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return zerocopy_fill_skb_from_iter(skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;
	return ret;
}
static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter, req->imu,
					(u64)(uintptr_t)sr->buf, sr->len);
		if (unlikely(ret))
			return ret;
		kmsg->msg.sg_from_iter = io_sg_from_iter;
	} else {
		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(sr->notif, sr->len);
		if (unlikely(ret))
			return ret;
		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	return ret;
}
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!zc->done_io) {
		ret = io_send_zc_import(req, kmsg);
		if (unlikely(ret))
			return ret;
	}

	msg_flags = zc->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	kmsg->msg.msg_flags = msg_flags;
	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &kmsg->msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
		io_req_msg_cleanup(req, 0);
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_control_user = sr->msg_control;
	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_BL_NO_RECYCLE;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		sr->notif = NULL;
		io_req_msg_cleanup(req, 0);
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (sr->done_io)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}
#define ACCEPT_FLAGS (IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
			IORING_ACCEPT_POLL_FIRST)

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	accept->iou_flags = READ_ONCE(sqe->ioprio);
	if (accept->iou_flags & ~ACCEPT_FLAGS)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	return 0;
}
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool fixed = !!accept->file_slot;
	struct proto_accept_arg arg = {
		.flags = force_nonblock ? O_NONBLOCK : 0,
	};
	struct file *file;
	unsigned cflags;
	int ret, fd;

	if (!(req->flags & REQ_F_POLLED) &&
	    accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
		return -EAGAIN;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	arg.err = 0;
	arg.is_empty = -1;
	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock &&
		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT)) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				return IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	cflags = 0;
	if (!arg.is_empty)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, cflags);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
			goto retry;
		if (issue_flags & IO_URING_F_MULTISHOT)
			return IOU_ISSUE_SKIP_COMPLETE;
		return -EAGAIN;
	}

	io_req_set_res(req, ret, cflags);
	return IOU_STOP_MULTISHOT;
}
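/*
 * Userspace sketch (not part of this file): multishot accept with
 * liburing's io_uring_prep_multishot_accept(). One armed SQE yields a
 * CQE per inbound connection; a cleared IORING_CQE_F_MORE means the
 * request terminated and must be re-armed. "handle_client" is a
 * hypothetical helper.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_multishot_accept(sqe, listenfd, NULL, NULL, 0);
 *	io_uring_submit(&ring);
 *	for (;;) {
 *		io_uring_wait_cqe(&ring, &cqe);
 *		if (cqe->res >= 0)
 *			handle_client(cqe->res);
 *		bool rearm = !(cqe->flags & IORING_CQE_F_MORE);
 *		io_uring_cqe_seen(&ring, cqe);
 *		if (rearm)
 *			break;
 *	}
 */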
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_msghdr *io;

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;

	io = io_msg_alloc_async(req);
	if (unlikely(!io))
		return -ENOMEM;

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
}
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_msghdr *io = req->async_data;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
				 file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		return -EAGAIN;
	}
	if (connect->in_progress) {
		/*
		 * At least bluetooth will return -EBADFD on a re-connect
		 * attempt, and it's (supposedly) also valid to get -EISCONN
		 * which means the previous result is good. For both of these,
		 * grab the sock_error() and use that for the completion.
		 */
		if (ret == -EBADFD || ret == -EISCONN)
			ret = sock_error(sock_from_file(req->file)->sk);
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_msg_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
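/*
 * Userspace sketch (not part of this file): async connect via liburing's
 * io_uring_prep_connect(). The sockaddr only needs to live until prep
 * time, since the kernel side above copies it with move_addr_to_kernel().
 *
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(80),
 *	};
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&dst, sizeof(dst));
 *	io_uring_submit(&ring);
 *	// cqe->res is 0 on success, -errno otherwise, mirroring connect(2)
 */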
int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
	struct sockaddr __user *uaddr;
	struct io_async_msghdr *io;

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	bind->addr_len = READ_ONCE(sqe->addr2);

	io = io_msg_alloc_async(req);
	if (unlikely(!io))
		return -ENOMEM;
	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
}
int io_bind(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
	struct io_async_msghdr *io = req->async_data;
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return 0;
}
int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);

	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
		return -EINVAL;

	listen->backlog = READ_ONCE(sqe->len);
	return 0;
}
int io_listen(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_listen_socket(sock, listen->backlog);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return 0;
}
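/*
 * Userspace sketch (not part of this file): bind + listen as linked SQEs,
 * so listen only runs once bind succeeds. Hedged: io_uring_prep_bind()
 * and io_uring_prep_listen() need a recent liburing paired with a kernel
 * that provides these opcodes (6.11+); "addr" is a prepared sockaddr.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_bind(sqe, sockfd, (struct sockaddr *)&addr, sizeof(addr));
 *	sqe->flags |= IOSQE_IO_LINK;
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_listen(sqe, sockfd, SOMAXCONN);
 *	io_uring_submit(&ring);
 */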
void io_netmsg_cache_free(const void *entry)
{
	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;

	if (kmsg->free_iov) {
		kasan_mempool_unpoison_object(kmsg->free_iov,
				kmsg->free_iov_nr * sizeof(struct iovec));
		io_netmsg_iovec_free(kmsg);
	}
	kfree(kmsg);
}
#endif