// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING	= 1,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC	= 2,
};

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct io_uring_buf_ring *buf_ring;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;
	__u16 flags;

	struct io_mapped_region region;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND	= 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE		= 2,
};

struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	unsigned short nr_iovs;
	unsigned short mode;
};

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);
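
/*
 * Illustrative sketch (not part of this header): a bundle-style opcode can
 * use io_buffers_select() to map several provided buffers into an iovec
 * array in one call. The field names match struct buf_sel_arg above; the
 * surrounding handler, the kmsg->fast_iov storage and the iterator setup
 * are assumptions made for the example only.
 *
 *	struct buf_sel_arg arg = {
 *		.iovs = &kmsg->fast_iov,
 *		.nr_iovs = 1,
 *		.mode = KBUF_MODE_EXPAND | KBUF_MODE_FREE,
 *	};
 *	int nr;
 *
 *	nr = io_buffers_select(req, &arg, issue_flags);
 *	if (unlikely(nr < 0))
 *		return nr;
 *	iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, nr, arg.out_len);
 */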

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
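
/*
 * Userspace view (illustrative, not kernel code): io_register_pbuf_ring()
 * above services IORING_REGISTER_PBUF_RING. Assuming liburing >= 2.4, the
 * matching application-side setup looks roughly like the sketch below;
 * NR_BUFS, BGID, BUF_SIZE and the bufs[] array are placeholders for the
 * example.
 *
 *	struct io_uring_buf_ring *br;
 *	int i, ret;
 *
 *	br = io_uring_setup_buf_ring(&ring, NR_BUFS, BGID, 0, &ret);
 *	for (i = 0; i < NR_BUFS; i++)
 *		io_uring_buf_ring_add(br, bufs[i], BUF_SIZE, i,
 *				      io_uring_buf_ring_mask(NR_BUFS), i);
 *	io_uring_buf_ring_advance(br, NR_BUFS);
 */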

void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial IO, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
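
/*
 * Illustrative sketch (not part of this header): the usual issue-time flow
 * for an opcode that consumes provided buffers. The handler shape and the
 * do_nonblock_copy() helper are hypothetical; only io_do_buffer_select(),
 * io_buffer_select() and io_kbuf_recycle() come from this file.
 *
 *	size_t len = 0;
 *	void __user *buf = NULL;
 *	int ret;
 *
 *	if (io_do_buffer_select(req)) {
 *		buf = io_buffer_select(req, &len, issue_flags);
 *		if (!buf)
 *			return -ENOBUFS;
 *	}
 *	ret = do_nonblock_copy(buf, len);
 *	if (ret == -EAGAIN) {
 *		// nothing transferred yet: hand the buffer back for a retry
 *		io_kbuf_recycle(req, issue_flags);
 *		return -EAGAIN;
 *	}
 */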

/* Mapped buffer ring, return io_uring_buf from head */
#define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]
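
/*
 * The ring size is always a power of two, so "mask" is nr_entries - 1 and
 * (head & mask) wraps the free-running head into bufs[]: with 8 entries,
 * for example, head == 10 selects bufs[2].
 */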

static inline bool io_kbuf_commit(struct io_kiocb *req,
				  struct io_buffer_list *bl, int len, int nr)
{
	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
		return true;

	req->flags &= ~REQ_F_BUFFERS_COMMIT;

	if (unlikely(len < 0))
		return true;

	if (bl->flags & IOBL_INC) {
		struct io_uring_buf *buf;

		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
		if (WARN_ON_ONCE(len > buf->len))
			len = buf->len;
		buf->len -= len;
		if (buf->len) {
			buf->addr += len;
			return false;
		}
	}

	bl->head += nr;
	return true;
}
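
/*
 * Worked example of the IOBL_INC path above, assuming a single 64KB ring
 * buffer registered for incremental consumption: a receive that completes
 * 16KB shrinks buf->len to 48KB and advances buf->addr by 16KB, but leaves
 * bl->head untouched and makes io_kbuf_commit() return false, so the CQE
 * carries IORING_CQE_F_BUF_MORE. Only once the remaining 48KB have been
 * consumed does bl->head advance and the ring slot become reusable.
 */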

static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
	struct io_buffer_list *bl = req->buf_list;
	bool ret = true;

	if (bl) {
		ret = io_kbuf_commit(req, bl, len, nr);
		req->buf_index = bl->bgid;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
	return ret;
}

static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req, len, 1);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

static inline void io_kbuf_drop(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;

	/* len == 0 is fine here, non-ring will always drop all of it */
	__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
}

static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
					  int nbufs, unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING) {
		if (!__io_put_kbuf_ring(req, len, nbufs))
			ret |= IORING_CQE_F_BUF_MORE;
	} else {
		__io_put_kbuf(req, len, issue_flags);
	}
	return ret;
}
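
/*
 * Userspace view (illustrative, not kernel code): recovering the buffer id
 * from the CQE flags built above. The cqe pointer is assumed to be a just
 * reaped struct io_uring_cqe, and replenish_buffer() is a hypothetical
 * application helper.
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER) {
 *		unsigned int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *
 *		// IORING_CQE_F_BUF_MORE means the kernel still owns (part of)
 *		// the buffer, so don't hand it out again yet
 *		if (!(cqe->flags & IORING_CQE_F_BUF_MORE))
 *			replenish_buffer(bid);
 *	}
 */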

static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       unsigned issue_flags)
{
	return __io_put_kbufs(req, len, 1, issue_flags);
}
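
/*
 * Illustrative sketch (not part of this header): how a handler typically
 * consumes the return value at completion time. io_req_set_res() and IOU_OK
 * live in io_uring.h; treating this as the canonical pattern is an
 * assumption made for the example.
 *
 *	cflags = io_put_kbuf(req, ret, issue_flags);
 *	io_req_set_res(req, ret, cflags);
 *	return IOU_OK;
 */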

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					int nbufs, unsigned issue_flags)
{
	return __io_put_kbufs(req, len, nbufs, issue_flags);
}
#endif