// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;
};
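
/*
 * Example (illustrative): assuming ->nr_entries is a power of two and
 * ->mask == ->nr_entries - 1, as enforced when the ring is registered,
 * the next candidate ring entry is picked with a simple wrap-around
 * index, roughly:
 *
 *	struct io_uring_buf *buf = &bl->buf_ring->bufs[bl->head & bl->mask];
 */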

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};
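
/*
 * Illustrative note: each io_buffer describes one classic provided buffer
 * (user address, length, and buffer id ->bid within group ->bgid). On
 * completion, __io_put_kbuf_list() below returns the entry to a free list
 * rather than freeing it, so it can be handed out again.
 */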

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
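
/*
 * Example (illustrative userspace sketch): the ring behind
 * io_register_pbuf_ring() is registered from userspace by passing a
 * struct io_uring_buf_reg through the io_uring_register() syscall:
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) ring_mem,
 *		.ring_entries	= 8,	// must be a power of two
 *		.bgid		= 0,
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1);
 */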

unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING; we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/*
			 * If we end up here, then the io_uring_lock has
			 * been kept held since we retrieved the buffer.
			 * For the io-wq case, we already cleared
			 * req->buf_list when the buffer was retrieved,
			 * hence it cannot be set here for that case.
			 */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
		}
	}
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		io_kbuf_recycle_ring(req);
}
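
/*
 * Illustrative note: io_kbuf_recycle() is for requests that selected a
 * buffer but will be retried rather than completed, e.g. after -EAGAIN
 * or when arming poll; it hands the unused buffer back so it is not
 * leaked.
 */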

static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
					      struct list_head *list)
{
	unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (req->flags & REQ_F_BUFFER_RING) {
		if (req->buf_list) {
			req->buf_index = req->buf_list->bgid;
			req->buf_list->head++;
		}
		req->flags &= ~REQ_F_BUFFER_RING;
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	return ret;
}
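
/*
 * Illustrative note: the returned value is destined for cqe->flags, so a
 * userspace consumer that sees IORING_CQE_F_BUFFER set recovers the
 * buffer id with, roughly:
 *
 *	__u16 bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */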

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;
	return __io_put_kbuf(req, issue_flags);
}
#endif