/*
 * Helpers for using (partial) iovecs.
 *
 * Copyright (C) 2010 Red Hat, Inc.
 *
 * Author(s):
 *  Amit Shah <[email protected]>
 *  Michael Tokarev <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#ifndef IOV_H
#define IOV_H

/**
 * count and return data size, in bytes, of an iovec
 * starting at `iov' of `iov_cnt' number of elements.
 */
size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt);
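
/*
 * Usage sketch (illustrative only): `hdr', `data' and `data_len' below are
 * hypothetical caller-side variables.
 *
 *   struct iovec iov[2] = {
 *       { .iov_base = hdr,  .iov_len = sizeof(*hdr) },
 *       { .iov_base = data, .iov_len = data_len },
 *   };
 *   size_t total = iov_size(iov, 2);   // sizeof(*hdr) + data_len
 */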
22 | ||
23 | /** | |
24 | * Copy from single continuous buffer to scatter-gather vector of buffers | |
25 | * (iovec) and back like memcpy() between two continuous memory regions. | |
26 | * Data in single continuous buffer starting at address `buf' and | |
27 | * `bytes' bytes long will be copied to/from an iovec `iov' with | |
28 | * `iov_cnt' number of elements, starting at byte position `offset' | |
29 | * within the iovec. If the iovec does not contain enough space, | |
30 | * only part of data will be copied, up to the end of the iovec. | |
31 | * Number of bytes actually copied will be returned, which is | |
32 | * min(bytes, iov_size(iov)-offset) | |
2278a69e | 33 | * `Offset' must point to the inside of iovec. |
dcf6f5e1 | 34 | */ |
ad523bca PB |
35 | size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt, |
36 | size_t offset, const void *buf, size_t bytes); | |
37 | size_t iov_to_buf_full(const struct iovec *iov, const unsigned int iov_cnt, | |
7d37435b | 38 | size_t offset, void *buf, size_t bytes); |
ad523bca PB |
39 | |
40 | static inline size_t | |
41 | iov_from_buf(const struct iovec *iov, unsigned int iov_cnt, | |
42 | size_t offset, const void *buf, size_t bytes) | |
43 | { | |
44 | if (__builtin_constant_p(bytes) && iov_cnt && | |
45 | offset <= iov[0].iov_len && bytes <= iov[0].iov_len - offset) { | |
46 | memcpy(iov[0].iov_base + offset, buf, bytes); | |
47 | return bytes; | |
48 | } else { | |
49 | return iov_from_buf_full(iov, iov_cnt, offset, buf, bytes); | |
50 | } | |
51 | } | |
52 | ||
53 | static inline size_t | |
54 | iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt, | |
55 | size_t offset, void *buf, size_t bytes) | |
56 | { | |
57 | if (__builtin_constant_p(bytes) && iov_cnt && | |
58 | offset <= iov[0].iov_len && bytes <= iov[0].iov_len - offset) { | |
59 | memcpy(buf, iov[0].iov_base + offset, bytes); | |
60 | return bytes; | |
61 | } else { | |
62 | return iov_to_buf_full(iov, iov_cnt, offset, buf, bytes); | |
63 | } | |
64 | } | |
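
/*
 * Usage sketch (illustrative only): copy a fixed-size header in and out of
 * a scatter-gather list. `struct pkt_hdr', `iov' and `iov_cnt' below are
 * hypothetical caller-side names.
 *
 *   struct pkt_hdr hdr;
 *   if (iov_to_buf(iov, iov_cnt, 0, &hdr, sizeof(hdr)) < sizeof(hdr)) {
 *       // iovec was shorter than a header; handle the short copy
 *   }
 *   ...
 *   iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof(hdr));
 */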

/**
 * Set data bytes pointed out by iovec `iov' of size `iov_cnt' elements,
 * starting at byte offset `offset', to value `fillc', repeating it
 * `bytes' number of times. `Offset' must point to the inside of iovec.
 * If `bytes' is large enough, only the last portion of the iovec,
 * up to the end of it, will be filled with the specified value.
 * The function returns the actual number of bytes processed, which is
 * min(bytes, iov_size(iov) - offset).
 */
size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
                  size_t offset, int fillc, size_t bytes);
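
/*
 * Usage sketch (illustrative only): zero everything after a hypothetical
 * `hdr_len'-byte header, assuming hdr_len <= iov_size(iov, iov_cnt).
 *
 *   iov_memset(iov, iov_cnt, hdr_len, 0, iov_size(iov, iov_cnt) - hdr_len);
 */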
77 | ||
3e80bf93 MT |
78 | /* |
79 | * Send/recv data from/to iovec buffers directly | |
80 | * | |
81 | * `offset' bytes in the beginning of iovec buffer are skipped and | |
82 | * next `bytes' bytes are used, which must be within data of iovec. | |
83 | * | |
25e5e4c7 | 84 | * r = iov_send_recv(sockfd, iov, iovcnt, offset, bytes, true); |
3e80bf93 MT |
85 | * |
86 | * is logically equivalent to | |
87 | * | |
88 | * char *buf = malloc(bytes); | |
89 | * iov_to_buf(iov, iovcnt, offset, buf, bytes); | |
90 | * r = send(sockfd, buf, bytes, 0); | |
91 | * free(buf); | |
25e5e4c7 MT |
92 | * |
93 | * For iov_send_recv() _whole_ area being sent or received | |
94 | * should be within the iovec, not only beginning of it. | |
3e80bf93 | 95 | */ |
6b64640d | 96 | ssize_t iov_send_recv(int sockfd, const struct iovec *iov, unsigned iov_cnt, |
e3e87df4 | 97 | size_t offset, size_t bytes, bool do_send); |
25e5e4c7 MT |
98 | #define iov_recv(sockfd, iov, iov_cnt, offset, bytes) \ |
99 | iov_send_recv(sockfd, iov, iov_cnt, offset, bytes, false) | |
100 | #define iov_send(sockfd, iov, iov_cnt, offset, bytes) \ | |
101 | iov_send_recv(sockfd, iov, iov_cnt, offset, bytes, true) | |
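
/*
 * Usage sketch (illustrative only): push a whole iovec out of a blocking
 * socket, advancing the offset on short writes. `sockfd', `iov' and
 * `iov_cnt' are hypothetical caller-side variables.
 *
 *   size_t done = 0, total = iov_size(iov, iov_cnt);
 *   while (done < total) {
 *       ssize_t r = iov_send(sockfd, iov, iov_cnt, done, total - done);
 *       if (r < 0) {
 *           break;   // inspect errno, handle EINTR/EAGAIN as appropriate
 *       }
 *       done += r;
 *   }
 */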

/**
 * Produce a text hexdump of iovec `iov' with `iov_cnt' number of elements
 * in file `fp', prefixing each line with `prefix' and processing not more
 * than `limit' data bytes.
 */
void iov_hexdump(const struct iovec *iov, const unsigned int iov_cnt,
                 FILE *fp, const char *prefix, size_t limit);
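
/*
 * Usage sketch (illustrative only): dump at most the first 64 bytes of the
 * vector to stderr for debugging.
 *
 *   iov_hexdump(iov, iov_cnt, stderr, "iov", 64);
 */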

/*
 * Partial copy of a vector from iov to dst_iov (the data itself is not
 * copied). dst_iov overlaps iov at the specified offset and covers at
 * most `bytes' bytes. The resulting dst_iov element count is returned.
 */
unsigned iov_copy(struct iovec *dst_iov, unsigned int dst_iov_cnt,
                  const struct iovec *iov, unsigned int iov_cnt,
                  size_t offset, size_t bytes);
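
/*
 * Usage sketch (illustrative only): build a sub-vector describing bytes
 * [off, off + len) of an existing vector, without touching the data.
 * `off' and `len' are hypothetical caller-side variables.
 *
 *   struct iovec sub[8];
 *   unsigned sub_cnt = iov_copy(sub, 8, iov, iov_cnt, off, len);
 */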

/*
 * Remove a given number of bytes from the front or back of a vector.
 * This may update iov and/or iov_cnt to exclude iovec elements that are
 * no longer required.
 *
 * The number of bytes actually discarded is returned. This number may be
 * smaller than requested if the vector is too small.
 */
size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
                         size_t bytes);
size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
                        size_t bytes);
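
/*
 * Usage sketch (illustrative only): strip a hypothetical `hdr_len'-byte
 * header from the front of a vector before handing it on. Note that
 * iov_discard_front() may advance the iov pointer, so keep the original
 * pointer around if you need to free or reuse the array.
 *
 *   struct iovec *cur = iov;
 *   unsigned int cur_cnt = iov_cnt;
 *   size_t stripped = iov_discard_front(&cur, &cur_cnt, hdr_len);
 */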
132 | ||
9dd6f7c2 SH |
133 | /* Information needed to undo an iov_discard_*() operation */ |
134 | typedef struct { | |
135 | struct iovec *modified_iov; | |
136 | struct iovec orig; | |
137 | } IOVDiscardUndo; | |
138 | ||
139 | /* | |
140 | * Undo an iov_discard_front_undoable() or iov_discard_back_undoable() | |
141 | * operation. If multiple operations are made then each one needs a separate | |
142 | * IOVDiscardUndo and iov_discard_undo() must be called in the reverse order | |
143 | * that the operations were made. | |
144 | */ | |
145 | void iov_discard_undo(IOVDiscardUndo *undo); | |
146 | ||
147 | /* | |
148 | * Undoable versions of iov_discard_front() and iov_discard_back(). Use | |
149 | * iov_discard_undo() to reset to the state before the discard operations. | |
150 | */ | |
151 | size_t iov_discard_front_undoable(struct iovec **iov, unsigned int *iov_cnt, | |
152 | size_t bytes, IOVDiscardUndo *undo); | |
153 | size_t iov_discard_back_undoable(struct iovec *iov, unsigned int *iov_cnt, | |
154 | size_t bytes, IOVDiscardUndo *undo); | |
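
/*
 * Usage sketch (illustrative only): temporarily trim a trailing footer
 * (hypothetical `footer_len'), process the shortened vector, then restore
 * the modified iovec element afterwards. process() is hypothetical.
 *
 *   IOVDiscardUndo undo;
 *   iov_discard_back_undoable(iov, &iov_cnt, footer_len, &undo);
 *   process(iov, iov_cnt);
 *   iov_discard_undo(&undo);   // undo the element modification
 */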
155 | ||
daf015ef MA |
156 | typedef struct QEMUIOVector { |
157 | struct iovec *iov; | |
158 | int niov; | |
a1ca3ed5 VSO |
159 | |
160 | /* | |
161 | * For external @iov (qemu_iovec_init_external()) or allocated @iov | |
162 | * (qemu_iovec_init()), @size is the cumulative size of iovecs and | |
163 | * @local_iov is invalid and unused. | |
164 | * | |
165 | * For embedded @iov (QEMU_IOVEC_INIT_BUF() or qemu_iovec_init_buf()), | |
166 | * @iov is equal to &@local_iov, and @size is valid, as it has same | |
167 | * offset and type as @local_iov.iov_len, which is guaranteed by | |
168 | * static assertion below. | |
169 | * | |
170 | * @nalloc is always valid and is -1 both for embedded and external | |
171 | * cases. It is included in the union only to ensure the padding prior | |
172 | * to the @size field will not result in a 0-length array. | |
173 | */ | |
174 | union { | |
175 | struct { | |
176 | int nalloc; | |
177 | struct iovec local_iov; | |
178 | }; | |
179 | struct { | |
180 | char __pad[sizeof(int) + offsetof(struct iovec, iov_len)]; | |
181 | size_t size; | |
182 | }; | |
183 | }; | |
daf015ef MA |
184 | } QEMUIOVector; |
185 | ||
a1ca3ed5 VSO |
186 | QEMU_BUILD_BUG_ON(offsetof(QEMUIOVector, size) != |
187 | offsetof(QEMUIOVector, local_iov.iov_len)); | |
188 | ||
189 | #define QEMU_IOVEC_INIT_BUF(self, buf, len) \ | |
190 | { \ | |
191 | .iov = &(self).local_iov, \ | |
192 | .niov = 1, \ | |
193 | .nalloc = -1, \ | |
194 | .local_iov = { \ | |
195 | .iov_base = (void *)(buf), /* cast away const */ \ | |
196 | .iov_len = (len), \ | |
197 | }, \ | |
198 | } | |
199 | ||
200 | /* | |
201 | * qemu_iovec_init_buf | |
202 | * | |
203 | * Initialize embedded QEMUIOVector. | |
204 | * | |
205 | * Note: "const" is used over @buf pointer to make it simple to pass | |
206 | * const pointers, appearing in read functions. Then this "const" is | |
207 | * cast away by QEMU_IOVEC_INIT_BUF(). | |
208 | */ | |
209 | static inline void qemu_iovec_init_buf(QEMUIOVector *qiov, | |
210 | const void *buf, size_t len) | |
211 | { | |
212 | *qiov = (QEMUIOVector) QEMU_IOVEC_INIT_BUF(*qiov, buf, len); | |
213 | } | |
214 | ||
215 | static inline void *qemu_iovec_buf(QEMUIOVector *qiov) | |
216 | { | |
217 | /* Only supports embedded iov */ | |
218 | assert(qiov->nalloc == -1 && qiov->iov == &qiov->local_iov); | |
219 | ||
220 | return qiov->local_iov.iov_base; | |
221 | } | |
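
/*
 * Usage sketch (illustrative only): wrap a single flat buffer in an
 * embedded QEMUIOVector. `buf' and `len' are hypothetical caller-side
 * variables; the embedded form performs no allocation, so there is
 * nothing to free.
 *
 *   QEMUIOVector qiov;
 *   qemu_iovec_init_buf(&qiov, buf, len);
 *
 * or, as a compound initializer:
 *
 *   QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, len);
 */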
222 | ||
daf015ef MA |
223 | void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint); |
224 | void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov); | |
d953169d VSO |
225 | void qemu_iovec_init_extended( |
226 | QEMUIOVector *qiov, | |
227 | void *head_buf, size_t head_len, | |
228 | QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len, | |
229 | void *tail_buf, size_t tail_len); | |
230 | void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source, | |
231 | size_t offset, size_t len); | |
5396234b | 232 | int qemu_iovec_subvec_niov(QEMUIOVector *qiov, size_t offset, size_t len); |
daf015ef MA |
233 | void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len); |
234 | void qemu_iovec_concat(QEMUIOVector *dst, | |
235 | QEMUIOVector *src, size_t soffset, size_t sbytes); | |
236 | size_t qemu_iovec_concat_iov(QEMUIOVector *dst, | |
237 | struct iovec *src_iov, unsigned int src_cnt, | |
238 | size_t soffset, size_t sbytes); | |
f76889e7 | 239 | bool qemu_iovec_is_zero(QEMUIOVector *qiov, size_t qiov_offeset, size_t bytes); |
daf015ef MA |
240 | void qemu_iovec_destroy(QEMUIOVector *qiov); |
241 | void qemu_iovec_reset(QEMUIOVector *qiov); | |
242 | size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset, | |
243 | void *buf, size_t bytes); | |
244 | size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset, | |
245 | const void *buf, size_t bytes); | |
246 | size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset, | |
247 | int fillc, size_t bytes); | |
248 | ssize_t qemu_iovec_compare(QEMUIOVector *a, QEMUIOVector *b); | |
249 | void qemu_iovec_clone(QEMUIOVector *dest, const QEMUIOVector *src, void *buf); | |
250 | void qemu_iovec_discard_back(QEMUIOVector *qiov, size_t bytes); | |
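
/*
 * Usage sketch (illustrative only): build a two-element vector on the heap,
 * copy it out into a flat buffer, then release it. `hdr', `payload', `flat'
 * and the length variables are hypothetical caller-side names.
 *
 *   QEMUIOVector qiov;
 *   qemu_iovec_init(&qiov, 2);
 *   qemu_iovec_add(&qiov, hdr, hdr_len);
 *   qemu_iovec_add(&qiov, payload, payload_len);
 *   qemu_iovec_to_buf(&qiov, 0, flat, qiov.size);
 *   qemu_iovec_destroy(&qiov);
 */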
251 | ||
cb9c377f | 252 | #endif |