/*
 * Copyright (c) 2006, 2020 Oracle and/or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/errqueue.h>

#include "rds.h"

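/*
 * Per-type payload sizes of the optional extension headers that can follow
 * the fixed RDS header.  RDS_EXTHDR_NONE (size 0) terminates the extension
 * area.
 */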
static unsigned int	rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE]	= 0,
[RDS_EXTHDR_VERSION]	= sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA]	= sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST]	= sizeof(struct rds_ext_header_rdma_dest),
[RDS_EXTHDR_NPATHS]	= sizeof(u16),
[RDS_EXTHDR_GEN_NUM]	= sizeof(u32),
};

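/*
 * Take an additional reference on @rm.  Every rds_message_addref() must be
 * balanced by an rds_message_put().
 */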
void rds_message_addref(struct rds_message *rm)
{
	rdsdebug("addref rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
	refcount_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);

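/*
 * Append a zerocopy completion cookie to @info.  Returns false when the
 * entry already holds RDS_MAX_ZCOOKIES cookies and a fresh entry is needed.
 */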
static inline bool rds_zcookie_add(struct rds_msg_zcopy_info *info, u32 cookie)
{
	struct rds_zcopy_cookies *ck = &info->zcookies;
	int ncookies = ck->num;

	if (ncookies == RDS_MAX_ZCOOKIES)
		return false;
	ck->cookies[ncookies] = cookie;
	ck->num = ++ncookies;
	return true;
}

static struct rds_msg_zcopy_info *rds_info_from_znotifier(struct rds_znotifier *znotif)
{
	return container_of(znotif, struct rds_msg_zcopy_info, znotif);
}

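/*
 * Free every zerocopy completion notification still queued on @q, e.g.
 * when the owning socket is torn down.
 */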
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *q)
{
	unsigned long flags;
	LIST_HEAD(copy);
	struct rds_msg_zcopy_info *info, *tmp;

	spin_lock_irqsave(&q->lock, flags);
	list_splice(&q->zcookie_head, &copy);
	INIT_LIST_HEAD(&q->zcookie_head);
	spin_unlock_irqrestore(&q->lock, flags);

	list_for_each_entry_safe(info, tmp, &copy, rs_zcookie_next) {
		list_del(&info->rs_zcookie_next);
		kfree(info);
	}
}

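/*
 * Queue the completion cookie of a finished zerocopy send on the socket's
 * zcookie queue so it can be reported to userspace.  The cookie is batched
 * into an already-queued entry when that entry still has room.
 */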
static void rds_rm_zerocopy_callback(struct rds_sock *rs,
				     struct rds_znotifier *znotif)
{
	struct rds_msg_zcopy_info *info;
	struct rds_msg_zcopy_queue *q;
	u32 cookie = znotif->z_cookie;
	struct rds_zcopy_cookies *ck;
	struct list_head *head;
	unsigned long flags;

	mm_unaccount_pinned_pages(&znotif->z_mmp);
	q = &rs->rs_zcookie_queue;
	spin_lock_irqsave(&q->lock, flags);
	head = &q->zcookie_head;
	if (!list_empty(head)) {
		info = list_first_entry(head, struct rds_msg_zcopy_info,
					rs_zcookie_next);
		if (rds_zcookie_add(info, cookie)) {
			spin_unlock_irqrestore(&q->lock, flags);
			kfree(rds_info_from_znotifier(znotif));
			/* caller invokes rds_wake_sk_sleep() */
			return;
		}
	}

	info = rds_info_from_znotifier(znotif);
	ck = &info->zcookies;
	memset(ck, 0, sizeof(*ck));
	WARN_ON(!rds_zcookie_add(info, cookie));
	list_add_tail(&q->zcookie_head, &info->rs_zcookie_next);

	spin_unlock_irqrestore(&q->lock, flags);
	/* caller invokes rds_wake_sk_sleep() */
}

/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
static void rds_message_purge(struct rds_message *rm)
{
	unsigned long i, flags;
	bool zcopy = false;

	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
		return;

	spin_lock_irqsave(&rm->m_rs_lock, flags);
	if (rm->m_rs) {
		struct rds_sock *rs = rm->m_rs;

		if (rm->data.op_mmp_znotifier) {
			zcopy = true;
			rds_rm_zerocopy_callback(rs, rm->data.op_mmp_znotifier);
			rds_wake_sk_sleep(rs);
			rm->data.op_mmp_znotifier = NULL;
		}
		sock_put(rds_rs_to_sk(rs));
		rm->m_rs = NULL;
	}
	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	for (i = 0; i < rm->data.op_nents; i++) {
		/* XXX will have to put_page for page refs */
		if (!zcopy)
			__free_page(sg_page(&rm->data.op_sg[i]));
		else
			put_page(sg_page(&rm->data.op_sg[i]));
	}
	rm->data.op_nents = 0;

	if (rm->rdma.op_active)
		rds_rdma_free_op(&rm->rdma);
	if (rm->rdma.op_rdma_mr)
		kref_put(&rm->rdma.op_rdma_mr->r_kref, __rds_put_mr_final);

	if (rm->atomic.op_active)
		rds_atomic_free_op(&rm->atomic);
	if (rm->atomic.op_rdma_mr)
		kref_put(&rm->atomic.op_rdma_mr->r_kref, __rds_put_mr_final);
}

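/*
 * Drop a reference on @rm.  When the last reference is gone, the message
 * must no longer sit on any socket or connection list; its pages and RDMA
 * state are released and the message itself is freed.
 */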
void rds_message_put(struct rds_message *rm)
{
	rdsdebug("put rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
	WARN(!refcount_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
	if (refcount_dec_and_test(&rm->m_refcount)) {
		BUG_ON(!list_empty(&rm->m_sock_item));
		BUG_ON(!list_empty(&rm->m_conn_item));
		rds_message_purge(rm);

		kfree(rm);
	}
}
EXPORT_SYMBOL_GPL(rds_message_put);

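/*
 * Fill in the fixed part of the wire header for an outgoing message and
 * mark the extension area as empty.
 */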
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq)
{
	hdr->h_flags = 0;
	hdr->h_sport = sport;
	hdr->h_dport = dport;
	hdr->h_sequence = cpu_to_be64(seq);
	hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
EXPORT_SYMBOL_GPL(rds_message_populate_header);

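/*
 * Append one extension header to @hdr.  Only a single extension is
 * supported per header for now, and @len must match rds_exthdr_size[type].
 * Returns 1 on success, 0 if the extension is invalid or does not fit.
 *
 * Illustrative call only (rds_message_add_rdma_dest_extension() below is
 * a real user):
 *
 *	__be16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
 *
 *	rds_message_add_extension(hdr, RDS_EXTHDR_NPATHS,
 *				  &npaths, sizeof(npaths));
 */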
int rds_message_add_extension(struct rds_header *hdr, unsigned int type,
			      const void *data, unsigned int len)
{
	unsigned int ext_len = sizeof(u8) + len;
	unsigned char *dst;

	/* For now, refuse to add more than one extension header */
	if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
		return 0;

	if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
		return 0;

	if (ext_len >= RDS_HEADER_EXT_SPACE)
		return 0;
	dst = hdr->h_exthdr;

	*dst++ = type;
	memcpy(dst, data, len);

	dst[len] = RDS_EXTHDR_NONE;
	return 1;
}
EXPORT_SYMBOL_GPL(rds_message_add_extension);

/*
 * If a message has extension headers, retrieve them here.
 * Call like this:
 *
 * unsigned int pos = 0;
 *
 * while (1) {
 *	buflen = sizeof(buffer);
 *	type = rds_message_next_extension(hdr, &pos, buffer, &buflen);
 *	if (type == RDS_EXTHDR_NONE)
 *		break;
 *	...
 * }
 */
int rds_message_next_extension(struct rds_header *hdr,
		unsigned int *pos, void *buf, unsigned int *buflen)
{
	unsigned int offset, ext_type, ext_len;
	u8 *src = hdr->h_exthdr;

	offset = *pos;
	if (offset >= RDS_HEADER_EXT_SPACE)
		goto none;

	/* Get the extension type and length. For now, the
	 * length is implied by the extension type. */
	ext_type = src[offset++];

	if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX)
		goto none;
	ext_len = rds_exthdr_size[ext_type];
	if (offset + ext_len > RDS_HEADER_EXT_SPACE)
		goto none;

	*pos = offset + ext_len;
	if (ext_len < *buflen)
		*buflen = ext_len;
	memcpy(buf, src + offset, *buflen);
	return ext_type;

none:
	*pos = RDS_HEADER_EXT_SPACE;
	*buflen = 0;
	return RDS_EXTHDR_NONE;
}

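/*
 * Convenience wrapper that encodes an RDMA destination (r_key and byte
 * offset) as an RDS_EXTHDR_RDMA_DEST extension header.
 */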
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset)
{
	struct rds_ext_header_rdma_dest ext_hdr;

	ext_hdr.h_rdma_rkey = cpu_to_be32(r_key);
	ext_hdr.h_rdma_offset = cpu_to_be32(offset);
	return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);

/*
 * Each rds_message is allocated with extra space for the scatterlist entries
 * rds ops will need. This is to minimize memory allocation count. Then, each rds op
 * can grab SGs when initializing its part of the rds_message.
 */
struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
	struct rds_message *rm;

	if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
		return NULL;

	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
	if (!rm)
		goto out;

	rm->m_used_sgs = 0;
	rm->m_total_sgs = extra_len / sizeof(struct scatterlist);

	refcount_set(&rm->m_refcount, 1);
	INIT_LIST_HEAD(&rm->m_sock_item);
	INIT_LIST_HEAD(&rm->m_conn_item);
	spin_lock_init(&rm->m_rs_lock);
	init_waitqueue_head(&rm->m_flush_wait);

out:
	return rm;
}

/*
 * RDS ops use this to grab SG entries from the rm's sg pool.
 */
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
{
	struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
	struct scatterlist *sg_ret;

	if (nents <= 0) {
		pr_warn("rds: alloc sgs failed! nents <= 0\n");
		return ERR_PTR(-EINVAL);
	}

	if (rm->m_used_sgs + nents > rm->m_total_sgs) {
		pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
			rm->m_total_sgs, rm->m_used_sgs, nents);
		return ERR_PTR(-ENOMEM);
	}

	sg_ret = &sg_first[rm->m_used_sgs];
	sg_init_table(sg_ret, nents);
	rm->m_used_sgs += nents;

	return sg_ret;
}

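/*
 * Build an rds_message around an existing array of page-sized buffers
 * without copying them.  RDS_MSG_PAGEVEC tells rds_message_purge() that
 * the pages belong to the caller and must not be freed here.
 */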
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
	struct rds_message *rm;
	unsigned int i;
	int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
	int extra_bytes = num_sgs * sizeof(struct scatterlist);

	rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
	if (!rm)
		return ERR_PTR(-ENOMEM);

	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
	rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
	if (IS_ERR(rm->data.op_sg)) {
		rds_message_put(rm);
		return ERR_CAST(rm->data.op_sg);
	}

	for (i = 0; i < rm->data.op_nents; ++i) {
		sg_set_page(&rm->data.op_sg[i],
				virt_to_page(page_addrs[i]),
				PAGE_SIZE, 0);
	}

	return rm;
}

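/*
 * Zerocopy variant of rds_message_copy_from_user(): pin the sender's pages
 * in place instead of copying them, and attach a notifier so a completion
 * cookie can be queued once the pages are no longer needed.
 */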
static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *from)
{
	struct scatterlist *sg;
	int ret = 0;
	int length = iov_iter_count(from);
	int total_copied = 0;
	struct rds_msg_zcopy_info *info;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));

	/*
	 * now allocate and copy in the data payload.
	 */
	sg = rm->data.op_sg;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	INIT_LIST_HEAD(&info->rs_zcookie_next);
	rm->data.op_mmp_znotifier = &info->znotif;
	if (mm_account_pinned_pages(&rm->data.op_mmp_znotifier->z_mmp,
				    length)) {
		ret = -ENOMEM;
		goto err;
	}
	while (iov_iter_count(from)) {
		struct page *pages;
		size_t start;
		ssize_t copied;

		copied = iov_iter_get_pages(from, &pages, PAGE_SIZE,
					    1, &start);
		if (copied < 0) {
			struct mmpin *mmp;
			int i;

			for (i = 0; i < rm->data.op_nents; i++)
				put_page(sg_page(&rm->data.op_sg[i]));
			mmp = &rm->data.op_mmp_znotifier->z_mmp;
			mm_unaccount_pinned_pages(mmp);
			ret = -EFAULT;
			goto err;
		}
		total_copied += copied;
		iov_iter_advance(from, copied);
		length -= copied;
		sg_set_page(sg, pages, copied, start);
		rm->data.op_nents++;
		sg++;
	}
	WARN_ON_ONCE(length != 0);
	return ret;
err:
	kfree(info);
	rm->data.op_mmp_znotifier = NULL;
	return ret;
}

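/*
 * Copy an outgoing payload from userspace into the message, allocating
 * pages for the scatterlist as needed.  With @zcopy set, the data is
 * pinned rather than copied (see rds_message_zcopy_from_user() above).
 */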
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy)
{
	unsigned long to_copy, nbytes;
	unsigned long sg_off;
	struct scatterlist *sg;
	int ret = 0;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));

	/* now allocate and copy in the data payload. */
	sg = rm->data.op_sg;
	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

	if (zcopy)
		return rds_message_zcopy_from_user(rm, from);

	while (iov_iter_count(from)) {
		if (!sg_page(sg)) {
			ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
						       GFP_HIGHUSER);
			if (ret)
				return ret;
			rm->data.op_nents++;
			sg_off = 0;
		}

		to_copy = min_t(unsigned long, iov_iter_count(from),
				sg->length - sg_off);

		rds_stats_add(s_copy_from_user, to_copy);
		nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
					     to_copy, from);
		if (nbytes != to_copy)
			return -EFAULT;

		sg_off += to_copy;

		if (sg_off == sg->length)
			sg++;
	}

	return ret;
}

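/*
 * Copy a received message's payload out to userspace, stopping at either
 * the message length or the space left in the iterator.  Returns the
 * number of bytes copied, or -EFAULT if a copy fails.
 */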
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_message *rm;
	struct scatterlist *sg;
	unsigned long to_copy;
	unsigned long vec_off;
	int copied;
	int ret;
	u32 len;

	rm = container_of(inc, struct rds_message, m_inc);
	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	sg = rm->data.op_sg;
	vec_off = 0;
	copied = 0;

	while (iov_iter_count(to) && copied < len) {
		to_copy = min_t(unsigned long, iov_iter_count(to),
				sg->length - vec_off);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rds_stats_add(s_copy_to_user, to_copy);
		ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off,
					to_copy, to);
		if (ret != to_copy)
			return -EFAULT;

		vec_off += to_copy;
		copied += to_copy;

		if (vec_off == sg->length) {
			vec_off = 0;
			sg++;
		}
	}

	return copied;
}

/*
 * If the message is still on the send queue, wait until the transport
 * is done with it. This is particularly important for RDMA operations.
 */
void rds_message_wait(struct rds_message *rm)
{
	wait_event_interruptible(rm->m_flush_wait,
			!test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}

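/*
 * Transports call this once they no longer need the message mapped for
 * DMA; it wakes any thread sleeping in rds_message_wait().
 */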
void rds_message_unmapped(struct rds_message *rm)
{
	clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
	wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);