/*
 * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}

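/*
 * Worked example for rds_pages_in_vec() (assuming 4 KiB pages): a vec
 * with addr = 0x1004 and bytes = 0x2000 covers bytes 0x1004..0x3003,
 * i.e. page indices 1, 2 and 3, and the arithmetic above returns
 * ((0x3004 + 0xfff) >> 12) - (0x1004 >> 12) = 4 - 1 = 3.
 */

/*
 * Look up the MR with the given key in a socket's tree of MRs.  If
 * @insert is non-NULL and the key is absent, link it into the tree and
 * take a reference on it.  Returns the pre-existing MR when the key is
 * already in the tree, NULL otherwise.
 */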
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		refcount_inc(&insert->r_refcount);
	}
	return NULL;
}

/*
 * Destroy the transport-specific part of an MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, refcount_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}
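
/*
 * __rds_put_mr_final() is the zero-refcount release path: it is expected
 * to run via rds_mr_put() (see rds.h) once the last reference to the MR
 * is dropped, destroying the transport-private state and freeing it.
 */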

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		rds_destroy_mr(mr);
		rds_mr_put(mr);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}
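
/*
 * Note: the loop above must drop rs_rdma_lock around rds_destroy_mr()
 * and rds_mr_put(), because rds_destroy_mr() acquires the same lock
 * itself; rb_first() is re-evaluated once the lock is retaken.
 */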

/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			 struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}
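
/*
 * rds_pin_pages() is all-or-nothing: a short pin from
 * get_user_pages_fast() is rolled back with put_page() and reported as
 * -EFAULT, so on success the caller owns references on all nr_pages
 * pages, and on failure on none.
 */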

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Restrict the size of the MR, irrespective of the underlying
	 * transport.  To account for unaligned MR regions, subtract one
	 * from nr_pages.
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		 args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w.  We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR.  If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key,
						 cp ? cp->cp_conn : NULL);

	if (IS_ERR(trans_private)) {
		for (i = 0; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
		 mr->r_key, (void *)(unsigned long)args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions.  So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr &&
	    put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		refcount_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}
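
/*
 * Cookie round trip (sketch; the exact bit layout lives in the helpers
 * in rds.h): for an MR with r_key 0x1234 built from user address
 * 0x7f0000001004 (assuming 4 KiB pages), the page offset is 0x4, so
 * rds_rdma_make_cookie(0x1234, 0x4) produces a cookie from which
 * rds_rdma_cookie_key() recovers 0x1234 and rds_rdma_cookie_offset()
 * recovers 0x4.
 */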

int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}

int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}
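
/*
 * Entry-point note (sketch of the wiring; see the setsockopt() handling
 * in af_rds.c): the two functions above and rds_free_mr() below are
 * reached from userspace via setsockopt() on a PF_RDS socket at level
 * SOL_RDS, with the RDS_GET_MR, RDS_GET_MR_FOR_DEST and RDS_FREE_MR
 * option names respectively.
 */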

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_Key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * Call rds_destroy_mr() ourselves so that we're sure it's done by
	 * the time we return.  If we let rds_mr_put() do it, it might not
	 * happen until someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used.  It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was an RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me) {
		rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}
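
/*
 * For an MR mapped with RDS_RDMA_USE_ONCE, the first "MR was used"
 * extension header thus unlinks it and drops the tree's reference;
 * @force requests the same teardown for explicit invalidation.
 */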

void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->op_nents; i++) {
		struct page *page = sg_page(&ro->op_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for an RDMA_READ which copies from remote
		 * to local memory */
		if (!ro->op_write) {
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for an RDMA_READ which copies from remote
	 * to local memory */
	set_page_dirty(page);
	put_page(page);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}

/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}
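
/*
 * Why the sign check above suffices (assuming 4 KiB pages): each iovec
 * contributes at most (UINT_MAX >> PAGE_SHIFT) + 1 = 2^20 pages, so the
 * signed 32-bit tot_pages must go negative long before it could wrap
 * all the way around to a small positive value.
 */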

int rds_rdma_extra_size(struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long)args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec)))
			return -EFAULT;

		nr_pages = rds_pages_in_vec(&vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}
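
/*
 * The value returned above is a byte count, not a page count: it sizes
 * the scatterlist that rds_cmsg_rdma_args() will later populate, and is
 * expected to be consumed when the rds_message is sized (rds_rm_size()
 * in send.c, at the time of writing).
 */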

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
	int iov_size;
	unsigned int i, j;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	/* Check whether to allocate the iovec area */
	iov_size = args->nr_local * sizeof(struct rds_iovec);
	if (args->nr_local > UIO_FASTIOV) {
		iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
		if (!iovs) {
			ret = -ENOMEM;
			goto out_ret;
		}
	}

	if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long)args->local_vec_addr, iov_size)) {
		ret = -EFAULT;
		goto out;
	}

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (!op->op_sg) {
		ret = -ENOMEM;
		goto out;
	}

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;

		/* Enable rdma notification on the data operation for
		 * composite rds messages, and make sure notification is
		 * enabled only for the data operation which follows it, so
		 * that the application gets notified only after the full
		 * message is delivered.
		 */
		if (rm->data.op_sg) {
			rm->rdma.op_notify = 0;
			rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
		}
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
		 (unsigned long long)args->nr_local,
		 (unsigned long long)args->remote_vec.addr,
		 op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* don't need to check, rds_rdma_pages() verified nr will be nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if (ret < 0)
			goto out;
		else
			ret = 0;

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
				    min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
				    offset);

			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
				 sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
			 nr_bytes,
			 (unsigned int)args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}
	op->op_bytes = nr_bytes;

out:
	if (iovs != iovstack)
		sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
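
/*
 * Userspace sketch (hypothetical, for illustration; field names from the
 * uapi struct rds_rdma_args): an application initiates such a transfer
 * by attaching the args as ancillary data to sendmsg() on a PF_RDS
 * socket, e.g.
 *
 *	struct rds_rdma_args rargs = {
 *		.cookie		= peer_cookie,	// obtained from the peer's mapping
 *		.local_vec_addr	= (u64)(unsigned long)iovs,
 *		.nr_local	= niov,
 *		.remote_vec	= { .addr = 0, .bytes = len },	// offset into the peer's MR
 *		.flags		= RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
 *		.user_token	= token,
 *	};
 *
 * carried in a cmsg with cmsg_level == SOL_RDS and cmsg_type ==
 * RDS_CMSG_RDMA_ARGS, which is what CMSG_DATA(cmsg) above points at.
 */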

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		refcount_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
}

/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	    || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (!rm->atomic.op_sg) {
		ret = -ENOMEM;
		goto err;
	}

	/* verify 8 byte-aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		put_page(page);
	rm->atomic.op_active = 0;
	kfree(rm->atomic.op_notifier);

	return ret;
}
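
/*
 * Note on the conversion above: the non-masked cmsg variants are lowered
 * to masked hardware ops with trivial masks. A plain cswp gets
 * compare_mask = swap_mask = ~0, so every bit of the 64-bit target
 * participates; a plain fetch-add gets nocarry_mask = 0, so carries
 * propagate normally across the whole word.
 */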