/*
 * Copyright (c) 2007 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"
/*
 * - should we detect duplicate keys on a socket?  hmm.
 * - an rdma is an mlock, apply rlimit?
 */
/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
        if ((vec->addr + vec->bytes <= vec->addr) ||
            (vec->bytes > (u64)UINT_MAX))
                return 0;

        return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
                (vec->addr >> PAGE_SHIFT);
}
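
/*
 * Worked example (illustrative, assuming 4K pages): addr = 0x1ff0 and
 * bytes = 0x20 touches pages 1 and 2, so the math above gives
 * ((0x2010 + 0xfff) >> 12) - (0x1ff0 >> 12) = 3 - 1 = 2 pages.
 */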
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
                                       struct rds_mr *insert)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct rds_mr *mr;

        mr = rb_entry(parent, struct rds_mr, r_rb_node);

        else if (key > mr->r_key)

        rb_link_node(&insert->r_rb_node, parent, p);
        rb_insert_color(&insert->r_rb_node, root);
        refcount_inc(&insert->r_refcount);
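
        /*
         * Note: rds_mr_tree_walk() doubles as lookup and insert.  Called
         * with insert == NULL it is a plain r_key lookup; called with a
         * new MR it links the node where the search ended and takes a
         * reference, which is why callers hold rs_rdma_lock around it.
         */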
/*
 * Destroy the transport-specific part of a MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
        struct rds_sock *rs = mr->r_sock;
        void *trans_private = NULL;
        unsigned long flags;

        rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
                 mr->r_key, refcount_read(&mr->r_refcount));

        if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
                return;

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        if (!RB_EMPTY_NODE(&mr->r_rb_node))
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
        trans_private = mr->r_trans_private;
        mr->r_trans_private = NULL;
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (trans_private)
                mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}
void __rds_put_mr_final(struct rds_mr *mr)
{
        rds_destroy_mr(mr);
        kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
        struct rds_mr *mr;
        struct rb_node *node;
        unsigned long flags;

        /* Release any MRs associated with this socket */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        while ((node = rb_first(&rs->rs_rdma_keys))) {
                mr = rb_entry(node, struct rds_mr, r_rb_node);
                if (mr->r_trans == rs->rs_transport)
                        mr->r_invalidate = 0;
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
                rds_destroy_mr(mr);
                rds_mr_put(mr);
                spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        }
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (rs->rs_transport && rs->rs_transport->flush_mrs)
                rs->rs_transport->flush_mrs();
}
/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
                         struct page **pages, int write)
{
        int ret;

        ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

        if (ret >= 0 && ret < nr_pages) {
                while (ret--)
                        put_page(pages[ret]);
                ret = -EFAULT;
        }

        return ret;
}
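
/*
 * Note: get_user_pages_fast() may legitimately pin fewer pages than
 * requested (for instance when part of the range is not mapped).  The
 * helper above drops any partially pinned pages and returns -EFAULT in
 * that case, so callers never see a half-pinned buffer.
 */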
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
                          u64 *cookie_ret, struct rds_mr **mr_ret)
{
        struct rds_mr *mr = NULL, *found;
        unsigned int nr_pages;
        struct page **pages = NULL;
        struct scatterlist *sg;
        rds_rdma_cookie_t cookie;

        if (rs->rs_bound_addr == 0) {
                ret = -ENOTCONN; /* XXX not a great errno */

        if (!rs->rs_transport->get_mr) {

        nr_pages = rds_pages_in_vec(&args->vec);

        /* Restrict the size of mr irrespective of underlying transport.
         * To account for unaligned mr regions, subtract one from nr_pages.
         */
        if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {

        rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
                 args->vec.addr, args->vec.bytes, nr_pages);

        /* XXX clamp nr_pages to limit the size of this alloc? */
        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);

        mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);

        refcount_set(&mr->r_refcount, 1);
        RB_CLEAR_NODE(&mr->r_rb_node);
        mr->r_trans = rs->rs_transport;

        if (args->flags & RDS_RDMA_USE_ONCE)
                mr->r_use_once = 1;
        if (args->flags & RDS_RDMA_INVALIDATE)
                mr->r_invalidate = 1;
        if (args->flags & RDS_RDMA_READWRITE)
                mr->r_write = 1;
        /*
         * Pin the pages that make up the user buffer and transfer the page
         * pointers to the mr's sg array.  We check to see if we've mapped
         * the whole region after transferring the partial page references
         * to the sg array so that we can have one page ref cleanup path.
         *
         * For now we have no flag that tells us whether the mapping is
         * r/o or r/w.  We need to assume r/w, or we'll do a lot of RDMA to
         * the zero page.
         */
        ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);

        sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);

        sg_init_table(sg, nents);

        /* Stick all pages into the scatterlist */
        for (i = 0; i < nents; i++)
                sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

        rdsdebug("RDS: trans_private nents is %u\n", nents);
        /* Obtain a transport specific MR.  If this succeeds, the
         * s/g list is now owned by the MR.
         * Note that dma_map() implies that pending writes are
         * flushed to RAM, so no dma_sync is needed here. */
        trans_private = rs->rs_transport->get_mr(sg, nents, rs,
                                                 &mr->r_key);

        if (IS_ERR(trans_private)) {
                for (i = 0; i < nents; i++)
                        put_page(sg_page(&sg[i]));
                ret = PTR_ERR(trans_private);

        mr->r_trans_private = trans_private;

        rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
                 mr->r_key, (void *)(unsigned long)args->cookie_addr);
        /* The user may pass us an unaligned address, but we can only
         * map page aligned regions.  So we keep the offset, and build
         * a 64bit cookie containing <R_Key, offset> and pass that
         * around. */
        cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
        if (cookie_ret)
                *cookie_ret = cookie;

        if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
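
        /*
         * Illustrative sketch (not taken from this file): the cookie
         * helpers in rds.h presumably pack the pair with the R_Key in
         * the low 32 bits and the byte offset in the high 32 bits,
         * roughly:
         *
         *      cookie = (u64)offset << 32 | r_key;
         *      r_key  = (u32)cookie;
         *      offset = (u32)(cookie >> 32);
         *
         * so one u64 can ride in the extension header and be taken
         * apart again with rds_rdma_cookie_key()/_offset().
         */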
        /* Inserting the new MR into the rbtree bumps its
         * reference count. */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        BUG_ON(found && found != mr);

        rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
        if (mr_ret) {
                refcount_inc(&mr->r_refcount);
                *mr_ret = mr;
        }
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_get_mr_args args;

        if (optlen != sizeof(struct rds_get_mr_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
                           sizeof(struct rds_get_mr_args)))
                return -EFAULT;

        return __rds_rdma_map(rs, &args, NULL, NULL);
}
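
/*
 * Userspace reaches this through a socket option on a PF_RDS socket.
 * A minimal sketch (assuming the SOL_RDS/RDS_GET_MR definitions from
 * the uapi header) looks roughly like:
 *
 *      struct rds_get_mr_args args = {
 *              .vec         = { .addr = (uint64_t)buf, .bytes = len },
 *              .cookie_addr = (uint64_t)&cookie,
 *              .flags       = RDS_RDMA_USE_ONCE,
 *      };
 *      setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
 *
 * The cookie written back here is what the peer later places in its
 * rds_rdma_args / extension header to address this MR.
 */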
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_get_mr_for_dest_args args;
        struct rds_get_mr_args new_args;

        if (optlen != sizeof(struct rds_get_mr_for_dest_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
                           sizeof(struct rds_get_mr_for_dest_args)))
                return -EFAULT;

        /*
         * Initially, just behave like get_mr().
         * TODO: Implement get_mr as wrapper around this
         *       and deprecate it.
         */
        new_args.vec = args.vec;
        new_args.cookie_addr = args.cookie_addr;
        new_args.flags = args.flags;

        return __rds_rdma_map(rs, &new_args, NULL, NULL);
}
/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_free_mr_args args;
        struct rds_mr *mr;
        unsigned long flags;

        if (optlen != sizeof(struct rds_free_mr_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
                           sizeof(struct rds_free_mr_args)))
                return -EFAULT;

        /* Special case - a null cookie means flush all unused MRs */
        if (args.cookie == 0) {
                if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
                        return -EINVAL;
                rs->rs_transport->flush_mrs();
                return 0;
        }

        /* Look up the MR given its R_key and remove it from the rbtree
         * so nobody else finds it.
         * This should also prevent races with rds_rdma_unuse.
         */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
        if (mr) {
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                if (args.flags & RDS_RDMA_INVALIDATE)
                        mr->r_invalidate = 1;
        }
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
        /*
         * Call rds_destroy_mr() ourselves so that we're sure it's done by the
         * time we return.  If we left it to rds_mr_put(), it might not happen
         * until someone else drops their ref.
         */
        rds_destroy_mr(mr);
        rds_mr_put(mr);
        return 0;
}
/*
 * This is called when we receive an extension header that
 * tells us this MR was used.  It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
        struct rds_mr *mr;
        unsigned long flags;

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (!mr) {
                pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
                         r_key);
                spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
                return;
        }

        if (mr->r_use_once || force) {
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
        }
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        /* May have to issue a dma_sync on this memory region.
         * Note we could avoid this if the operation was a RDMA READ,
         * but at this point we can't tell. */
        if (mr->r_trans->sync_mr)
                mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

        /* If the MR was marked as invalidate, this will
         * trigger an async flush. */
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
        unsigned int i;

        for (i = 0; i < ro->op_nents; i++) {
                struct page *page = sg_page(&ro->op_sg[i]);

                /* Mark page dirty if it was possibly modified, which
                 * is the case for a RDMA_READ which copies from remote
                 * to local memory */
                if (!ro->op_write) {
                        WARN_ON(!page->mapping && irqs_disabled());
                        set_page_dirty(page);
                }
                put_page(page);
        }

        kfree(ro->op_notifier);
        ro->op_notifier = NULL;
void rds_atomic_free_op(struct rm_atomic_op *ao)
{
        struct page *page = sg_page(ao->op_sg);

        /* Mark page dirty if it was possibly modified, which
         * is the case for a RDMA_READ which copies from remote
         * to local memory */
        set_page_dirty(page);
        put_page(page);

        kfree(ao->op_notifier);
        ao->op_notifier = NULL;
/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
        int tot_pages = 0;
        unsigned int nr_pages;
        unsigned int i;

        /* figure out the number of pages in the vector */
        for (i = 0; i < nr_iovecs; i++) {
                nr_pages = rds_pages_in_vec(&iov[i]);
                if (nr_pages == 0)
                        return -EINVAL;

                tot_pages += nr_pages;

                /*
                 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
                 * so tot_pages cannot overflow without first going negative.
                 */
                if (tot_pages < 0)
                        return -EINVAL;
        }

        return tot_pages;
}
int rds_rdma_extra_size(struct rds_rdma_args *args)
{
        struct rds_iovec vec;
        struct rds_iovec __user *local_vec;
        int tot_pages = 0;
        unsigned int nr_pages;
        unsigned int i;

        local_vec = (struct rds_iovec __user *)(unsigned long)args->local_vec_addr;

        /* figure out the number of pages in the vector */
        for (i = 0; i < args->nr_local; i++) {
                if (copy_from_user(&vec, &local_vec[i],
                                   sizeof(struct rds_iovec)))
                        return -EFAULT;

                nr_pages = rds_pages_in_vec(&vec);
                if (nr_pages == 0)
                        return -EINVAL;

                tot_pages += nr_pages;

                /*
                 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
                 * so tot_pages cannot overflow without first going negative.
                 */
                if (tot_pages < 0)
                        return -EINVAL;
        }

        return tot_pages * sizeof(struct scatterlist);
}
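
/*
 * Example (illustrative): two local iovecs spanning three pages each
 * give tot_pages = 6, so the caller reserves room for six scatterlist
 * entries, i.e. 6 * sizeof(struct scatterlist) bytes.
 */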
/*
 * The application asks for a RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                       struct cmsghdr *cmsg)
{
        struct rds_rdma_args *args;
        struct rm_rdma_op *op = &rm->rdma;
        unsigned int nr_bytes;
        struct page **pages = NULL;
        struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;

        if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
            || rm->rdma.op_active)
                return -EINVAL;

        args = CMSG_DATA(cmsg);

        if (rs->rs_bound_addr == 0) {
                ret = -ENOTCONN; /* XXX not a great errno */

        if (args->nr_local > UIO_MAXIOV) {

        /* Check whether to allocate the iovec area */
        iov_size = args->nr_local * sizeof(struct rds_iovec);
        if (args->nr_local > UIO_FASTIOV) {
                iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);

        if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long)args->local_vec_addr, iov_size)) {

        nr_pages = rds_rdma_pages(iovs, args->nr_local);

        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
        op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
        op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
        op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
        op->op_recverr = rs->rs_recverr;

        op->op_sg = rds_message_alloc_sgs(rm, nr_pages);

        if (op->op_notify || op->op_recverr) {
                /* We allocate an uninitialized notifier here, because
                 * we don't want to do that in the completion handler.  We
                 * would have to use GFP_ATOMIC there, and don't want to deal
                 * with failed allocations.
                 */
                op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
                if (!op->op_notifier) {

                op->op_notifier->n_user_token = args->user_token;
                op->op_notifier->n_status = RDS_RDMA_SUCCESS;
                /* Enable rdma notification on the data operation for
                 * composite rds messages, and make sure notification is
                 * enabled only for the data operation which follows it,
                 * so that the application gets notified only after the
                 * full message gets delivered.
                 */
                if (rm->data.op_sg) {
                        rm->rdma.op_notify = 0;
                        rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
                }
        /* The cookie contains the R_Key of the remote memory region, and
         * optionally an offset into it.  This is how we implement RDMA into
         * unaligned memory.
         *
         * When setting up the RDMA, we need to add that offset to the
         * destination address (which is really an offset into the MR)
         * FIXME: We may want to move this into ib_rdma.c
         */
        op->op_rkey = rds_rdma_cookie_key(args->cookie);
        op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

        rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
                 (unsigned long long)args->nr_local,
                 (unsigned long long)args->remote_vec.addr,
                 op->op_rkey);
        for (i = 0; i < args->nr_local; i++) {
                struct rds_iovec *iov = &iovs[i];
                /* no need to check nr here, rds_rdma_pages() already verified it is nonzero */
                unsigned int nr = rds_pages_in_vec(iov);

                rs->rs_user_addr = iov->addr;
                rs->rs_user_bytes = iov->bytes;

                /* If it's a WRITE operation, we want to pin the pages for reading.
                 * If it's a READ operation, we need to pin the pages for writing.
                 */
                ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);

                rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
                         nr_bytes, nr, iov->bytes, iov->addr);

                nr_bytes += iov->bytes;

                for (j = 0; j < nr; j++) {
                        unsigned int offset = iov->addr & ~PAGE_MASK;
                        struct scatterlist *sg;

                        sg = &op->op_sg[op->op_nents + j];
                        sg_set_page(sg, pages[j],
                                    min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
                                    offset);

                        rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
                                 sg->offset, sg->length, iov->addr, iov->bytes);

                        iov->addr += sg->length;
                        iov->bytes -= sg->length;
                }

                op->op_nents += nr;
        }

        if (nr_bytes > args->remote_vec.bytes) {
                rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
                         nr_bytes,
                         (unsigned int)args->remote_vec.bytes);
                ret = -EINVAL;
        }

        op->op_bytes = nr_bytes;
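
        /*
         * Worked example (illustrative, 4K pages): a local iovec with
         * addr 0x10f00 and bytes 0x200 spans two pages, so the loop above
         * emits two sg entries: pages[0] at offset 0xf00 for 0x100 bytes,
         * then pages[1] at offset 0 for the remaining 0x100 bytes.
         */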
        if (iovs != iovstack)
                sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);

        if (ret)
                rds_rdma_free_op(op);
        else
                rds_stats_inc(s_send_rdma);
/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote.
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
                       struct cmsghdr *cmsg)
{
        unsigned long flags;
        struct rds_mr *mr;
        u32 r_key;
        int err = 0;

        if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
            rm->m_rdma_cookie != 0)
                return -EINVAL;

        memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

        /* We are reusing a previously mapped MR here.  Most likely, the
         * application has written to the buffer, so we need to explicitly
         * flush those writes to RAM.  Otherwise the HCA may not see them
         * when doing a DMA from that buffer.
         */
        r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (!mr)
                err = -EINVAL;  /* invalid r_key */
        else
                refcount_inc(&mr->r_refcount);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (mr) {
                mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
                rm->rdma.op_rdma_mr = mr;
        }
        return err;
}
/*
 * The application passes us an address range it wants to enable RDMA
 * to/from.  We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie.  This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
                      struct cmsghdr *cmsg)
{
        if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
            rm->m_rdma_cookie != 0)
                return -EINVAL;

        return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
}
/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
                    struct cmsghdr *cmsg)
{
        struct page *page = NULL;
        struct rds_atomic_args *args;
        int ret = 0;

        if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
            || rm->atomic.op_active)
                return -EINVAL;

        args = CMSG_DATA(cmsg);

        /* Nonmasked & masked cmsg ops converted to masked hw ops */
        switch (cmsg->cmsg_type) {
        case RDS_CMSG_ATOMIC_FADD:
                rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
                rm->atomic.op_m_fadd.add = args->fadd.add;
                rm->atomic.op_m_fadd.nocarry_mask = 0;
                break;
        case RDS_CMSG_MASKED_ATOMIC_FADD:
                rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
                rm->atomic.op_m_fadd.add = args->m_fadd.add;
                rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
                break;
        case RDS_CMSG_ATOMIC_CSWP:
                rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
                rm->atomic.op_m_cswp.compare = args->cswp.compare;
                rm->atomic.op_m_cswp.swap = args->cswp.swap;
                rm->atomic.op_m_cswp.compare_mask = ~0;
                rm->atomic.op_m_cswp.swap_mask = ~0;
                break;
        case RDS_CMSG_MASKED_ATOMIC_CSWP:
                rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
                rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
                rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
                rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
                rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
                break;
        default:
                BUG(); /* should never happen */
        }

        rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
        rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
        rm->atomic.op_active = 1;
        rm->atomic.op_recverr = rs->rs_recverr;
        rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
        if (!rm->atomic.op_sg) {

        /* verify 8 byte-aligned */
        if (args->local_addr & 0x7) {

        ret = rds_pin_pages(args->local_addr, 1, &page, 1);

        sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
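
        /*
         * The atomic target is a single 8-byte word: one pinned page and
         * one sg entry of length 8 at the word's offset is all the
         * transport needs, and the 8-byte alignment check above ensures
         * the word cannot straddle a page boundary.
         */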
        if (rm->atomic.op_notify || rm->atomic.op_recverr) {
                /* We allocate an uninitialized notifier here, because
                 * we don't want to do that in the completion handler.  We
                 * would have to use GFP_ATOMIC there, and don't want to deal
                 * with failed allocations.
                 */
                rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
                if (!rm->atomic.op_notifier) {

                rm->atomic.op_notifier->n_user_token = args->user_token;
                rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
        }

        rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
        rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

        kfree(rm->atomic.op_notifier);