// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/libnvdimm.h>

#include "rxe.h"
#include "rxe_loc.h"
/* Return a random 8 bit key value that is
 * different than the last_key. Set last_key to -1
 * if this is the first key for an MR or MW.
 */
u8 rxe_get_next_key(u32 last_key)
{
        u8 key;

        do {
                get_random_bytes(&key, 1);
        } while (key == last_key);

        return key;
}
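
/* Usage note: rxe_mr_init() below always passes -1, so any random byte
 * is accepted for a brand-new MR; a caller re-keying an existing MR or
 * MW would instead pass the current low-order key byte so that the new
 * key is guaranteed to differ from it.
 */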

int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
        switch (mr->ibmr.type) {
        case IB_MR_TYPE_DMA:
                return 0;

        case IB_MR_TYPE_USER:
        case IB_MR_TYPE_MEM_REG:
                if (iova < mr->ibmr.iova ||
                    iova + length > mr->ibmr.iova + mr->ibmr.length) {
                        rxe_dbg_mr(mr, "iova/length out of range\n");
                        return -EINVAL;
                }
                return 0;

        default:
                rxe_dbg_mr(mr, "mr type not supported\n");
                return -EINVAL;
        }
}

#define IB_ACCESS_REMOTE        (IB_ACCESS_REMOTE_READ          \
                                 | IB_ACCESS_REMOTE_WRITE       \
                                 | IB_ACCESS_REMOTE_ATOMIC)

static void rxe_mr_init(int access, struct rxe_mr *mr)
{
        u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1);
        u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;

        /* set ibmr->l/rkey and also copy into private l/rkey
         * for user MRs these will always be the same
         * for cases where caller 'owns' the key portion
         * they may be different until REG_MR WQE is executed.
         */
        mr->lkey = mr->ibmr.lkey = lkey;
        mr->rkey = mr->ibmr.rkey = rkey;

        mr->access = access;
        mr->ibmr.page_size = PAGE_SIZE;
        mr->page_mask = PAGE_MASK;
        mr->page_shift = PAGE_SHIFT;
        mr->state = RXE_MR_STATE_INVALID;
}
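
/* Example of the resulting key layout (illustrative values):
 * with elem.index = 0x000012 and a random key byte of 0xab,
 *      lkey = 0x000012 << 8 | 0xab = 0x000012ab
 * The upper 24 bits locate the MR in the pool (see lookup_mr(), which
 * recovers the index with "key >> 8"); only the low 8 bits are the
 * consumer-visible key portion that a REG_MR WQE may later change.
 */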

void rxe_mr_init_dma(int access, struct rxe_mr *mr)
{
        rxe_mr_init(access, mr);

        mr->state = RXE_MR_STATE_VALID;
        mr->ibmr.type = IB_MR_TYPE_DMA;
}

static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
{
        return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
}

static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
{
        return iova & (mr_page_size(mr) - 1);
}
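
/* Worked example, assuming the default 4 KiB page size:
 * with mr->ibmr.iova = 0x10002000 and iova = 0x10003010,
 *      index  = (0x10003010 >> 12) - (0x10002000 >> 12) = 1
 *      offset = 0x10003010 & 0xfff = 0x10
 * i.e. the access starts 0x10 bytes into the second page stored in the
 * MR's page_list xarray.
 */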

static bool is_pmem_page(struct page *pg)
{
        unsigned long paddr = page_to_phys(pg);

        return REGION_INTERSECTS ==
               region_intersects(paddr, PAGE_SIZE, IORESOURCE_MEM,
                                 IORES_DESC_PERSISTENT_MEMORY);
}
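
/* A page qualifies for IB_ACCESS_FLUSH_PERSISTENT only if
 * region_intersects() classifies its physical range as persistent
 * memory (REGION_INTERSECTS); ordinary DRAM-backed pages fail this
 * check and are rejected by the callers below when the MR requests
 * persistence.
 */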

static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
{
        XA_STATE(xas, &mr->page_list, 0);
        struct sg_page_iter sg_iter;
        struct page *page;
        bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);

        __sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
        if (!__sg_page_iter_next(&sg_iter))
                return 0;

        do {
                xas_lock(&xas);
                while (true) {
                        page = sg_page_iter_page(&sg_iter);

                        if (persistent && !is_pmem_page(page)) {
                                rxe_dbg_mr(mr, "Page can't be persistent\n");
                                xas_set_err(&xas, -EINVAL);
                                break;
                        }

                        xas_store(&xas, page);
                        if (xas_error(&xas))
                                break;
                        xas_next(&xas);

                        if (!__sg_page_iter_next(&sg_iter))
                                break;
                }
                xas_unlock(&xas);
        } while (xas_nomem(&xas, GFP_KERNEL));

        return xas_error(&xas);
}
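
/* Note on the loop structure above: entries are stored under xas_lock();
 * if the xarray needs a new node the store records an -ENOMEM error,
 * xas_nomem() then allocates memory with GFP_KERNEL outside the lock,
 * and the do/while retries, so no allocation is attempted while the
 * lock is held.
 */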

int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
                     int access, struct rxe_mr *mr)
{
        struct ib_umem *umem;
        int err;

        rxe_mr_init(access, mr);

        xa_init(&mr->page_list);

        umem = ib_umem_get(&rxe->ib_dev, start, length, access);
        if (IS_ERR(umem)) {
                rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
                           (int)PTR_ERR(umem));
                return PTR_ERR(umem);
        }

        err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);
        if (err) {
                ib_umem_release(umem);
                return err;
        }

        mr->umem = umem;
        mr->ibmr.type = IB_MR_TYPE_USER;
        mr->state = RXE_MR_STATE_VALID;

        return 0;
}

static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
        XA_STATE(xas, &mr->page_list, 0);
        int i = 0;
        int err;

        xa_init(&mr->page_list);

        do {
                xas_lock(&xas);
                while (i != num_buf) {
                        xas_store(&xas, XA_ZERO_ENTRY);
                        if (xas_error(&xas))
                                break;
                        xas_next(&xas);
                        i++;
                }
                xas_unlock(&xas);
        } while (xas_nomem(&xas, GFP_KERNEL));

        err = xas_error(&xas);
        if (err)
                return err;

        mr->num_buf = num_buf;

        return 0;
}

int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
{
        int err;

        /* always allow remote access for FMRs */
        rxe_mr_init(IB_ACCESS_REMOTE, mr);

        err = rxe_mr_alloc(mr, max_pages);
        if (err)
                return err;

        mr->state = RXE_MR_STATE_FREE;
        mr->ibmr.type = IB_MR_TYPE_MEM_REG;

        return 0;
}

static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
{
        struct rxe_mr *mr = to_rmr(ibmr);
        struct page *page = ib_virt_dma_to_page(dma_addr);
        bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
        int err;

        if (persistent && !is_pmem_page(page)) {
                rxe_dbg_mr(mr, "Page cannot be persistent\n");
                return -EINVAL;
        }

        if (unlikely(mr->nbuf == mr->num_buf))
                return -ENOMEM;

        err = xa_err(xa_store(&mr->page_list, mr->nbuf, page, GFP_KERNEL));
        if (err)
                return err;

        mr->nbuf++;
        return 0;
}

int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
                  int sg_nents, unsigned int *sg_offset)
{
        struct rxe_mr *mr = to_rmr(ibmr);
        unsigned int page_size = mr_page_size(mr);

        mr->nbuf = 0;
        mr->page_shift = ilog2(page_size);
        mr->page_mask = ~((u64)page_size - 1);
        mr->page_offset = mr->ibmr.iova & (page_size - 1);

        return ib_sg_to_pages(ibmr, sgl, sg_nents, sg_offset, rxe_set_page);
}
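
/* ib_sg_to_pages() walks the scatterlist in mr_page_size() chunks and
 * calls rxe_set_page() once per page-aligned address, filling slot
 * mr->nbuf of the xarray each time.  At the verbs level this runs when
 * a ULP maps a fast-registration MR, for example (illustrative call):
 *      n = ib_map_mr_sg(ibmr, sgl, sg_nents, NULL, PAGE_SIZE);
 */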

static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
                              unsigned int length, enum rxe_mr_copy_dir dir)
{
        unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);
        unsigned long index = rxe_mr_iova_to_index(mr, iova);
        unsigned int bytes;
        struct page *page;
        void *va;

        while (length) {
                page = xa_load(&mr->page_list, index);
                if (!page)
                        return -EFAULT;

                bytes = min_t(unsigned int, length,
                              mr_page_size(mr) - page_offset);
                va = kmap_local_page(page);
                if (dir == RXE_FROM_MR_OBJ)
                        memcpy(addr, va + page_offset, bytes);
                else
                        memcpy(va + page_offset, addr, bytes);
                kunmap_local(va);

                page_offset = 0;
                addr += bytes;
                length -= bytes;
                index++;
        }

        return 0;
}

static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
                            unsigned int length, enum rxe_mr_copy_dir dir)
{
        unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
        unsigned int bytes;
        struct page *page;
        u8 *va;

        while (length) {
                page = ib_virt_dma_to_page(dma_addr);
                bytes = min_t(unsigned int, length,
                              PAGE_SIZE - page_offset);
                va = kmap_local_page(page);

                if (dir == RXE_TO_MR_OBJ)
                        memcpy(va + page_offset, addr, bytes);
                else
                        memcpy(addr, va + page_offset, bytes);

                kunmap_local(va);

                page_offset = 0;
                dma_addr += bytes;
                addr += bytes;
                length -= bytes;
        }
}

int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
                unsigned int length, enum rxe_mr_copy_dir dir)
{
        int err;

        if (length == 0)
                return 0;

        if (mr->ibmr.type == IB_MR_TYPE_DMA) {
                rxe_mr_copy_dma(mr, iova, addr, length, dir);
                return 0;
        }

        err = mr_check_range(mr, iova, length);
        if (unlikely(err)) {
                rxe_dbg_mr(mr, "iova out of range\n");
                return err;
        }

        return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
}
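
/* Direction summary for rxe_mr_copy_dir as used above:
 *      RXE_TO_MR_OBJ   - addr is the source; the MR's pages are written
 *                        (e.g. payload of an incoming RDMA WRITE)
 *      RXE_FROM_MR_OBJ - the MR's pages are the source; addr is written
 *                        (e.g. building an RDMA READ response)
 * The examples in parentheses are typical callers, given as illustration.
 */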

/* copy data in or out of a wqe, i.e. sg list
 * under the control of a dma descriptor
 */
int copy_data(
        struct rxe_pd *pd,
        int access,
        struct rxe_dma_info *dma,
        void *addr,
        int length,
        enum rxe_mr_copy_dir dir)
{
        struct rxe_sge *sge = &dma->sge[dma->cur_sge];
        int offset = dma->sge_offset;
        int resid = dma->resid;
        struct rxe_mr *mr = NULL;
        int bytes;
        u64 iova;
        int err;

        if (length > resid) {

        if (sge->length && (offset < sge->length)) {
                mr = lookup_mr(pd, access, sge->lkey, RXE_LOOKUP_LOCAL);

                if (offset >= sge->length) {

                        if (dma->cur_sge >= dma->num_sge) {

                        mr = lookup_mr(pd, access, sge->lkey,
                                       RXE_LOOKUP_LOCAL);

                if (bytes > sge->length - offset)
                        bytes = sge->length - offset;

                iova = sge->addr + offset;
                err = rxe_mr_copy(mr, iova, addr, bytes, dir);

        dma->sge_offset = offset;
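
/* dma->cur_sge, dma->sge_offset and dma->resid together form a cursor
 * over the WQE's sg list: cur_sge selects the current rxe_sge,
 * sge_offset is how far into that sge the previous copy stopped, and
 * resid is how many bytes of the whole transfer remain.  copy_data()
 * advances this cursor as it calls rxe_mr_copy() for each chunk, and
 * advance_dma_data() below moves it without copying.
 */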

int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
{
        unsigned int page_offset;
        unsigned long index;
        struct page *page;
        unsigned int bytes;
        int err;
        u8 *va;

        /* mr must be valid even if length is zero */
        if (WARN_ON(!mr))
                return -EINVAL;

        if (length == 0)
                return 0;

        if (mr->ibmr.type == IB_MR_TYPE_DMA)
                return -EFAULT;

        err = mr_check_range(mr, iova, length);
        if (err)
                return err;

        while (length > 0) {
                index = rxe_mr_iova_to_index(mr, iova);
                page = xa_load(&mr->page_list, index);
                page_offset = rxe_mr_iova_to_page_offset(mr, iova);
                if (!page)
                        return -EFAULT;

                bytes = min_t(unsigned int, length,
                              mr_page_size(mr) - page_offset);

                va = kmap_local_page(page);
                arch_wb_cache_pmem(va + page_offset, bytes);
                kunmap_local(va);

                length -= bytes;
                iova += bytes;
        }

        return 0;
}
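
/* arch_wb_cache_pmem() writes the mapped range back from the CPU caches
 * so that data previously placed there by an RDMA WRITE reaches the
 * persistence domain; this backs the RDMA FLUSH operation when
 * persistent placement (IB_ACCESS_FLUSH_PERSISTENT) is requested.
 */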

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
                        u64 compare, u64 swap_add, u64 *orig_val)
{
        unsigned int page_offset;
        struct page *page;
        u64 value;
        u64 *va;
        int err;

        if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
                rxe_dbg_mr(mr, "mr not in valid state\n");
                return RESPST_ERR_RKEY_VIOLATION;
        }

        if (mr->ibmr.type == IB_MR_TYPE_DMA) {
                page_offset = iova & (PAGE_SIZE - 1);
                page = ib_virt_dma_to_page(iova);
        } else {
                unsigned long index;

                err = mr_check_range(mr, iova, sizeof(value));
                if (err) {
                        rxe_dbg_mr(mr, "iova out of range\n");
                        return RESPST_ERR_RKEY_VIOLATION;
                }
                page_offset = rxe_mr_iova_to_page_offset(mr, iova);
                index = rxe_mr_iova_to_index(mr, iova);
                page = xa_load(&mr->page_list, index);
                if (!page)
                        return RESPST_ERR_RKEY_VIOLATION;
        }

        if (unlikely(page_offset & 0x7)) {
                rxe_dbg_mr(mr, "iova not aligned\n");
                return RESPST_ERR_MISALIGNED_ATOMIC;
        }

        va = kmap_local_page(page);

        spin_lock_bh(&atomic_ops_lock);
        value = *orig_val = va[page_offset >> 3];

        if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
                if (value == compare)
                        va[page_offset >> 3] = swap_add;
        } else {
                value += swap_add;
                va[page_offset >> 3] = value;
        }

        spin_unlock_bh(&atomic_ops_lock);

        kunmap_local(va);

        return 0;
}
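
/* Semantics of the two cases above, with va[page_offset >> 3] as "mem"
 * (illustrative walk-through):
 *      CMP&SWP:   *orig_val = mem; if (mem == compare) mem = swap_add;
 *      FETCH&ADD: *orig_val = mem; mem = mem + swap_add;
 * e.g. mem = 5, compare = 5, swap_add = 9  ->  mem becomes 9 and the
 * caller sees *orig_val = 5.  The spinlock makes the read-modify-write
 * appear atomic with respect to other software atomics on this device.
 */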

#if defined CONFIG_64BIT
/* only implemented or called for 64 bit architectures */
int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
        unsigned int page_offset;
        struct page *page;
        u64 *va;
        int err;

        /* See IBA oA19-28 */
        if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
                rxe_dbg_mr(mr, "mr not in valid state\n");
                return RESPST_ERR_RKEY_VIOLATION;
        }

        if (mr->ibmr.type == IB_MR_TYPE_DMA) {
                page_offset = iova & (PAGE_SIZE - 1);
                page = ib_virt_dma_to_page(iova);
        } else {
                unsigned long index;

                /* See IBA oA19-28 */
                err = mr_check_range(mr, iova, sizeof(value));
                if (unlikely(err)) {
                        rxe_dbg_mr(mr, "iova out of range\n");
                        return RESPST_ERR_RKEY_VIOLATION;
                }
                page_offset = rxe_mr_iova_to_page_offset(mr, iova);
                index = rxe_mr_iova_to_index(mr, iova);
                page = xa_load(&mr->page_list, index);
                if (!page)
                        return RESPST_ERR_RKEY_VIOLATION;
        }

        /* See IBA A19.4.2 */
        if (unlikely(page_offset & 0x7)) {
                rxe_dbg_mr(mr, "misaligned address\n");
                return RESPST_ERR_MISALIGNED_ATOMIC;
        }

        va = kmap_local_page(page);

        /* Do atomic write after all prior operations have completed */
        smp_store_release(&va[page_offset >> 3], value);

        kunmap_local(va);

        return 0;
}
#else
int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
        return RESPST_ERR_UNSUPPORTED_OPCODE;
}
#endif
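
/* A single aligned 64-bit store is naturally atomic on the 64-bit
 * architectures Linux supports, which is why the real implementation is
 * gated on CONFIG_64BIT; smp_store_release() additionally orders the
 * store after all earlier memory operations, matching the "after all
 * prior operations have completed" comment above.
 */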

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
        struct rxe_sge *sge = &dma->sge[dma->cur_sge];
        int offset = dma->sge_offset;
        int resid = dma->resid;

        while (length) {
                unsigned int bytes;

                if (offset >= sge->length) {
                        sge++;
                        dma->cur_sge++;
                        offset = 0;
                        if (dma->cur_sge >= dma->num_sge)
                                return -ENOSPC;
                }

                bytes = length;

                if (bytes > sge->length - offset)
                        bytes = sge->length - offset;

                offset += bytes;
                resid -= bytes;
                length -= bytes;
        }

        dma->sge_offset = offset;
        dma->resid = resid;

        return 0;
}

struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
                         enum rxe_mr_lookup_type type)
{
        struct rxe_mr *mr;
        struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
        int index = key >> 8;

        mr = rxe_pool_get_index(&rxe->mr_pool, index);
        if (!mr)
                return NULL;

        if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) ||
                     (type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||
                     mr_pd(mr) != pd || ((access & mr->access) != access) ||
                     mr->state != RXE_MR_STATE_VALID)) {
                rxe_put(mr);
                mr = NULL;
        }

        return mr;
}

int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct rxe_mr *mr;
        int ret;

        mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
        if (!mr) {
                rxe_dbg_qp(qp, "No MR for key %#x\n", key);
                ret = -EINVAL;
                goto err;
        }

        if (mr->rkey ? (key != mr->rkey) : (key != mr->lkey)) {
                rxe_dbg_mr(mr, "wr key (%#x) doesn't match mr key (%#x)\n",
                           key, (mr->rkey ? mr->rkey : mr->lkey));
                ret = -EINVAL;
                goto err_drop_ref;
        }

        if (atomic_read(&mr->num_mw) > 0) {
                rxe_dbg_mr(mr, "Attempt to invalidate an MR while bound to MWs\n");
                ret = -EINVAL;
                goto err_drop_ref;
        }

        if (unlikely(mr->ibmr.type != IB_MR_TYPE_MEM_REG)) {
                rxe_dbg_mr(mr, "Type (%d) is wrong\n", mr->ibmr.type);
                ret = -EINVAL;
                goto err_drop_ref;
        }

        mr->state = RXE_MR_STATE_FREE;
        ret = 0;

err_drop_ref:
        rxe_put(mr);
err:
        return ret;
}

/* user can (re)register fast MR by executing a REG_MR WQE.
 * user is expected to hold a reference on the ib mr until the
 * WQE completes.
 * Once a fast MR is created this is the only way to change the
 * private keys. It is the responsibility of the user to maintain
 * the ib mr keys in sync with rxe mr keys.
 */
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
        struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
        u32 key = wqe->wr.wr.reg.key;
        u32 access = wqe->wr.wr.reg.access;

        /* user can only register MR in free state */
        if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
                rxe_dbg_mr(mr, "mr->lkey = 0x%x not free\n", mr->lkey);
                return -EINVAL;
        }

        /* user can only register mr with qp in same protection domain */
        if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
                rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n");
                return -EINVAL;
        }

        /* user is only allowed to change key portion of l/rkey */
        if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
                rxe_dbg_mr(mr, "key = 0x%x has wrong index mr->lkey = 0x%x\n",
                           key, mr->lkey);
                return -EINVAL;
        }

        mr->access = access;
        mr->lkey = key;
        mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
        mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
        mr->state = RXE_MR_STATE_VALID;

        return 0;
}
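
/* Example of the "key portion" rule above (illustrative values): if the
 * fast MR was created with lkey 0x000012ab, a REG_MR WQE may supply any
 * key of the form 0x000012xx (same upper 24-bit index, new low byte),
 * e.g. 0x000012cd, but 0x000013cd would be rejected by the check on
 * (mr->lkey & ~0xff) != (key & ~0xff).
 */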

void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
        struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);

        rxe_put(mr_pd(mr));
        ib_umem_release(mr->umem);

        if (mr->ibmr.type != IB_MR_TYPE_DMA)
                xa_destroy(&mr->page_list);
}