RDMA: Use the sg_table directly and remove the opencoded version from umem
author Maor Gottlieb <[email protected]>
Tue, 24 Aug 2021 14:25:31 +0000 (17:25 +0300)
committer Jason Gunthorpe <[email protected]>
Tue, 24 Aug 2021 22:52:40 +0000 (19:52 -0300)
This allows using the normal sg_table APIs and makes all the code
cleaner. Remove sg_head, sg_nents and nmap from ib_umem.
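
For context, a minimal sketch (not part of the patch; the helper name is
hypothetical) of how a driver walks the DMA-mapped segments after this change:
the sg_table embedded in umem->sgt_append is iterated with the generic
for_each_sgtable_dma_sg() helper instead of the removed sg_head/nmap pair,
since ib_umem_get() now maps the table with ib_dma_map_sgtable_attrs().

    #include <linux/printk.h>
    #include <linux/scatterlist.h>
    #include <rdma/ib_umem.h>

    /* Hypothetical illustration only; umem was DMA-mapped in ib_umem_get(). */
    static void example_walk_umem_dma(struct ib_umem *umem)
    {
    	struct scatterlist *sg;
    	unsigned int i;

    	/* Previously: for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) */
    	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
    		dma_addr_t addr = sg_dma_address(sg);
    		unsigned int len = sg_dma_len(sg);

    		pr_debug("segment %u: dma %pad len %u\n", i, &addr, len);
    	}
    }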

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Maor Gottlieb <[email protected]>
Signed-off-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
13 files changed:
drivers/infiniband/core/umem.c
drivers/infiniband/core/umem_dmabuf.c
drivers/infiniband/hw/hns/hns_roce_db.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/mlx4/doorbell.c
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx5/doorbell.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/sw/rdmavt/mr.c
drivers/infiniband/sw/rxe/rxe_mr.c
include/rdma/ib_umem.h
include/rdma/ib_verbs.h

index 42481e7a72e8e21660bc68cb64039d94411b5d53..86d479772fbc64d4dcd1ad00d00a76dec0e58ad2 100644 (file)
@@ -51,11 +51,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
        struct scatterlist *sg;
        unsigned int i;
 
-       if (umem->nmap > 0)
-               ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
-                               DMA_BIDIRECTIONAL);
+       if (dirty)
+               ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
+                                          DMA_BIDIRECTIONAL, 0);
 
-       for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i)
+       for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
                unpin_user_page_range_dirty_lock(sg_page(sg),
                        DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
 
@@ -111,7 +111,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
        /* offset into first SGL */
        pgoff = umem->address & ~PAGE_MASK;
 
-       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+       for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
                /* Walk SGL and reduce max page size if VA/PA bits differ
                 * for any address.
                 */
@@ -121,7 +121,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                 * the maximum possible page size as the low bits of the iova
                 * must be zero when starting the next chunk.
                 */
-               if (i != (umem->nmap - 1))
+               if (i != (umem->sgt_append.sgt.nents - 1))
                        mask |= va;
                pgoff = 0;
        }
@@ -231,30 +231,19 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                        &umem->sgt_append, page_list, pinned, 0,
                        pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
                        npages, GFP_KERNEL);
-               umem->sg_nents = umem->sgt_append.sgt.nents;
                if (ret) {
-                       memcpy(&umem->sg_head.sgl, &umem->sgt_append.sgt,
-                              sizeof(umem->sgt_append.sgt));
                        unpin_user_pages_dirty_lock(page_list, pinned, 0);
                        goto umem_release;
                }
        }
 
-       memcpy(&umem->sg_head.sgl, &umem->sgt_append.sgt,
-              sizeof(umem->sgt_append.sgt));
        if (access & IB_ACCESS_RELAXED_ORDERING)
                dma_attr |= DMA_ATTR_WEAK_ORDERING;
 
-       umem->nmap =
-               ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
-                                   DMA_BIDIRECTIONAL, dma_attr);
-
-       if (!umem->nmap) {
-               ret = -ENOMEM;
+       ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
+                                      DMA_BIDIRECTIONAL, dma_attr);
+       if (ret)
                goto umem_release;
-       }
-
-       ret = 0;
        goto out;
 
 umem_release:
@@ -314,7 +303,8 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                return -EINVAL;
        }
 
-       ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
+       ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
+                                umem->sgt_append.sgt.orig_nents, dst, length,
                                 offset + ib_umem_offset(umem));
 
        if (ret < 0)
index c6e875619fac715472877fff56241838f9796e6d..e824baf4640d1b29c31fc04a420da8f77d10d9cb 100644 (file)
@@ -55,9 +55,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
                cur += sg_dma_len(sg);
        }
 
-       umem_dmabuf->umem.sg_head.sgl = umem_dmabuf->first_sg;
-       umem_dmabuf->umem.sg_head.nents = nmap;
-       umem_dmabuf->umem.nmap = nmap;
+       umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
+       umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
        umem_dmabuf->sgt = sgt;
 
 wait_fence:
index d40ea3d87260dd09c24c8db112f5582538c3c0ec..751470c7a2cef4527fa962955ea3675f6446a0af 100644 (file)
@@ -42,8 +42,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
 
 found:
        offset = virt - page_addr;
-       db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
-       db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
+       db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;
+       db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;
        db->u.user_page = page;
        refcount_inc(&page->refcount);
 
index 717147ed0519db05f3a4106de76f4aa543aaf01a..e2114f2134bb23f7dcdf76d32012635370cbf03f 100644 (file)
@@ -2235,7 +2235,7 @@ static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
        pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
 
        if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
-               iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
+               iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
 
        rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
                *pbl = rdma_block_iter_dma_address(&biter);
index d41f03ccb0e1cec0605dcea4c3d46b92fd9e6491..9bbd695a9fd58cb372b133521ab1f81880fcc5a5 100644 (file)
@@ -75,7 +75,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
        list_add(&page->list, &context->db_page_list);
 
 found:
-       db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+       db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+                 (virt & ~PAGE_MASK);
        db->u.user_page = page;
        ++page->refcnt;
 
index 50becc0e4b62230cde584116ad49c12d3e0316f1..04a67b48160863e4b4dd83c032ebf2cc1d2f1dcc 100644 (file)
@@ -200,7 +200,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
        mtt_shift = mtt->page_shift;
        mtt_size = 1ULL << mtt_shift;
 
-       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+       for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
                if (cur_start_addr + len == sg_dma_address(sg)) {
                        /* still the same block */
                        len += sg_dma_len(sg);
@@ -273,7 +273,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
 
        *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
 
-       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+       for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
                /*
                 * Initialization - save the first chunk start as the
                 * current_block_start - block means contiguous pages.
index 9ca2e61807ec98c5f5efda5670f30fe3c267a8c1..6398e2f4857925923c6cd8dd776878d7d07f7166 100644 (file)
@@ -78,7 +78,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
        list_add(&page->list, &context->db_page_list);
 
 found:
-       db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+       db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+                 (virt & ~PAGE_MASK);
        db->u.user_page = page;
        ++page->refcnt;
 
index 3f1c5a4f158bfabed032b5c166d2f085df49be01..a520ac8ab68c304e53e0d5ac16e461d51c1951e2 100644 (file)
@@ -1226,7 +1226,8 @@ int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
        orig_sg_length = sg.length;
 
        cur_mtt = mtt;
-       rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
+       rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter,
+                            mr->umem->sgt_append.sgt.nents,
                             BIT(mr->page_shift)) {
                if (cur_mtt == (void *)mtt + sg.length) {
                        dma_sync_single_for_device(ddev, sg.addr, sg.length,
index fdc47ef7d861fd0305dfedd9d4f2710b548f31c0..f23d324bd5e1ebb404c6629bff7ad599aafce69d 100644 (file)
@@ -1481,7 +1481,7 @@ static int qedr_init_srq_user_params(struct ib_udata *udata,
                return PTR_ERR(srq->prod_umem);
        }
 
-       sg = srq->prod_umem->sg_head.sgl;
+       sg = srq->prod_umem->sgt_append.sgt.sgl;
        srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
 
        return 0;
index 34b7af6ab9c2c1cc12e43d933984e64bd054b2d6..dfb99a56d952d42627f3f450c4ffc8fba2e2cd34 100644 (file)
@@ -410,7 +410,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        mr->mr.page_shift = PAGE_SHIFT;
        m = 0;
        n = 0;
-       for_each_sg_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+       for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
                void *vaddr;
 
                vaddr = page_address(sg_page_iter_page(&sg_iter));
index be4bcb420fab3a8840d387fae107a403a7d73413..b5fcb14350c796995fe2382238f3ffa3b71f32d2 100644 (file)
@@ -143,7 +143,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
        if (length > 0) {
                buf = map[0]->buf;
 
-               for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+               for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
                        if (num_buf >= RXE_BUF_PER_MAP) {
                                map++;
                                buf = map[0]->buf;
index 33cb23b2ee3cb43a06e2f1386b627a8f542b7898..5ae9dff74dac8c8b500fb06b457335cc69cf52ab 100644 (file)
@@ -26,10 +26,7 @@ struct ib_umem {
        u32 is_odp : 1;
        u32 is_dmabuf : 1;
        struct work_struct      work;
-       struct sg_append_table  sgt_append;
-       struct sg_table sg_head;
-       int             nmap;
-       unsigned int    sg_nents;
+       struct sg_append_table sgt_append;
 };
 
 struct ib_umem_dmabuf {
@@ -57,7 +54,7 @@ static inline int ib_umem_offset(struct ib_umem *umem)
 static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
                                               unsigned long pgsz)
 {
-       return (sg_dma_address(umem->sg_head.sgl) + ib_umem_offset(umem)) &
+       return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
               (pgsz - 1);
 }
 
@@ -78,7 +75,8 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
                                                struct ib_umem *umem,
                                                unsigned long pgsz)
 {
-       __rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
+       __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
+                               umem->sgt_append.sgt.nents, pgsz);
 }
 
 /**
@@ -129,7 +127,7 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
                                                    unsigned long pgsz_bitmap,
                                                    u64 pgoff_bitmask)
 {
-       struct scatterlist *sg = umem->sg_head.sgl;
+       struct scatterlist *sg = umem->sgt_append.sgt.sgl;
        dma_addr_t dma_addr;
 
        dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
index 371df1c80aeb6c77c687c4291a5983a23ab1322f..2dba30849731e1d2f18e7366aa091f2e323b283c 100644 (file)
@@ -4057,6 +4057,34 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                   dma_attrs);
 }
 
+/**
+ * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sg: The sg_table object describing the buffer
+ * @direction: The direction of the DMA
+ * @attrs: Optional DMA attributes for the map operation
+ */
+static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
+                                          struct sg_table *sgt,
+                                          enum dma_data_direction direction,
+                                          unsigned long dma_attrs)
+{
+       if (ib_uses_virt_dma(dev)) {
+               ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
+               return 0;
+       }
+       return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
+}
+
+static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
+                                             struct sg_table *sgt,
+                                             enum dma_data_direction direction,
+                                             unsigned long dma_attrs)
+{
+       if (!ib_uses_virt_dma(dev))
+               dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
+}
+
 /**
  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
  * @dev: The device for which the DMA addresses are to be created