/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2020 Intel Corporation.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device       *ibdev;
	struct mm_struct       *owning_mm;
	u64 iova;
	size_t			length;
	unsigned long		address;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
	u8 revoked : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

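/*
 * Worked example (illustrative): for iova = 0x11000, length = 0x3000 and
 * pgsz = 0x10000, the span from ALIGN_DOWN(0x11000, 0x10000) = 0x10000 to
 * ALIGN(0x14000, 0x10000) = 0x20000 covers exactly one 64K block, even
 * though length / pgsz would round down to zero. The block count is taken
 * from the aligned IOVA span, not from the raw length.
 */
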
static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_umem_block_iter_next(biter);)

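/*
 * Usage sketch (illustrative, not part of this header): a driver filling a
 * hypothetical HW page array from a umem. The "pas" array and "npas" bound
 * are assumptions for illustration; rdma_block_iter_dma_address() is the
 * block address accessor from <rdma/ib_verbs.h>.
 *
 *	struct ib_block_iter biter;
 *	size_t i = 0;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz) {
 *		if (WARN_ON(i >= npas))
 *			break;
 *		// each iteration yields one pgsz-aligned DMA block
 *		pas[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
 *	}
 */
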
#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}

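/*
 * Usage sketch (illustrative): a device that supports 4K/2M/1G pages and can
 * absorb any 64-byte-aligned offset below 4K would pass pgoff_bitmask = 4032
 * (binary 111111000000), matching the example above. The bitmap values here
 * are assumptions for illustration:
 *
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, SZ_4K | SZ_2M | SZ_1G, 4032);
 *	if (!pgsz)
 *		return -EINVAL;	// no supported page size fits this mapping
 */
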
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);

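/*
 * Lifecycle sketch (illustrative, not part of this header) for a pinned
 * dma-buf umem during MR registration; error unwinding around the caller's
 * MR object is omitted:
 *
 *	struct ib_umem_dmabuf *umem_dmabuf;
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, length, fd,
 *						access_flags);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	// pinned variant: pages are mapped and stay valid until release
 *	... program HW using umem_dmabuf->umem.sgt_append ...
 *	ib_umem_dmabuf_release(umem_dmabuf);
 *
 * The unpinned ib_umem_dmabuf_get() variant instead takes dma_buf_attach_ops
 * from the caller, which must call ib_umem_dmabuf_map_pages() under the
 * dma_resv lock from its move_notify handler.
 */
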
#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
							unsigned long offset,
							size_t size, int fd,
							int access,
							struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */