/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device *ibdev;
	struct mm_struct *owning_mm;
	u64 iova;
	size_t length;
	unsigned long address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
	u8 revoked : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}
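
/*
 * Worked example (illustrative only, not part of the API): for iova = 0x1234,
 * length = 0x10000 and pgsz = 0x1000, the range is rounded out to
 * [0x1000, 0x12000), so ib_umem_num_dma_blocks() returns
 * (0x12000 - 0x1000) / 0x1000 = 17 blocks.
 */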

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                       \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                 \
	     __rdma_umem_block_iter_next(biter);)

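/*
 * Illustrative use only (not part of this header): a driver would typically
 * pick a page size with ib_umem_find_best_pgsz() and then walk the blocks to
 * build its HW page list. "pas" and "hw_pgsz_bitmap" are hypothetical names.
 *
 *	struct ib_block_iter biter;
 *	unsigned long pgsz = ib_umem_find_best_pgsz(umem, hw_pgsz_bitmap, iova);
 *	size_t i = 0;
 *
 *	if (!pgsz)
 *		return -EINVAL;
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		pas[i++] = rdma_block_iter_dma_address(&biter);
 */
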
#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance, if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032, then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
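
/*
 * Illustrative call (hypothetical names): a device whose page-list entries
 * carry a 64-byte-aligned offset field of up to 4032 bytes could pass
 * GENMASK(11, 6), i.e. the "111111000000" mask from the example above:
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, hw_pgsz_bitmap, GENMASK(11, 6));
 */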

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem,
				    size_t offset, size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
f7c6a7b5 | 225 | #endif /* IB_UMEM_H */ |