// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "uverbs.h"

MODULE_IMPORT_NS("DMA_BUF");

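/*
 * ib_umem_dmabuf_map_pages - map the dma-buf attachment for device access
 *
 * Maps the attachment and trims the resulting scatterlist in place so it
 * covers only the page-aligned [umem.address, umem.address + umem.length)
 * range, then waits for the exporter's kernel fences so the page contents
 * are up to date.  The caller must hold the dma-buf reservation lock, and a
 * revoked umem cannot be mapped again.
 *
 * Illustrative call pattern (a sketch, not code from this file) for a
 * dynamic importer remapping after a move_notify:
 *
 *	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
 *	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
 *	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
 */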
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	long ret;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (umem_dmabuf->revoked)
		return -EINVAL;

	if (umem_dmabuf->sgt)
		goto wait_fence;

	sgt = dma_buf_map_attachment(umem_dmabuf->attach,
				     DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* modify the sg list in-place to match umem address and length */

	start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
	end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
		    PAGE_SIZE);
	for_each_sgtable_dma_sg(sgt, sg, i) {
		/* count entries overlapping the page-aligned umem range */
		if (start < cur + sg_dma_len(sg) && cur < end)
			nmap++;
		/* advance the first overlapping entry to the range start */
		if (cur <= start && start < cur + sg_dma_len(sg)) {
			unsigned long offset = start - cur;

			umem_dmabuf->first_sg = sg;
			umem_dmabuf->first_sg_offset = offset;
			sg_dma_address(sg) += offset;
			sg_dma_len(sg) -= offset;
			cur += offset;
		}
		/* trim the last overlapping entry at the range end */
		if (cur < end && end <= cur + sg_dma_len(sg)) {
			unsigned long trim = cur + sg_dma_len(sg) - end;

			umem_dmabuf->last_sg = sg;
			umem_dmabuf->last_sg_trim = trim;
			sg_dma_len(sg) -= trim;
			break;
		}
		cur += sg_dma_len(sg);
	}

	umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
	/*
	 * Although the sg list is valid now, the content of the pages
	 * may not be up-to-date. Wait for the exporter to finish
	 * the migration.
	 */
	ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
				    DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ETIMEDOUT;
	return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);

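/*
 * ib_umem_dmabuf_unmap_pages - undo ib_umem_dmabuf_map_pages()
 *
 * Restores the first and last scatterlist entries that were adjusted to
 * match the umem range and unmaps the attachment.  Must be called with the
 * dma-buf reservation lock held; it is a no-op if nothing is mapped.
 */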
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	/* restore the original sg list */
	if (umem_dmabuf->first_sg) {
		sg_dma_address(umem_dmabuf->first_sg) -=
			umem_dmabuf->first_sg_offset;
		sg_dma_len(umem_dmabuf->first_sg) +=
			umem_dmabuf->first_sg_offset;
		umem_dmabuf->first_sg = NULL;
		umem_dmabuf->first_sg_offset = 0;
	}
	if (umem_dmabuf->last_sg) {
		sg_dma_len(umem_dmabuf->last_sg) +=
			umem_dmabuf->last_sg_trim;
		umem_dmabuf->last_sg = NULL;
		umem_dmabuf->last_sg_trim = 0;
	}

	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
				 DMA_BIDIRECTIONAL);

	umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

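/*
 * Common helper for the dynamic and pinned variants: take a reference on
 * the dma-buf behind @fd, validate that [offset, offset + size) lies within
 * it, and create a dynamic attachment on @dma_device using the importer
 * callbacks in @ops.  Nothing is pinned or mapped here; that is left to the
 * callers below.
 */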
static struct ib_umem_dmabuf *
ib_umem_dmabuf_get_with_dma_device(struct ib_device *device,
				   struct device *dma_device,
				   unsigned long offset, size_t size,
				   int fd, int access,
				   const struct dma_buf_attach_ops *ops)
{
	struct dma_buf *dmabuf;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	unsigned long end;
	struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

	if (check_add_overflow(offset, (unsigned long)size, &end))
		return ret;

	if (unlikely(!ops || !ops->move_notify))
		return ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	if (dmabuf->size < end)
		goto out_release_dmabuf;

	umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
	if (!umem_dmabuf) {
		ret = ERR_PTR(-ENOMEM);
		goto out_release_dmabuf;
	}

	umem = &umem_dmabuf->umem;
	umem->ibdev = device;
	umem->length = size;
	umem->address = offset;
	umem->writable = ib_access_writable(access);
	umem->is_dmabuf = 1;

	if (!ib_umem_num_pages(umem))
		goto out_free_umem;

	umem_dmabuf->attach = dma_buf_dynamic_attach(
					dmabuf,
					dma_device,
					ops,
					umem_dmabuf);
	if (IS_ERR(umem_dmabuf->attach)) {
		ret = ERR_CAST(umem_dmabuf->attach);
		goto out_free_umem;
	}
	return umem_dmabuf;

out_free_umem:
	kfree(umem_dmabuf);

out_release_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}

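/*
 * ib_umem_dmabuf_get - create an unpinned (dynamic) dma-buf umem
 *
 * The importer must supply a move_notify callback in @ops and is
 * responsible for tearing down and re-establishing the mapping when the
 * exporter moves the buffer.  A rough sketch of such a callback (purely
 * illustrative, names are hypothetical; real importers such as mlx5 also
 * invalidate their hardware mappings here):
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
 *
 *		dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
 *		ib_umem_dmabuf_unmap_pages(umem_dmabuf);
 *	}
 */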
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ib_umem_dmabuf_get_with_dma_device(device, device->dma_device,
						  offset, size, fd, access, ops);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);

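/*
 * A pinned buffer must never be moved by the exporter, so this callback
 * firing indicates an exporter bug rather than a condition the importer
 * can handle.
 */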
static void
ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

	ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
			       "Invalidate callback should not be called when memory is pinned\n");
}

static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
	.allow_peer2peer = true,
	.move_notify = ib_umem_dmabuf_unsupported_move_notify,
};

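/*
 * Pinned variant: attach to the dma-buf, pin it so the exporter cannot
 * move it, and map the pages once up front under the reservation lock.
 * Intended for devices that cannot service move_notify invalidations.
 */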
struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	umem_dmabuf = ib_umem_dmabuf_get_with_dma_device(device, dma_device, offset,
							 size, fd, access,
							 &ib_umem_dmabuf_attach_pinned_ops);
	if (IS_ERR(umem_dmabuf))
		return umem_dmabuf;

	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = dma_buf_pin(umem_dmabuf->attach);
	if (err)
		goto err_release;
	umem_dmabuf->pinned = 1;

	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	if (err)
		goto err_unpin;
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

	return umem_dmabuf;

err_unpin:
	dma_buf_unpin(umem_dmabuf->attach);
err_release:
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
	ib_umem_release(&umem_dmabuf->umem);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned_with_dma_device);

struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access)
{
	return ib_umem_dmabuf_get_pinned_with_dma_device(device, device->dma_device,
							 offset, size, fd, access);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);

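/*
 * ib_umem_dmabuf_revoke - revoke device access to the dma-buf
 *
 * Unmaps the pages (and unpins them if they were pinned) under the
 * reservation lock and marks the umem revoked, so that any later
 * ib_umem_dmabuf_map_pages() call fails with -EINVAL.
 */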
void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	dma_resv_lock(dmabuf->resv, NULL);
	if (umem_dmabuf->revoked)
		goto end;
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	if (umem_dmabuf->pinned) {
		dma_buf_unpin(umem_dmabuf->attach);
		umem_dmabuf->pinned = 0;
	}
	umem_dmabuf->revoked = 1;
end:
	dma_resv_unlock(dmabuf->resv);
}
EXPORT_SYMBOL(ib_umem_dmabuf_revoke);

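/*
 * ib_umem_dmabuf_release - final teardown
 *
 * Revokes any remaining mapping, detaches from the dma-buf, drops the
 * dma-buf reference taken at creation time and frees the umem.
 */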
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	ib_umem_dmabuf_revoke(umem_dmabuf);

	dma_buf_detach(dmabuf, umem_dmabuf->attach);
	dma_buf_put(dmabuf);
	kfree(umem_dmabuf);
}