// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS(DMA_BUF);

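/*
 * A gntdev_dmabuf tracks a single buffer: either one exported to a dma-buf
 * file descriptor from grant references, or one imported from an existing
 * dma-buf and granted to a foreign domain. The union reflects that a
 * buffer is only ever one of the two.
 */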
struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};

struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};

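/*
 * Per-attachment state for exported buffers: the DMA-mapped sg-table is
 * created once in dmabuf_exp_ops_map_dma_buf(), cached here together with
 * its direction, and only torn down when the importer detaches.
 */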
struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This lock protects the dma-buf lists above. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

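/*
 * User space can wait until an exported buffer is released by all of its
 * users (IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED). Each waiter allocates a
 * wait object and queues it on exp_wait_list; dmabuf_exp_release() signals
 * the matching completion when the buffer's last reference is dropped.
 */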
static void dmabuf_exp_release(struct kref *kref);

static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the export list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: failure means that it has either
	 * already been released or the provided file descriptor is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and its reference count is now held by
	 * us, so prepare to wait: allocate a wait object and add it to the
	 * wait list, so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

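/*
 * dma_buf_ops for exported buffers: the backing pages come from the grant
 * mapping created in dmabuf_exp_from_refs(), so mapping an attachment only
 * requires wrapping those pages in an sg-table and DMA-mapping it for the
 * attaching device.
 */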
static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done in dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};

struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

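/*
 * Wrap the grant pages in args->pages into a new dma-buf, install a file
 * descriptor for it and add the buffer to the export list. The gntdev file
 * itself is pinned with get_file() so the grant device context outlives
 * any external dma-buf users.
 */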
static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}

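/*
 * Map @count grant references from domain @domid and export the resulting
 * pages as a new dma-buf; on success *fd holds the file descriptor of the
 * exported buffer.
 */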
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Silence an unnecessary gcc warning on i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */

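/*
 * Grant @domid access to each page of the imported buffer, storing the
 * resulting grant references in @refs. On failure the unclaimed references
 * are returned to the pool; the caller revokes any already-granted entries
 * via dmabuf_imp_end_foreign_access().
 */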
static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != INVALID_GRANT_REF)
			gnttab_end_foreign_access(refs[i], NULL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

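/*
 * Import the dma-buf behind @fd: attach to it for @dev, map it into an
 * sg-table, verify that it is page-aligned and has exactly @count pages
 * backed by struct page, then grant @domid access to those pages.
 */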
static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu bytes, user-space expects %d pages\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sgtable_page(sgt, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if page is valid: this can fail if we are given
		 * a page from VRAM or another resource which is not backed
		 * by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the imported dma-buf by its file descriptor and remove it from the
 * import list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment_unlocked(attach, gntdev_dmabuf->u.imp.sgt,
						  DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}
720 | ||
068e79f4 OA |
721 | static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv) |
722 | { | |
723 | struct gntdev_dmabuf *q, *gntdev_dmabuf; | |
724 | ||
725 | list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) | |
726 | dmabuf_imp_release(priv, gntdev_dmabuf->fd); | |
727 | } | |
728 | ||

/* DMA buffer IOCTL support. */

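/*
 * The handlers below back the IOCTL_GNTDEV_DMABUF_* interface declared in
 * include/uapi/xen/gntdev.h: exporting grant references as a dma-buf,
 * waiting for an exported buffer to be released, importing a dma-buf as
 * grant references, and releasing such an import again.
 */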
long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

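/*
 * One gntdev_dmabuf_priv exists per open gntdev file. @filp is remembered
 * so individual exports can take a reference on it in
 * dmabuf_exp_from_pages().
 */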
struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}