// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

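/*
 * Per-buffer state: one folio pointer and one offset per PAGE_SIZE slot
 * of the buffer, an optional cached sg_table for CPU access, and the
 * list of distinct folios to unpin on release.
 */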
struct udmabuf {
        pgoff_t pagecount;
        struct folio **folios;
        struct sg_table *sg;
        struct miscdevice *device;
        pgoff_t *offsets;
        struct list_head unpin_list;
};

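/* List node tracking one distinct pinned folio, unpinned once on release. */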
struct udmabuf_folio {
        struct folio *folio;
        struct list_head list;
};

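/*
 * Fault handler for mmap: translate the faulting page offset into the
 * PFN of the backing folio page and insert it into the VMA.
 */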
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct udmabuf *ubuf = vma->vm_private_data;
        pgoff_t pgoff = vmf->pgoff;
        unsigned long pfn;

        if (pgoff >= ubuf->pagecount)
                return VM_FAULT_SIGBUS;

        pfn = folio_pfn(ubuf->folios[pgoff]);
        pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;

        return vmf_insert_pfn(vma, vmf->address, pfn);
}

static const struct vm_operations_struct udmabuf_vm_ops = {
        .fault = udmabuf_vm_fault,
};

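/* Only shared mappings are accepted; pages are inserted lazily on fault. */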
static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
        struct udmabuf *ubuf = buf->priv;

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        vma->vm_ops = &udmabuf_vm_ops;
        vma->vm_private_data = ubuf;
        vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
        return 0;
}

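/*
 * Map the whole buffer into contiguous kernel address space. The
 * temporary page array is built from the folio head pages, one entry
 * per PAGE_SIZE slot of the buffer.
 */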
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
        struct udmabuf *ubuf = buf->priv;
        struct page **pages;
        void *vaddr;
        pgoff_t pg;

        dma_resv_assert_held(buf->resv);

        pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        for (pg = 0; pg < ubuf->pagecount; pg++)
                pages[pg] = &ubuf->folios[pg]->page;

        vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
        kfree(pages);
        if (!vaddr)
                return -EINVAL;

        iosys_map_set_vaddr(map, vaddr);
        return 0;
}

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
        struct udmabuf *ubuf = buf->priv;

        dma_resv_assert_held(buf->resv);

        vm_unmap_ram(map->vaddr, ubuf->pagecount);
}

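/*
 * Build a scatterlist with one PAGE_SIZE entry per page of the buffer,
 * from the folio/offset pairs recorded at creation time, and DMA-map it
 * for the given device.
 */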
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
                                     enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct sg_table *sg;
        struct scatterlist *sgl;
        unsigned int i = 0;
        int ret;

        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
        if (ret < 0)
                goto err_alloc;

        for_each_sg(sg->sgl, sgl, ubuf->pagecount, i)
                sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
                             ubuf->offsets[i]);

        ret = dma_map_sgtable(dev, sg, direction, 0);
        if (ret < 0)
                goto err_map;
        return sg;

err_map:
        sg_free_table(sg);
err_alloc:
        kfree(sg);
        return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
                         enum dma_data_direction direction)
{
        dma_unmap_sgtable(dev, sg, direction, 0);
        sg_free_table(sg);
        kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
                                    enum dma_data_direction direction)
{
        return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
                          struct sg_table *sg,
                          enum dma_data_direction direction)
{
        return put_sg_table(at->dev, sg, direction);
}

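/* Unpin every folio queued at creation time and free the list nodes. */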
static void unpin_all_folios(struct list_head *unpin_list)
{
        struct udmabuf_folio *ubuf_folio;

        while (!list_empty(unpin_list)) {
                ubuf_folio = list_first_entry(unpin_list,
                                              struct udmabuf_folio, list);
                unpin_folio(ubuf_folio->folio);

                list_del(&ubuf_folio->list);
                kfree(ubuf_folio);
        }
}

static int add_to_unpin_list(struct list_head *unpin_list,
                             struct folio *folio)
{
        struct udmabuf_folio *ubuf_folio;

        ubuf_folio = kzalloc(sizeof(*ubuf_folio), GFP_KERNEL);
        if (!ubuf_folio)
                return -ENOMEM;

        ubuf_folio->folio = folio;
        list_add_tail(&ubuf_folio->list, unpin_list);
        return 0;
}

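/*
 * dma-buf release: drop the cached sg_table (if begin_cpu_access ever
 * created one), unpin the backing folios and free all bookkeeping.
 */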
static void release_udmabuf(struct dma_buf *buf)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;

        if (ubuf->sg)
                put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

        unpin_all_folios(&ubuf->unpin_list);
        kfree(ubuf->offsets);
        kfree(ubuf->folios);
        kfree(ubuf);
}

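/*
 * The first begin_cpu_access creates and caches the sg_table; subsequent
 * calls only sync the existing mapping for the CPU.
 */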
static int begin_cpu_udmabuf(struct dma_buf *buf,
                             enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;
        int ret = 0;

        if (!ubuf->sg) {
                ubuf->sg = get_sg_table(dev, buf, direction);
                if (IS_ERR(ubuf->sg)) {
                        ret = PTR_ERR(ubuf->sg);
                        ubuf->sg = NULL;
                }
        } else {
                dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
                                    direction);
        }

        return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
                           enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;

        if (!ubuf->sg)
                return -EINVAL;

        dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
        return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
        .cache_sgt_mapping = true,
        .map_dma_buf       = map_udmabuf,
        .unmap_dma_buf     = unmap_udmabuf,
        .release           = release_udmabuf,
        .mmap              = mmap_udmabuf,
        .vmap              = vmap_udmabuf,
        .vunmap            = vunmap_udmabuf,
        .begin_cpu_access  = begin_cpu_udmabuf,
        .end_cpu_access    = end_cpu_udmabuf,
};

#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

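/*
 * The backing memfd must be shmem or hugetlbfs, sealed against shrinking
 * (the pages must not go away under the dma-buf) but not sealed against
 * writes (the exported buffer stays writable, which F_SEAL_WRITE would
 * contradict).
 */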
static int check_memfd_seals(struct file *memfd)
{
        int seals;

        if (!memfd)
                return -EBADFD;

        if (!shmem_file(memfd) && !is_file_hugepages(memfd))
                return -EBADFD;

        seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
        if (seals == -EINVAL)
                return -EBADFD;

        if ((seals & SEALS_WANTED) != SEALS_WANTED ||
            (seals & SEALS_DENIED) != 0)
                return -EINVAL;

        return 0;
}

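/* Wrap the pinned buffer in a dma-buf and return a file descriptor for it. */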
static int export_udmabuf(struct udmabuf *ubuf,
                          struct miscdevice *device,
                          u32 flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct dma_buf *buf;

        ubuf->device = device;
        exp_info.ops  = &udmabuf_ops;
        exp_info.size = ubuf->pagecount << PAGE_SHIFT;
        exp_info.priv = ubuf;
        exp_info.flags = O_RDWR;

        buf = dma_buf_export(&exp_info);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        return dma_buf_fd(buf, flags);
}

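/*
 * Core creation path: validate alignment and the overall size limit,
 * pin the backing folios of each memfd range with memfd_pin_folios(),
 * then export the whole thing as a dma-buf fd.
 */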
static long udmabuf_create(struct miscdevice *device,
                           struct udmabuf_create_list *head,
                           struct udmabuf_create_item *list)
{
        pgoff_t pgoff, pgcnt, pglimit, pgbuf = 0;
        long nr_folios, ret = -EINVAL;
        struct file *memfd = NULL;
        struct folio **folios;
        struct udmabuf *ubuf;
        u32 i, j, k, flags;
        loff_t end;

        ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
        if (!ubuf)
                return -ENOMEM;

        INIT_LIST_HEAD(&ubuf->unpin_list);
        pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
        for (i = 0; i < head->count; i++) {
                if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
                        goto err;
                if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
                        goto err;
                ubuf->pagecount += list[i].size >> PAGE_SHIFT;
                if (ubuf->pagecount > pglimit)
                        goto err;
        }

        if (!ubuf->pagecount)
                goto err;

        ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
                                     GFP_KERNEL);
        if (!ubuf->folios) {
                ret = -ENOMEM;
                goto err;
        }
        ubuf->offsets = kcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
                                GFP_KERNEL);
        if (!ubuf->offsets) {
                ret = -ENOMEM;
                goto err;
        }

        pgbuf = 0;
        for (i = 0; i < head->count; i++) {
                memfd = fget(list[i].memfd);
                ret = check_memfd_seals(memfd);
                if (ret < 0)
                        goto err;

                pgcnt = list[i].size >> PAGE_SHIFT;
                folios = kmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
                if (!folios) {
                        ret = -ENOMEM;
                        goto err;
                }

                end = list[i].offset + (pgcnt << PAGE_SHIFT) - 1;
                ret = memfd_pin_folios(memfd, list[i].offset, end,
                                       folios, pgcnt, &pgoff);
                if (ret <= 0) {
                        kfree(folios);
                        if (!ret)
                                ret = -EINVAL;
                        goto err;
                }

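                /*
                 * Record, for each PAGE_SIZE slot of the buffer, which
                 * pinned folio backs it and at what offset; a large folio
                 * fills several consecutive slots. Queue each distinct
                 * folio once so release can unpin it exactly once.
                 */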
                nr_folios = ret;
                pgoff >>= PAGE_SHIFT;
                for (j = 0, k = 0; j < pgcnt; j++) {
                        ubuf->folios[pgbuf] = folios[k];
                        ubuf->offsets[pgbuf] = pgoff << PAGE_SHIFT;

                        if (j == 0 || ubuf->folios[pgbuf-1] != folios[k]) {
                                ret = add_to_unpin_list(&ubuf->unpin_list,
                                                        folios[k]);
                                if (ret < 0) {
                                        kfree(folios);
                                        goto err;
                                }
                        }

                        pgbuf++;
                        if (++pgoff == folio_nr_pages(folios[k])) {
                                pgoff = 0;
                                if (++k == nr_folios)
                                        break;
                        }
                }

                kfree(folios);
                fput(memfd);
                memfd = NULL;
        }

        flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
        ret = export_udmabuf(ubuf, device, flags);
        if (ret < 0)
                goto err;

        return ret;

err:
        if (memfd)
                fput(memfd);
        unpin_all_folios(&ubuf->unpin_list);
        kfree(ubuf->offsets);
        kfree(ubuf->folios);
        kfree(ubuf);
        return ret;
}

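/*
 * A minimal userspace sketch of the single-memfd path (illustrative
 * only, not part of this driver; error handling omitted and "size" is
 * assumed to be page-aligned):
 *
 *   int memfd = memfd_create("buf", MFD_ALLOW_SEALING);
 *   ftruncate(memfd, size);
 *   fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *
 *   struct udmabuf_create create = {
 *           .memfd  = memfd,
 *           .flags  = UDMABUF_FLAGS_CLOEXEC,
 *           .offset = 0,
 *           .size   = size,
 *   };
 *   int devfd = open("/dev/udmabuf", O_RDWR);
 *   int dmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
 */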
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
        struct udmabuf_create create;
        struct udmabuf_create_list head;
        struct udmabuf_create_item list;

        if (copy_from_user(&create, (void __user *)arg,
                           sizeof(create)))
                return -EFAULT;

        head.flags  = create.flags;
        head.count  = 1;
        list.memfd  = create.memfd;
        list.offset = create.offset;
        list.size   = create.size;

        return udmabuf_create(filp->private_data, &head, &list);
}

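/*
 * UDMABUF_CREATE_LIST: same as above but for an array of memfd ranges;
 * head.count is bounded by the list_limit module parameter.
 */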
static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
        struct udmabuf_create_list head;
        struct udmabuf_create_item *list;
        int ret = -EINVAL;
        u32 lsize;

        if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
                return -EFAULT;
        if (head.count > list_limit)
                return -EINVAL;
        lsize = sizeof(struct udmabuf_create_item) * head.count;
        list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
        if (IS_ERR(list))
                return PTR_ERR(list);

        ret = udmabuf_create(filp->private_data, &head, list);
        kfree(list);
        return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
                          unsigned long arg)
{
        long ret;

        switch (ioctl) {
        case UDMABUF_CREATE:
                ret = udmabuf_ioctl_create(filp, arg);
                break;
        case UDMABUF_CREATE_LIST:
                ret = udmabuf_ioctl_create_list(filp, arg);
                break;
        default:
                ret = -ENOTTY;
                break;
        }
        return ret;
}

static const struct file_operations udmabuf_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "udmabuf",
        .fops           = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
        int ret;

        ret = misc_register(&udmabuf_misc);
        if (ret < 0) {
                pr_err("Could not initialize udmabuf device\n");
                return ret;
        }

        ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
                                           DMA_BIT_MASK(64));
        if (ret < 0) {
                pr_err("Could not setup DMA mask for udmabuf device\n");
                misc_deregister(&udmabuf_misc);
                return ret;
        }

        return 0;
}

static void __exit udmabuf_dev_exit(void)
{
        misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");