drivers/dma-buf/heaps/cma_heap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <[email protected]> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <[email protected]>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

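/*
 * A CMA heap wraps a single CMA area: the dma-heap handle it was
 * registered with and the struct cma it allocates from.
 */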
struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

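/*
 * Per-buffer state: the contiguous CMA allocation (cma_pages), a page
 * array used to build per-attachment sg_tables and to back mmap/vmap,
 * and the list of attachments. @lock protects the attachment list and
 * the kernel-mapping refcount (vmap_cnt) and address (vaddr).
 */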
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

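/*
 * Per-attachment state: a private sg_table built from the buffer's page
 * array, plus a flag tracking whether it is currently DMA-mapped so the
 * CPU-access hooks only sync mapped attachments.
 */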
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

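/*
 * Attach builds a fresh sg_table covering the whole buffer from the page
 * array and links the attachment on the buffer's list; detach undoes both.
 */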
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

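/*
 * map/unmap hand the attachment's own sg_table to the DMA API so each
 * device gets addresses valid for it; @mapped gates cache maintenance.
 */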
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

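/*
 * CPU-access hooks: sync every currently mapped attachment for the CPU
 * (or back to the device) and keep any kernel vmap of the buffer coherent.
 */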
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

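/*
 * Userspace mappings are populated lazily: the fault handler looks up the
 * backing page for the faulting offset and takes a reference on it.
 */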
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

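/*
 * Kernel mapping: the pages are vmap()ed on first use and the mapping is
 * shared via a refcount (vmap_cnt); it is torn down when the count drops
 * to zero.
 */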
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

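/*
 * Final release: warn if a kernel mapping leaked, then free the page array
 * and hand the contiguous allocation back to the CMA area.
 */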
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

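/*
 * Allocation: round the request up to whole pages, grab a contiguous run
 * from the CMA area, zero it (page by page via kmap_atomic when it may be
 * highmem), build the page array, and export the result as a dma-buf.
 */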
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

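/*
 * Registration: wrap a CMA area in a cma_heap and register it with the
 * dma-heap core; the default CMA area (if any) is added at module init.
 */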
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");