// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;

struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

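/*
 * Make a private copy of the buffer's scatter-gather table. The pages
 * themselves are shared with the original table; only the sg entries are
 * duplicated, so each attachment can be mapped independently.
 */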
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

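/*
 * dma_buf attach callback: give the attaching device its own copy of the
 * buffer's sg_table and add it to the buffer's attachment list.
 */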
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

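/* dma_buf detach callback: drop the attachment and free its sg_table copy. */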
static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

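/* Map the attachment's copy of the sg_table for DMA on the attaching device. */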
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

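/* Undo system_heap_map_dma_buf(): unmap the attachment's sg_table. */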
static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

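/*
 * Prepare the buffer for CPU access: invalidate any kernel vmap range and
 * sync every currently mapped attachment for the CPU.
 */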
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

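/*
 * End CPU access: flush any kernel vmap range and sync every currently
 * mapped attachment back for the device.
 */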
static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

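/*
 * Map the buffer's pages into a userspace VMA one page at a time.
 * dma_resv_assert_held() documents that the caller holds the dma-buf's
 * reservation lock when this is invoked.
 */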
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	dma_resv_assert_held(dmabuf->resv);

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

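/*
 * Build a contiguous kernel mapping of the buffer by collecting its pages
 * into a temporary array and handing them to vmap().
 */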
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

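/*
 * dma_buf vmap callback: create the kernel mapping on first use, reuse it on
 * later calls, and track the reference count under the buffer lock.
 */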
static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);
	return ret;
}

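/* Drop one vmap reference and tear down the kernel mapping on the last put. */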
static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

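/*
 * Final release of the dma-buf: free every (possibly higher-order) page
 * referenced by the sg_table, then the table and the buffer itself.
 */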
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

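/*
 * Allocate the largest page order that still fits the remaining size and does
 * not exceed max_order, falling back to smaller orders on failure.
 */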
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;
		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

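/*
 * Heap allocate callback: gather pages largest-order first, build the
 * buffer's sg_table from them and export the result as a dma-buf.
 */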
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

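/* Register the "system" heap with the dma-heap framework at module init. */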
static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);