// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
static struct dma_heap *sys_heap;
struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
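
/*
 * Illustrative example (not part of the original source): with the orders
 * above, a 1MiB + 8KiB request is typically satisfied by one order-8 page
 * (1MiB) followed by two order-0 pages, producing a 3-entry scatterlist
 * instead of 258 single-page entries.
 */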
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	/* Copy each page/length/offset triple into the new table */
	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}
static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}
static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		/* Reuse the existing kernel mapping */
		buffer->vmap_cnt++;
		dma_buf_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	dma_buf_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}
static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	dma_buf_map_clear(map);
}
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}
static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};
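
/*
 * For context, a minimal sketch (illustrative only, not part of this file)
 * of how a kernel driver would import a buffer exported with the ops above;
 * "dev" and "fd" are assumed to come from the importing driver, and error
 * handling is omitted:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	// ... program the device with sgt, then tear down in reverse:
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 *
 * dma_buf_attach() lands in system_heap_attach() and
 * dma_buf_map_attachment() in system_heap_map_dma_buf().
 */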
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}
static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};
static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");
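
/*
 * A minimal userspace usage sketch (illustrative, not part of this file),
 * using the dma-heap uapi from <linux/dma-heap.h>:
 *
 *	int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) == 0) {
 *		// data.fd now holds the dma-buf fd; it can be mmap()ed or
 *		// passed to drivers, and is freed on the last close().
 *		close(data.fd);
 *	}
 *	close(heap_fd);
 */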