// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS("DMA_BUF");
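
/*
 * Count the number of contiguous DMA chunks in a mapped scatterlist: an entry
 * starts a new chunk unless it begins exactly where the previous chunk ended.
 * For example, entries at 0x10000+0x1000 and 0x11000+0x1000 count as a single
 * chunk.
 */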
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}
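
/*
 * Pin a buffer object for DMA by the given device. Three cases are handled
 * below: imported DMA-BUFs are attached and mapped through their exporter,
 * page-backed allocations get an SG table built from their pages, and DMA API
 * allocations are described via dma_get_sgtable().
 */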
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (obj->dma_buf) {
		struct dma_buf *buf = obj->dma_buf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}
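
/*
 * Release a mapping created by tegra_bo_pin(): DMA-BUF attachments are
 * unmapped and detached, while locally built SG tables are unmapped via the
 * DMA API and freed.
 */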
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = { 0 };
	void *vaddr;
	int ret;

	if (obj->vaddr)
		return obj->vaddr;

	if (obj->dma_buf) {
		ret = dma_buf_vmap_unlocked(obj->dma_buf, &map);
		if (ret < 0)
			return ERR_PTR(ret);

		return map.vaddr;
	}

	vaddr = vmap(obj->pages, obj->num_pages, VM_MAP,
		     pgprot_writecombine(PAGE_KERNEL));
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;

	if (obj->dma_buf)
		return dma_buf_vunmap_unlocked(obj->dma_buf, &map);

	vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};
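
/*
 * Allocate an IOVA range from the driver-managed address space and map the
 * buffer's SG table into the IOMMU domain; bo->size holds the size actually
 * mapped by iommu_map_sgtable().
 */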
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}
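
/*
 * Two backing strategies: with an IOMMU domain, the buffer is built from
 * individual pages and mapped through the domain; without one, a physically
 * contiguous write-combined allocation is made via the DMA API.
 */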
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}
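
/*
 * Wrap a foreign DMA-BUF in a tegra_bo. When the driver manages its own IOMMU
 * domain, the buffer is attached and mapped immediately so an IOVA can be
 * reserved for it; otherwise mapping is left to tegra_bo_pin() at use time.
 */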
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	/*
	 * If we need to use IOMMU API to map the dma-buf into the internally managed
	 * domain, map it first to the DRM device to get an sgt.
	 */
	if (tegra->domain) {
		attach = dma_buf_attach(buf, drm->dev);
		if (IS_ERR(attach)) {
			err = PTR_ERR(attach);
			goto free;
		}

		bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
		if (IS_ERR(bo->sgt)) {
			err = PTR_ERR(bo->sgt);
			goto detach;
		}

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;

		bo->gem.import_attach = attach;
	}

	get_dma_buf(buf);
	bo->dma_buf = buf;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
						  DMA_TO_DEVICE);
		dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	if (bo->dma_buf)
		dma_buf_put(bo->dma_buf);

	drm_gem_object_release(gem);
	kfree(bo);
}
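
/*
 * Worked example: a 1920x1080 dumb buffer at 32 bpp needs a minimum pitch of
 * 1920 * 32 / 8 = 7680 bytes. Assuming a pitch alignment of 64 bytes (the
 * value is device-dependent), the pitch stays 7680 and the resulting size is
 * 7680 * 1080 = 8294400 bytes, rounded up to page granularity at allocation.
 */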
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
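
/*
 * Two mmap strategies: DMA API allocations are mapped in one shot with
 * dma_mmap_wc(), whereas page-backed buffers rely on tegra_bo_fault() to
 * insert pages on demand.
 */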
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vm_flags_clear(vma, VM_PFNMAP);
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
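
/*
 * Export through the driver's own dma_buf_ops so that re-imports of locally
 * exported buffers can be short-circuited in tegra_gem_prime_import() by
 * comparing buf->ops against tegra_gem_prime_dmabuf_ops.
 */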
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}