/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

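/*
 * The functions below implement the host1x_bo_ops interface on top of
 * Tegra GEM objects, so that the host1x bus code can reference-count,
 * pin and map buffers that it only sees as struct host1x_bo pointers.
 */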
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_unreference_unlocked(&obj->gem);
}

static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->paddr;
}

static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}

static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}

static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	return obj->vaddr + page * PAGE_SIZE;
}

static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_reference(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};

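/*
 * Reserve a region in the drm_mm IOVA allocator and map the buffer's
 * scatter-gather list into the Tegra IOMMU domain at that address. On
 * success, bo->paddr holds the I/O virtual address through which the
 * hardware accesses the buffer.
 */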
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}

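/*
 * Allocate and initialize the common part of a Tegra GEM object: the
 * host1x_bo wrapper, the GEM object itself (with the size rounded up
 * to a multiple of the page size) and the fake mmap offset that
 * userspace later passes to mmap().
 */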
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
				      bo->paddr);
	}
}

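/*
 * Back the GEM object with individual shmem pages and build an SG
 * table for them. This path is used when an IOMMU is available, since
 * the buffer does not need to be physically contiguous in that case.
 */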
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}

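/*
 * Pick the backing store for a new buffer: when an IOMMU domain is
 * available, use discontiguous shmem pages and map them through the
 * IOMMU; otherwise fall back to a physically contiguous write-combined
 * DMA allocation.
 */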
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
						   GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size, unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}

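/*
 * Wrap a foreign dma-buf in a Tegra GEM object: attach to the buffer,
 * map it into the device address space (through the IOMMU if one is
 * available, otherwise requiring a single contiguous chunk) and
 * remember the attachment so the mapping can be torn down when the
 * object is freed.
 */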
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

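/*
 * Page fault handler for mmap()ed buffers that are backed by shmem
 * pages: look up the page covering the faulting address and insert it
 * into the VMA. Contiguous buffers never fault because they are mapped
 * upfront in tegra_drm_mmap().
 */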
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		 PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

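/*
 * Back a userspace mapping of a GEM object. Contiguous (non-IOMMU)
 * buffers are mapped in one go with dma_mmap_writecombine(), while
 * page-backed buffers are populated lazily through tegra_bo_fault();
 * both end up with write-combined page protection.
 */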
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
					    bo->paddr, gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

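/*
 * dma-buf exporter callbacks. When the buffer is page-backed, a fresh
 * SG table is built and DMA-mapped for the importing device; for
 * contiguous buffers a single-entry SG table pointing at the existing
 * DMA address is enough.
 */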
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page, void *addr)
{
}

static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}

static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

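/*
 * PRIME export and import entry points. Importing a dma-buf that this
 * driver exported itself is short-circuited: the existing GEM object
 * is referenced again instead of being wrapped in a new one.
 */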
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return dma_buf_export(&exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}