// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really GPU cache is out of scope here (handled on the cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

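/*
 * Editor's note: the dma_map_sgtable()/dma_unmap_sgtable() calls above are,
 * in effect, used only for their cache-maintenance side effects, per the
 * comment before sync_for_device(); the GPU-visible mappings themselves are
 * set up separately through the msm_gem_vma / address-space helpers below.
 */
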
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	msm_gem_unlock(obj);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we dont need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);

	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj locked */
static void
put_iova_spaces(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!msm_gem_is_locked(obj));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

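/*
 * Editor's note: illustrative (hypothetical) caller pattern for the pin/unpin
 * API above, e.g. from a submit or scanout path:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program the hardware with 'iova' ...
 *	msm_gem_unpin_iova(obj, aspace);	// the matching put
 */
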
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

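/*
 * Editor's note: illustrative (hypothetical) vmap usage, paired get/put:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... CPU access through 'ptr' ...
 *	msm_gem_put_vaddr(obj);
 *
 * The put only drops vmap_count; the actual vunmap is deferred to
 * msm_gem_vunmap() (e.g. from the shrinker, purge or final free).
 */
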
/* Update madvise status, returns true if not purged, else
 * false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

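/*
 * Editor's note: madv here is expected to be MSM_MADV_WILLNEED or
 * MSM_MADV_DONTNEED as passed in from userspace (the MSM_GEM_MADVISE ioctl);
 * objects marked DONTNEED land on the inactive_dontneed list and become
 * candidates for msm_gem_purge() when the shrinker runs.
 */
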
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	put_iova_spaces(obj);

	msm_gem_vunmap(obj);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	WARN_ON(!msm_gem_is_locked(obj));
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	mutex_lock(&priv->mm_lock);
	WARN_ON(msm_obj->active_count != 0);

	list_del_init(&msm_obj->mm_list);
	if (msm_obj->madv == MSM_MADV_WILLNEED)
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	else
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->mm_lock);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	/* Initially obj is idle, obj->madv == WILLNEED: */
	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}

	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
	mutex_unlock(&priv->mm_lock);

	return obj;

fail:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;

err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

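/*
 * Editor's note: illustrative (hypothetical) use of the kernel-internal BO
 * helpers above, e.g. allocating a small GPU-visible buffer:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace,
 *			&bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... use vaddr (CPU) and iova (GPU) ...
 *	msm_gem_kernel_put(bo, aspace, false);
 */
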
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}