// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 */

#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

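/*
 * Note: per the comment above, these two helpers exist purely for CPU cache
 * maintenance of freshly allocated pages.  The dma addresses produced by
 * dma_map_sgtable() are not what the GPU or display use; those mappings are
 * managed separately through the msm_gem_vma / address-space code below.
 */
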
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = pfn_to_page(__phys_to_pfn(paddr));
		paddr += PAGE_SIZE;
	}

	return p;
}

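/*
 * Backing pages (and the sg_table used for cache maintenance and IOMMU
 * mapping) are allocated lazily on first use, and are kept around until
 * the object is purged, evicted or freed.
 */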
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);

		update_inactive(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

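/*
 * pin_count tracks active users of the backing pages (kernel vmaps, GPU/
 * display mappings, etc.).  The helpers below take the object lock, adjust
 * the count, and re-file the object on the appropriate inactive list via
 * update_inactive().
 */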
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	if (!IS_ERR(p)) {
		msm_obj->pin_count++;
		update_inactive(msm_obj);
	}

	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_inactive(msm_obj);
	msm_gem_unlock(obj);
}

static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
		return pgprot_writecombine(prot);
	return prot;
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

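/*
 * Userspace mmap()s GEM buffers through a per-object "fake" offset in the
 * DRM device file: drm_gem_create_mmap_offset() reserves that offset and
 * drm_vma_node_offset_addr() reports it back to userspace.
 */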
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);

	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}

static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		int ret;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_init_vma(aspace, vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ERR_PTR(ret);
		}
	} else {
		GEM_WARN_ON(vma->iova < range_start);
		GEM_WARN_ON((vma->iova + obj->size) > range_end);
	}

	return vma;
}

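/*
 * A GEM object can be mapped into multiple address spaces; each mapping is
 * tracked by a msm_gem_vma on the object's vmas list.  Pinning a vma maps
 * the object's sg_table into that address space with IOMMU protection flags
 * derived from the BO flags, and bumps pin_count.
 */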
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
		prot |= IOMMU_CACHE;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}

void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	msm_gem_unpin_vma(vma);

	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);

	update_inactive(msm_obj);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	return get_vma_locked(obj, aspace, 0, U64_MAX);
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = get_vma_locked(obj, aspace, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret)
		*iova = vma->iova;

	return ret;
}

/*
 * get iova and pin it. Should have a matching put
 * limits iova to specified range (in pages)
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

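/*
 * Typical usage of the get/pin helpers (illustrative sketch, not part of
 * the original file):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... submit work that references iova ...
 *	msm_gem_unpin_iova(obj, aspace);
 */
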
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	msm_gem_lock(obj);
	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->iova;
	}
	msm_gem_unlock(obj);

	return ret;
}

static int clear_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	if (!vma)
		return 0;

	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_purge_vma(vma->aspace, vma);
	msm_gem_close_vma(vma->aspace, vma);
	del_vma(vma);

	return 0;
}

/*
 * Get the requested iova but don't pin it. Fails if the requested iova is
 * not available. Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * it needs to.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unpin_vma_locked(obj, vma);
	}
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

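/*
 * Kernel virtual mappings (vaddr) are created lazily by get_vaddr() below
 * and torn down by msm_gem_vunmap() once no vmaps remain, typically from
 * the shrinker or when the object is purged or freed.
 */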
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_inactive(msm_obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * ring.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}

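/*
 * MSM_MADV_DONTNEED marks an object as purgeable.  msm_gem_purge() below
 * drops the backing pages, IOMMU mappings and mmap offset, and marks the
 * object __MSM_MADV_PURGED; subsequent CPU faults on an existing mapping
 * then return SIGBUS (see msm_gem_fault()).
 */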
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_inactive(msm_obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	might_sleep();
	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}

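/*
 * Re-file an idle object on the inactive list matching its current state:
 * willneed (evictable), dontneed (purgeable), or unpinned (already purged/
 * evicted, or never populated).  Caller must hold the object lock;
 * priv->mm_lock protects the lists themselves.
 */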
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
				    true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

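/*
 * msm_gem_cpu_prep() blocks (or, with MSM_PREP_NOSYNC, polls) on the
 * object's reservation fences before CPU access; msm_gem_cpu_fini() is
 * currently a no-op aside from the cache-maintenance TODO above.
 */
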
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, " vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				msm_gem_vma_inuse(vma));
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	dma_resv_describe(robj, m);

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, " flags id ref offset kaddr size madv name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total: %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active: %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly! Use drm_gem_object_put() */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));

	return 0;
}

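/*
 * Note: the mmap path does not populate the mapping up front; pages are
 * inserted on demand from msm_gem_fault(), hence VM_MIXEDMAP above.
 */
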
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.mmap = msm_gem_object_mmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	case MSM_BO_CACHED_COHERENT:
		if (priv->has_cached_coherent)
			break;
		fallthrough;
	default:
		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->node);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

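/*
 * msm_gem_new_impl() only validates the cache flags and allocates the
 * msm_gem_object wrapper; msm_gem_new() below still has to initialize the
 * underlying GEM object (shmem-backed, or VRAM carveout when there is no
 * IOMMU) and put it on the driver's object and inactive lists.
 */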
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);

		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		/* Call chain get_pages() -> update_inactive() tries to
		 * access msm_obj->mm_list, but it is not initialized yet.
		 * To avoid NULL pointer dereference error, initialize
		 * mm_list to be empty.
		 */
		INIT_LIST_HEAD(&msm_obj->mm_list);

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

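/*
 * Import path: the dma-buf's sg_table is provided by the exporter, so we
 * only allocate the page array here and never call get_pages()/put_pages()
 * on imported objects (msm_gem_free_object() takes care not to release
 * pages it does not own).
 */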
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		return ERR_PTR(ret);

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	void *vaddr;
	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;

err:
	drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

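/*
 * Typical pairing of the kernel-internal allocation helpers (illustrative
 * sketch, not part of the original file):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC,
 *			aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... use vaddr / iova ...
 *	msm_gem_kernel_put(bo, aspace);
 */
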
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);
	drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}