// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

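/*
 * An address space pairs an MMU with a drm_mm range allocator that hands
 * out iovas in page-sized units.  Each vma pins its address space with a
 * kref from msm_gem_init_vma() until msm_gem_close_vma().
 *
 * Typical call sequence (illustrative):
 *
 *	aspace = msm_gem_address_space_create(mmu, "gpu", va_start, size);
 *	msm_gem_init_vma(aspace, vma, npages, range_start, range_end);
 *	msm_gem_map_vma(aspace, vma, prot, sgt, npages);
 *	...
 *	msm_gem_unmap_vma(aspace, vma);
 *	msm_gem_purge_vma(aspace, vma);
 *	msm_gem_close_vma(aspace, vma);
 *	msm_gem_address_space_put(aspace);
 */
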
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}

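/* Drop a reference; the last put tears the address space down */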
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

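/* Take a reference, tolerating NULL and ERR_PTR values */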
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	unsigned size = vma->node.size << PAGE_SHIFT;

	/* Print a message if we try to purge a vma in use */
	if (WARN_ON(vma->inuse > 0))
		return;

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (!WARN_ON(!vma->iova))
		vma->inuse--;
}

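/*
 * Map the backing pages at the vma's iova and bump the usage count.
 * The MMU mapping is created on first use and kept until the vma is
 * purged; subsequent calls only increment the inuse counter.
 */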
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int npages)
{
	unsigned size = npages << PAGE_SHIFT;
	int ret = 0;

	if (WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	if (ret)
		vma->mapped = false;

	return ret;
}

/* Close an iova. Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int npages,
		u64 range_start, u64 range_end)
{
	int ret;

	if (WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
		0, range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start << PAGE_SHIFT;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}

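/*
 * Create an address space spanning [va_start, va_start + size) on top
 * of the given MMU.  The address space takes ownership of the MMU and
 * destroys it when the last reference is dropped.
 */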
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;

	drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);

	kref_init(&aspace->kref);

	return aspace;
}