// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"
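
/*
 * kref release function:  Tear down the address space once the last
 * reference is dropped.
 */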
static void
msm_gem_address_space_destroy(struct kref *kref)
{
	struct msm_gem_address_space *aspace = container_of(kref,
			struct msm_gem_address_space, kref);

	drm_mm_takedown(&aspace->mm);
	if (aspace->mmu)
		aspace->mmu->funcs->destroy(aspace->mmu);
	put_pid(aspace->pid);
	kfree(aspace);
}
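
/* Drop a reference to the address space (NULL is tolerated) */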
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
	if (aspace)
		kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
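
/*
 * Take a reference to the address space.  NULL and ERR_PTR values are
 * passed through untouched so callers need not check first.
 */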
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
	if (!IS_ERR_OR_NULL(aspace))
		kref_get(&aspace->kref);

	return aspace;
}
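
/*
 * A vma is in use if it is currently pinned, or if any of the fences
 * attached to it have not yet signaled.  Fences that have completed
 * are cleared out of fence_mask as a side effect, so subsequent
 * checks get cheaper.
 */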
bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
{
	if (vma->inuse > 0)
		return true;

	while (vma->fence_mask) {
		unsigned idx = ffs(vma->fence_mask) - 1;

		if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
			return true;

		vma->fence_mask &= ~BIT(idx);
	}

	return false;
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	unsigned size = vma->node.size;

	/* Print a message if we try to purge a vma in use */
	GEM_WARN_ON(msm_gem_vma_inuse(vma));

	/* Don't do anything if the memory isn't mapped */
	if (!vma->mapped)
		return;

	if (aspace->mmu)
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unpin_vma(struct msm_gem_vma *vma)
{
	if (GEM_WARN_ON(!vma->inuse))
		return;
	if (!GEM_WARN_ON(!vma->iova))
		vma->inuse--;
}

/* Replace pin reference with fence: */
void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
{
	vma->fctx[fctx->index] = fctx;
	vma->fence[fctx->index] = fctx->last_fence;
	vma->fence_mask |= BIT(fctx->index);
	msm_gem_unpin_vma(vma);
}

/* Map and pin vma: */
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size)
{
	int ret = 0;

	if (GEM_WARN_ON(!vma->iova))
		return -EINVAL;

	/* Increase the usage counter */
	vma->inuse++;

	if (vma->mapped)
		return 0;

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);

	/* Roll back the pin if the map failed */
	if (ret) {
		vma->mapped = false;
		vma->inuse--;
	}

	return ret;
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end)
{
	int ret;

	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

	spin_lock(&aspace->lock);
	ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
					  size, PAGE_SIZE, 0,
					  range_start, range_end, 0);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	vma->iova = vma->node.start;
	vma->mapped = false;

	kref_get(&aspace->kref);

	return 0;
}
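
/*
 * Create an address space managing [va_start, va_start + size) on top
 * of the given MMU.  The caller holds the initial reference, released
 * with msm_gem_address_space_put().
 */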
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
		u64 va_start, u64 size)
{
	struct msm_gem_address_space *aspace;

	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
	if (!aspace)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&aspace->lock);
	aspace->name = name;
	aspace->mmu = mmu;
	aspace->va_start = va_start;
	aspace->va_size  = size;

	drm_mm_init(&aspace->mm, va_start, size);

	kref_init(&aspace->kref);

	return aspace;
}