// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>

#include "msm_drv.h"
#include "msm_mmu.h"

struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
	atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

struct msm_iommu_pagetable {
	struct msm_mmu base;

	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	phys_addr_t ttbr;
	u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}
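
/*
 * Note the two-level structure here: a msm_iommu wraps the parent
 * iommu_domain (whose kernel-managed mappings live in TTBR1), while each
 * msm_iommu_pagetable is a GPU-private TTBR0 pagetable allocated on top
 * of that parent via msm_iommu_pagetable_create() below.
 */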

static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	size_t unmapped = 0;

	/* Unmap the block one page at a time */
	while (unmapped < size) {
		if (!ops->unmap(ops, iova + unmapped, 4096, NULL))
			break;
		unmapped += 4096;
	}
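
	/*
	 * The io-pgtable for these pagetables is created with null_tlb_ops
	 * (see below), so stale TLB entries have to be flushed explicitly
	 * against the parent's iommu_domain here.
	 */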
	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	return (unmapped == size) ? 0 : -EINVAL;
}

static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	size_t mapped = 0;
	u64 addr = iova;
	unsigned int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		/* Map the block one page at a time */
		while (size) {
			if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
				/* Undo the partial mapping on failure */
				msm_iommu_pagetable_unmap(mmu, iova, mapped);
				return -EINVAL;
			}

			phys += 4096;
			addr += 4096;
			size -= 4096;
			mapped += 4096;
		}
	}

	return 0;
}

static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0)
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}
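
/*
 * A minimal sketch of the intended caller, assuming a GPU driver that
 * switches pagetables per process (program_pagetable_switch() is a
 * hypothetical helper, not part of this file):
 *
 *	phys_addr_t ttbr;
 *	int asid;
 *
 *	if (!msm_iommu_pagetable_params(aspace->mmu, &ttbr, &asid))
 *		program_pagetable_switch(gpu, ttbr, asid);
 */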

static const struct msm_mmu_funcs pagetable_funcs = {
		.map = msm_iommu_pagetable_map,
		.unmap = msm_iommu_pagetable_unmap,
		.destroy = msm_iommu_pagetable_destroy,
};
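
/*
 * These TLB flush ops are deliberately no-ops: per-page TLB maintenance
 * from the io-pgtable code is skipped, and msm_iommu_pagetable_unmap()
 * instead issues a single iommu_flush_iotlb_all() on the parent domain.
 */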

static void msm_iommu_tlb_flush_all(void *cookie)
{
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
}

static const struct iommu_flush_ops null_tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg);

struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);

	if (!ttbr1_cfg)
		return ERR_PTR(-ENODEV);

	/*
	 * Defer setting the fault handler until we have a valid adreno_smmu
	 * to avoid accidentally installing a GPU specific fault handler for
	 * the display's iommu
	 */
	iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
	ttbr0_cfg.tlb = &null_tlb_ops;

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, iommu->domain);

	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		/* Enable stall on iommu fault: */
		adreno_smmu->set_stall(adreno_smmu->cookie, true);

		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation. But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want. So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}

static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
	struct adreno_smmu_fault_info info, *ptr = NULL;

	if (adreno_smmu->get_fault_info) {
		adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
		ptr = &info;
	}

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags, ptr);

	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);

	return 0;
}
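
/*
 * Note: set_stall() is enabled when the first aux pagetable is created,
 * so after a fault has been handled the GPU driver is expected to call
 * msm_iommu_resume_translation() below to let the SMMU make progress.
 */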

static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);

	adreno_smmu->resume_translation(adreno_smmu->cookie, true);
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);
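
	/*
	 * For example, an iova of 0x0001_0000_0000_0000 (bit 48 set) becomes
	 * 0xffff_0000_0000_0000 once GENMASK_ULL(63, 49) fills bits 63:49,
	 * mirroring how a 49-bit virtual address is sign extended to select
	 * the TTBR1 half of the address space.
	 */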

	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
		.detach = msm_iommu_detach,
		.map = msm_iommu_map,
		.unmap = msm_iommu_unmap,
		.destroy = msm_iommu_destroy,
		.resume_translation = msm_iommu_resume_translation,
};

struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
	struct msm_iommu *iommu;
	int ret;

	if (!domain)
		return ERR_PTR(-ENODEV);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}
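
/*
 * A minimal usage sketch, assuming the caller has already allocated an
 * iommu_domain for the device (error handling elided):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	struct msm_mmu *mmu = msm_iommu_new(dev, domain);
 *
 * msm_iommu_new() attaches the device itself, so on failure the caller
 * only has to free the domain it allocated.
 */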