/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <[email protected]>
 *
 * Based on driver/iommu/mtk_iommu.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <linux/module.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000

#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define F_MMU_FAULT_VA_MSK			0xfffff000
#define MTK_PROTECT_PA_ALIGN			128

#define REG_MMU_CTRL_REG			0x210
#define F_MMU_CTRL_COHERENT_EN			BIT(8)
#define REG_MMU_IVRP_PADDR			0x214
#define REG_MMU_INT_CONTROL			0x220
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TABLE_WALK_FAULT			BIT(4)
#define F_INT_TLB_MISS_FAULT			BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW		BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW		BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_FAULT_ST			0x224
#define REG_MMU_FAULT_VA			0x228
#define REG_MMU_INVLD_PA			0x22C
#define REG_MMU_INT_ID				0x388
#define REG_MMU_INVALIDATE			0x5c0
#define REG_MMU_INVLD_START_A			0x5c4
#define REG_MMU_INVLD_END_A			0x5c8

#define REG_MMU_INV_SEL				0x5d8
#define REG_MMU_STANDARD_AXI_MODE		0x5e8

#define REG_MMU_DCM				0x5f0
#define F_MMU_DCM_ON				BIT(1)
#define REG_MMU_CPE_DONE			0x60c
#define F_DESC_VALID				0x2
#define F_DESC_NONSEC				BIT(3)
#define MT2701_M4U_TF_LARB(TF)			(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)			(((TF) >> 8) & 0xF)
/* MTK generation one IOMMU HW only supports 4KB page mappings */
#define MT2701_IOMMU_PAGE_SHIFT			12
#define MT2701_IOMMU_PAGE_SIZE			(1UL << MT2701_IOMMU_PAGE_SHIFT)

/*
 * The MTK m4u supports a 4GB iova address space and only 4KB page
 * mappings, so the pagetable size is exactly 4MB.
 */
#define M2701_IOMMU_PGT_SIZE			SZ_4M

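/*
 * The pagetable is a flat, single-level array: one u32 descriptor per
 * 4KB page of iova space, indexed by (iova >> MT2701_IOMMU_PAGE_SHIFT).
 */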
struct mtk_iommu_domain {
	spinlock_t		pgtlock; /* lock for page table */
	struct iommu_domain	domain;
	u32			*pgt_va;
	dma_addr_t		pgt_pa;
	struct mtk_iommu_data	*data;
};

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

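/*
 * Client ports are numbered globally; each larb owns the contiguous
 * range starting at its entry in mt2701_m4u_in_larb[]. The helpers
 * below split a global port id into a (larb, port-within-larb) pair.
 */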
static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if (id >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}

static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all done */
}

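/*
 * Invalidate the TLB for [iova, iova + size). Completion is polled via
 * REG_MMU_CPE_DONE; on a timeout, fall back to a full flush.
 */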
static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
				unsigned long iova, size_t size)
{
	int ret;
	u32 tmp;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		data->base + REG_MMU_INV_SEL);
	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_START_A);
	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
					tmp, tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_tlb_flush_all(data);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

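/*
 * Fault ISR: latch the faulting iova/pa, decode the offending larb and
 * port from REG_MMU_INT_ID, report the fault, clear the interrupt and
 * flush the whole TLB so no stale translation survives the fault.
 */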
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;

	/* Read error information from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = MT2701_M4U_TF_LARB(regval);
	fault_port = MT2701_M4U_TF_PORT(regval);

	/*
	 * MTK v1 IOMMU HW cannot tell whether a fault was a read or a
	 * write, so report it as a read fault.
	 */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
			int_state, fault_iova, fault_pa,
			fault_larb, fault_port);

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

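/*
 * Set or clear the SMI larb MMU-enable bit for each iommu port owned
 * by @dev; the mtk-smi larb driver applies these bits to the hardware.
 */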
static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_iommu_client_priv *head, *cur, *next;
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;

	head = dev->archdata.iommu;
	list_for_each_entry_safe(cur, next, &head->client, client) {
		larbid = mt2701_m4u_to_larb(cur->mtk_m4u_id);
		portid = mt2701_m4u_to_port(cur->mtk_m4u_id);
		larb_mmu = &data->smi_imu.larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

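/*
 * Allocate the 4MB flat pagetable as one DMA-coherent block and point
 * the hardware page-table walker at it.
 */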
static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
	struct mtk_iommu_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->pgt_va = dma_zalloc_coherent(data->dev,
				M2701_IOMMU_PGT_SIZE,
				&dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

	dom->data = data;

	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dom->data;

	dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
			dom->pgt_va, dom->pgt_pa);
	kfree(to_mtk_domain(domain));
}

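/*
 * The hardware has a single m4u domain; the first attach finalises it
 * (allocates the pagetable), later attaches only enable their ports.
 */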
static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
	struct mtk_iommu_data *data;
	int ret;

	if (!priv)
		return -ENODEV;

	data = dev_get_drvdata(priv->m4udev);
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
	struct mtk_iommu_data *data;

	if (!priv)
		return;

	data = dev_get_drvdata(priv->m4udev);
	mtk_iommu_config(data, dev, false);
}

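/*
 * Write one descriptor (pa | VALID | NONSEC) per 4KB page. If an entry
 * is already in use, roll back everything written so far; the final
 * map_size check then returns -EEXIST.
 */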
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;
	int map_size = 0;

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < page_num; i++) {
		if (pgt_base_iova[i]) {
			memset(pgt_base_iova, 0, i * sizeof(u32));
			break;
		}
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
		map_size += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_tlb_flush_range(dom->data, iova, size);

	return map_size == size ? 0 : -EEXIST;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, page_num * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_tlb_flush_range(dom->data, iova, size);

	return size;
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
	pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

/*
 * MTK generation one IOMMU HW only supports one iommu domain; all the
 * clients share the same iova address space.
 */
static int mtk_iommu_create_mapping(struct device *dev,
				    struct of_phandle_args *args)
{
	struct mtk_iommu_client_priv *head, *priv, *next;
	struct platform_device *m4updev;
	struct dma_iommu_mapping *mtk_mapping;
	struct device *m4udev;
	int ret;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev->archdata.iommu) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (!head)
			return -ENOMEM;

		dev->archdata.iommu = head;
		INIT_LIST_HEAD(&head->client);
		head->m4udev = &m4updev->dev;
	} else {
		head = dev->archdata.iommu;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_free_mem;
	}
	priv->mtk_m4u_id = args->args[0];
	list_add_tail(&priv->client, &head->client);

	m4udev = head->m4udev;
	mtk_mapping = m4udev->archdata.iommu;
	if (!mtk_mapping) {
		/* The MTK IOMMU supports a 4GB iova address space. */
		mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
						       0, 1ULL << 32);
		if (IS_ERR(mtk_mapping)) {
			ret = PTR_ERR(mtk_mapping);
			goto err_free_mem;
		}
		m4udev->archdata.iommu = mtk_mapping;
	}

	ret = arm_iommu_attach_device(dev, mtk_mapping);
	if (ret)
		goto err_release_mapping;

	return 0;

err_release_mapping:
	arm_iommu_release_mapping(mtk_mapping);
	m4udev->archdata.iommu = NULL;
err_free_mem:
	list_for_each_entry_safe(priv, next, &head->client, client)
		kfree(priv);
	kfree(head);
	dev->archdata.iommu = NULL;
	return ret;
}

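/*
 * Walk the client's "iommus" phandles and register each referenced
 * port. An illustrative client node (a sketch only; the real port
 * macros come from dt-bindings/memory/mt2701-larb-port.h) would carry
 * a single cell holding the global m4u port id:
 *
 *	iommus = <&iommu portid>;
 */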
static int mtk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct of_phandle_args iommu_spec;
	struct of_phandle_iterator it;
	int err;

	of_for_each_phandle(&it, err, dev->of_node, "iommus",
			"#iommu-cells", 0) {
		int count = of_phandle_iterator_args(&it, iommu_spec.args,
					MAX_PHANDLE_ARGS);
		iommu_spec.np = of_node_get(it.node);
		iommu_spec.args_count = count;

		mtk_iommu_create_mapping(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
	}

	if (!dev->archdata.iommu) /* Not an iommu client device */
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
	struct mtk_iommu_client_priv *head, *cur, *next;

	head = dev->archdata.iommu;
	if (!head)
		return;

	list_for_each_entry_safe(cur, next, &head->client, client) {
		list_del(&cur->client);
		kfree(cur);
	}
	kfree(head);
	dev->archdata.iommu = NULL;

	iommu_group_remove_device(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data;
	struct mtk_iommu_client_priv *priv;

	priv = dev->archdata.iommu;
	if (!priv)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	data = dev_get_drvdata(priv->m4udev);
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	}
	return data->m4u_group;
}

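/*
 * One-time hardware setup: enable the bus clock, program the control
 * and interrupt-enable registers, set the protect-buffer address used
 * on translation faults, turn on dynamic clock management and request
 * the fault interrupt.
 */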
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TABLE_WALK_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_PFH_DMA_FIFO_OVERFLOW |
		F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	/* Protect memory: HW will write here on a translation fault */
	writel_relaxed(data->protect_base,
			data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
	.device_group	= mtk_iommu_device_group,
	.pgsize_bitmap	= ~0UL << MT2701_IOMMU_PAGE_SHIFT,
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct component_match *match = NULL;
	struct of_phandle_args larb_spec;
	struct of_phandle_iterator it;
	void *protect;
	int larb_nr, ret, err;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = dev;

	/* Protect memory: HW will access it on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
			GFP_KERNEL | GFP_DMA);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

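	/*
	 * Collect every available larb from the "mediatek,larbs" phandle
	 * list, creating the larb platform device if it has not been
	 * probed yet, and build the component match used below.
	 */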
	larb_nr = 0;
	of_for_each_phandle(&it, err, dev->of_node,
			"mediatek,larbs", NULL, 0) {
		struct platform_device *plarbdev;
		int count = of_phandle_iterator_args(&it, larb_spec.args,
					MAX_PHANDLE_ARGS);

		if (count)
			continue;

		larb_spec.np = of_node_get(it.node);
		if (!of_device_is_available(larb_spec.np))
			continue;

		plarbdev = of_find_device_by_node(larb_spec.np);
		of_node_put(larb_spec.np);
		if (!plarbdev) {
			plarbdev = of_platform_device_create(
						larb_spec.np, NULL,
						platform_bus_type.dev_root);
			if (!plarbdev)
				return -EPROBE_DEFER;
		}

		data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
		component_match_add(dev, &match, compare_of, larb_spec.np);
		larb_nr++;
	}

	data->smi_imu.larb_nr = larb_nr;

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

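/*
 * Only the registers below are saved across suspend; the pagetable
 * base and protect-buffer address are reprogrammed from driver state
 * on resume.
 */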
static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
	writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = mtk_iommu_of_ids,
		.pm = &mtk_iommu_pm_ops,
	}
};

static int __init m4u_init(void)
{
	return platform_driver_register(&mtk_iommu_driver);
}

static void __exit m4u_exit(void)
{
	platform_driver_unregister(&mtk_iommu_driver);
}

subsys_initcall(m4u_init);
module_exit(m4u_exit);

MODULE_DESCRIPTION("IOMMU API for MTK architected m4u v1 implementations");
MODULE_AUTHOR("Honghui Zhang <[email protected]>");
MODULE_LICENSE("GPL v2");