/*
 * IOMMU API for SMMU in Tegra30
 *
 * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>

#include <asm/page.h>
#include <asm/cacheflush.h>

#include <mach/iomap.h>
#include <mach/smmu.h>

/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES	(SZ_4K)

#define SMMU_CONFIG				0x10
#define SMMU_CONFIG_DISABLE			0
#define SMMU_CONFIG_ENABLE			1

#define SMMU_TLB_CONFIG				0x14
#define SMMU_TLB_CONFIG_STATS__MASK		(1 << 31)
#define SMMU_TLB_CONFIG_STATS__ENABLE		(1 << 31)
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE	(1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE	0x10
#define SMMU_TLB_CONFIG_RESET_VAL		0x20000010

#define SMMU_PTC_CONFIG				0x18
#define SMMU_PTC_CONFIG_STATS__MASK		(1 << 31)
#define SMMU_PTC_CONFIG_STATS__ENABLE		(1 << 31)
#define SMMU_PTC_CONFIG_CACHE__ENABLE		(1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN	0x3f
#define SMMU_PTC_CONFIG_RESET_VAL		0x2000003f

#define SMMU_PTB_ASID				0x1c
#define SMMU_PTB_ASID_CURRENT_SHIFT		0

#define SMMU_PTB_DATA				0x20
#define SMMU_PTB_DATA_RESET_VAL			0
#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT	29
#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT	30
#define SMMU_PTB_DATA_ASID_READABLE_SHIFT	31

#define SMMU_TLB_FLUSH				0x30
#define SMMU_TLB_FLUSH_VA_MATCH_ALL		0
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION		2
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP		3
#define SMMU_TLB_FLUSH_ASID_SHIFT		29
#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE	0
#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE	1
#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT		31

#define SMMU_PTC_FLUSH				0x34
#define SMMU_PTC_FLUSH_TYPE_ALL			0
#define SMMU_PTC_FLUSH_TYPE_ADR			1
#define SMMU_PTC_FLUSH_ADR_SHIFT		4

#define SMMU_ASID_SECURITY			0x38

#define SMMU_STATS_TLB_HIT_COUNT		0x1f0
#define SMMU_STATS_TLB_MISS_COUNT		0x1f4
#define SMMU_STATS_PTC_HIT_COUNT		0x1f8
#define SMMU_STATS_PTC_MISS_COUNT		0x1fc

#define SMMU_TRANSLATION_ENABLE_0		0x228
#define SMMU_TRANSLATION_ENABLE_1		0x22c
#define SMMU_TRANSLATION_ENABLE_2		0x230

#define SMMU_AFI_ASID	0x238   /* PCIE */
#define SMMU_AVPC_ASID	0x23c   /* AVP */
#define SMMU_DC_ASID	0x240   /* Display controller */
#define SMMU_DCB_ASID	0x244   /* Display controller B */
#define SMMU_EPP_ASID	0x248   /* Encoder pre-processor */
#define SMMU_G2_ASID	0x24c   /* 2D engine */
#define SMMU_HC_ASID	0x250   /* Host1x */
#define SMMU_HDA_ASID	0x254   /* High-def audio */
#define SMMU_ISP_ASID	0x258   /* Image signal processor */
#define SMMU_MPE_ASID	0x264   /* MPEG encoder */
#define SMMU_NV_ASID	0x268   /* (3D) */
#define SMMU_NV2_ASID	0x26c   /* (3D) */
#define SMMU_PPCS_ASID	0x270   /* AHB */
#define SMMU_SATA_ASID	0x278   /* SATA */
#define SMMU_VDE_ASID	0x27c   /* Video decoder */
#define SMMU_VI_ASID	0x280   /* Video input */

#define SMMU_PDE_NEXT_SHIFT		28

/* AHB Arbiter Registers */
#define AHB_XBAR_CTRL				0xe0
#define AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE	1
#define AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT	17

#define SMMU_NUM_ASIDS				4
#define SMMU_TLB_FLUSH_VA_SECTION__MASK		0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT	12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK		0xffffc000
#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT		12 /* right shift */
#define SMMU_TLB_FLUSH_VA(iova, which)	\
	((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
		SMMU_TLB_FLUSH_VA_##which##__SHIFT) |	\
	SMMU_TLB_FLUSH_VA_MATCH_##which)
#define SMMU_PTB_ASID_CUR(n)	\
		((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH_disable		\
		(SMMU_TLB_FLUSH_ASID_MATCH_DISABLE <<	\
			SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE		\
		(SMMU_TLB_FLUSH_ASID_MATCH_ENABLE <<	\
			SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
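
/*
 * NOTE (annotation, derived from the macros above rather than from TRM
 * text): a TLB flush command is a single word written to SMMU_TLB_FLUSH.
 * SMMU_TLB_FLUSH_VA() folds iova bits into the low bits of the word plus
 * a 2-bit match type (ALL/SECTION/GROUP), and the target ASID sits at
 * bit 29 with bit 31 enabling ASID matching.  Worked example: flushing
 * the 4MB section of iova 0x40000000 in ASID 1 writes
 *   SMMU_TLB_FLUSH_VA(0x40000000, SECTION) |
 *   SMMU_TLB_FLUSH_ASID_MATCH__ENABLE | (1 << SMMU_TLB_FLUSH_ASID_SHIFT)
 *   = ((0x40000000 & 0xffc00000) >> 12) | 2 | (1 << 31) | (1 << 29)
 *   = 0xa0040002
 */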

#define SMMU_PAGE_SHIFT	12
#define SMMU_PAGE_SIZE	(1 << SMMU_PAGE_SHIFT)

#define SMMU_PDIR_COUNT	1024
#define SMMU_PDIR_SIZE	(sizeof(unsigned long) * SMMU_PDIR_COUNT)
#define SMMU_PTBL_COUNT	1024
#define SMMU_PTBL_SIZE	(sizeof(unsigned long) * SMMU_PTBL_COUNT)
#define SMMU_PDIR_SHIFT	12
#define SMMU_PDE_SHIFT	12
#define SMMU_PTE_SHIFT	12
#define SMMU_PFN_MASK	0x000fffff

#define SMMU_ADDR_TO_PFN(addr)	((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr)	((addr) >> 22)
#define SMMU_PDN_TO_ADDR(pdn)	((pdn) << 22)

#define _READABLE	(1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
#define _WRITABLE	(1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
#define _NONSECURE	(1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
#define _PDE_NEXT	(1 << SMMU_PDE_NEXT_SHIFT)
#define _MASK_ATTR	(_READABLE | _WRITABLE | _NONSECURE)

#define _PDIR_ATTR	(_READABLE | _WRITABLE | _NONSECURE)

#define _PDE_ATTR	(_READABLE | _WRITABLE | _NONSECURE)
#define _PDE_ATTR_N	(_PDE_ATTR | _PDE_NEXT)
#define _PDE_VACANT(pdn)	(((pdn) << 10) | _PDE_ATTR)

#define _PTE_ATTR	(_READABLE | _WRITABLE | _NONSECURE)
#define _PTE_VACANT(addr)	(((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)

#define SMMU_MK_PDIR(page, attr)	\
		((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
#define SMMU_MK_PDE(page, attr)		\
		(unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
#define SMMU_EX_PTBL_PAGE(pde)		\
		pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
#define SMMU_PFN_TO_PTE(pfn, attr)	(unsigned long)((pfn) | (attr))
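
/*
 * NOTE (annotation, inferred from the macros above): the SMMU uses a
 * two-level page table over a 32-bit iova space with 4KB pages.
 * iova[31:22] indexes the 1024-entry page directory (SMMU_ADDR_TO_PDN)
 * and iova[21:12] indexes a 1024-entry page table, so each PDE covers
 * 4MB and a full PDIR spans 4GB.  Worked example for iova 0x12345678:
 * pdn = 0x48, pte index = 0x345, page offset = 0x678.  "Vacant"
 * entries are not zero: they encode the identity translation of their
 * own slot (_PDE_VACANT/_PTE_VACANT), which is what makes the
 * comparisons against _PTE_VACANT(iova) in the map/unmap paths work.
 */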
175 | ||
#define SMMU_ASID_ENABLE(asid)	((asid) | (1 << 31))
#define SMMU_ASID_DISABLE	0
#define SMMU_ASID_ASID(n)	((n) & ~SMMU_ASID_ENABLE(0))

#define smmu_client_enable_hwgrp(c, m)	smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c)	smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
#define __smmu_client_disable_hwgrp(c)	__smmu_client_set_hwgrp(c, 0, 0)

#define HWGRP_INIT(client)	[HWGRP_##client] = SMMU_##client##_ASID

static const u32 smmu_hwgrp_asid_reg[] = {
	HWGRP_INIT(AFI),
	HWGRP_INIT(AVPC),
	HWGRP_INIT(DC),
	HWGRP_INIT(DCB),
	HWGRP_INIT(EPP),
	HWGRP_INIT(G2),
	HWGRP_INIT(HC),
	HWGRP_INIT(HDA),
	HWGRP_INIT(ISP),
	HWGRP_INIT(MPE),
	HWGRP_INIT(NV),
	HWGRP_INIT(NV2),
	HWGRP_INIT(PPCS),
	HWGRP_INIT(SATA),
	HWGRP_INIT(VDE),
	HWGRP_INIT(VI),
};
#define HWGRP_ASID_REG(x)	(smmu_hwgrp_asid_reg[x])
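
/*
 * NOTE (annotation): each hardware client group (HWGRP_*, defined in
 * <mach/smmu.h>) owns one of the per-client ASID registers listed
 * above; e.g. HWGRP_INIT(DC) expands to [HWGRP_DC] = SMMU_DC_ASID.
 * Writing SMMU_ASID_ENABLE(asid) to that register routes the client's
 * DMA through address space "asid"; clearing the enable mask detaches
 * the client from that address space again.
 */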

/*
 * Per client for address space
 */
struct smmu_client {
	struct device		*dev;
	struct list_head	list;
	struct smmu_as		*as;
	u32			hwgrp;
};

/*
 * Per address space
 */
struct smmu_as {
	struct smmu_device	*smmu;	/* back pointer to container */
	unsigned int		asid;
	spinlock_t		lock;	/* for pagetable */
	struct page		*pdir_page;
	unsigned long		pdir_attr;
	unsigned long		pde_attr;
	unsigned long		pte_attr;
	unsigned int		*pte_count;

	struct list_head	client;
	spinlock_t		client_lock; /* for client list */
};

/*
 * Per SMMU device - IOMMU device
 */
struct smmu_device {
	void __iomem	*regs, *regs_ahbarb;
	unsigned long	iovmm_base;	/* remappable base address */
	unsigned long	page_count;	/* total remappable size */
	spinlock_t	lock;
	char		*name;
	struct device	*dev;
	int		num_as;
	struct smmu_as	*as;		/* Run-time allocated array */
	struct page	*avp_vector_page;	/* dummy page shared by all AS's */

	/*
	 * Register image savers for suspend/resume
	 */
	unsigned long translation_enable_0;
	unsigned long translation_enable_1;
	unsigned long translation_enable_2;
	unsigned long asid_security;
};
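
/*
 * NOTE (annotation): the object model is one smmu_device per SoC,
 * holding a fixed array of SMMU_NUM_ASIDS address spaces (smmu_as,
 * one per iommu_domain), each of which keeps a list of attached
 * smmu_clients.  Lock ordering as used below is smmu->lock (register
 * file) nested inside as->lock (page tables); as->client_lock guards
 * only the client list.
 */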

static struct smmu_device *smmu_handle; /* unique for a system */

/*
 * SMMU/AHB register accessors
 */
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
	return readl(smmu->regs + offs);
}
static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
	writel(val, smmu->regs + offs);
}

static inline u32 ahb_read(struct smmu_device *smmu, size_t offs)
{
	return readl(smmu->regs_ahbarb + offs);
}
static inline void ahb_write(struct smmu_device *smmu, u32 val, size_t offs)
{
	writel(val, smmu->regs_ahbarb + offs);
}

#define VA_PAGE_TO_PA(va, page)	\
	(page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))

#define FLUSH_CPU_DCACHE(va, page, size)	\
	do {	\
		unsigned long _pa_ = VA_PAGE_TO_PA(va, page);		\
		__cpuc_flush_dcache_area((void *)(va), (size_t)(size));	\
		outer_flush_range(_pa_, _pa_+(size_t)(size));		\
	} while (0)
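
/*
 * NOTE (annotation): page directory/table entries are written by the
 * CPU but fetched by the SMMU walker from physical memory, so every
 * PDE/PTE update below is followed by FLUSH_CPU_DCACHE() to push the
 * line out of the inner dcache and, via outer_flush_range(), the outer
 * L2, before the page table cache (PTC) and TLB are invalidated.
 */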

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back barriers to ensure the APB/AHB bus
 * transaction is complete before initiating activity on the PPSB
 * block.
 */
#define FLUSH_SMMU_REGS(smmu)	smmu_read(smmu, SMMU_CONFIG)

#define smmu_client_hwgrp(c)	(u32)((c)->dev->platform_data)

static int __smmu_client_set_hwgrp(struct smmu_client *c,
				   unsigned long map, int on)
{
	int i;
	struct smmu_as *as = c->as;
	u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid);
	struct smmu_device *smmu = as->smmu;

	WARN_ON(!on && map);
	if (on && !map)
		return -EINVAL;
	if (!on)
		map = smmu_client_hwgrp(c);

	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		if (on) {
			if (WARN_ON(val & mask))
				goto err_hw_busy;
			val |= mask;
		} else {
			WARN_ON((val & mask) == mask);
			val &= ~mask;
		}
		smmu_write(smmu, val, offs);
	}
	FLUSH_SMMU_REGS(smmu);
	c->hwgrp = map;
	return 0;

err_hw_busy:
	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		val &= ~mask;
		smmu_write(smmu, val, offs);
	}
	return -EBUSY;
}
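
/*
 * NOTE (annotation): if any client group in "map" is already bound to
 * an ASID, the err_hw_busy path above undoes the partial programming
 * by clearing this ASID's enable mask from every register in "map"
 * before returning -EBUSY.
 */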

static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on)
{
	int err;
	unsigned long flags;
	struct smmu_as *as = c->as;
	struct smmu_device *smmu = as->smmu;

	spin_lock_irqsave(&smmu->lock, flags);
	err = __smmu_client_set_hwgrp(c, map, on);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return err;
}

/*
 * Flush all TLB entries and all PTC entries
 * Caller must lock smmu
 */
static void smmu_flush_regs(struct smmu_device *smmu, int enable)
{
	u32 val;

	smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH_disable;
	smmu_write(smmu, val, SMMU_TLB_FLUSH);

	if (enable)
		smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	FLUSH_SMMU_REGS(smmu);
}

static void smmu_setup_regs(struct smmu_device *smmu)
{
	int i;
	u32 val;

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];
		struct smmu_client *c;

		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		val = as->pdir_page ?
			SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
			SMMU_PTB_DATA_RESET_VAL;
		smmu_write(smmu, val, SMMU_PTB_DATA);

		list_for_each_entry(c, &as->client, list)
			__smmu_client_set_hwgrp(c, c->hwgrp, 1);
	}

	smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
	smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
	smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
	smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
	smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_TLB_CONFIG);
	smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_PTC_CONFIG);

	smmu_flush_regs(smmu, 1);

	val = ahb_read(smmu, AHB_XBAR_CTRL);
	val |= AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE <<
		AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT;
	ahb_write(smmu, val, AHB_XBAR_CTRL);
}

static void flush_ptc_and_tlb(struct smmu_device *smmu,
			      struct smmu_as *as, dma_addr_t iova,
			      unsigned long *pte, struct page *page, int is_pde)
{
	u32 val;
	unsigned long tlb_flush_va = is_pde
		? SMMU_TLB_FLUSH_VA(iova, SECTION)
		: SMMU_TLB_FLUSH_VA(iova, GROUP);

	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = tlb_flush_va |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(smmu);
}
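
/*
 * NOTE (annotation): the invalidate sequence is two-step.  First the
 * PTC (the walker's cache of page table entries) is flushed by the
 * physical address of the modified PDE/PTE (SMMU_PTC_FLUSH_TYPE_ADR),
 * then the TLB is flushed by iova, per-section for a PDE or per-group
 * for a PTE, restricted to this address space's ASID.  The read-back
 * in FLUSH_SMMU_REGS() between the two writes orders them on the bus.
 */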

static void free_ptbl(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);

		ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		__free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		pdir[pdn] = _PDE_VACANT(pdn);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
}

static void free_pdir(struct smmu_as *as)
{
	unsigned addr;
	int count;
	struct device *dev = as->smmu->dev;

	if (!as->pdir_page)
		return;

	addr = as->smmu->iovmm_base;
	count = as->smmu->page_count;
	while (count-- > 0) {
		free_ptbl(as, addr);
		addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
	}
	ClearPageReserved(as->pdir_page);
	__free_page(as->pdir_page);
	as->pdir_page = NULL;
	devm_kfree(dev, as->pte_count);
	as->pte_count = NULL;
}

/*
 * Maps PTBL for given iova and returns the PTE address
 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
 */
static unsigned long *locate_pte(struct smmu_as *as,
				 dma_addr_t iova, bool allocate,
				 struct page **ptbl_page_p,
				 unsigned int **count)
{
	unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = page_address(as->pdir_page);
	unsigned long *ptbl;

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		/* Mapped entry table already exists */
		*ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
		ptbl = page_address(*ptbl_page_p);
	} else if (!allocate) {
		return NULL;
	} else {
		int pn;
		unsigned long addr = SMMU_PDN_TO_ADDR(pdn);

		/* Vacant - allocate a new page table */
		dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);

		*ptbl_page_p = alloc_page(GFP_ATOMIC);
		if (!*ptbl_page_p) {
			dev_err(as->smmu->dev,
				"failed to allocate smmu_device page table\n");
			return NULL;
		}
		SetPageReserved(*ptbl_page_p);
		ptbl = (unsigned long *)page_address(*ptbl_page_p);
		for (pn = 0; pn < SMMU_PTBL_COUNT;
		     pn++, addr += SMMU_PAGE_SIZE) {
			ptbl[pn] = _PTE_VACANT(addr);
		}
		FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
		pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
					as->pde_attr | _PDE_NEXT);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
	*count = &as->pte_count[pdn];

	return &ptbl[ptn % SMMU_PTBL_COUNT];
}
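
/*
 * NOTE (annotation): as->pte_count[pdn] tracks how many live (non
 * vacant) PTEs each page table holds; __smmu_iommu_unmap() frees the
 * whole table via free_ptbl() once that count drops to zero.
 * GFP_ATOMIC is used here because locate_pte() runs under as->lock
 * with interrupts disabled.
 */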

#ifdef CONFIG_SMMU_SIG_DEBUG
static void put_signature(struct smmu_as *as,
			  dma_addr_t iova, unsigned long pfn)
{
	struct page *page;
	unsigned long *vaddr;

	page = pfn_to_page(pfn);
	vaddr = page_address(page);
	if (!vaddr)
		return;

	vaddr[0] = iova;
	vaddr[1] = pfn << PAGE_SHIFT;
	FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
}
#else
static inline void put_signature(struct smmu_as *as,
				 unsigned long addr, unsigned long pfn)
{
}
#endif

/*
 * Caller must lock/unlock as
 */
static int alloc_pdir(struct smmu_as *as)
{
	unsigned long *pdir;
	int pdn;
	u32 val;
	struct smmu_device *smmu = as->smmu;

	if (as->pdir_page)
		return 0;

	as->pte_count = devm_kzalloc(smmu->dev,
		     sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
	if (!as->pte_count) {
		dev_err(smmu->dev,
			"failed to allocate smmu_device PTE counters\n");
		return -ENOMEM;
	}
	as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!as->pdir_page) {
		dev_err(smmu->dev,
			"failed to allocate smmu_device page directory\n");
		devm_kfree(smmu->dev, as->pte_count);
		as->pte_count = NULL;
		return -ENOMEM;
	}
	SetPageReserved(as->pdir_page);
	pdir = page_address(as->pdir_page);

	for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
		pdir[pdn] = _PDE_VACANT(pdn);
	FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);

	return 0;
}

static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long *pte;
	struct page *page;
	unsigned int *count;

	pte = locate_pte(as, iova, false, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (WARN_ON(*pte == _PTE_VACANT(iova)))
		return;

	*pte = _PTE_VACANT(iova);
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
	if (!--(*count)) {
		free_ptbl(as, iova);
		smmu_flush_regs(as->smmu, 0);
	}
}

static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
				 unsigned long pfn)
{
	struct smmu_device *smmu = as->smmu;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;

	pte = locate_pte(as, iova, true, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (*pte == _PTE_VACANT(iova))
		(*count)++;
	*pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
	if (unlikely((*pte == _PTE_VACANT(iova))))
		(*count)--;
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
	put_signature(as, iova, pfn);
}
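
/*
 * NOTE (annotation): the count++/count-- pair above handles the corner
 * case where the new PTE value happens to equal the "vacant" identity
 * pattern for this iova (i.e. an identity mapping with the default
 * attributes): such a PTE is indistinguishable from an unmapped slot,
 * so it must not be counted as live.
 */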

static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct smmu_as *as = domain->priv;
	unsigned long pfn = __phys_to_pfn(pa);
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);

	if (!pfn_valid(pfn))
		return -ENOMEM;

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_map_pfn(as, iova, pfn);
	spin_unlock_irqrestore(&as->lock, flags);
	return 0;
}

static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct smmu_as *as = domain->priv;
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova);

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_unmap(as, iova);
	spin_unlock_irqrestore(&as->lock, flags);
	return SMMU_PAGE_SIZE;
}
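
/*
 * NOTE (annotation): pgsize_bitmap is SZ_4K only, so the IOMMU core
 * splits every iommu_map()/iommu_unmap() request into 4KB calls;
 * "bytes" is therefore always SMMU_PAGE_SIZE here, and unmap reports
 * exactly one page unmapped.
 */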

static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long iova)
{
	struct smmu_as *as = domain->priv;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;
	unsigned long pfn;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	pte = locate_pte(as, iova, true, &page, &count);
	if (WARN_ON(!pte)) {
		spin_unlock_irqrestore(&as->lock, flags);
		return 0;
	}
	pfn = *pte & SMMU_PFN_MASK;
	WARN_ON(!pfn_valid(pfn));
	dev_dbg(as->smmu->dev,
		"iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);

	spin_unlock_irqrestore(&as->lock, flags);
	return PFN_PHYS(pfn);
}

static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static int smmu_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *client, *c;
	u32 map;
	int err;

	client = devm_kzalloc(smmu->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;
	client->as = as;
	map = (unsigned long)dev->platform_data;
	if (!map) {
		err = -EINVAL;
		goto err_hwgrp;
	}

	err = smmu_client_enable_hwgrp(client, map);
	if (err)
		goto err_hwgrp;

	spin_lock(&as->client_lock);
	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			dev_err(smmu->dev,
				"%s is already attached\n", dev_name(c->dev));
			err = -EINVAL;
			goto err_client;
		}
	}
	list_add(&client->list, &as->client);
	spin_unlock(&as->client_lock);

	/*
	 * Reserve "page zero" for AVP vectors using a common dummy
	 * page.
	 */
	if (map & HWG_AVPC) {
		struct page *page;

		page = as->smmu->avp_vector_page;
		__smmu_iommu_map_pfn(as, 0, page_to_pfn(page));

		pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
	}

	dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
	return 0;

err_client:
	smmu_client_disable_hwgrp(client);
	spin_unlock(&as->client_lock);
err_hwgrp:
	devm_kfree(smmu->dev, client);
	return err;
}

static void smmu_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *c;

	spin_lock(&as->client_lock);

	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			smmu_client_disable_hwgrp(c);
			list_del(&c->list);
			c->as = NULL;
			dev_dbg(smmu->dev,
				"%s is detached\n", dev_name(c->dev));
			devm_kfree(smmu->dev, c);
			goto out;
		}
	}
	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
	spin_unlock(&as->client_lock);
}

static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
	int i;
	unsigned long flags;
	struct smmu_as *as;
	struct smmu_device *smmu = smmu_handle;

	/* Look for a free AS with lock held */
	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *tmp = &smmu->as[i];

		spin_lock_irqsave(&tmp->lock, flags);
		if (!tmp->pdir_page) {
			as = tmp;
			goto found;
		}
		spin_unlock_irqrestore(&tmp->lock, flags);
	}
	dev_err(smmu->dev, "no free AS\n");
	return -ENODEV;

found:
	if (alloc_pdir(as) < 0)
		goto err_alloc_pdir;

	spin_lock(&smmu->lock);

	/* Update PDIR register */
	smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
	smmu_write(smmu,
		   SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
	FLUSH_SMMU_REGS(smmu);

	spin_unlock(&smmu->lock);

	spin_unlock_irqrestore(&as->lock, flags);
	domain->priv = as;

	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
	return 0;

err_alloc_pdir:
	spin_unlock_irqrestore(&as->lock, flags);
	return -ENODEV;
}

static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	if (as->pdir_page) {
		spin_lock(&smmu->lock);
		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
		FLUSH_SMMU_REGS(smmu);
		spin_unlock(&smmu->lock);

		free_pdir(as);
	}

	/* Detach clients one at a time; detach_dev frees each entry */
	while (!list_empty(&as->client)) {
		struct smmu_client *c;

		c = list_first_entry(&as->client, struct smmu_client, list);
		smmu_iommu_detach_dev(domain, c->dev);
	}

	spin_unlock_irqrestore(&as->lock, flags);

	domain->priv = NULL;
	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
}

static struct iommu_ops smmu_iommu_ops = {
	.domain_init	= smmu_iommu_domain_init,
	.domain_destroy	= smmu_iommu_domain_destroy,
	.attach_dev	= smmu_iommu_attach_dev,
	.detach_dev	= smmu_iommu_detach_dev,
	.map		= smmu_iommu_map,
	.unmap		= smmu_iommu_unmap,
	.iova_to_phys	= smmu_iommu_iova_to_phys,
	.domain_has_cap	= smmu_iommu_domain_has_cap,
	.pgsize_bitmap	= SMMU_IOMMU_PGSIZES,
};
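
/*
 * NOTE (annotation): with these ops registered on the platform bus, a
 * consumer drives the SMMU through the generic IOMMU API.  A minimal
 * sketch, assuming "dev" is a hypothetical platform device whose
 * platform_data carries its HWG_* bitmap:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, iova, paddr, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 *
 * domain_init() then claims a free ASID, attach_dev() points the
 * device's hardware groups at it, and map() fills in the PTE.
 */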

static int tegra_smmu_suspend(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);

	smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0);
	smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1);
	smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2);
	smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY);
	return 0;
}

static int tegra_smmu_resume(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&smmu->lock, flags);
	smmu_setup_regs(smmu);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return 0;
}

static int tegra_smmu_probe(struct platform_device *pdev)
{
	struct smmu_device *smmu;
	struct resource *regs, *regs2, *window;
	struct device *dev = &pdev->dev;
	int i, err = 0;

	if (smmu_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	window = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!regs || !regs2 || !window) {
		dev_err(dev, "No SMMU resources\n");
		return -ENODEV;
	}

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate smmu_device\n");
		return -ENOMEM;
	}

	smmu->dev = dev;
	smmu->num_as = SMMU_NUM_ASIDS;
	smmu->iovmm_base = (unsigned long)window->start;
	smmu->page_count = resource_size(window) >> SMMU_PAGE_SHIFT;
	smmu->regs = devm_ioremap(dev, regs->start, resource_size(regs));
	smmu->regs_ahbarb = devm_ioremap(dev, regs2->start,
					 resource_size(regs2));
	if (!smmu->regs || !smmu->regs_ahbarb) {
		dev_err(dev, "failed to remap SMMU registers\n");
		err = -ENXIO;
		goto fail;
	}

	smmu->translation_enable_0 = ~0;
	smmu->translation_enable_1 = ~0;
	smmu->translation_enable_2 = ~0;
	smmu->asid_security = 0;

	smmu->as = devm_kzalloc(dev,
			sizeof(smmu->as[0]) * smmu->num_as, GFP_KERNEL);
	if (!smmu->as) {
		dev_err(dev, "failed to allocate smmu_as\n");
		err = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];

		as->smmu = smmu;
		as->asid = i;
		as->pdir_attr = _PDIR_ATTR;
		as->pde_attr = _PDE_ATTR;
		as->pte_attr = _PTE_ATTR;

		spin_lock_init(&as->lock);
		spin_lock_init(&as->client_lock);
		INIT_LIST_HEAD(&as->client);
	}
	spin_lock_init(&smmu->lock);
	smmu_setup_regs(smmu);
	platform_set_drvdata(pdev, smmu);

	smmu->avp_vector_page = alloc_page(GFP_KERNEL);
	if (!smmu->avp_vector_page) {
		err = -ENOMEM;
		goto fail;
	}

	smmu_handle = smmu;
	return 0;

fail:
	if (smmu->avp_vector_page)
		__free_page(smmu->avp_vector_page);
	if (smmu->regs)
		devm_iounmap(dev, smmu->regs);
	if (smmu->regs_ahbarb)
		devm_iounmap(dev, smmu->regs_ahbarb);
	if (smmu->as) {
		for (i = 0; i < smmu->num_as; i++) {
			if (smmu->as[i].pdir_page) {
				ClearPageReserved(smmu->as[i].pdir_page);
				__free_page(smmu->as[i].pdir_page);
			}
		}
		devm_kfree(dev, smmu->as);
	}
	devm_kfree(dev, smmu);
	return err;
}

static int tegra_smmu_remove(struct platform_device *pdev)
{
	struct smmu_device *smmu = platform_get_drvdata(pdev);
	struct device *dev = smmu->dev;

	smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
	platform_set_drvdata(pdev, NULL);
	if (smmu->as) {
		int i;

		for (i = 0; i < smmu->num_as; i++)
			free_pdir(&smmu->as[i]);
		devm_kfree(dev, smmu->as);
	}
	if (smmu->avp_vector_page)
		__free_page(smmu->avp_vector_page);
	if (smmu->regs)
		devm_iounmap(dev, smmu->regs);
	if (smmu->regs_ahbarb)
		devm_iounmap(dev, smmu->regs_ahbarb);
	devm_kfree(dev, smmu);
	smmu_handle = NULL;
	return 0;
}

const struct dev_pm_ops tegra_smmu_pm_ops = {
	.suspend	= tegra_smmu_suspend,
	.resume		= tegra_smmu_resume,
};

static struct platform_driver tegra_smmu_driver = {
	.probe		= tegra_smmu_probe,
	.remove		= tegra_smmu_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-smmu",
		.pm	= &tegra_smmu_pm_ops,
	},
};

static int __init tegra_smmu_init(void)
{
	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
	return platform_driver_register(&tegra_smmu_driver);
}

static void __exit tegra_smmu_exit(void)
{
	platform_driver_unregister(&tegra_smmu_driver);
}

subsys_initcall(tegra_smmu_init);
module_exit(tegra_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <[email protected]>");
MODULE_LICENSE("GPL v2");