/*
 * IOMMU API for SMMU in Tegra30
 *
 * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_iommu.h>

#include <asm/page.h>
#include <asm/cacheflush.h>

#include <mach/iomap.h>
#include <mach/smmu.h>
#include <mach/tegra-ahb.h>

/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES	(SZ_4K)

#define SMMU_CONFIG				0x10
#define SMMU_CONFIG_DISABLE			0
#define SMMU_CONFIG_ENABLE			1

#define SMMU_TLB_CONFIG				0x14
#define SMMU_TLB_CONFIG_STATS__MASK		(1 << 31)
#define SMMU_TLB_CONFIG_STATS__ENABLE		(1 << 31)
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE	(1 << 29)
#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE	0x10
#define SMMU_TLB_CONFIG_RESET_VAL		0x20000010

#define SMMU_PTC_CONFIG				0x18
#define SMMU_PTC_CONFIG_STATS__MASK		(1 << 31)
#define SMMU_PTC_CONFIG_STATS__ENABLE		(1 << 31)
#define SMMU_PTC_CONFIG_CACHE__ENABLE		(1 << 29)
#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN	0x3f
#define SMMU_PTC_CONFIG_RESET_VAL		0x2000003f

#define SMMU_PTB_ASID				0x1c
#define SMMU_PTB_ASID_CURRENT_SHIFT		0

#define SMMU_PTB_DATA				0x20
#define SMMU_PTB_DATA_RESET_VAL			0
#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT	29
#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT	30
#define SMMU_PTB_DATA_ASID_READABLE_SHIFT	31

#define SMMU_TLB_FLUSH				0x30
#define SMMU_TLB_FLUSH_VA_MATCH_ALL		0
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION		2
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP		3
#define SMMU_TLB_FLUSH_ASID_SHIFT		29
#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE	0
#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE	1
#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT		31

#define SMMU_PTC_FLUSH				0x34
#define SMMU_PTC_FLUSH_TYPE_ALL			0
#define SMMU_PTC_FLUSH_TYPE_ADR			1
#define SMMU_PTC_FLUSH_ADR_SHIFT		4

#define SMMU_ASID_SECURITY			0x38

#define SMMU_STATS_TLB_HIT_COUNT		0x1f0
#define SMMU_STATS_TLB_MISS_COUNT		0x1f4
#define SMMU_STATS_PTC_HIT_COUNT		0x1f8
#define SMMU_STATS_PTC_MISS_COUNT		0x1fc

#define SMMU_TRANSLATION_ENABLE_0		0x228
#define SMMU_TRANSLATION_ENABLE_1		0x22c
#define SMMU_TRANSLATION_ENABLE_2		0x230

#define SMMU_AFI_ASID	0x238   /* PCIE */
#define SMMU_AVPC_ASID	0x23c   /* AVP */
#define SMMU_DC_ASID	0x240   /* Display controller */
#define SMMU_DCB_ASID	0x244   /* Display controller B */
#define SMMU_EPP_ASID	0x248   /* Encoder pre-processor */
#define SMMU_G2_ASID	0x24c   /* 2D engine */
#define SMMU_HC_ASID	0x250   /* Host1x */
#define SMMU_HDA_ASID	0x254   /* High-def audio */
#define SMMU_ISP_ASID	0x258   /* Image signal processor */
#define SMMU_MPE_ASID	0x264   /* MPEG encoder */
#define SMMU_NV_ASID	0x268   /* (3D) */
#define SMMU_NV2_ASID	0x26c   /* (3D) */
#define SMMU_PPCS_ASID	0x270   /* AHB */
#define SMMU_SATA_ASID	0x278   /* SATA */
#define SMMU_VDE_ASID	0x27c   /* Video decoder */
#define SMMU_VI_ASID	0x280   /* Video input */

#define SMMU_PDE_NEXT_SHIFT	28

#define SMMU_TLB_FLUSH_VA_SECTION__MASK		0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT	12	/* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK		0xffffc000
#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT		12	/* right shift */
#define SMMU_TLB_FLUSH_VA(iova, which)	\
	((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \
		SMMU_TLB_FLUSH_VA_##which##__SHIFT) |	\
	SMMU_TLB_FLUSH_VA_MATCH_##which)
#define SMMU_PTB_ASID_CUR(n)	\
		((n) << SMMU_PTB_ASID_CURRENT_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__DISABLE	\
	(SMMU_TLB_FLUSH_ASID_MATCH_DISABLE <<	\
		SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE	\
	(SMMU_TLB_FLUSH_ASID_MATCH_ENABLE <<	\
		SMMU_TLB_FLUSH_ASID_MATCH_SHIFT)
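
/*
 * SMMU_TLB_FLUSH_VA(iova, SECTION/GROUP) above builds the VA field of a
 * TLB flush command: the IOVA is masked down to its 4MB section (or
 * 16KB group), shifted into place and tagged with the corresponding
 * VA_MATCH type in the low bits.
 */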

#define SMMU_PAGE_SHIFT	12
#define SMMU_PAGE_SIZE	(1 << SMMU_PAGE_SHIFT)
#define SMMU_PAGE_MASK	((1 << SMMU_PAGE_SHIFT) - 1)

#define SMMU_PDIR_COUNT	1024
#define SMMU_PDIR_SIZE	(sizeof(unsigned long) * SMMU_PDIR_COUNT)
#define SMMU_PTBL_COUNT	1024
#define SMMU_PTBL_SIZE	(sizeof(unsigned long) * SMMU_PTBL_COUNT)
#define SMMU_PDIR_SHIFT	12
#define SMMU_PDE_SHIFT	12
#define SMMU_PTE_SHIFT	12
#define SMMU_PFN_MASK	0x000fffff

#define SMMU_ADDR_TO_PFN(addr)	((addr) >> 12)
#define SMMU_ADDR_TO_PDN(addr)	((addr) >> 22)
#define SMMU_PDN_TO_ADDR(pdn)	((pdn) << 22)

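/*
 * Two-level page tables: a 1024-entry page directory, each PDE covering
 * a 4MB section, points to 1024-entry page tables of 4KB pages, so one
 * ASID spans the full 32-bit IOVA space (10 + 10 + 12 bits).
 */
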
#define _READABLE	(1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT)
#define _WRITABLE	(1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT)
#define _NONSECURE	(1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT)
#define _PDE_NEXT	(1 << SMMU_PDE_NEXT_SHIFT)
#define _MASK_ATTR	(_READABLE | _WRITABLE | _NONSECURE)

#define _PDIR_ATTR	(_READABLE | _WRITABLE | _NONSECURE)

#define _PDE_ATTR	(_READABLE | _WRITABLE | _NONSECURE)
#define _PDE_ATTR_N	(_PDE_ATTR | _PDE_NEXT)
#define _PDE_VACANT(pdn)	(((pdn) << 10) | _PDE_ATTR)

#define _PTE_ATTR	(_READABLE | _WRITABLE | _NONSECURE)
#define _PTE_VACANT(addr)	(((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR)

#define SMMU_MK_PDIR(page, attr)	\
	((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
#define SMMU_MK_PDE(page, attr)		\
	(unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
#define SMMU_EX_PTBL_PAGE(pde)		\
	pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK)
#define SMMU_PFN_TO_PTE(pfn, attr)	(unsigned long)((pfn) | (attr))

#define SMMU_ASID_ENABLE(asid)	((asid) | (1 << 31))
#define SMMU_ASID_DISABLE	0
#define SMMU_ASID_ASID(n)	((n) & ~SMMU_ASID_ENABLE(0))

#define NUM_SMMU_REG_BANKS	3

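/*
 * Each client hardware block has its own ASID register (see the table
 * below); enabling a client ORs SMMU_ASID_ENABLE(asid) into every
 * register selected by its hwgrp bitmap.
 */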
#define smmu_client_enable_hwgrp(c, m)	smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c)	smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
#define __smmu_client_disable_hwgrp(c)	__smmu_client_set_hwgrp(c, 0, 0)

#define HWGRP_INIT(client)	[HWGRP_##client] = SMMU_##client##_ASID

static const u32 smmu_hwgrp_asid_reg[] = {
	HWGRP_INIT(AFI),
	HWGRP_INIT(AVPC),
	HWGRP_INIT(DC),
	HWGRP_INIT(DCB),
	HWGRP_INIT(EPP),
	HWGRP_INIT(G2),
	HWGRP_INIT(HC),
	HWGRP_INIT(HDA),
	HWGRP_INIT(ISP),
	HWGRP_INIT(MPE),
	HWGRP_INIT(NV),
	HWGRP_INIT(NV2),
	HWGRP_INIT(PPCS),
	HWGRP_INIT(SATA),
	HWGRP_INIT(VDE),
	HWGRP_INIT(VI),
};
#define HWGRP_ASID_REG(x)	(smmu_hwgrp_asid_reg[x])

/*
 * Per client for address space
 */
struct smmu_client {
	struct device		*dev;
	struct list_head	list;
	struct smmu_as		*as;
	u32			hwgrp;
};

/*
 * Per address space
 */
struct smmu_as {
	struct smmu_device	*smmu;	/* back pointer to container */
	unsigned int		asid;
	spinlock_t		lock;	/* for pagetable */
	struct page		*pdir_page;
	unsigned long		pdir_attr;
	unsigned long		pde_attr;
	unsigned long		pte_attr;
	unsigned int		*pte_count;

	struct list_head	client;
	spinlock_t		client_lock; /* for client list */
};

/*
 * Per SMMU device - IOMMU device
 */
struct smmu_device {
	void __iomem	*regs[NUM_SMMU_REG_BANKS];
	unsigned long	iovmm_base;	/* remappable base address */
	unsigned long	page_count;	/* total remappable size */
	spinlock_t	lock;
	char		*name;
	struct device	*dev;
	struct page	*avp_vector_page;	/* dummy page shared by all AS's */

	/*
	 * Register image savers for suspend/resume
	 */
	unsigned long	translation_enable_0;
	unsigned long	translation_enable_1;
	unsigned long	translation_enable_2;
	unsigned long	asid_security;

	struct device_node *ahb;

	int		num_as;
	struct smmu_as	as[0];		/* Run-time allocated array */
};

static struct smmu_device *smmu_handle; /* unique for a system */

/*
 * SMMU register accessors
 */
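/*
 * The SMMU registers live in three separate apertures (0x10-0x3b,
 * 0x1f0-0x1ff and 0x228-0x283); the accessors below pick the bank from
 * the offset and BUG() on anything that falls in between.
 */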
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
	BUG_ON(offs < 0x10);
	if (offs < 0x3c)
		return readl(smmu->regs[0] + offs - 0x10);
	BUG_ON(offs < 0x1f0);
	if (offs < 0x200)
		return readl(smmu->regs[1] + offs - 0x1f0);
	BUG_ON(offs < 0x228);
	if (offs < 0x284)
		return readl(smmu->regs[2] + offs - 0x228);
	BUG();
}

static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
	BUG_ON(offs < 0x10);
	if (offs < 0x3c) {
		writel(val, smmu->regs[0] + offs - 0x10);
		return;
	}
	BUG_ON(offs < 0x1f0);
	if (offs < 0x200) {
		writel(val, smmu->regs[1] + offs - 0x1f0);
		return;
	}
	BUG_ON(offs < 0x228);
	if (offs < 0x284) {
		writel(val, smmu->regs[2] + offs - 0x228);
		return;
	}
	BUG();
}

#define VA_PAGE_TO_PA(va, page)	\
	(page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK))

#define FLUSH_CPU_DCACHE(va, page, size)	\
	do {	\
		unsigned long _pa_ = VA_PAGE_TO_PA(va, page);		\
		__cpuc_flush_dcache_area((void *)(va), (size_t)(size));	\
		outer_flush_range(_pa_, _pa_+(size_t)(size));		\
	} while (0)
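/*
 * The SMMU fetches page directories/tables from DRAM and is not
 * coherent with the CPU caches, so every PDE/PTE update is pushed out
 * through both cache levels before the PTC/TLB are flushed.
 */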

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back barriers to ensure the APB/AHB bus
 * transaction is complete before initiating activity on the PPSB
 * block.
 */
#define FLUSH_SMMU_REGS(smmu)	smmu_read(smmu, SMMU_CONFIG)

#define smmu_client_hwgrp(c)	(u32)((c)->dev->platform_data)

static int __smmu_client_set_hwgrp(struct smmu_client *c,
				   unsigned long map, int on)
{
	int i;
	struct smmu_as *as = c->as;
	u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid);
	struct smmu_device *smmu = as->smmu;

	WARN_ON(!on && map);
	if (on && !map)
		return -EINVAL;
	if (!on)
		map = smmu_client_hwgrp(c);

	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		if (on) {
			if (WARN_ON(val & mask))
				goto err_hw_busy;
			val |= mask;
		} else {
			WARN_ON((val & mask) == mask);
			val &= ~mask;
		}
		smmu_write(smmu, val, offs);
	}
	FLUSH_SMMU_REGS(smmu);
	c->hwgrp = map;
	return 0;

err_hw_busy:
	for_each_set_bit(i, &map, HWGRP_COUNT) {
		offs = HWGRP_ASID_REG(i);
		val = smmu_read(smmu, offs);
		val &= ~mask;
		smmu_write(smmu, val, offs);
	}
	return -EBUSY;
}

static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on)
{
	u32 val;
	unsigned long flags;
	struct smmu_as *as = c->as;
	struct smmu_device *smmu = as->smmu;

	spin_lock_irqsave(&smmu->lock, flags);
	val = __smmu_client_set_hwgrp(c, map, on);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return val;
}

/*
 * Flush all TLB entries and all PTC entries
 * Caller must lock smmu
 */
static void smmu_flush_regs(struct smmu_device *smmu, int enable)
{
	u32 val;

	smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH__DISABLE;
	smmu_write(smmu, val, SMMU_TLB_FLUSH);

	if (enable)
		smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	FLUSH_SMMU_REGS(smmu);
}

static int smmu_setup_regs(struct smmu_device *smmu)
{
	int i;
	u32 val;

	for (i = 0; i < smmu->num_as; i++) {
		struct smmu_as *as = &smmu->as[i];
		struct smmu_client *c;

		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		val = as->pdir_page ?
			SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) :
			SMMU_PTB_DATA_RESET_VAL;
		smmu_write(smmu, val, SMMU_PTB_DATA);

		list_for_each_entry(c, &as->client, list)
			__smmu_client_set_hwgrp(c, c->hwgrp, 1);
	}

	smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0);
	smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
	smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
	smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
	smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_TLB_CONFIG);
	smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_PTC_CONFIG);

	smmu_flush_regs(smmu, 1);

	return tegra_ahb_enable_smmu(smmu->ahb);
}

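/*
 * Invalidate the cached copy of a single PDE/PTE in the page table
 * cache (the PTC is flushed by the entry's physical address) and then
 * the TLB entries derived from it, by section or group VA within this
 * ASID.
 */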
static void flush_ptc_and_tlb(struct smmu_device *smmu,
			      struct smmu_as *as, dma_addr_t iova,
			      unsigned long *pte, struct page *page, int is_pde)
{
	u32 val;
	unsigned long tlb_flush_va = is_pde
		? SMMU_TLB_FLUSH_VA(iova, SECTION)
		: SMMU_TLB_FLUSH_VA(iova, GROUP);

	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(smmu);
	val = tlb_flush_va |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(smmu);
}

static void free_ptbl(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = (unsigned long *)page_address(as->pdir_page);

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn);

		ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		__free_page(SMMU_EX_PTBL_PAGE(pdir[pdn]));
		pdir[pdn] = _PDE_VACANT(pdn);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
}

static void free_pdir(struct smmu_as *as)
{
	unsigned addr;
	int count;
	struct device *dev = as->smmu->dev;

	if (!as->pdir_page)
		return;

	addr = as->smmu->iovmm_base;
	count = as->smmu->page_count;
	while (count-- > 0) {
		free_ptbl(as, addr);
		addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT;
	}
	ClearPageReserved(as->pdir_page);
	__free_page(as->pdir_page);
	as->pdir_page = NULL;
	devm_kfree(dev, as->pte_count);
	as->pte_count = NULL;
}

/*
 * Maps PTBL for given iova and returns the PTE address
 * Caller must unmap the mapped PTBL returned in *ptbl_page_p
 */
static unsigned long *locate_pte(struct smmu_as *as,
				 dma_addr_t iova, bool allocate,
				 struct page **ptbl_page_p,
				 unsigned int **count)
{
	unsigned long ptn = SMMU_ADDR_TO_PFN(iova);
	unsigned long pdn = SMMU_ADDR_TO_PDN(iova);
	unsigned long *pdir = page_address(as->pdir_page);
	unsigned long *ptbl;

	if (pdir[pdn] != _PDE_VACANT(pdn)) {
		/* Mapped entry table already exists */
		*ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]);
		ptbl = page_address(*ptbl_page_p);
	} else if (!allocate) {
		return NULL;
	} else {
		int pn;
		unsigned long addr = SMMU_PDN_TO_ADDR(pdn);

		/* Vacant - allocate a new page table */
		dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn);

		*ptbl_page_p = alloc_page(GFP_ATOMIC);
		if (!*ptbl_page_p) {
			dev_err(as->smmu->dev,
				"failed to allocate smmu_device page table\n");
			return NULL;
		}
		SetPageReserved(*ptbl_page_p);
		ptbl = (unsigned long *)page_address(*ptbl_page_p);
		for (pn = 0; pn < SMMU_PTBL_COUNT;
		     pn++, addr += SMMU_PAGE_SIZE) {
			ptbl[pn] = _PTE_VACANT(addr);
		}
		FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE);
		pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p,
					as->pde_attr | _PDE_NEXT);
		FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]);
		flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn],
				  as->pdir_page, 1);
	}
	*count = &as->pte_count[pdn];

	return &ptbl[ptn % SMMU_PTBL_COUNT];
}

#ifdef CONFIG_SMMU_SIG_DEBUG
static void put_signature(struct smmu_as *as,
			  dma_addr_t iova, unsigned long pfn)
{
	struct page *page;
	unsigned long *vaddr;

	page = pfn_to_page(pfn);
	vaddr = page_address(page);
	if (!vaddr)
		return;

	vaddr[0] = iova;
	vaddr[1] = pfn << PAGE_SHIFT;
	FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2);
}
#else
static inline void put_signature(struct smmu_as *as,
				 dma_addr_t iova, unsigned long pfn)
{
}
#endif

/*
 * Caller must not hold as->lock
 */
static int alloc_pdir(struct smmu_as *as)
{
	unsigned long *pdir, flags;
	int pdn, err = 0;
	u32 val;
	struct smmu_device *smmu = as->smmu;
	struct page *page;
	unsigned int *cnt;

	/*
	 * do the allocation, then grab as->lock
	 */
	cnt = devm_kzalloc(smmu->dev,
			   sizeof(cnt[0]) * SMMU_PDIR_COUNT,
			   GFP_KERNEL);
	page = alloc_page(GFP_KERNEL | __GFP_DMA);

	spin_lock_irqsave(&as->lock, flags);

	if (as->pdir_page) {
		/* We raced, free the redundant */
		err = -EAGAIN;
		goto err_out;
	}

	if (!page || !cnt) {
		dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
		err = -ENOMEM;
		goto err_out;
	}

	as->pdir_page = page;
	as->pte_count = cnt;

	SetPageReserved(as->pdir_page);
	pdir = page_address(as->pdir_page);

	for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++)
		pdir[pdn] = _PDE_VACANT(pdn);
	FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE);
	val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page);
	smmu_write(smmu, val, SMMU_PTC_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);
	val = SMMU_TLB_FLUSH_VA_MATCH_ALL |
		SMMU_TLB_FLUSH_ASID_MATCH__ENABLE |
		(as->asid << SMMU_TLB_FLUSH_ASID_SHIFT);
	smmu_write(smmu, val, SMMU_TLB_FLUSH);
	FLUSH_SMMU_REGS(as->smmu);

	spin_unlock_irqrestore(&as->lock, flags);

	return 0;

err_out:
	spin_unlock_irqrestore(&as->lock, flags);

	devm_kfree(smmu->dev, cnt);
	if (page)
		__free_page(page);
	return err;
}

static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
{
	unsigned long *pte;
	struct page *page;
	unsigned int *count;

	pte = locate_pte(as, iova, false, &page, &count);
	if (WARN_ON(!pte))
		return;

	if (WARN_ON(*pte == _PTE_VACANT(iova)))
		return;

	*pte = _PTE_VACANT(iova);
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0);
	if (!--(*count)) {
		free_ptbl(as, iova);
		smmu_flush_regs(as->smmu, 0);
	}
}

static void __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova,
				 unsigned long pfn)
{
	struct smmu_device *smmu = as->smmu;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;

	pte = locate_pte(as, iova, true, &page, &count);
	if (WARN_ON(!pte))
		return;

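	/*
	 * pte_count tracks the number of live mappings in each page
	 * table; only count a PTE that actually changes state, and back
	 * the increment out if the new value happens to collide with
	 * the vacant pattern.
	 */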
	if (*pte == _PTE_VACANT(iova))
		(*count)++;
	*pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
	if (unlikely((*pte == _PTE_VACANT(iova))))
		(*count)--;
	FLUSH_CPU_DCACHE(pte, page, sizeof(*pte));
	flush_ptc_and_tlb(smmu, as, iova, pte, page, 0);
	put_signature(as, iova, pfn);
}

static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct smmu_as *as = domain->priv;
	unsigned long pfn = __phys_to_pfn(pa);
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);

	if (!pfn_valid(pfn))
		return -ENOMEM;

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_map_pfn(as, iova, pfn);
	spin_unlock_irqrestore(&as->lock, flags);
	return 0;
}

static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct smmu_as *as = domain->priv;
	unsigned long flags;

	dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova);

	spin_lock_irqsave(&as->lock, flags);
	__smmu_iommu_unmap(as, iova);
	spin_unlock_irqrestore(&as->lock, flags);
	return SMMU_PAGE_SIZE;
}

static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long iova)
{
	struct smmu_as *as = domain->priv;
	unsigned long *pte;
	unsigned int *count;
	struct page *page;
	unsigned long pfn;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	pte = locate_pte(as, iova, true, &page, &count);
	if (WARN_ON(!pte)) {
		/* locate_pte() fails when a new page table can't be allocated */
		spin_unlock_irqrestore(&as->lock, flags);
		return 0;
	}
	pfn = *pte & SMMU_PFN_MASK;
	WARN_ON(!pfn_valid(pfn));
	dev_dbg(as->smmu->dev,
		"iova:%08lx pfn:%08lx asid:%d\n", iova, pfn, as->asid);

	spin_unlock_irqrestore(&as->lock, flags);
	return PFN_PHYS(pfn);
}

static int smmu_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static int smmu_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *client, *c;
	u32 map;
	int err;

	client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;
	client->as = as;
	map = (unsigned long)dev->platform_data;
	if (!map)
		return -EINVAL;

	err = smmu_client_enable_hwgrp(client, map);
	if (err)
		goto err_hwgrp;

	spin_lock(&as->client_lock);
	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			dev_err(smmu->dev,
				"%s is already attached\n", dev_name(c->dev));
			err = -EINVAL;
			goto err_client;
		}
	}
	list_add(&client->list, &as->client);
	spin_unlock(&as->client_lock);

	/*
	 * Reserve "page zero" for AVP vectors using a common dummy
	 * page.
	 */
	if (map & HWG_AVPC) {
		struct page *page;

		page = as->smmu->avp_vector_page;
		__smmu_iommu_map_pfn(as, 0, page_to_pfn(page));

		pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
	}

	dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
	return 0;

err_client:
	smmu_client_disable_hwgrp(client);
	spin_unlock(&as->client_lock);
err_hwgrp:
	devm_kfree(smmu->dev, client);
	return err;
}

static void smmu_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	struct smmu_client *c;

	spin_lock(&as->client_lock);

	list_for_each_entry(c, &as->client, list) {
		if (c->dev == dev) {
			smmu_client_disable_hwgrp(c);
			list_del(&c->list);
			c->as = NULL;
			dev_dbg(smmu->dev,
				"%s is detached\n", dev_name(dev));
			/* free only after the last use of the entry */
			devm_kfree(smmu->dev, c);
			goto out;
		}
	}
	dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
	spin_unlock(&as->client_lock);
}

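/*
 * A domain is backed by one of the num_as address spaces: claim a free
 * AS by allocating its page directory. alloc_pdir() returning -EAGAIN
 * means another CPU claimed that AS first, so move on to the next one.
 */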
static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
	int i, err = -EAGAIN;
	unsigned long flags;
	struct smmu_as *as;
	struct smmu_device *smmu = smmu_handle;

	/* Look for a free AS with lock held */
	for (i = 0; i < smmu->num_as; i++) {
		as = &smmu->as[i];

		if (as->pdir_page)
			continue;

		err = alloc_pdir(as);
		if (!err)
			goto found;

		if (err != -EAGAIN)
			break;
	}
	if (i == smmu->num_as)
		dev_err(smmu->dev, "no free AS\n");
	return err;

found:
	spin_lock_irqsave(&smmu->lock, flags);

	/* Update PDIR register */
	smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
	smmu_write(smmu,
		   SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
	FLUSH_SMMU_REGS(smmu);

	spin_unlock_irqrestore(&smmu->lock, flags);

	domain->priv = as;

	domain->geometry.aperture_start = smmu->iovmm_base;
	domain->geometry.aperture_end   = smmu->iovmm_base +
		smmu->page_count * SMMU_PAGE_SIZE - 1;
	domain->geometry.force_aperture = true;

	dev_dbg(smmu->dev, "smmu_as@%p\n", as);

	return 0;
}

static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct smmu_as *as = domain->priv;
	struct smmu_device *smmu = as->smmu;
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);

	if (as->pdir_page) {
		spin_lock(&smmu->lock);
		smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
		smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA);
		FLUSH_SMMU_REGS(smmu);
		spin_unlock(&smmu->lock);

		free_pdir(as);
	}

	/* detach_dev() frees the entry, so don't use list_for_each_entry() */
	while (!list_empty(&as->client)) {
		struct smmu_client *c;

		c = list_first_entry(&as->client, struct smmu_client, list);
		smmu_iommu_detach_dev(domain, c->dev);
	}

	spin_unlock_irqrestore(&as->lock, flags);

	domain->priv = NULL;
	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
}

static struct iommu_ops smmu_iommu_ops = {
	.domain_init	= smmu_iommu_domain_init,
	.domain_destroy	= smmu_iommu_domain_destroy,
	.attach_dev	= smmu_iommu_attach_dev,
	.detach_dev	= smmu_iommu_detach_dev,
	.map		= smmu_iommu_map,
	.unmap		= smmu_iommu_unmap,
	.iova_to_phys	= smmu_iommu_iova_to_phys,
	.domain_has_cap	= smmu_iommu_domain_has_cap,
	.pgsize_bitmap	= SMMU_IOMMU_PGSIZES,
};
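
/*
 * With these ops registered on the platform bus, a client driver uses
 * the generic IOMMU API; a minimal sketch (assuming a valid hwgrp map
 * in the client's platform_data, and caller-chosen iova/paddr):
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, dev))
 *		iommu_map(dom, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 */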

static int tegra_smmu_suspend(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);

	smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0);
	smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1);
	smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2);
	smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY);
	return 0;
}

static int tegra_smmu_resume(struct device *dev)
{
	struct smmu_device *smmu = dev_get_drvdata(dev);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&smmu->lock, flags);
	err = smmu_setup_regs(smmu);
	spin_unlock_irqrestore(&smmu->lock, flags);
	return err;
}

static int tegra_smmu_probe(struct platform_device *pdev)
{
	struct smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int i, err = 0;
	u32 asids;
	dma_addr_t uninitialized_var(base);
	size_t bytes, uninitialized_var(size);

	if (smmu_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);

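	/*
	 * The DT node supplies the number of address spaces
	 * ("nvidia,#asids"), the three register apertures, the
	 * remappable IOVA window and a phandle to the AHB node that
	 * gates the SMMU path.
	 */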
a3b24915 | 934 | if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids)) |
7a31f6f4 | 935 | return -ENODEV; |
7a31f6f4 | 936 | |
a3b24915 HD |
937 | bytes = sizeof(*smmu) + asids * sizeof(*smmu->as); |
938 | smmu = devm_kzalloc(dev, bytes, GFP_KERNEL); | |
7a31f6f4 HD |
939 | if (!smmu) { |
940 | dev_err(dev, "failed to allocate smmu_device\n"); | |
941 | return -ENOMEM; | |
942 | } | |
943 | ||
0760e8fa HD |
944 | for (i = 0; i < ARRAY_SIZE(smmu->regs); i++) { |
945 | struct resource *res; | |
946 | ||
947 | res = platform_get_resource(pdev, IORESOURCE_MEM, i); | |
948 | if (!res) | |
949 | return -ENODEV; | |
950 | smmu->regs[i] = devm_request_and_ioremap(&pdev->dev, res); | |
951 | if (!smmu->regs[i]) | |
952 | return -EBUSY; | |
7a31f6f4 HD |
953 | } |
954 | ||
0760e8fa HD |
955 | err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size); |
956 | if (err) | |
957 | return -ENODEV; | |
958 | ||
959 | if (size & SMMU_PAGE_MASK) | |
960 | return -EINVAL; | |
961 | ||
962 | size >>= SMMU_PAGE_SHIFT; | |
963 | if (!size) | |
964 | return -EINVAL; | |
965 | ||
0760e8fa HD |
966 | smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0); |
967 | if (!smmu->ahb) | |
968 | return -ENODEV; | |
969 | ||
970 | smmu->dev = dev; | |
971 | smmu->num_as = asids; | |
972 | smmu->iovmm_base = base; | |
973 | smmu->page_count = size; | |
974 | ||
7a31f6f4 HD |
975 | smmu->translation_enable_0 = ~0; |
976 | smmu->translation_enable_1 = ~0; | |
977 | smmu->translation_enable_2 = ~0; | |
978 | smmu->asid_security = 0; | |
979 | ||
7a31f6f4 HD |
980 | for (i = 0; i < smmu->num_as; i++) { |
981 | struct smmu_as *as = &smmu->as[i]; | |
982 | ||
983 | as->smmu = smmu; | |
984 | as->asid = i; | |
985 | as->pdir_attr = _PDIR_ATTR; | |
986 | as->pde_attr = _PDE_ATTR; | |
987 | as->pte_attr = _PTE_ATTR; | |
988 | ||
989 | spin_lock_init(&as->lock); | |
990 | INIT_LIST_HEAD(&as->client); | |
991 | } | |
992 | spin_lock_init(&smmu->lock); | |
0760e8fa HD |
993 | err = smmu_setup_regs(smmu); |
994 | if (err) | |
0547c2f5 | 995 | return err; |
7a31f6f4 HD |
996 | platform_set_drvdata(pdev, smmu); |
997 | ||
998 | smmu->avp_vector_page = alloc_page(GFP_KERNEL); | |
999 | if (!smmu->avp_vector_page) | |
0547c2f5 | 1000 | return -ENOMEM; |
7a31f6f4 HD |
1001 | |
1002 | smmu_handle = smmu; | |
1003 | return 0; | |
7a31f6f4 HD |
1004 | } |

static int tegra_smmu_remove(struct platform_device *pdev)
{
	struct smmu_device *smmu = platform_get_drvdata(pdev);
	int i;

	smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
	for (i = 0; i < smmu->num_as; i++)
		free_pdir(&smmu->as[i]);
	__free_page(smmu->avp_vector_page);
	smmu_handle = NULL;
	return 0;
}

static const struct dev_pm_ops tegra_smmu_pm_ops = {
	.suspend	= tegra_smmu_suspend,
	.resume		= tegra_smmu_resume,
};

#ifdef CONFIG_OF
static struct of_device_id tegra_smmu_of_match[] __devinitdata = {
	{ .compatible = "nvidia,tegra30-smmu", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
#endif

static struct platform_driver tegra_smmu_driver = {
	.probe		= tegra_smmu_probe,
	.remove		= tegra_smmu_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-smmu",
		.pm	= &tegra_smmu_pm_ops,
		.of_match_table = of_match_ptr(tegra_smmu_of_match),
	},
};

static int __init tegra_smmu_init(void)
{
	bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
	return platform_driver_register(&tegra_smmu_driver);
}

static void __exit tegra_smmu_exit(void)
{
	platform_driver_unregister(&tegra_smmu_driver);
}

subsys_initcall(tegra_smmu_init);
module_exit(tegra_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <[email protected]>");
MODULE_ALIAS("platform:tegra-smmu");
MODULE_LICENSE("GPL v2");