/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <[email protected]>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

#ifdef CONFIG_64BIT
#define smmu_writeq	writeq_relaxed
#else
#define smmu_writeq(reg64, addr)				\
	do {							\
		u64 __val = (reg64);				\
		void __iomem *__addr = (addr);			\
		writel_relaxed(__val >> 32, __addr + 4);	\
		writel_relaxed(__val, __addr);			\
	} while (0)
#endif
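/*
 * Note (a hedged observation, not from the original source): on 32-bit
 * kernels the 64-bit write above becomes two relaxed 32-bit writes (high
 * word first), so it is not atomic as seen by the SMMU. The callers in
 * this file tolerate that, e.g. the TTBRs are programmed while their
 * context bank is still disabled (SCTLR is written last in
 * arm_smmu_init_context_bank()).
 */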

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

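/*
 * How the two register files above fit together: on a stream-matching
 * SMMU, each incoming stream ID is compared (subject to SMR_MASK) against
 * the ID field of every valid SMRn; a hit on entry n selects S2CRn, whose
 * type either routes the transaction to a context bank (TRANS, via
 * CBNDX), lets it through untranslated (BYPASS), or terminates it
 * (FAULT). Stream-indexing SMMUs have no SMRs and use the stream ID to
 * index the S2CRs directly.
 */
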
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param_named(force_stage, force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");

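/*
 * Usage example (editorially added, hedged): with the driver built in,
 * stage 2 can be forced from the kernel command line with
 * "arm-smmu.force_stage=2"; built as a module, the equivalent is
 * "modprobe arm-smmu force_stage=2".
 */
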
enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)

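/*
 * TLB tags thus come straight from the context bank index above: a
 * stage-1 context uses its bank number as the ASID, and a stage-2 context
 * uses bank number + 1 as the VMID. Live domains are therefore guaranteed
 * distinct tags without a separate ASID/VMID allocator, and VMID 0 is
 * never handed to a domain.
 */
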
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}
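/*
 * PCI devices are not described individually in the device tree, so they
 * are keyed on the OF node of their host controller: walk up to the root
 * bus and take the OF node of the bridge's parent. Every master behind
 * one host bridge therefore shares a single entry in the SMMU's rb-tree
 * of masters.
 */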

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
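/*
 * The allocator above is lock-free: find_next_zero_bit() is merely a
 * hint, and the atomic test_and_set_bit() both claims the index and
 * detects a race with a concurrent caller, in which case the search is
 * simply retried. Freeing (below) is a single atomic clear_bit().
 */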

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			iova &= ~0xfffUL;	/* clear the page offset to make room for the ASID */
			iova |= ARM_SMMU_CB_ASID(cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			writeq_relaxed(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
	}
}
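/*
 * Invalidation register encodings used above: the 32-bit TLBIVA/TLBIVAL
 * format carries VA[31:12] in its top bits with the ASID in [7:0], while
 * the 64-bit format takes VA >> 12 with the ASID in bits [63:48];
 * TLBIIPAS2(L) likewise takes IPA >> 12. Hence the address massaging and
 * the "granule >> 12" stride in the 64-bit loops.
 */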
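/*
 * These callbacks are not invoked directly by this driver: they are
 * handed to the io-pgtable library (via io_pgtable_cfg.tlb in
 * arm_smmu_init_domain_context()), which calls them back as it updates
 * the page tables on map and unmap.
 */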
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		/*
		 * CBA2R.
		 * *Must* be initialised before CBAR, thanks to a VMID16
		 * architectural oversight that affected some implementations.
		 */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}
886 | } |
887 | ||
518f7136 WD |
888 | pgtbl_cfg = (struct io_pgtable_cfg) { |
889 | .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap, | |
890 | .ias = ias, | |
891 | .oas = oas, | |
892 | .tlb = &arm_smmu_gather_ops, | |
2df7a25c | 893 | .iommu_dev = smmu->dev, |
518f7136 WD |
894 | }; |
895 | ||
896 | smmu_domain->smmu = smmu; | |
897 | pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); | |
898 | if (!pgtbl_ops) { | |
899 | ret = -ENOMEM; | |
900 | goto out_clear_smmu; | |
901 | } | |
902 | ||
903 | /* Update our support page sizes to reflect the page table format */ | |
904 | arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; | |
a18037b2 | 905 | |
518f7136 WD |
906 | /* Initialise the context bank with our page table cfg */ |
907 | arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); | |
908 | ||
909 | /* | |
910 | * Request context fault interrupt. Do this last to avoid the | |
911 | * handler seeing a half-initialised domain state. | |
912 | */ | |
44680eed | 913 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; |
45ae7cff WD |
914 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, |
915 | "arm-smmu-context-fault", domain); | |
916 | if (IS_ERR_VALUE(ret)) { | |
917 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", | |
44680eed WD |
918 | cfg->irptndx, irq); |
919 | cfg->irptndx = INVALID_IRPTNDX; | |
45ae7cff WD |
920 | } |
921 | ||
518f7136 WD |
922 | mutex_unlock(&smmu_domain->init_mutex); |
923 | ||
924 | /* Publish page table ops for map/unmap */ | |
925 | smmu_domain->pgtbl_ops = pgtbl_ops; | |
a9a1b0b5 | 926 | return 0; |
45ae7cff | 927 | |
518f7136 WD |
928 | out_clear_smmu: |
929 | smmu_domain->smmu = NULL; | |
a18037b2 | 930 | out_unlock: |
518f7136 | 931 | mutex_unlock(&smmu_domain->init_mutex); |
45ae7cff WD |
932 | return ret; |
933 | } | |

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free its page tables before handing
	 * the bank back to the allocator.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];

		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	if (dev->archdata.iommu) {
		dev_err(dev, "already attached to IOMMU domain\n");
		return -EEXIST;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_cfg *cfg;

	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return;

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
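/*
 * The "hard" walk above drives the context bank's ATS1PR register
 * (address translation, stage 1, privileged read): write the page-aligned
 * VA, poll ATSR.ACTIVE until the operation completes, then read the
 * result from PAR, whose F bit marks a fault record rather than a
 * translation. Only PA bits [39:12] are kept before the page offset is
 * re-applied.
 */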

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.detach_dev		= arm_smmu_detach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, but bypass when no mapping is found */
	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
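
/*
 * The IAS/OAS/UBS fields in ID2 encode an address size as a 3-bit
 * value: 0 = 32 bits, 1 = 36, 2 = 40, 3 = 42, 4 = 44 and 5 = 48 bits.
 * Larger encodings are reserved, so clamp them to 48 bits.
 */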
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
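
/*
 * Read the hardware's description of itself out of the ID0..ID2
 * registers: supported translation stages, stream-matching resources,
 * context bank counts and address sizes. Features the driver cannot
 * use are masked off here so that the rest of the code only ever sees
 * what is actually usable.
 */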
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}
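
		/*
		 * SMR.ID and SMR.MASK may be implemented with fewer bits
		 * than their architectural maximum. Writing all-ones to
		 * SMR0 and reading it back reveals which bits are wired
		 * up, since unimplemented bits read back as zero.
		 */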
		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x\n",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
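
	/*
	 * Worked example for the size check below: with a 4K register
	 * page (pgshift = 12) and NUMPAGENDXB = 1, the SMMU claims
	 * 1 << 2 = 4 page-index groups of 2 << 12 = 8K each, i.e. a
	 * 32K register region.
	 */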
	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version == ARM_SMMU_V1) {
		smmu->va_size = smmu->ipa_size;
		size = SZ_4K | SZ_2M | SZ_1G;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
#ifndef CONFIG_64BIT
		smmu->va_size = min(32UL, smmu->va_size);
#endif
		size = 0;
		if (id & ID2_PTFS_4K)
			size |= SZ_4K | SZ_2M | SZ_1G;
		if (id & ID2_PTFS_16K)
			size |= SZ_16K | SZ_32M;
		if (id & ID2_PTFS_64K)
			size |= SZ_64K | SZ_512M;
	}

	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
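
/*
 * The matched compatible string tells us the architecture version:
 * MMU-400 and MMU-401 are SMMUv1 implementations, MMU-500 is SMMUv2.
 */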
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
	{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
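
/*
 * An illustrative sketch (not from a real board) of the DT node this
 * probe routine consumes: global interrupts come first, followed by the
 * per-context-bank interrupts, and each mmu-masters entry pairs a
 * master with its stream IDs (the master's own node supplies
 * #stream-id-cells):
 *
 *	smmu@2b400000 {
 *		compatible = "arm,mmu-400";
 *		reg = <0x2b400000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 32 4>, <0 33 4>;
 *		mmu-masters = <&dma0 0xd01d 0xd01e>;
 *	};
 */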
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	smmu->version = (enum arm_smmu_arch_version)of_id->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}
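
	/*
	 * Interrupts are laid out with the global ones first, followed
	 * by one per context bank: anything beyond the
	 * #global-interrupts count is accounted as a context interrupt.
	 */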
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;
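
	/*
	 * Walk the "mmu-masters" phandle list and register each master's
	 * stream IDs in the rb-tree used for later look-up.
	 */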
	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	parse_driver_options(smmu);

	if (smmu->version > ARM_SMMU_V1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}
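
/*
 * Registered at subsys_initcall level so the SMMU comes up before the
 * master devices that will be translating through it start probing.
 */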
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <[email protected]>");
MODULE_LICENSE("GPL v2");