/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/range.h"
#include "cpu.h"
#include "internals.h"
#include "idau.h"


static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool is_secure, bool s1_is_el0,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};
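
/*
 * For example, a PARANGE or {I}PS value of 5 selects a 48-bit physical
 * address range, while 6 selects the 52-bit range added by FEAT_LPA.
 */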

/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
unsigned int arm_pamax(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    /*
     * In machvirt_init, we call arm_pamax on a cpu that is not fully
     * initialized, so we can't rely on the propagation done in realize.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
        arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
        /* v7 with LPAE */
        return 40;
    }
    /* Anything else */
    return 32;
}

/*
 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
        return ARMMMUIdx_Stage1_SE0;
    case ARMMMUIdx_SE10_1:
        return ARMMMUIdx_Stage1_SE1;
    case ARMMMUIdx_SE10_1_PAN:
        return ARMMMUIdx_Stage1_SE1_PAN;
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}

ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}

static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the TTBR associated with this translation regime */
static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
                                        bool is_secure)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[is_secure] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!is_secure && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
{
    /*
     * For an S1 page table walk, the stage 1 attributes are always
     * some form of "this is Normal memory". The combined S1+S2
     * attributes are therefore only Device if stage 2 specifies Device.
     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
     * ie when cacheattrs.attrs bits [3:2] are 0b00.
     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
     * when cacheattrs.attrs bit [2] is 0.
     */
    assert(cacheattrs.is_s2_format);
    if (arm_hcr_el2_eff(env) & HCR_FWB) {
        return (cacheattrs.attrs & 0x4) == 0;
    } else {
        return (cacheattrs.attrs & 0xc) == 0;
    }
}

/* Translate a S1 pagetable walk through S2 if needed. */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, bool *is_secure,
                               ARMMMUFaultInfo *fi)
{
    ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;

    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, s2_mmu_idx, *is_secure)) {
        GetPhysAddrResult s2 = {};
        int ret;

        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx,
                                 *is_secure, false, &s2, fi);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }
        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
            ptw_attrs_are_device(env, s2.cacheattrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }

        if (arm_is_secure_below_el3(env)) {
            /* Check if page table walk is to secure or non-secure PA space. */
            if (*is_secure) {
                *is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
            } else {
                *is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
            }
        } else {
            assert(!*is_secure);
        }

        addr = s2.phys;
    }
    return addr;
}

/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int maskshift = extract32(tcr, 0, 3);
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    uint32_t base_mask;

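    /*
     * Illustrative example: TTBCR.N == 2 gives mask == 0xc0000000, so
     * addresses below 0x40000000 are translated via TTBR0 (whose table
     * is then 4KB in size and 4KB aligned) and all other addresses via
     * TTBR1.
     */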
    if (address & mask) {
        if (tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        base_mask = ~((uint32_t)0x3fffu >> maskshift);
        *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}

/*
 * Translate section/page access permissions to page R/W protection flags
 * @env: CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @ap: The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                         int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/*
 * Translate section/page access permissions to page R/W protection flags.
 * @ap: The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}

static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             bool is_secure, GetPhysAddrResult *result,
                             ARMMMUFaultInfo *fi)
{
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        result->page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            result->page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            result->page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    result->page_size = 0x1000;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                result->page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    result->prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    result->prot |= result->prot ? PAGE_EXEC : 0;
    if (!(result->prot & (1 << access_type))) {
        /* Access permission fault. */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    result->phys = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             bool is_secure, GetPhysAddrResult *result,
                             ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            result->page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            result->page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            result->page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            result->page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
            (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit. */
            if ((ap & 1) == 0) {
                /* Access flag fault. */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            result->prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            result->prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (result->prot && !xn) {
            result->prot |= PAGE_EXEC;
        }
        if (!(result->prot & (1 << access_type))) {
            /* Access permission fault. */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->attrs.secure = false;
    }
    result->phys = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

/*
 * Translate S2 section/page access permissions to protection flags
 * @env: CPUARMState
 * @s2ap: The 2-bit stage2 access permissions (S2AP)
 * @xn: XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}

/*
 * Translate section/page access permissions to protection flags
 * @env: CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap: The 2-bit simple AP (AP[2:1])
 * @ns: NS (non-secure) bit
 * @xn: XN (execute-never) bit
 * @pxn: PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);
    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}

static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }
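    /*
     * At this point, for example, TTBCR.T0SZ == 1 and TTBCR.T1SZ == 1
     * would give select == 0 for the low 2GB of the VA space (TTBR0)
     * and select == 1 for the top 2GB (TTBR1).
     */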

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}

/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride, int outputsize)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /*
     * Negative levels are usually not allowed...
     * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
     * begins with level -1. Note that previous feature tests will have
     * eliminated this combination if it is not enabled.
     */
    if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && outputsize <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > outputsize &&
            (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}

/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the format
 * of a long-format DFSR/IFSR fault register, with the following caveat:
 * the WnR bit is never set (the caller must do this).
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @mmu_idx: MMU index indicating required translation regime
 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page
 *             table walk), must be true if this is stage 2 of a stage 1+2
 *             walk for an EL0 access. If @mmu_idx is anything else,
 *             @s1_is_el0 is ignored.
 * @result: set on translation success,
 * @fi: set to fault info if the translation fails
 */
static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool is_secure, bool s1_is_el0,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    /* Read an LPAE long-descriptor translation table. */
    ARMFaultType fault_type = ARMFault_Translation;
    uint32_t level;
    ARMVAParameters param;
    uint64_t ttbr;
    hwaddr descaddr, indexmask, indexmask_grainsize;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;
    int32_t stride;
    int addrsize, inputsize, outputsize;
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int ap, ns, xn, pxn;
    uint32_t el = regime_el(env, mmu_idx);
    uint64_t descaddrmask;
    bool aarch64 = arm_el_is_aa64(env, el);
    bool guarded = false;

    /* TODO: This code does not support shareability levels. */
    if (aarch64) {
        int ps;

        param = aa64_va_parameters(env, address, mmu_idx,
                                   access_type != MMU_INST_FETCH);
        level = 0;

        /*
         * If TxSZ is programmed to a value larger than the maximum,
         * or smaller than the effective minimum, it is IMPLEMENTATION
         * DEFINED whether we behave as if the field were programmed
         * within bounds, or if a level 0 Translation fault is generated.
         *
         * With FEAT_LVA, fault on less than minimum becomes required,
         * so our choice is to always raise the fault.
         */
        if (param.tsz_oob) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }

        addrsize = 64 - 8 * param.tbi;
        inputsize = 64 - param.tsz;

        /*
         * Bound PS by PARANGE to find the effective output address size.
         * ID_AA64MMFR0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
        ps = MIN(ps, param.ps);
        assert(ps < ARRAY_SIZE(pamax_map));
        outputsize = pamax_map[ps];
    } else {
        param = aa32_va_parameters(env, address, mmu_idx);
        level = 1;
        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
        inputsize = addrsize - param.tsz;
        outputsize = 40;
    }

    /*
     * We determined the region when collecting the parameters, but we
     * have not yet validated that the address is valid for the region.
     * Extract the top bits and verify that they all match select.
     *
     * For aa32, if inputsize == addrsize, then we have selected the
     * region by exclusion in aa32_va_parameters and there is no more
     * validation to do here.
     */
    if (inputsize < addrsize) {
        target_ulong top_bits = sextract64(address, inputsize,
                                           addrsize - inputsize);
        if (-top_bits != param.select) {
            /* The gap between the two regions is a Translation fault */
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
    }

    if (param.using64k) {
        stride = 13;
    } else if (param.using16k) {
        stride = 11;
    } else {
        stride = 9;
    }
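    /*
     * stride is the number of VA bits resolved per lookup level: e.g. a
     * 4KB granule holds 512 eight-byte descriptors per table, i.e. 9 bits
     * of index.
     */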

    /*
     * Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    ttbr = regime_ttbr(env, mmu_idx, param.select);

    /*
     * Here we should have set up all the parameters for the translation:
     * inputsize, ttbr, epd, stride, tbi
     */

    if (param.epd) {
        /*
         * Translation table walk disabled => Translation fault on TLB miss
         * Note: This is always 0 on 64-bit EL2 and EL3.
         */
        goto do_fault;
    }

    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
        /*
         * The starting level depends on the virtual address size (which can
         * be up to 48 bits) and the translation granule size. It indicates
         * the number of strides (stride bits at a time) needed to
         * consume the bits of the input address. In the pseudocode this is:
         *   level = 4 - RoundUp((inputsize - grainsize) / stride)
         * where their 'inputsize' is our 'inputsize', 'grainsize' is
         * our 'stride + 3' and 'stride' is our 'stride'.
         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
         * = 4 - (inputsize - 4) / stride;
         */
        level = 4 - (inputsize - 4) / stride;
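        /*
         * For example, a 39-bit VA with a 4KB granule (stride 9) gives
         * level = 4 - 35 / 9 = 1, i.e. a three-level walk.
         */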
    } else {
        /*
         * For stage 2 translations the starting level is specified by the
         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
         */
        uint32_t sl0 = extract32(tcr, 6, 2);
        uint32_t sl2 = extract64(tcr, 33, 1);
        uint32_t startlevel;
        bool ok;

        /* SL2 is RES0 unless DS=1 & 4kb granule. */
        if (param.ds && stride == 9 && sl2) {
            if (sl0 != 0) {
                level = 0;
                fault_type = ARMFault_Translation;
                goto do_fault;
            }
            startlevel = -1;
        } else if (!aarch64 || stride == 9) {
            /* AArch32 or 4KB pages */
            startlevel = 2 - sl0;

            if (cpu_isar_feature(aa64_st, cpu)) {
                startlevel &= 3;
            }
        } else {
            /* 16KB or 64KB pages */
            startlevel = 3 - sl0;
        }

        /* Check that the starting level is valid. */
        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
                                inputsize, stride, outputsize);
        if (!ok) {
            fault_type = ARMFault_Translation;
            goto do_fault;
        }
        level = startlevel;
    }

    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 48);

    /*
     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
     *
     * Otherwise, if the base address is out of range, raise AddressSizeFault.
     * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
     * but we've just cleared the bits above 47, so simplify the test.
     */
    if (outputsize > 48) {
        descaddr |= extract64(ttbr, 2, 4) << 48;
    } else if (descaddr >> outputsize) {
        level = 0;
        fault_type = ARMFault_AddressSize;
        goto do_fault;
    }

    /*
     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
     * and also to mask out CnP (bit 0) which could validly be non-zero.
     */
    descaddr &= ~indexmask;

    /*
     * For AArch32, the address field in the descriptor goes up to bit 39
     * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
     * or an AddressSize fault is raised.  So for v8 we extract those SBZ
     * bits as part of the address, which will be checked via outputsize.
     * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
     * the highest bits of a 52-bit output are placed elsewhere.
     */
    if (param.ds) {
        descaddrmask = MAKE_64BIT_MASK(0, 50);
    } else if (arm_feature(env, ARM_FEATURE_V8)) {
        descaddrmask = MAKE_64BIT_MASK(0, 48);
    } else {
        descaddrmask = MAKE_64BIT_MASK(0, 40);
    }
    descaddrmask &= ~indexmask_grainsize;

    /*
     * Secure accesses start with the page table in secure memory and
     * can be downgraded to non-secure at any step. Non-secure accesses
     * remain non-secure. We implement this by just ORing in the NSTable/NS
     * bits at each step.
     */
    tableattrs = is_secure ? 0 : (1 << 4);
    for (;;) {
        uint64_t descriptor;
        bool nstable;

        descaddr |= (address >> (stride * (4 - level))) & indexmask;
        descaddr &= ~7ULL;
        nstable = extract32(tableattrs, 4, 1);
        descriptor = arm_ldq_ptw(env, descaddr, !nstable, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }

        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }

        descaddr = descriptor & descaddrmask;

        /*
         * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
         * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
         * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
         * raise AddressSizeFault.
         */
        if (outputsize > 48) {
            if (param.ds) {
                descaddr |= extract64(descriptor, 8, 2) << 50;
            } else {
                descaddr |= extract64(descriptor, 12, 4) << 48;
            }
        } else if (descaddr >> outputsize) {
            fault_type = ARMFault_AddressSize;
            goto do_fault;
        }

        if ((descriptor & 2) && (level < 3)) {
            /*
             * Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            indexmask = indexmask_grainsize;
            continue;
        }
        /*
         * Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies. Note that although
         * descaddrmask masks enough of the low bits of the descriptor
         * to give a correct page or table address, the address field
         * in a block descriptor is smaller; so we need to explicitly
         * clear the lower bits here before ORing in the low vaddr bits.
         */
        page_size = (1ULL << ((stride * (4 - level)) + 3));
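        /*
         * For example, with a 4KB granule (stride 9) a level 3 entry maps
         * a 4KB page and a level 2 block entry maps a 2MB block.
         */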
        descaddr &= ~(hwaddr)(page_size - 1);
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);

        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* Stage 2 table descriptors do not include any attribute fields */
            break;
        }
        /* Merge in attributes from table descriptors */
        attrs |= nstable << 3; /* NS */
        guarded = extract64(descriptor, 50, 1);  /* GP */
        if (param.hpd) {
            /* HPD disables all the table attributes except NSTable.  */
            break;
        }
        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
        /*
         * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
        break;
    }
    /*
     * Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = ARMFault_AccessFlag;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }

    ap = extract32(attrs, 4, 2);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        ns = mmu_idx == ARMMMUIdx_Stage2;
        xn = extract32(attrs, 11, 2);
        result->prot = get_S2prot(env, ap, xn, s1_is_el0);
    } else {
        ns = extract32(attrs, 3, 1);
        xn = extract32(attrs, 12, 1);
        pxn = extract32(attrs, 11, 1);
        result->prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
    }

    fault_type = ARMFault_Permission;
    if (!(result->prot & (1 << access_type))) {
        goto do_fault;
    }

    if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->attrs.secure = false;
    }
    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
        arm_tlb_bti_gp(&result->attrs) = true;
    }

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        result->cacheattrs.is_s2_format = true;
        result->cacheattrs.attrs = extract32(attrs, 0, 4);
    } else {
        /* Index into MAIR registers for cache attributes */
        uint8_t attrindx = extract32(attrs, 0, 3);
        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
        assert(attrindx <= 7);
        result->cacheattrs.is_s2_format = false;
        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
    }

    /*
     * For FEAT_LPA2 and effective DS, the SH field in the attributes
     * was re-purposed for output address bits.  The SH attribute in
     * that case comes from TCR_ELx, which we extracted earlier.
     */
    if (param.ds) {
        result->cacheattrs.shareability = param.sh;
    } else {
        result->cacheattrs.shareability = extract32(attrs, 6, 2);
    }

    result->phys = descaddr;
    result->page_size = page_size;
    return false;

do_fault:
    fi->type = fault_type;
    fi->level = level;
    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
                               mmu_idx == ARMMMUIdx_Stage2_S);
    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
    return true;
}

static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 bool is_secure, GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    int n;
    uint32_t mask;
    uint32_t base;
    bool is_user = regime_is_user(env, mmu_idx);

    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
        /* MPU disabled.  */
        result->phys = address;
        result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    result->phys = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0) {
            continue;
        }
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0) {
            break;
        }
    }
    if (n < 0) {
        fi->type = ARMFault_Background;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        mask = env->cp15.pmsav5_insn_ap;
    } else {
        mask = env->cp15.pmsav5_data_ap;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    case 1:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        result->prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        result->prot = PAGE_READ;
        if (!is_user) {
            result->prot |= PAGE_WRITE;
        }
        break;
    case 3:
        result->prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user) {
            fi->type = ARMFault_Permission;
            fi->level = 1;
            return true;
        }
        result->prot = PAGE_READ;
        break;
    case 6:
        result->prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        fi->type = ARMFault_Permission;
        fi->level = 1;
        return true;
    }
    result->prot |= PAGE_EXEC;
    return false;
}

static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                         int32_t address, int *prot)
{
    if (!arm_feature(env, ARM_FEATURE_M)) {
        *prot = PAGE_READ | PAGE_WRITE;
        switch (address) {
        case 0xF0000000 ... 0xFFFFFFFF:
            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* hivecs execing is ok */
                *prot |= PAGE_EXEC;
            }
            break;
        case 0x00000000 ... 0x7FFFFFFF:
            *prot |= PAGE_EXEC;
            break;
        }
    } else {
        /* Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
        switch (address) {
        case 0x00000000 ... 0x1fffffff: /* ROM */
        case 0x20000000 ... 0x3fffffff: /* SRAM */
        case 0x60000000 ... 0x7fffffff: /* RAM */
        case 0x80000000 ... 0x9fffffff: /* RAM */
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        case 0x40000000 ... 0x5fffffff: /* Peripheral */
        case 0xa0000000 ... 0xbfffffff: /* Device */
        case 0xc0000000 ... 0xdfffffff: /* Device */
        case 0xe0000000 ... 0xffffffff: /* System */
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    return arm_feature(env, ARM_FEATURE_M) &&
        extract32(address, 20, 12) == 0xe00;
}

static bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /*
     * True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
}

static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                         bool is_secure, bool is_user)
{
    /*
     * Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    } else {
        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
    }
}

static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 bool secure, GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    bool is_user = regime_is_user(env, mmu_idx);

    result->phys = address;
    result->page_size = TARGET_PAGE_SIZE;
    result->prot = 0;

    if (regime_translation_disabled(env, mmu_idx, secure) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot);
    } else { /* MPU enabled */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            rmask = (1ull << rsize) - 1;
1555 | ||
1556 | if (base & rmask) { | |
1557 | qemu_log_mask(LOG_GUEST_ERROR, | |
1558 | "DRBAR[%d]: 0x%" PRIx32 " misaligned " | |
1559 | "to DRSR region size, mask = 0x%" PRIx32 "\n", | |
1560 | n, base, rmask); | |
1561 | continue; | |
1562 | } | |
1563 | ||
1564 | if (address < base || address > base + rmask) { | |
1565 | /* | |
1566 | * Address not in this region. We must check whether the | |
1567 | * region covers addresses in the same page as our address. | |
1568 | * In that case we must not report a size that covers the | |
1569 | * whole page for a subsequent hit against a different MPU | |
1570 | * region or the background region, because it would result in | |
1571 | * incorrect TLB hits for subsequent accesses to addresses that | |
1572 | * are in this MPU region. | |
1573 | */ | |
1574 | if (ranges_overlap(base, rmask, | |
1575 | address & TARGET_PAGE_MASK, | |
1576 | TARGET_PAGE_SIZE)) { | |
e59367e2 | 1577 | result->page_size = 1; |
1f2e87e5 RH |
1578 | } |
1579 | continue; | |
1580 | } | |
1581 | ||
1582 | /* Region matched */ | |
1583 | ||
1584 | if (rsize >= 8) { /* no subregions for regions < 256 bytes */ | |
1585 | int i, snd; | |
1586 | uint32_t srdis_mask; | |
1587 | ||
1588 | rsize -= 3; /* sub region size (power of 2) */ | |
1589 | snd = ((address - base) >> rsize) & 0x7; | |
1590 | srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); | |
1591 | ||
1592 | srdis_mask = srdis ? 0x3 : 0x0; | |
1593 | for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { | |
1594 | /* | |
1595 | * This will check in groups of 2, 4 and then 8, whether | |
1596 | * the subregion bits are consistent. rsize is incremented | |
1597 | * back up to give the region size, considering consistent | |
1598 | * adjacent subregions as one region. Stop testing if rsize | |
1599 | * is already big enough for an entire QEMU page. | |
1600 | */ | |
1601 | int snd_rounded = snd & ~(i - 1); | |
1602 | uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], | |
1603 | snd_rounded + 8, i); | |
1604 | if (srdis_mask ^ srdis_multi) { | |
1605 | break; | |
1606 | } | |
1607 | srdis_mask = (srdis_mask << i) | srdis_mask; | |
1608 | rsize++; | |
1609 | } | |
1610 | } | |
1611 | if (srdis) { | |
1612 | continue; | |
1613 | } | |
1614 | if (rsize < TARGET_PAGE_BITS) { | |
e59367e2 | 1615 | result->page_size = 1 << rsize; |
1f2e87e5 RH |
1616 | } |
1617 | break; | |
1618 | } | |
1619 | ||
1620 | if (n == -1) { /* no hits */ | |
1a469cf7 | 1621 | if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) { |
1f2e87e5 RH |
1622 | /* background fault */ |
1623 | fi->type = ARMFault_Background; | |
1624 | return true; | |
1625 | } | |
e59367e2 | 1626 | get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot); |
1f2e87e5 RH |
1627 | } else { /* an MPU hit! */ | |
1628 | uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); | |
1629 | uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); | |
1630 | ||
1631 | if (m_is_system_region(env, address)) { | |
1632 | /* System space is always execute never */ | |
1633 | xn = 1; | |
1634 | } | |
1635 | ||
1636 | if (is_user) { /* User mode AP bit decoding */ | |
1637 | switch (ap) { | |
1638 | case 0: | |
1639 | case 1: | |
1640 | case 5: | |
1641 | break; /* no access */ | |
1642 | case 3: | |
e59367e2 | 1643 | result->prot |= PAGE_WRITE; |
1f2e87e5 RH |
1644 | /* fall through */ |
1645 | case 2: | |
1646 | case 6: | |
e59367e2 | 1647 | result->prot |= PAGE_READ | PAGE_EXEC; |
1f2e87e5 RH |
1648 | break; |
1649 | case 7: | |
1650 | /* for v7M, same as 6; for R profile a reserved value */ | |
1651 | if (arm_feature(env, ARM_FEATURE_M)) { | |
e59367e2 | 1652 | result->prot |= PAGE_READ | PAGE_EXEC; |
1f2e87e5 RH |
1653 | break; |
1654 | } | |
1655 | /* fall through */ | |
1656 | default: | |
1657 | qemu_log_mask(LOG_GUEST_ERROR, | |
1658 | "DRACR[%d]: Bad value for AP bits: 0x%" | |
1659 | PRIx32 "\n", n, ap); | |
1660 | } | |
1661 | } else { /* Priv. mode AP bits decoding */ | |
1662 | switch (ap) { | |
1663 | case 0: | |
1664 | break; /* no access */ | |
1665 | case 1: | |
1666 | case 2: | |
1667 | case 3: | |
e59367e2 | 1668 | result->prot |= PAGE_WRITE; |
1f2e87e5 RH |
1669 | /* fall through */ |
1670 | case 5: | |
1671 | case 6: | |
e59367e2 | 1672 | result->prot |= PAGE_READ | PAGE_EXEC; |
1f2e87e5 RH |
1673 | break; |
1674 | case 7: | |
1675 | /* for v7M, same as 6; for R profile a reserved value */ | |
1676 | if (arm_feature(env, ARM_FEATURE_M)) { | |
e59367e2 | 1677 | result->prot |= PAGE_READ | PAGE_EXEC; |
1f2e87e5 RH |
1678 | break; |
1679 | } | |
1680 | /* fall through */ | |
1681 | default: | |
1682 | qemu_log_mask(LOG_GUEST_ERROR, | |
1683 | "DRACR[%d]: Bad value for AP bits: 0x%" | |
1684 | PRIx32 "\n", n, ap); | |
1685 | } | |
1686 | } | |
1687 | ||
1688 | /* execute never */ | |
1689 | if (xn) { | |
e59367e2 | 1690 | result->prot &= ~PAGE_EXEC; |
1f2e87e5 RH |
1691 | } |
1692 | } | |
1693 | } | |
1694 | ||
1695 | fi->type = ARMFault_Permission; | |
1696 | fi->level = 1; | |
e59367e2 | 1697 | return !(result->prot & (1 << access_type)); |
1f2e87e5 RH |
1698 | } |
1699 | ||
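/*
 * A minimal sketch of the DRBAR/DRSR decode performed by the region
 * search above (a hypothetical helper, not used elsewhere): it returns
 * the inclusive end address of an enabled, correctly aligned PMSAv7
 * region, or 0 otherwise. For example DRBAR = 0x20000000 with
 * DRSR.RSIZE = 9 describes a 1 KB region 0x20000000..0x200003ff with
 * eight 128-byte subregions; an access at 0x20000280 lands in subregion
 * 5, so setting DRSR bit 13 (SRD[5]) would disable it.
 */
static inline uint32_t pmsav7_region_end_sketch(uint32_t drbar, uint32_t drsr)
{
    uint32_t rsize = extract32(drsr, 1, 5);
    uint32_t rmask;

    if (!(drsr & 0x1) || rsize == 0) {
        return 0;                  /* disabled, or invalid size field */
    }
    /* The region size is 2^(RSIZE + 1) bytes; the mask is that minus 1. */
    rmask = (1ull << (rsize + 1)) - 1;
    if (drbar & rmask) {
        return 0;                  /* base misaligned to the region size */
    }
    return drbar + rmask;
}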
fedbaa05 RH |
1700 | bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, |
1701 | MMUAccessType access_type, ARMMMUIdx mmu_idx, | |
e9fb7090 RH |
1702 | bool secure, GetPhysAddrResult *result, |
1703 | ARMMMUFaultInfo *fi, uint32_t *mregion) | |
fedbaa05 RH |
1704 | { |
1705 | /* | |
1706 | * Perform a PMSAv8 MPU lookup (without also doing the SAU check | |
1707 | * that a full phys-to-virt translation does). | |
1708 | * mregion is (if not NULL) set to the region number which matched, | |
1709 | * or -1 if no region number is returned (MPU off, address did not | |
1710 | * hit a region, address hit in multiple regions). | |
652c750e RH |
1711 | * If the region hit doesn't cover the entire TARGET_PAGE the address |
1712 | * is within, then we set the result page_size to 1 to force the | |
1713 | * memory system to use a subpage. | |
fedbaa05 RH |
1714 | */ |
1715 | ARMCPU *cpu = env_archcpu(env); | |
1716 | bool is_user = regime_is_user(env, mmu_idx); | |
fedbaa05 RH |
1717 | int n; |
1718 | int matchregion = -1; | |
1719 | bool hit = false; | |
1720 | uint32_t addr_page_base = address & TARGET_PAGE_MASK; | |
1721 | uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); | |
1722 | ||
652c750e | 1723 | result->page_size = TARGET_PAGE_SIZE; |
d2c92e58 RH |
1724 | result->phys = address; |
1725 | result->prot = 0; | |
fedbaa05 RH |
1726 | if (mregion) { |
1727 | *mregion = -1; | |
1728 | } | |
1729 | ||
1730 | /* | |
1731 | * Unlike the ARM ARM pseudocode, we don't need to check whether this | |
1732 | * was an exception vector read from the vector table (which is always | |
1733 | * done using the default system address map), because those accesses | |
1734 | * are done in arm_v7m_load_vector(), which always does a direct | |
1735 | * read using address_space_ldl(), rather than going via this function. | |
1736 | */ | |
7e80c0a4 | 1737 | if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */ |
fedbaa05 RH |
1738 | hit = true; |
1739 | } else if (m_is_ppb_region(env, address)) { | |
1740 | hit = true; | |
1741 | } else { | |
1a469cf7 | 1742 | if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) { |
fedbaa05 RH |
1743 | hit = true; |
1744 | } | |
1745 | ||
1746 | for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { | |
1747 | /* region search */ | |
1748 | /* | |
1749 | * Note that the base address is bits [31:5] from the register | |
1750 | * with bits [4:0] all zeroes, but the limit address is bits | |
1751 | * [31:5] from the register with bits [4:0] all ones. | |
1752 | */ | |
1753 | uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; | |
1754 | uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; | |
1755 | ||
1756 | if (!(env->pmsav8.rlar[secure][n] & 0x1)) { | |
1757 | /* Region disabled */ | |
1758 | continue; | |
1759 | } | |
1760 | ||
1761 | if (address < base || address > limit) { | |
1762 | /* | |
1763 | * Address not in this region. We must check whether the | |
1764 | * region covers addresses in the same page as our address. | |
1765 | * In that case we must not report a size that covers the | |
1766 | * whole page for a subsequent hit against a different MPU | |
1767 | * region or the background region, because it would result in | |
1768 | * incorrect TLB hits for subsequent accesses to addresses that | |
1769 | * are in this MPU region. | |
1770 | */ | |
1771 | if (limit >= base && | |
1772 | ranges_overlap(base, limit - base + 1, | |
1773 | addr_page_base, | |
1774 | TARGET_PAGE_SIZE)) { | |
652c750e | 1775 | result->page_size = 1; |
fedbaa05 RH |
1776 | } |
1777 | continue; | |
1778 | } | |
1779 | ||
1780 | if (base > addr_page_base || limit < addr_page_limit) { | |
652c750e | 1781 | result->page_size = 1; |
fedbaa05 RH |
1782 | } |
1783 | ||
1784 | if (matchregion != -1) { | |
1785 | /* | |
1786 | * Multiple regions match -- always a failure (unlike | |
1787 | * PMSAv7 where highest-numbered-region wins) | |
1788 | */ | |
1789 | fi->type = ARMFault_Permission; | |
1790 | fi->level = 1; | |
1791 | return true; | |
1792 | } | |
1793 | ||
1794 | matchregion = n; | |
1795 | hit = true; | |
1796 | } | |
1797 | } | |
1798 | ||
1799 | if (!hit) { | |
1800 | /* background fault */ | |
1801 | fi->type = ARMFault_Background; | |
1802 | return true; | |
1803 | } | |
1804 | ||
1805 | if (matchregion == -1) { | |
1806 | /* hit using the background region */ | |
d2c92e58 | 1807 | get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot); |
fedbaa05 RH |
1808 | } else { |
1809 | uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); | |
1810 | uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); | |
1811 | bool pxn = false; | |
1812 | ||
1813 | if (arm_feature(env, ARM_FEATURE_V8_1M)) { | |
1814 | pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1); | |
1815 | } | |
1816 | ||
1817 | if (m_is_system_region(env, address)) { | |
1818 | /* System space is always execute never */ | |
1819 | xn = 1; | |
1820 | } | |
1821 | ||
d2c92e58 RH |
1822 | result->prot = simple_ap_to_rw_prot(env, mmu_idx, ap); |
1823 | if (result->prot && !xn && !(pxn && !is_user)) { | |
1824 | result->prot |= PAGE_EXEC; | |
fedbaa05 RH |
1825 | } |
1826 | /* | |
1827 | * We don't need to look the attribute up in the MAIR0/MAIR1 | |
1828 | * registers because that only tells us about cacheability. | |
1829 | */ | |
1830 | if (mregion) { | |
1831 | *mregion = matchregion; | |
1832 | } | |
1833 | } | |
1834 | ||
1835 | fi->type = ARMFault_Permission; | |
1836 | fi->level = 1; | |
d2c92e58 | 1837 | return !(result->prot & (1 << access_type)); |
fedbaa05 RH |
1838 | } |
1839 | ||
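/*
 * The v8M region decode above is much simpler than PMSAv7: a region is
 * just [RBAR & ~0x1f, RLAR | 0x1f], with no power-of-two size or
 * alignment requirement. For instance RBAR = 0x20000040 with
 * RLAR = 0x200000e1 (enable bit set) describes the 192-byte region
 * 0x20000040..0x200000ff; because that does not cover the whole
 * TARGET_PAGE containing it, a hit sets result->page_size to 1 so the
 * memory system uses a subpage rather than caching a full page.
 */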
2c1f429d RH |
1840 | static bool v8m_is_sau_exempt(CPUARMState *env, |
1841 | uint32_t address, MMUAccessType access_type) | |
1842 | { | |
1843 | /* | |
1844 | * The architecture specifies that certain address ranges are | |
1845 | * exempt from v8M SAU/IDAU checks. | |
1846 | */ | |
1847 | return | |
1848 | (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || | |
1849 | (address >= 0xe0000000 && address <= 0xe0002fff) || | |
1850 | (address >= 0xe000e000 && address <= 0xe000efff) || | |
1851 | (address >= 0xe002e000 && address <= 0xe002efff) || | |
1852 | (address >= 0xe0040000 && address <= 0xe0041fff) || | |
1853 | (address >= 0xe00ff000 && address <= 0xe00fffff); | |
1854 | } | |
1855 | ||
1856 | void v8m_security_lookup(CPUARMState *env, uint32_t address, | |
dbf2a71a RH |
1857 | MMUAccessType access_type, ARMMMUIdx mmu_idx, |
1858 | bool is_secure, V8M_SAttributes *sattrs) | |
2c1f429d RH |
1859 | { |
1860 | /* | |
1861 | * Look up the security attributes for this address. Compare the | |
1862 | * pseudocode SecurityCheck() function. | |
1863 | * We assume the caller has zero-initialized *sattrs. | |
1864 | */ | |
1865 | ARMCPU *cpu = env_archcpu(env); | |
1866 | int r; | |
1867 | bool idau_exempt = false, idau_ns = true, idau_nsc = true; | |
1868 | int idau_region = IREGION_NOTVALID; | |
1869 | uint32_t addr_page_base = address & TARGET_PAGE_MASK; | |
1870 | uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); | |
1871 | ||
1872 | if (cpu->idau) { | |
1873 | IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); | |
1874 | IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); | |
1875 | ||
1876 | iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, | |
1877 | &idau_nsc); | |
1878 | } | |
1879 | ||
1880 | if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { | |
1881 | /* 0xf0000000..0xffffffff is always S for insn fetches */ | |
1882 | return; | |
1883 | } | |
1884 | ||
1885 | if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { | |
dbf2a71a | 1886 | sattrs->ns = !is_secure; |
2c1f429d RH |
1887 | return; |
1888 | } | |
1889 | ||
1890 | if (idau_region != IREGION_NOTVALID) { | |
1891 | sattrs->irvalid = true; | |
1892 | sattrs->iregion = idau_region; | |
1893 | } | |
1894 | ||
1895 | switch (env->sau.ctrl & 3) { | |
1896 | case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ | |
1897 | break; | |
1898 | case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ | |
1899 | sattrs->ns = true; | |
1900 | break; | |
1901 | default: /* SAU.ENABLE == 1 */ | |
1902 | for (r = 0; r < cpu->sau_sregion; r++) { | |
1903 | if (env->sau.rlar[r] & 1) { | |
1904 | uint32_t base = env->sau.rbar[r] & ~0x1f; | |
1905 | uint32_t limit = env->sau.rlar[r] | 0x1f; | |
1906 | ||
1907 | if (base <= address && limit >= address) { | |
1908 | if (base > addr_page_base || limit < addr_page_limit) { | |
1909 | sattrs->subpage = true; | |
1910 | } | |
1911 | if (sattrs->srvalid) { | |
1912 | /* | |
1913 | * If we hit in more than one region then we must report | |
1914 | * as Secure, not NS-Callable, with no valid region | |
1915 | * number info. | |
1916 | */ | |
1917 | sattrs->ns = false; | |
1918 | sattrs->nsc = false; | |
1919 | sattrs->sregion = 0; | |
1920 | sattrs->srvalid = false; | |
1921 | break; | |
1922 | } else { | |
1923 | if (env->sau.rlar[r] & 2) { | |
1924 | sattrs->nsc = true; | |
1925 | } else { | |
1926 | sattrs->ns = true; | |
1927 | } | |
1928 | sattrs->srvalid = true; | |
1929 | sattrs->sregion = r; | |
1930 | } | |
1931 | } else { | |
1932 | /* | |
1933 | * Address not in this region. We must check whether the | |
1934 | * region covers addresses in the same page as our address. | |
1935 | * In that case we must not report a size that covers the | |
1936 | * whole page for a subsequent hit against a different MPU | |
1937 | * region or the background region, because it would result | |
1938 | * in incorrect TLB hits for subsequent accesses to | |
1939 | * addresses that are in this MPU region. | |
1940 | */ | |
1941 | if (limit >= base && | |
1942 | ranges_overlap(base, limit - base + 1, | |
1943 | addr_page_base, | |
1944 | TARGET_PAGE_SIZE)) { | |
1945 | sattrs->subpage = true; | |
1946 | } | |
1947 | } | |
1948 | } | |
1949 | } | |
1950 | break; | |
1951 | } | |
1952 | ||
1953 | /* | |
1954 | * The IDAU will override the SAU lookup results if it specifies | |
1955 | * higher security than the SAU does. | |
1956 | */ | |
1957 | if (!idau_ns) { | |
1958 | if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { | |
1959 | sattrs->ns = false; | |
1960 | sattrs->nsc = idau_nsc; | |
1961 | } | |
1962 | } | |
1963 | } | |
1964 | ||
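/*
 * As a concrete example of the SAU decode above: with SAU.ENABLE set, a
 * region with SAU_RBAR = 0x10000000 and SAU_RLAR = 0x1000ffe1 covers
 * 0x10000000..0x1000ffff and marks it Non-secure (sattrs->ns); the same
 * region with RLAR bit 1 also set would instead be Secure,
 * Non-secure-callable (sattrs->nsc). If a second enabled region also
 * matched the address, both ns and nsc would be cleared again and no
 * valid region number reported.
 */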
730d5c31 RH |
1965 | static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, |
1966 | MMUAccessType access_type, ARMMMUIdx mmu_idx, | |
be0ca948 | 1967 | bool secure, GetPhysAddrResult *result, |
730d5c31 RH |
1968 | ARMMMUFaultInfo *fi) |
1969 | { | |
730d5c31 RH |
1970 | V8M_SAttributes sattrs = {}; |
1971 | bool ret; | |
730d5c31 RH |
1972 | |
1973 | if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { | |
dbf2a71a RH |
1974 | v8m_security_lookup(env, address, access_type, mmu_idx, |
1975 | secure, &sattrs); | |
730d5c31 RH |
1976 | if (access_type == MMU_INST_FETCH) { |
1977 | /* | |
1978 | * Instruction fetches always use the MMU bank and the | |
1979 | * transaction attribute determined by the fetch address, | |
1980 | * regardless of CPU state. This is painful for QEMU | |
1981 | * to handle, because it would mean we need to encode | |
1982 | * into the mmu_idx not just the (user, negpri) information | |
1983 | * for the current security state but also that for the | |
1984 | * other security state, which would balloon the number | |
1985 | * of mmu_idx values needed alarmingly. | |
1986 | * Fortunately we can avoid this because it's not actually | |
1987 | * possible to arbitrarily execute code from memory with | |
1988 | * the wrong security attribute: it will always generate | |
1989 | * an exception of some kind or another, apart from the | |
1990 | * special case of an NS CPU executing an SG instruction | |
1991 | * in S&NSC memory. So we always just fail the translation | |
1992 | * here and sort things out in the exception handler | |
1993 | * (including possibly emulating an SG instruction). | |
1994 | */ | |
1995 | if (sattrs.ns != !secure) { | |
1996 | if (sattrs.nsc) { | |
1997 | fi->type = ARMFault_QEMU_NSCExec; | |
1998 | } else { | |
1999 | fi->type = ARMFault_QEMU_SFault; | |
2000 | } | |
5272b23e RH |
2001 | result->page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; |
2002 | result->phys = address; | |
2003 | result->prot = 0; | |
730d5c31 RH |
2004 | return true; |
2005 | } | |
2006 | } else { | |
2007 | /* | |
2008 | * For data accesses we always use the MMU bank indicated | |
2009 | * by the current CPU state, but the security attributes | |
2010 | * might downgrade a secure access to nonsecure. | |
2011 | */ | |
2012 | if (sattrs.ns) { | |
5272b23e | 2013 | result->attrs.secure = false; |
730d5c31 RH |
2014 | } else if (!secure) { |
2015 | /* | |
2016 | * NS access to S memory must fault. | |
2017 | * Architecturally we should first check whether the | |
2018 | * MPU information for this address indicates that we | |
2019 | * are doing an unaligned access to Device memory, which | |
2020 | * should generate a UsageFault instead. QEMU does not | |
2021 | * currently check for that kind of unaligned access though. | |
2022 | * If we added it we would need to do so as a special case | |
2023 | * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). | |
2024 | */ | |
2025 | fi->type = ARMFault_QEMU_SFault; | |
5272b23e RH |
2026 | result->page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; |
2027 | result->phys = address; | |
2028 | result->prot = 0; | |
730d5c31 RH |
2029 | return true; |
2030 | } | |
2031 | } | |
2032 | } | |
2033 | ||
e9fb7090 | 2034 | ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure, |
652c750e RH |
2035 | result, fi, NULL); |
2036 | if (sattrs.subpage) { | |
2037 | result->page_size = 1; | |
2038 | } | |
730d5c31 RH |
2039 | return ret; |
2040 | } | |
2041 | ||
966f4bb7 RH |
2042 | /* |
2043 | * Translate from the 4-bit stage 2 representation of | |
2044 | * memory attributes (without cache-allocation hints) to | |
2045 | * the 8-bit representation of the stage 1 MAIR registers | |
2046 | * (which includes allocation hints). | |
2047 | * | |
2048 | * ref: shared/translation/attrs/S2AttrDecode() | |
2049 | * .../S2ConvertAttrsHints() | |
2050 | */ | |
2051 | static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) | |
2052 | { | |
2053 | uint8_t hiattr = extract32(s2attrs, 2, 2); | |
2054 | uint8_t loattr = extract32(s2attrs, 0, 2); | |
2055 | uint8_t hihint = 0, lohint = 0; | |
2056 | ||
2057 | if (hiattr != 0) { /* normal memory */ | |
2058 | if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */ | |
2059 | hiattr = loattr = 1; /* non-cacheable */ | |
2060 | } else { | |
2061 | if (hiattr != 1) { /* Write-through or write-back */ | |
2062 | hihint = 3; /* RW allocate */ | |
2063 | } | |
2064 | if (loattr != 1) { /* Write-through or write-back */ | |
2065 | lohint = 3; /* RW allocate */ | |
2066 | } | |
2067 | } | |
2068 | } | |
2069 | ||
2070 | return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; | |
2071 | } | |
2072 | ||
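/*
 * Worked examples of the conversion above, assuming HCR_EL2.CD is clear:
 * stage 2 attrs 0b1111 (outer and inner Write-Back) become MAIR 0xff
 * (Write-Back, RW-allocate in both nibbles), 0b0101 (Non-cacheable)
 * becomes 0x44, and 0b0001 (Device-nGnRE, high bits zero) passes through
 * as 0x04 with no hints added. With HCR_EL2.CD set, any Normal type
 * collapses to 0x44 instead.
 */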
2073 | /* | |
2074 | * Combine either inner or outer cacheability attributes for normal | |
2075 | * memory, according to table D4-42 and pseudocode procedure | |
2076 | * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). | |
2077 | * | |
2078 | * NB: only stage 1 includes allocation hints (RW bits), leading to | |
2079 | * some asymmetry. | |
2080 | */ | |
2081 | static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) | |
2082 | { | |
2083 | if (s1 == 4 || s2 == 4) { | |
2084 | /* non-cacheable has precedence */ | |
2085 | return 4; | |
2086 | } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { | |
2087 | /* stage 1 write-through takes precedence */ | |
2088 | return s1; | |
2089 | } else if (extract32(s2, 2, 2) == 2) { | |
2090 | /* stage 2 write-through takes precedence, but the allocation hint | |
2091 | * is still taken from stage 1 | |
2092 | */ | |
2093 | return (2 << 2) | extract32(s1, 0, 2); | |
2094 | } else { /* write-back */ | |
2095 | return s1; | |
2096 | } | |
2097 | } | |
2098 | ||
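/*
 * For example, combining a stage 1 nibble of 0xf (Write-Back,
 * RW-allocate) with a stage 2 nibble of 0xa (Write-Through) yields 0xb:
 * write-through wins, but the RW-allocate hint is kept from stage 1.
 * If either input were 0x4 (Non-cacheable) the result would be 0x4.
 */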
2099 | /* | |
2100 | * Combine the memory type and cacheability attributes of | |
2101 | * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the | |
2102 | * combined attributes in MAIR_EL1 format. | |
2103 | */ | |
2104 | static uint8_t combined_attrs_nofwb(CPUARMState *env, | |
2105 | ARMCacheAttrs s1, ARMCacheAttrs s2) | |
2106 | { | |
2107 | uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs; | |
2108 | ||
2109 | s2_mair_attrs = convert_stage2_attrs(env, s2.attrs); | |
2110 | ||
2111 | s1lo = extract32(s1.attrs, 0, 4); | |
2112 | s2lo = extract32(s2_mair_attrs, 0, 4); | |
2113 | s1hi = extract32(s1.attrs, 4, 4); | |
2114 | s2hi = extract32(s2_mair_attrs, 4, 4); | |
2115 | ||
2116 | /* Combine memory type and cacheability attributes */ | |
2117 | if (s1hi == 0 || s2hi == 0) { | |
2118 | /* Device has precedence over normal */ | |
2119 | if (s1lo == 0 || s2lo == 0) { | |
2120 | /* nGnRnE has precedence over anything */ | |
2121 | ret_attrs = 0; | |
2122 | } else if (s1lo == 4 || s2lo == 4) { | |
2123 | /* non-Reordering has precedence over Reordering */ | |
2124 | ret_attrs = 4; /* nGnRE */ | |
2125 | } else if (s1lo == 8 || s2lo == 8) { | |
2126 | /* non-Gathering has precedence over Gathering */ | |
2127 | ret_attrs = 8; /* nGRE */ | |
2128 | } else { | |
2129 | ret_attrs = 0xc; /* GRE */ | |
2130 | } | |
2131 | } else { /* Normal memory */ | |
2132 | /* Outer/inner cacheability combine independently */ | |
2133 | ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 | |
2134 | | combine_cacheattr_nibble(s1lo, s2lo); | |
2135 | } | |
2136 | return ret_attrs; | |
2137 | } | |
2138 | ||
2139 | static uint8_t force_cacheattr_nibble_wb(uint8_t attr) | |
2140 | { | |
2141 | /* | |
2142 | * Given the 4 bits specifying the outer or inner cacheability | |
2143 | * in MAIR format, return a value specifying Normal Write-Back, | |
2144 | * with the allocation and transient hints taken from the input | |
2145 | * if the input specified some kind of cacheable attribute. | |
2146 | */ | |
2147 | if (attr == 0 || attr == 4) { | |
2148 | /* | |
2149 | * 0 == an UNPREDICTABLE encoding | |
2150 | * 4 == Non-cacheable | |
2151 | * Either way, force Write-Back RW allocate non-transient | |
2152 | */ | |
2153 | return 0xf; | |
2154 | } | |
2155 | /* Change WriteThrough to WriteBack, keep allocation and transient hints */ | |
2156 | return attr | 4; | |
2157 | } | |
2158 | ||
2159 | /* | |
2160 | * Combine the memory type and cacheability attributes of | |
2161 | * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the | |
2162 | * combined attributes in MAIR_EL1 format. | |
2163 | */ | |
2164 | static uint8_t combined_attrs_fwb(CPUARMState *env, | |
2165 | ARMCacheAttrs s1, ARMCacheAttrs s2) | |
2166 | { | |
2167 | switch (s2.attrs) { | |
2168 | case 7: | |
2169 | /* Use stage 1 attributes */ | |
2170 | return s1.attrs; | |
2171 | case 6: | |
2172 | /* | |
2173 | * Force Normal Write-Back. Note that if S1 is Normal cacheable | |
2174 | * then we take the allocation hints from it; otherwise it is | |
2175 | * RW allocate, non-transient. | |
2176 | */ | |
2177 | if ((s1.attrs & 0xf0) == 0) { | |
2178 | /* S1 is Device */ | |
2179 | return 0xff; | |
2180 | } | |
2181 | /* Need to check the Inner and Outer nibbles separately */ | |
2182 | return force_cacheattr_nibble_wb(s1.attrs & 0xf) | | |
2183 | force_cacheattr_nibble_wb(s1.attrs >> 4) << 4; | |
2184 | case 5: | |
2185 | /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */ | |
2186 | if ((s1.attrs & 0xf0) == 0) { | |
2187 | return s1.attrs; | |
2188 | } | |
2189 | return 0x44; | |
2190 | case 0 ... 3: | |
2191 | /* Force Device, of subtype specified by S2 */ | |
2192 | return s2.attrs << 2; | |
2193 | default: | |
2194 | /* | |
2195 | * RESERVED values (including RES0 descriptor bit [5] being nonzero); | |
2196 | * arbitrarily force Device. | |
2197 | */ | |
2198 | return 0; | |
2199 | } | |
2200 | } | |
2201 | ||
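/*
 * With FWB the stage 2 descriptor encodes the combining rule directly:
 * s2.attrs == 6 with a stage 1 attribute of 0x44 (Normal Non-cacheable)
 * produces 0xff (Write-Back, RW-allocate in both nibbles), s2.attrs == 5
 * leaves a stage 1 Device attribute such as 0x04 untouched but downgrades
 * any Normal attribute to 0x44, and s2.attrs == 1 forces Device-nGnRE
 * (0x04) regardless of stage 1.
 */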
2202 | /* | |
2203 | * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 | |
2204 | * and CombineS1S2Desc() | |
2205 | * | |
2206 | * @env: CPUARMState | |
2207 | * @s1: Attributes from stage 1 walk | |
2208 | * @s2: Attributes from stage 2 walk | |
2209 | */ | |
2210 | static ARMCacheAttrs combine_cacheattrs(CPUARMState *env, | |
2211 | ARMCacheAttrs s1, ARMCacheAttrs s2) | |
2212 | { | |
2213 | ARMCacheAttrs ret; | |
2214 | bool tagged = false; | |
2215 | ||
2216 | assert(s2.is_s2_format && !s1.is_s2_format); | |
2217 | ret.is_s2_format = false; | |
2218 | ||
2219 | if (s1.attrs == 0xf0) { | |
2220 | tagged = true; | |
2221 | s1.attrs = 0xff; | |
2222 | } | |
2223 | ||
2224 | /* Combine shareability attributes (table D4-43) */ | |
2225 | if (s1.shareability == 2 || s2.shareability == 2) { | |
2226 | /* if either are outer-shareable, the result is outer-shareable */ | |
2227 | ret.shareability = 2; | |
2228 | } else if (s1.shareability == 3 || s2.shareability == 3) { | |
2229 | /* if either are inner-shareable, the result is inner-shareable */ | |
2230 | ret.shareability = 3; | |
2231 | } else { | |
2232 | /* both non-shareable */ | |
2233 | ret.shareability = 0; | |
2234 | } | |
2235 | ||
2236 | /* Combine memory type and cacheability attributes */ | |
2237 | if (arm_hcr_el2_eff(env) & HCR_FWB) { | |
2238 | ret.attrs = combined_attrs_fwb(env, s1, s2); | |
2239 | } else { | |
2240 | ret.attrs = combined_attrs_nofwb(env, s1, s2); | |
2241 | } | |
2242 | ||
2243 | /* | |
2244 | * Any location for which the resultant memory type is any | |
2245 | * type of Device memory is always treated as Outer Shareable. | |
2246 | * Any location for which the resultant memory type is Normal | |
2247 | * Inner Non-cacheable, Outer Non-cacheable is always treated | |
2248 | * as Outer Shareable. | |
2249 | * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC | |
2250 | */ | |
2251 | if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) { | |
2252 | ret.shareability = 2; | |
2253 | } | |
2254 | ||
2255 | /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */ | |
2256 | if (tagged && ret.attrs == 0xff) { | |
2257 | ret.attrs = 0xf0; | |
2258 | } | |
2259 | ||
2260 | return ret; | |
2261 | } | |
2262 | ||
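/*
 * Shareability resolves to the more shareable of the two inputs: for
 * example a stage 1 result of Inner Shareable (3) combined with a
 * Non-shareable (0) stage 2 result stays Inner Shareable. Separately, if
 * the combined memory type ends up Device or Normal Inner/Outer
 * Non-cacheable (0x44), the final shareability is forced to Outer
 * Shareable whatever either stage reported.
 */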
def8aa5b RH |
2263 | bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address, |
2264 | MMUAccessType access_type, ARMMMUIdx mmu_idx, | |
2265 | bool is_secure, GetPhysAddrResult *result, | |
2266 | ARMMMUFaultInfo *fi) | |
8ae08860 RH |
2267 | { |
2268 | ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx); | |
2269 | ||
2270 | if (mmu_idx != s1_mmu_idx) { | |
2271 | /* | |
2272 | * Call ourselves recursively to do the stage 1 and then stage 2 | |
2273 | * translations if mmu_idx is a two-stage regime. | |
2274 | */ | |
2275 | if (arm_feature(env, ARM_FEATURE_EL2)) { | |
2276 | hwaddr ipa; | |
de05a709 | 2277 | int s1_prot; |
8ae08860 | 2278 | int ret; |
c7637be3 | 2279 | bool ipa_secure, s2walk_secure; |
de05a709 | 2280 | ARMCacheAttrs cacheattrs1; |
8ae08860 RH |
2281 | ARMMMUIdx s2_mmu_idx; |
2282 | bool is_el0; | |
2283 | ||
def8aa5b RH |
2284 | ret = get_phys_addr_with_secure(env, address, access_type, |
2285 | s1_mmu_idx, is_secure, result, fi); | |
8ae08860 RH |
2286 | |
2287 | /* If S1 fails or S2 is disabled, return early. */ | |
7e80c0a4 RH |
2288 | if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2, |
2289 | is_secure)) { | |
8ae08860 RH |
2290 | return ret; |
2291 | } | |
2292 | ||
de05a709 RH |
2293 | ipa = result->phys; |
2294 | ipa_secure = result->attrs.secure; | |
c7637be3 RH |
2295 | if (is_secure) { |
2296 | /* Select TCR based on the NS bit from the S1 walk. */ | |
2297 | s2walk_secure = !(ipa_secure | |
2298 | ? env->cp15.vstcr_el2 & VSTCR_SW | |
2299 | : env->cp15.vtcr_el2 & VTCR_NSW); | |
8ae08860 RH |
2300 | } else { |
2301 | assert(!ipa_secure); | |
c7637be3 | 2302 | s2walk_secure = false; |
8ae08860 RH |
2303 | } |
2304 | ||
c7637be3 | 2305 | s2_mmu_idx = (s2walk_secure |
de05a709 | 2306 | ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2); |
8ae08860 RH |
2307 | is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0; |
2308 | ||
de05a709 RH |
2309 | /* |
2310 | * S1 is done, now do S2 translation. | |
2311 | * Save the stage1 results so that we may merge | |
2312 | * prot and cacheattrs later. | |
2313 | */ | |
2314 | s1_prot = result->prot; | |
2315 | cacheattrs1 = result->cacheattrs; | |
2316 | memset(result, 0, sizeof(*result)); | |
2317 | ||
03ee9bbe | 2318 | ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, |
c23f08a5 | 2319 | s2walk_secure, is_el0, result, fi); |
8ae08860 | 2320 | fi->s2addr = ipa; |
de05a709 | 2321 | |
8ae08860 | 2322 | /* Combine the S1 and S2 perms. */ |
de05a709 | 2323 | result->prot &= s1_prot; |
8ae08860 RH |
2324 | |
2325 | /* If S2 fails, return early. */ | |
2326 | if (ret) { | |
2327 | return ret; | |
2328 | } | |
2329 | ||
2330 | /* Combine the S1 and S2 cache attributes. */ | |
2331 | if (arm_hcr_el2_eff(env) & HCR_DC) { | |
2332 | /* | |
2333 | * HCR.DC forces the first stage attributes to | |
2334 | * Normal Non-Shareable, | |
2335 | * Inner Write-Back Read-Allocate Write-Allocate, | |
2336 | * Outer Write-Back Read-Allocate Write-Allocate. | |
2337 | * Do not overwrite Tagged within attrs. | |
2338 | */ | |
de05a709 RH |
2339 | if (cacheattrs1.attrs != 0xf0) { |
2340 | cacheattrs1.attrs = 0xff; | |
8ae08860 | 2341 | } |
de05a709 | 2342 | cacheattrs1.shareability = 0; |
8ae08860 | 2343 | } |
de05a709 RH |
2344 | result->cacheattrs = combine_cacheattrs(env, cacheattrs1, |
2345 | result->cacheattrs); | |
8ae08860 | 2346 | |
9b5ba97a RH |
2347 | /* |
2348 | * Check if IPA translates to secure or non-secure PA space. | |
2349 | * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA. | |
2350 | */ | |
2351 | result->attrs.secure = | |
2352 | (is_secure | |
2353 | && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)) | |
2354 | && (ipa_secure | |
2355 | || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)))); | |
2356 | ||
8ae08860 RH |
2357 | return 0; |
2358 | } else { | |
2359 | /* | |
2360 | * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. | |
2361 | */ | |
2362 | mmu_idx = stage_1_mmu_idx(mmu_idx); | |
2363 | } | |
2364 | } | |
2365 | ||
2366 | /* | |
2367 | * The page table entries may downgrade secure to non-secure, but | |
2368 | * cannot upgrade a non-secure translation regime's attributes | |
2369 | * to secure. | |
2370 | */ | |
b29c85d5 | 2371 | result->attrs.secure = is_secure; |
de05a709 | 2372 | result->attrs.user = regime_is_user(env, mmu_idx); |
8ae08860 RH |
2373 | |
2374 | /* | |
2375 | * Fast Context Switch Extension. This doesn't exist at all in v8. | |
2376 | * In v7 and earlier it affects all stage 1 translations. | |
2377 | */ | |
2378 | if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 | |
2379 | && !arm_feature(env, ARM_FEATURE_V8)) { | |
2380 | if (regime_el(env, mmu_idx) == 3) { | |
2381 | address += env->cp15.fcseidr_s; | |
2382 | } else { | |
2383 | address += env->cp15.fcseidr_ns; | |
2384 | } | |
2385 | } | |
2386 | ||
2387 | if (arm_feature(env, ARM_FEATURE_PMSA)) { | |
2388 | bool ret; | |
de05a709 | 2389 | result->page_size = TARGET_PAGE_SIZE; |
8ae08860 RH |
2390 | |
2391 | if (arm_feature(env, ARM_FEATURE_V8)) { | |
2392 | /* PMSAv8 */ | |
2393 | ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, | |
be0ca948 | 2394 | is_secure, result, fi); |
8ae08860 RH |
2395 | } else if (arm_feature(env, ARM_FEATURE_V7)) { |
2396 | /* PMSAv7 */ | |
2397 | ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, | |
957a0bb7 | 2398 | is_secure, result, fi); |
8ae08860 RH |
2399 | } else { |
2400 | /* Pre-v7 MPU */ | |
2401 | ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, | |
a5b5092f | 2402 | is_secure, result, fi); |
8ae08860 RH |
2403 | } |
2404 | qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 | |
2405 | " mmu_idx %u -> %s (prot %c%c%c)\n", | |
2406 | access_type == MMU_DATA_LOAD ? "reading" : | |
2407 | (access_type == MMU_DATA_STORE ? "writing" : "execute"), | |
2408 | (uint32_t)address, mmu_idx, | |
2409 | ret ? "Miss" : "Hit", | |
de05a709 RH |
2410 | result->prot & PAGE_READ ? 'r' : '-', |
2411 | result->prot & PAGE_WRITE ? 'w' : '-', | |
2412 | result->prot & PAGE_EXEC ? 'x' : '-'); | |
8ae08860 RH |
2413 | |
2414 | return ret; | |
2415 | } | |
2416 | ||
2417 | /* Definitely a real MMU, not an MPU */ | |
2418 | ||
7e80c0a4 | 2419 | if (regime_translation_disabled(env, mmu_idx, is_secure)) { |
8ae08860 RH |
2420 | uint64_t hcr; |
2421 | uint8_t memattr; | |
2422 | ||
2423 | /* | |
2424 | * MMU disabled. S1 addresses within aa64 translation regimes are | |
2425 | * still checked for bounds -- see AArch64.TranslateAddressS1Off. | |
2426 | */ | |
2427 | if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) { | |
2428 | int r_el = regime_el(env, mmu_idx); | |
2429 | if (arm_el_is_aa64(env, r_el)) { | |
2430 | int pamax = arm_pamax(env_archcpu(env)); | |
cb4a0a34 | 2431 | uint64_t tcr = env->cp15.tcr_el[r_el]; |
8ae08860 RH |
2432 | int addrtop, tbi; |
2433 | ||
2434 | tbi = aa64_va_parameter_tbi(tcr, mmu_idx); | |
2435 | if (access_type == MMU_INST_FETCH) { | |
2436 | tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); | |
2437 | } | |
2438 | tbi = (tbi >> extract64(address, 55, 1)) & 1; | |
2439 | addrtop = (tbi ? 55 : 63); | |
2440 | ||
2441 | if (extract64(address, pamax, addrtop - pamax + 1) != 0) { | |
2442 | fi->type = ARMFault_AddressSize; | |
2443 | fi->level = 0; | |
2444 | fi->stage2 = false; | |
2445 | return 1; | |
2446 | } | |
2447 | ||
2448 | /* | |
2449 | * When TBI is disabled, we've just validated that all of the | |
2450 | * bits above PAMax are zero, so logically we only need to | |
2451 | * clear the top byte for TBI. But it's clearer to follow | |
2452 | * the pseudocode set of addrdesc.paddress. | |
2453 | */ | |
2454 | address = extract64(address, 0, 52); | |
2455 | } | |
2456 | } | |
de05a709 RH |
2457 | result->phys = address; |
2458 | result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; | |
2459 | result->page_size = TARGET_PAGE_SIZE; | |
8ae08860 RH |
2460 | |
2461 | /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */ | |
2462 | hcr = arm_hcr_el2_eff(env); | |
de05a709 RH |
2463 | result->cacheattrs.shareability = 0; |
2464 | result->cacheattrs.is_s2_format = false; | |
8ae08860 RH |
2465 | if (hcr & HCR_DC) { |
2466 | if (hcr & HCR_DCT) { | |
2467 | memattr = 0xf0; /* Tagged, Normal, WB, RWA */ | |
2468 | } else { | |
2469 | memattr = 0xff; /* Normal, WB, RWA */ | |
2470 | } | |
2471 | } else if (access_type == MMU_INST_FETCH) { | |
2472 | if (regime_sctlr(env, mmu_idx) & SCTLR_I) { | |
2473 | memattr = 0xee; /* Normal, WT, RA, NT */ | |
2474 | } else { | |
2475 | memattr = 0x44; /* Normal, NC, No */ | |
2476 | } | |
de05a709 | 2477 | result->cacheattrs.shareability = 2; /* outer shareable */ | |
8ae08860 RH |
2478 | } else { |
2479 | memattr = 0x00; /* Device, nGnRnE */ | |
2480 | } | |
de05a709 | 2481 | result->cacheattrs.attrs = memattr; |
8ae08860 RH |
2482 | return 0; |
2483 | } | |
2484 | ||
2485 | if (regime_using_lpae_format(env, mmu_idx)) { | |
c23f08a5 RH |
2486 | return get_phys_addr_lpae(env, address, access_type, mmu_idx, |
2487 | is_secure, false, result, fi); | |
8ae08860 RH |
2488 | } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { |
2489 | return get_phys_addr_v6(env, address, access_type, mmu_idx, | |
71e73beb | 2490 | is_secure, result, fi); |
8ae08860 RH |
2491 | } else { |
2492 | return get_phys_addr_v5(env, address, access_type, mmu_idx, | |
b29c85d5 | 2493 | is_secure, result, fi); |
8ae08860 RH |
2494 | } |
2495 | } | |
23971205 | 2496 | |
def8aa5b RH |
2497 | bool get_phys_addr(CPUARMState *env, target_ulong address, |
2498 | MMUAccessType access_type, ARMMMUIdx mmu_idx, | |
2499 | GetPhysAddrResult *result, ARMMMUFaultInfo *fi) | |
2500 | { | |
2501 | return get_phys_addr_with_secure(env, address, access_type, mmu_idx, | |
2502 | regime_is_secure(env, mmu_idx), | |
2503 | result, fi); | |
2504 | } | |
2505 | ||
23971205 RH |
2506 | hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, |
2507 | MemTxAttrs *attrs) | |
2508 | { | |
2509 | ARMCPU *cpu = ARM_CPU(cs); | |
2510 | CPUARMState *env = &cpu->env; | |
de05a709 | 2511 | GetPhysAddrResult res = {}; |
23971205 RH |
2512 | ARMMMUFaultInfo fi = {}; |
2513 | ARMMMUIdx mmu_idx = arm_mmu_idx(env); | |
de05a709 | 2514 | bool ret; |
23971205 | 2515 | |
de05a709 RH |
2516 | ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi); |
2517 | *attrs = res.attrs; | |
23971205 RH |
2518 | |
2519 | if (ret) { | |
2520 | return -1; | |
2521 | } | |
de05a709 | 2522 | return res.phys; |
23971205 | 2523 | } |