/*
 * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Attempt the memory load; we don't care about the result but
     * only the side-effects (i.e. any MMU or other exception)
     */
    cpu_ldub_code_ra(env, vaddr, GETPC());
}

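/*
 * RASID holds one 8-bit ASID per privilege ring: ring 0 in bits 7:0
 * up to ring 3 in bits 31:24. The ring 0 ASID is architecturally fixed
 * to 1, which the masking below enforces. Changing an ASID can remap
 * live translations, hence the full QEMU TLB flush.
 */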
void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(CPU(cpu));
    }
}

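/*
 * Page size selector for TLB ways with configurable page size, taken
 * from the per-way fields of DTLBCFG/ITLBCFG: bits 17:16 for way 4,
 * bit 20 for way 5 and bit 24 for way 6. Together with the masks in
 * xtensa_tlb_get_addr_mask() below this yields 1/4/16/64 MB pages for
 * way 4 and 128-512 MB pages for ways 5 and 6.
 */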
static uint32_t get_page_size(const CPUXtensaState *env,
                              bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
                                  bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
                              uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        /*
         * Region protection: eight fixed 512 MB regions in a single
         * way, selected by the top three address bits.
         */
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
                                       uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

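/*
 * rtlb0/rtlb1 implement the RxTLB0/RxTLB1 instruction pairs: rtlb0
 * returns the 'address' half of a TLB entry (VPN and ASID, see ISA,
 * 4.6.5.6), rtlb1 the 'translation' half (PPN and attributes).
 */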
uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
            entry->asid = 0;
        }
    }
}

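/*
 * TLB probe. On a unique hit the result carries the matching VPN and
 * way index plus a 'hit' flag: bit 4 for the DTLB, bit 3 for the ITLB.
 * A multi-hit raises an exception; a miss or a hit in an inaccessible
 * ring yields 0.
 */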
uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

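/*
 * Fill a TLB entry from a PTE. As used here the PTE carries the PPN in
 * its high bits, the ring in bits 5:4 and the attributes in bits 3:0;
 * the ring selects which byte of RASID supplies the ASID, which is what
 * (pte >> 1) & 0x18 computes (ring * 8).
 */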
void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                              xtensa_tlb_entry *entry, bool dtlb,
                              unsigned wi, unsigned ei, uint32_t vpn,
                              uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                          unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}

hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
                                   const xtensa_tlb *tlb,
                                   xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

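/*
 * Ways 5 and 6 with fixed page size have fixed, immutable translations:
 * way 5 maps 0xd0000000 to physical 0 with cached and bypass attributes,
 * way 6 maps 0xe0000000 and 0xf0000000 to physical 0xf0000000. With
 * variable ways 5/6 enabled, way 6 instead resets to an identity map of
 * the whole 4 GB address space in 512 MB pages.
 */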
static void reset_tlb_mmu_ways56(CPUXtensaState *env,
                                 const xtensa_tlb *tlb,
                                 xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;

        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUXtensaState *env,
                                  xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

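/*
 * MMU reset state: RASID = 0x04030201 assigns ASIDs 1 through 4 to
 * rings 0 through 3; clearing ITLBCFG/DTLBCFG selects the smallest
 * page size for every variable-page-size way.
 */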
void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

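/*
 * Map an ASID back to its ring by scanning the four RASID bytes;
 * returns 0xff if the ASID is not assigned to any ring.
 */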
static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;

    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*!
 * Look up the xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
                      uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;

        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

552 | ||
553 | /*! | |
554 | * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask. | |
555 | * See ISA, A.2.14 The Cache Attribute Register | |
556 | */ | |
557 | static unsigned cacheattr_attr_to_access(uint32_t attr) | |
558 | { | |
559 | static const unsigned access[16] = { | |
560 | [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT, | |
561 | [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT, | |
562 | [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS, | |
563 | [3] = PAGE_EXEC | PAGE_CACHE_WB, | |
564 | [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB, | |
565 | [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE, | |
566 | }; | |
567 | ||
568 | return access[attr & 0xf]; | |
569 | } | |
570 | ||
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);

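/*
 * Throughout the translation code is_write encodes the access type:
 * 0 = load, 1 = store, 2 = instruction fetch. Fetches are translated
 * through the ITLB, loads and stores through the DTLB.
 */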
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access, bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
        may_lookup_pt && get_pte(env, vaddr, &pte)) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
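            /* Replace autorefill ways 0-3 in round-robin order */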
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

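/*
 * Look up the PTE for an autorefill miss. The page table is a 4 MB
 * array of 4-byte PTEs in virtual memory at PTEVADDR, one per 4 KB
 * page, so the PTE address is PTEVADDR | (vaddr >> 10) with the low
 * two bits cleared. The PTE address is itself translated through the
 * TLB, with may_lookup_pt = false to avoid a nested walk.
 */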
static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
                                    &paddr, &page_size, &access, false);

    if (ret == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
                      __func__, vaddr, pt_vaddr, paddr);
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
                      __func__, vaddr, pt_vaddr, ret);
    }

    if (ret == 0) {
        MemTxResult result;

        *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(CPU_LOG_MMU,
                          "%s: couldn't load PTE: transaction failed (%u)\n",
                          __func__, (unsigned)result);
            ret = 1;
        }
    }
    return ret == 0;
}

static int get_physical_addr_region(CPUXtensaState *env,
                                    uint32_t vaddr, int is_write, int mmu_idx,
                                    uint32_t *paddr, uint32_t *page_size,
                                    unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*!
 * Convert a virtual address to a physical address.
 * The MMU may walk the page table and replace an entry in the xtensa
 * autorefill TLB way.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
                             uint32_t vaddr, int is_write, int mmu_idx,
                             uint32_t *paddr, uint32_t *page_size,
                             unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                                     vaddr, is_write, mmu_idx, paddr,
                                     page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                                        paddr, page_size, access);
    } else {
        /*
         * No protection: CACHEATTR holds one attribute nibble per
         * 512 MB region, selected by the top three address bits.
         */
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
                                           ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}

static void dump_tlb(CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz /= MiB;
            sz_text = "MB";
        } else {
            sz /= KiB;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    qemu_printf("Way %u (%d %s)\n", wi, sz, sz_text);
                    qemu_printf("\tVaddr      Paddr      ASID Attr RWX Cache\n"
                                "\t---------- ---------- ---- ---- --- -------\n");
                }
                qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
                            entry->vaddr,
                            entry->paddr,
                            entry->asid,
                            entry->attr,
                            (access & PAGE_READ) ? 'R' : '-',
                            (access & PAGE_WRITE) ? 'W' : '-',
                            (access & PAGE_EXEC) ? 'X' : '-',
                            cache_text[cache_idx] ?
                            cache_text[cache_idx] : "Invalid");
            }
        }
    }
}

void dump_mmu(CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        qemu_printf("ITLB:\n");
        dump_tlb(env, false);
        qemu_printf("\nDTLB:\n");
        dump_tlb(env, true);
    } else {
        qemu_printf("No TLB for this CPU core\n");
    }
}