/*
 * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Attempt the memory load; we don't care about the result, only
     * about the side effects (i.e. any MMU or other exception it may
     * raise).
     */
    cpu_ldub_code_ra(env, vaddr, GETPC());
}

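/*
 * RASID packs four 8-bit ASIDs, one per protection ring (ring 0 in
 * bits 7:0, ring 3 in bits 31:24).  The low byte is forced to 1 here,
 * matching the ring-0 ASID value the ISA fixes; changing any ASID can
 * remap live translations, hence the full QEMU TLB flush.
 */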
void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(CPU(cpu));
    }
}

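/*
 * Page-size fields of {I,D}TLBCFG: bits 17:16 select the page size for
 * way 4, bit 20 for way 5 and bit 24 for way 6.  Ways 0-3 (and any
 * other way) always use the smallest, 4 KB pages.
 */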
static uint32_t get_page_size(const CPUXtensaState *env,
                              bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

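/*
 * Worked example for the mask arithmetic below: with a way-4 page size
 * of 2, the way-4 mask is 0xfff00000 << 4 == 0xff000000, i.e. 16 MB
 * pages; with page size 0 it stays 0xfff00000, i.e. 1 MB pages.
 */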
/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
                                  bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

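/*
 * Example: for a refill way (wi < 4) of a 32-entry TLB, the entry
 * index below is VA bits 14:12 and the VPN mask is 0xfffff000, so
 * VA 0x1234abcd selects entry 2 with VPN 0x1234a000.
 */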
/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
                              uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
                                       uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

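/*
 * RxTLB0 reads back an entry's VPN combined with its ASID (see ISA,
 * 4.6.5.6 for the data format), RxTLB1 its PPN combined with the
 * attributes.
 */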
uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
            entry->asid = 0;
        }
    }
}

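/*
 * PxTLB probe result: on a hit, bit 4 (dtlb) or bit 3 (itlb) is set,
 * the low bits hold the way index and bits 31:12 echo the VPN; a miss
 * returns 0 and a multi-hit raises the corresponding exception.
 */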
uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

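/*
 * A PTE carries the attributes in bits 3:0 and the ring in bits 5:4;
 * (pte >> 1) & 0x18 turns that ring into a byte offset into RASID, so
 * the entry gets the ASID currently assigned to the PTE's ring.
 */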
void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                              xtensa_tlb_entry *entry, bool dtlb,
                              unsigned wi, unsigned ei, uint32_t vpn,
                              uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                          unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}

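/*
 * Debug translation: try a ring-0 load lookup (is_write == 0) first,
 * then an instruction fetch (is_write == 2), and report an
 * untranslatable page as ~0.
 */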
hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
                                   const xtensa_tlb *tlb,
                                   xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

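/*
 * With fixed-size ways 5 and 6 the reset state implements the static
 * vaddr layout: 0xd0000000/0xd8000000 map to physical 0 (writeback and
 * bypass variants) and 0xe0000000/0xf0000000 map to 0xf0000000.  With
 * variable ways, way 6 instead identity-maps the whole 4 GB space in
 * eight 512 MB pages.
 */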
static void reset_tlb_mmu_ways56(CPUXtensaState *env,
                                 const xtensa_tlb *tlb,
                                 xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUXtensaState *env,
                                  xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

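/*
 * Map an ASID back to the ring it is currently assigned to via RASID,
 * or 0xff if it is assigned to none.
 */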
static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*!
 * Lookup xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
                      uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

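/*
 * MMU attribute encoding handled below: attrs 0..11 are valid pages
 * where bit 0 grants execute, bit 1 grants write and bits 3:2 pick the
 * cache mode (0 bypass, 4 writeback, 8 writethrough); attr 13 is the
 * isolate mode; anything else yields no access at all.
 */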
/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] = PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
         [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] = PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);

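/*
 * TLB autorefill emulation: on a TLB miss the PTE is fetched from the
 * page table and, when the TLB is being updated, spilled into one of
 * the four refill ways in round-robin order (autorefill_idx).
 */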
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access, bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
        may_lookup_pt && get_pte(env, vaddr, &pte)) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

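/*
 * The page table is a 4 MB array of 4-byte PTEs based at PTEVADDR:
 * the PTE for vaddr lives at PTEVADDR + (vaddr >> 12) * 4, which is
 * exactly the (vaddr >> 10) offset (rounded down to 4 bytes) computed
 * below.  The PTE address itself is translated through the MMU, but
 * with page table lookup disabled to avoid recursion.
 */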
static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
                                    &paddr, &page_size, &access, false);

    if (ret == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
                      __func__, vaddr, pt_vaddr, paddr);
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
                      __func__, vaddr, pt_vaddr, ret);
    }

    if (ret == 0) {
        MemTxResult result;

        *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(CPU_LOG_MMU,
                          "%s: couldn't load PTE: transaction failed (%u)\n",
                          __func__, (unsigned)result);
            ret = 1;
        }
    }
    return ret == 0;
}

static int get_physical_addr_region(CPUXtensaState *env,
                                    uint32_t vaddr, int is_write, int mmu_idx,
                                    uint32_t *paddr, uint32_t *page_size,
                                    unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*!
 * Convert a virtual address to a physical address.
 * The MMU may walk the page table and update the autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
                             uint32_t vaddr, int is_write, int mmu_idx,
                             uint32_t *paddr, uint32_t *page_size,
                             unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                                     vaddr, is_write, mmu_idx, paddr,
                                     page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                                        paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
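        /*
         * No TLB at all: CACHEATTR holds one 4-bit attribute per
         * 512 MB region, so the top three vaddr bits become a nibble
         * index here.
         */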
        *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
                                           ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}

static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
                     CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz /= MiB;
            sz_text = "MB";
        } else {
            sz /= KiB;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                                "\tVaddr       Paddr       ASID  Attr RWX Cache\n"
                                "\t----------  ----------  ----  ---- --- -------\n");
                }
                cpu_fprintf(f,
                            "\t0x%08x  0x%08x  0x%02x  0x%02x %c%c%c %-7s\n",
                            entry->vaddr,
                            entry->paddr,
                            entry->asid,
                            entry->attr,
                            (access & PAGE_READ) ? 'R' : '-',
                            (access & PAGE_WRITE) ? 'W' : '-',
                            (access & PAGE_EXEC) ? 'X' : '-',
                            cache_text[cache_idx] ?
                            cache_text[cache_idx] : "Invalid");
            }
        }
    }
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        cpu_fprintf(f, "ITLB:\n");
        dump_tlb(f, cpu_fprintf, env, false);
        cpu_fprintf(f, "\nDTLB:\n");
        dump_tlb(f, cpu_fprintf, env, true);
    } else {
        cpu_fprintf(f, "No TLB for this CPU core\n");
    }
}