/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif

static struct XtensaConfigList *xtensa_cores;

static void xtensa_core_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);
    XtensaCPUClass *xcc = XTENSA_CPU_CLASS(oc);
    const XtensaConfig *config = data;

    xcc->config = config;

    /* Use num_core_regs to see only non-privileged registers in an unmodified
     * gdb. Use num_regs to see all registers. A gdb modification is required
     * for that: reset bit 0 in the 'flags' field of the register definitions
     * in gdb/xtensa-config.c inside the gdb source tree or inside a gdb
     * overlay.
     */
    cc->gdb_num_core_regs = config->gdb_regmap.num_regs;
}

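/* Derive the number of GDB-visible registers from the core's register map
 * when the configuration does not specify it explicitly; entries of type 6
 * are not counted.
 */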
void xtensa_finalize_config(XtensaConfig *config)
{
    unsigned i, n = 0;

    if (config->gdb_regmap.num_regs) {
        return;
    }

    for (i = 0; config->gdb_regmap.reg[i].targno >= 0; ++i) {
        n += (config->gdb_regmap.reg[i].type != 6);
    }
    config->gdb_regmap.num_regs = n;
}

void xtensa_register_core(XtensaConfigList *node)
{
    TypeInfo type = {
        .parent = TYPE_XTENSA_CPU,
        .class_init = xtensa_core_class_init,
        .class_data = (void *)node->config,
    };

    node->next = xtensa_cores;
    xtensa_cores = node;
    type.name = g_strdup_printf("%s-" TYPE_XTENSA_CPU, node->config->name);
    type_register(&type);
    g_free((gpointer)type.name);
}

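/* Return the DEBUGCAUSE value (DB cause plus data breakpoint number) for the
 * first data breakpoint watchpoint that has been hit, or 0 if none triggered.
 */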
static uint32_t check_hw_breakpoints(CPUXtensaState *env)
{
    unsigned i;

    for (i = 0; i < env->config->ndbreak; ++i) {
        if (env->cpu_watchpoint[i] &&
                env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
            return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
        }
    }
    return 0;
}

void xtensa_breakpoint_handler(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            uint32_t cause;

            cs->watchpoint_hit = NULL;
            cause = check_hw_breakpoints(env);
            if (cause) {
                debug_exception_env(env, cause);
            }
            cpu_loop_exit_noexc(cs);
        }
    }
}

XtensaCPU *cpu_xtensa_init(const char *cpu_model)
{
    ObjectClass *oc;
    XtensaCPU *cpu;
    CPUXtensaState *env;

    oc = cpu_class_by_name(TYPE_XTENSA_CPU, cpu_model);
    if (oc == NULL) {
        return NULL;
    }

    cpu = XTENSA_CPU(object_new(object_class_get_name(oc)));
    env = &cpu->env;

    xtensa_irq_init(env);

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}


void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    XtensaConfigList *core = xtensa_cores;
    cpu_fprintf(f, "Available CPUs:\n");
    for (; core; core = core->next) {
        cpu_fprintf(f, "  %s\n", core->config->name);
    }
}

hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

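/* When the relocatable vector option is enabled, rebase the vector address
 * from the configured vecbase onto the current VECBASE special register;
 * otherwise return the vector unchanged.
 */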
static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}

/*!
 * Handle pending IRQ.
 * For a high-priority interrupt, jump to the corresponding interrupt vector.
 * For a level-1 interrupt, convert it to a user, kernel or double exception
 * with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUXtensaState *env)
{
    int level = env->pending_irq_level;

    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        CPUState *cs = CPU(xtensa_env_get_cpu(env));

        if (level > 1) {
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                cs->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                cs->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}

/* Called from cpu_handle_interrupt with BQL held */
void xtensa_cpu_do_interrupt(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (cs->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (cs->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, cs->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        if (env->config->exception_vector[cs->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[cs->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s(pc = %08x) bad exception_index: %d\n",
                          __func__, env->pc, cs->exception_index);
        }
        break;

    case EXC_IRQ:
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, cs->exception_index);
        break;
    }
    check_interrupts(env);
}

bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        cs->exception_index = EXC_IRQ;
        xtensa_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

static void reset_tlb_mmu_ways56(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUXtensaState *env,
        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

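/* Return the protection ring that the RASID special register assigns to the
 * given ASID, or 0xff if the ASID is not present in RASID.
 */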
static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*!
 * Look up the xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

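/* Check the requested access against a PAGE_{READ,WRITE,EXEC} mask;
 * is_write encodes the access type: 0 = load, 1 = store, 2 = instruction
 * fetch.
 */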
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);

static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access,
        bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            may_lookup_pt && get_pte(env, vaddr, &pte) == 0) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

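/* Look up the page table entry for vaddr: translate the PTEVADDR-based page
 * table address without allowing a nested page table walk, then load the PTE
 * from guest physical memory.
 */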
static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
            &paddr, &page_size, &access, false);

    qemu_log_mask(CPU_LOG_MMU, "%s: trying autorefill(%08x) -> %08x\n",
                  __func__, vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        *pte = ldl_phys(cs->as, paddr);
    }
    return ret;
}

static int get_physical_addr_region(CPUXtensaState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*!
 * Convert a virtual address to a physical address.
 * The MMU may issue a page table walk and update an entry in the xtensa
 * autorefill TLB way.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                vaddr, is_write, mmu_idx, paddr, page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(
                env->sregs[CACHEATTR] >> ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}

static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
        CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz >>= 20;
            sz_text = "MB";
        } else {
            sz >>= 10;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                            "\tVaddr      Paddr      ASID Attr RWX Cache\n"
                            "\t---------- ---------- ---- ---- --- -------\n");
                }
                cpu_fprintf(f,
                        "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
                        entry->vaddr,
                        entry->paddr,
                        entry->asid,
                        entry->attr,
                        (access & PAGE_READ) ? 'R' : '-',
                        (access & PAGE_WRITE) ? 'W' : '-',
                        (access & PAGE_EXEC) ? 'X' : '-',
                        cache_text[cache_idx] ? cache_text[cache_idx] :
                        "Invalid");
            }
        }
    }
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        cpu_fprintf(f, "ITLB:\n");
        dump_tlb(f, cpu_fprintf, env, false);
        cpu_fprintf(f, "\nDTLB:\n");
        dump_tlb(f, cpu_fprintf, env, true);
    } else {
        cpu_fprintf(f, "No TLB for this CPU core\n");
    }
}

void xtensa_runstall(CPUXtensaState *env, bool runstall)
{
    CPUState *cpu = CPU(xtensa_env_get_cpu(env));

    env->runstall = runstall;
    cpu->halted = runstall;
    if (runstall) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HALT);
    } else {
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
    }
}