/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif

static struct XtensaConfigList *xtensa_cores;

void xtensa_register_core(XtensaConfigList *node)
{
    node->next = xtensa_cores;
    xtensa_cores = node;
}

/* Return a DEBUGCAUSE value for the first data breakpoint that has fired,
 * or 0 if none did.
 */
static uint32_t check_hw_breakpoints(CPUXtensaState *env)
{
    unsigned i;

    for (i = 0; i < env->config->ndbreak; ++i) {
        if (env->cpu_watchpoint[i] &&
                env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
            return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
        }
    }
    return 0;
}

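/*
 * Worked example of the DEBUGCAUSE encoding above: if the data breakpoint
 * in slot 1 fired, the function returns DEBUGCAUSE_DB with 1 shifted into
 * the DBNUM field, which xtensa_breakpoint_handler() below then raises as
 * the debug exception cause.
 */
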
void xtensa_breakpoint_handler(CPUXtensaState *env)
{
    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            uint32_t cause;

            env->watchpoint_hit = NULL;
            cause = check_hw_breakpoints(env);
            if (cause) {
                debug_exception_env(env, cause);
            }
            cpu_resume_from_signal(env, NULL);
        }
    }
}

XtensaCPU *cpu_xtensa_init(const char *cpu_model)
{
    XtensaCPU *cpu;
    CPUXtensaState *env;
    const XtensaConfig *config = NULL;
    XtensaConfigList *core = xtensa_cores;

    for (; core; core = core->next)
        if (strcmp(core->config->name, cpu_model) == 0) {
            config = core->config;
            break;
        }

    if (config == NULL) {
        return NULL;
    }

    cpu = XTENSA_CPU(object_new(TYPE_XTENSA_CPU));
    env = &cpu->env;
    env->config = config;

    xtensa_irq_init(env);

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    XtensaConfigList *core = xtensa_cores;
    cpu_fprintf(f, "Available CPUs:\n");
    for (; core; core = core->next) {
        cpu_fprintf(f, "  %s\n", core->config->name);
    }
}

hwaddr cpu_get_phys_page_debug(CPUXtensaState *env, target_ulong addr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    /* Try the address first as a data access, then as an instruction
     * fetch (is_write == 2); return -1 if neither translation succeeds.
     */
    if (xtensa_get_physical_addr(env, false, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }

    if (xtensa_get_physical_addr(env, false, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }

    return ~0;
}

static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}

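/*
 * Worked example of the relocation arithmetic above (the addresses are
 * illustrative, not from any particular core): with a configured vecbase
 * of 0x40000000, a vector at 0x40000340 and VECBASE programmed to
 * 0x00002000, the result is 0x40000340 - 0x40000000 + 0x00002000 ==
 * 0x00002340, i.e. the vector keeps its offset within the relocated
 * vector region.
 */
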
/*!
 * Handle pending IRQ.
 * For the high priority interrupt jump to the corresponding interrupt vector.
 * For the level-1 interrupt convert it to either user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUXtensaState *env)
{
    int level = env->pending_irq_level;

    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        if (level > 1) {
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                env->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                env->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}

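/*
 * Illustrative example of the special register indexing above: a level-3
 * interrupt (on a core configured with high priority interrupt levels)
 * saves the return state into EPC1 + 3 - 1 == EPC3 and EPS2 + 3 - 2 ==
 * EPS3, raises PS.INTLEVEL to 3, and vectors through interrupt_vector[3];
 * a level-1 interrupt is instead delivered as a regular user/kernel
 * exception with LEVEL1_INTERRUPT_CAUSE.
 */
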
void xtensa_cpu_do_interrupt(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (env->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (env->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, env->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        if (env->config->exception_vector[env->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[env->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log("%s(pc = %08x) bad exception_index: %d\n",
                    __func__, env->pc, env->exception_index);
        }
        break;

    case EXC_IRQ:
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, env->exception_index);
        break;
    }
    check_interrupts(env);
}

static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

static void reset_tlb_mmu_ways56(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        /* Fixed ways 5 and 6: static kernel mappings in their cached
         * (attr 7, write-back) and bypass (attr 3) variants.
         */
        static const xtensa_tlb_entry way5[] = {
            { .vaddr = 0xd0000000, .paddr = 0, .asid = 1, .attr = 7 },
            { .vaddr = 0xd8000000, .paddr = 0, .asid = 1, .attr = 3 },
        };
        static const xtensa_tlb_entry way6[] = {
            { .vaddr = 0xe0000000, .paddr = 0xf0000000, .asid = 1, .attr = 7 },
            { .vaddr = 0xf0000000, .paddr = 0xf0000000, .asid = 1, .attr = 3 },
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUXtensaState *env,
        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }

    return 0xff;
}

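/*
 * Worked example of the RASID layout assumed above: after reset_mmu()
 * RASID is 0x04030201, i.e. byte 0 holds the ring-0 ASID (1), byte 1 the
 * ring-1 ASID (2), and so on, so get_ring(env, 3) returns ring 2, while
 * an ASID present in no byte yields the invalid ring 0xff.
 */
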
/*!
 * Lookup xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

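/*
 * Usage sketch for the lookup above (it mirrors what
 * get_physical_addr_mmu() does below; not additional code from this file):
 *
 *     uint32_t wi, ei;
 *     uint8_t ring;
 *     int ret = xtensa_tlb_lookup(env, vaddr, true, &wi, &ei, &ring);
 *
 *     if (ret == 0) {
 *         const xtensa_tlb_entry *e =
 *             xtensa_tlb_get_entry(env, true, wi, ei);
 *         // e->paddr and ring describe the matching DTLB entry
 *     }
 *
 * A miss returns LOAD_STORE_TLB_MISS_CAUSE (INST_TLB_MISS_CAUSE for
 * ifetch); more than one match returns the corresponding multi-hit cause.
 */
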
/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

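/*
 * Worked example of the decoding above: attr == 7 is below 12, so the page
 * is readable; bit 0 adds PAGE_EXEC, bit 1 adds PAGE_WRITE, and
 * (7 & 0xc) == 4 selects write-back caching, i.e. the RWX cached mapping
 * that reset_tlb_mmu_ways56() installs. attr == 13 is the special
 * read/write isolate encoding, and other values >= 12 grant no access.
 */
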
/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

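/*
 * Worked example for the two lookup tables above: attr == 2 decodes to an
 * RWX mapping with the cache bypassed, attr == 14 to a read/write isolate
 * mapping with no execute right, and any encoding without a table entry
 * (e.g. 7) decodes to 0, i.e. no access at all.
 */
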
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);

static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access,
        bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    /* On a TLB miss, walk the page table and autorefill the TLB. */
    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            may_lookup_pt && get_pte(env, vaddr, &pte) == 0) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log("%s: autorefill(%08x): %08x -> %08x\n",
                    __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

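/*
 * Illustrative example for the autorefill path above (the PTE value is
 * made up; the field extraction follows the code): a PTE of 0x01000013
 * yields ring (0x13 >> 4) & 0x3 == 1, and when update_tlb is set the new
 * entry goes into variable way ++autorefill_idx & 0x3, so refills rotate
 * round-robin through ways 0..3.
 */
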
static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
            &paddr, &page_size, &access, false);

    qemu_log("%s: trying autorefill(%08x) -> %08x\n", __func__,
            vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        *pte = ldl_phys(paddr);
    }
    return ret;
}

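/*
 * Worked example of the page table address computation above (the
 * PTEVADDR value is illustrative): with PTEVADDR == 0x80000000 and
 * vaddr == 0x12345678, vaddr >> 10 == 0x00048d15, so pt_vaddr ==
 * (0x80000000 | 0x00048d15) & 0xfffffffc == 0x80048d14: the virtual page
 * number selects one 32-bit PTE in the 4MB linear page table rooted at
 * PTEVADDR.
 */
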
static int get_physical_addr_region(CPUXtensaState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*!
 * Convert virtual address to physical address.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                vaddr, is_write, mmu_idx, paddr, page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(
                env->sregs[CACHEATTR] >> ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}

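/*
 * Worked example for the no-MMU CACHEATTR path above: for vaddr
 * 0x60001000 the region number is 0x60000000 >> 29 == 3, the shift is
 * (0x60000000 >> 27) == 12, and bits [15:12] of CACHEATTR supply the
 * 4-bit attribute for the whole 512MB region; translation itself is an
 * identity mapping in this case.
 */
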
static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
        CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz >>= 20;
            sz_text = "MB";
        } else {
            sz >>= 10;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                            "\tVaddr       Paddr       ASID  Attr RWX Cache\n"
                            "\t----------  ----------  ----  ---- --- -------\n");
                }
                cpu_fprintf(f,
                        "\t0x%08x  0x%08x  0x%02x  0x%02x %c%c%c %-7s\n",
                        entry->vaddr,
                        entry->paddr,
                        entry->asid,
                        entry->attr,
                        (access & PAGE_READ) ? 'R' : '-',
                        (access & PAGE_WRITE) ? 'W' : '-',
                        (access & PAGE_EXEC) ? 'X' : '-',
                        cache_text[cache_idx] ?
                            cache_text[cache_idx] : "Invalid");
            }
        }
    }
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        cpu_fprintf(f, "ITLB:\n");
        dump_tlb(f, cpu_fprintf, env, false);
        cpu_fprintf(f, "\nDTLB:\n");
        dump_tlb(f, cpu_fprintf, env, true);
    } else {
        cpu_fprintf(f, "No TLB for this CPU core\n");
    }
}