/*
 *  Alpha emulation cpu helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "fpu/softfloat.h"
#include "exec/helper-proto.h"
#include "qemu/qemu-print.h"
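
/*
 * CONVERT_BIT moves the bit selected by mask SRC to the position selected
 * by mask DST (shifting down when SRC is the higher bit, up otherwise).
 * It is used below to map each FPCR trap-disable bit onto the position of
 * the corresponding FPCR status bit, e.g. FPCR_INED onto FPCR_INE.
 */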
#define CONVERT_BIT(X, SRC, DST) \
    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
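
/*
 * All architecturally defined FPCR fields live in the upper 32 bits of the
 * 64-bit register, so only that half is kept in env->fpcr; the load and
 * store routines shift it into and out of position.
 */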
uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
{
    return (uint64_t)env->fpcr << 32;
}

void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

    env->fpcr = fpcr;
    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    switch (fpcr & FPCR_DYN_MASK) {
    case FPCR_DYN_NORMAL:
    default:
        t = float_round_nearest_even;
        break;
    case FPCR_DYN_CHOPPED:
        t = float_round_to_zero;
        break;
    case FPCR_DYN_MINUS:
        t = float_round_down;
        break;
    case FPCR_DYN_PLUS:
        t = float_round_up;
        break;
    }
    env->fpcr_dyn_round = t;

    env->fpcr_flush_to_zero = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;
}
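
/*
 * Thin TCG helper wrappers around the functions above; the translator
 * calls these when guest code reads or writes the FPCR (MF_FPCR/MT_FPCR).
 */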
uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}

void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}
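
/*
 * In PALmode the CPU exposes shadow copies of r8-r14 and r25 in place of
 * the normal integer registers; this helper picks the backing storage for
 * a given architectural register number so that accesses from outside the
 * translator (e.g. the gdb stub and the register dump below) see the
 * active bank.
 */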
static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
{
#ifndef CONFIG_USER_ONLY
    if (env->flags & ENV_FLAG_PAL_MODE) {
        if (reg >= 8 && reg <= 14) {
            return &env->shadow[reg - 8];
        } else if (reg == 25) {
            return &env->shadow[7];
        }
    }
#endif
    return &env->ir[reg];
}

uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}

void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}
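
/*
 * With no system MMU to model in user-only mode, any fault simply reports
 * EXCP_MMFAULT with the faulting address in trap_arg0; the user-mode main
 * loop is expected to turn that into a guest SIGSEGV.
 */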
#if defined(CONFIG_USER_ONLY)
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                               int rw, int mmu_idx)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);

    cs->exception_index = EXCP_MMFAULT;
    cpu->env.trap_arg0 = address;
    return 1;
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success.  */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Handle physical accesses.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }
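
    /*
     * Everything below is the virtual translation path: first check that
     * the address is canonical, then try the KSEG superpage shortcut, and
     * finally fall through to the three-level page table walk.
     */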

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does.  */

    pt = env->ptbr;

    /* TODO: rather than using ldq_phys() to read the page table we should
     * use address_space_ldq() so that we can handle the case when
     * the page table read gives a bus fault, rather than ignoring it.
     * For the existing code the zero data that ldq_phys will return for
     * an access to invalid memory will result in our treating the page
     * table as invalid, which may even be the right behaviour.
     */
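
    /*
     * Each level of the walk consumes 10 bits of the virtual address
     * (Alpha uses 8K pages, so TARGET_PAGE_BITS is 13), and bits 63:32 of
     * each PTE hold the PFN of the next level, or of the final page,
     * hence the ">> 32 << TARGET_PAGE_BITS" sequences below.
     */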

    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif
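
    /*
     * The PTE read/write enable bits are laid out so that each processor
     * mode's enable is a fixed shift up from the kernel enable, so
     * shifting PTE_KRE/PTE_KWE by mmu_idx selects the enables for the
     * mode this access is being made in.
     */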
    /* Check access violations.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}

int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size, int rw,
                               int mmu_idx)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        /* Report the fault via the OSF/1 entMM convention: a0 = VA,
           a1 = MMCSR failure code, a2 = -1/0/1 for ifetch/load/store.  */
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (rw == 2 ? -1 : rw);
        return 1;
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}
#endif /* USER_ONLY */
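
/*
 * Exceptions and interrupts are delivered by entering PALcode: exc_addr
 * records the interrupted PC (with the PALmode flag in its low bit), and
 * execution resumes in PALmode at palbr plus a per-exception offset.
 */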
void alpha_cpu_do_interrupt(CPUState *cs)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
    switch (i) {
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    env->flags |= ENV_FLAG_PAL_MODE;
#endif /* !USER_ONLY */
}
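
/*
 * Interrupt acceptance is gated by the IPL field of the processor status:
 * with the OSF/1 levels hard-coded below, device interrupts are taken at
 * IPL <= 3, the clock at IPL <= 4, interprocessor interrupts at IPL <= 5,
 * and machine checks at IPL <= 6; IPL 7 masks everything.
 */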
bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int idx = -1;

    /* We never take interrupts while in PALmode.  */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL.  */
    /* ??? This hard-codes the OSF/1 interrupt levels.  */
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    static const char *linux_reg_names[] = {
        "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
        "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
        "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
        "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
    };
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i;

    qemu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
                 env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
    for (i = 0; i < 31; i++) {
        qemu_fprintf(f, "IR%02d %s " TARGET_FMT_lx "%c", i,
                     linux_reg_names[i], cpu_alpha_load_gr(env, i),
                     (i % 3) == 2 ? '\n' : ' ');
    }

    qemu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
                 env->lock_addr, env->lock_value);

    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 31; i++) {
            qemu_fprintf(f, "FIR%02d %016" PRIx64 "%c", i, env->fir[i],
                         (i % 3) == 2 ? '\n' : ' ');
        }
    }
    qemu_fprintf(f, "\n");
}

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                                int excp, int error)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr, true);
        /* Floating-point exceptions (our only users) point to the next PC.  */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}
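
/*
 * Raise an OSF/1 entArith exception: trap_arg0 carries the exception
 * summary and trap_arg1 the register write mask that the PALcode will
 * hand to the kernel as a0/a1.
 */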
void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                              int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}