2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
25 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
30 #include "trace-tcg.h"
34 #undef ALPHA_DEBUG_DISAS
35 #define CONFIG_SOFTFLOAT_INLINE
37 #ifdef ALPHA_DEBUG_DISAS
38 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 # define LOG_DISAS(...) do { } while (0)
43 typedef struct DisasContext DisasContext;
45 struct TranslationBlock *tb;
47 #ifndef CONFIG_USER_ONLY
52 /* Current rounding mode for this TB. */
54 /* Current flush-to-zero setting for this TB. */
57 /* implver value for this CPU. */
60 /* The set of registers active in the current context. */
63 /* Temporaries for $31 and $f31 as source and destination. */
66 /* Temporary for immediate constants. */
69 bool singlestep_enabled;
72 /* Return values from translate_one, indicating the state of the TB.
73 Note that zero indicates that we are not exiting the TB. */
78 /* We have emitted one or more goto_tb. No fixup required. */
81 /* We are not using a goto_tb (for whatever reason), but have updated
82 the PC (for whatever reason), so there's no need to do it again on
86 /* We are exiting the TB, but have neither emitted a goto_tb, nor
87 updated the PC for the next instruction to be executed. */
90 /* We are ending the TB with a noreturn function call, e.g. longjmp.
91 No following code will be executed. */
95 /* global register indexes */
96 static TCGv_env cpu_env;
97 static TCGv cpu_std_ir[31];
98 static TCGv cpu_fir[31];
100 static TCGv cpu_lock_addr;
101 static TCGv cpu_lock_st_addr;
102 static TCGv cpu_lock_value;
104 #ifndef CONFIG_USER_ONLY
105 static TCGv cpu_pal_ir[31];
108 #include "exec/gen-icount.h"
110 void alpha_translate_init(void)
112 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
114 typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
115 static const GlobalVar vars[] = {
118 DEF_VAR(lock_st_addr),
124 /* Use the symbolic register names that match the disassembler. */
125 static const char greg_names[31][4] = {
126 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
127 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
128 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
129 "t10", "t11", "ra", "t12", "at", "gp", "sp"
131 static const char freg_names[31][4] = {
132 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
133 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
134 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
135 "f24", "f25", "f26", "f27", "f28", "f29", "f30"
137 #ifndef CONFIG_USER_ONLY
138 static const char shadow_names[8][8] = {
139 "pal_t7", "pal_s0", "pal_s1", "pal_s2",
140 "pal_s3", "pal_s4", "pal_s5", "pal_t11"
144 static bool done_init = false;
152 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
154 for (i = 0; i < 31; i++) {
155 cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
156 offsetof(CPUAlphaState, ir[i]),
160 for (i = 0; i < 31; i++) {
161 cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
162 offsetof(CPUAlphaState, fir[i]),
166 #ifndef CONFIG_USER_ONLY
167 memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
168 for (i = 0; i < 8; i++) {
169 int r = (i == 7 ? 25 : i + 8);
170 cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
171 offsetof(CPUAlphaState,
177 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
178 const GlobalVar *v = &vars[i];
179 *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
183 static TCGv load_zero(DisasContext *ctx)
185 if (TCGV_IS_UNUSED_I64(ctx->zero)) {
186 ctx->zero = tcg_const_i64(0);
191 static TCGv dest_sink(DisasContext *ctx)
193 if (TCGV_IS_UNUSED_I64(ctx->sink)) {
194 ctx->sink = tcg_temp_new();
199 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
201 if (likely(reg < 31)) {
204 return load_zero(ctx);
208 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
209 uint8_t lit, bool islit)
212 ctx->lit = tcg_const_i64(lit);
214 } else if (likely(reg < 31)) {
217 return load_zero(ctx);
221 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
223 if (likely(reg < 31)) {
226 return dest_sink(ctx);
230 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
232 if (likely(reg < 31)) {
235 return load_zero(ctx);
239 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
241 if (likely(reg < 31)) {
244 return dest_sink(ctx);
248 static void gen_excp_1(int exception, int error_code)
252 tmp1 = tcg_const_i32(exception);
253 tmp2 = tcg_const_i32(error_code);
254 gen_helper_excp(cpu_env, tmp1, tmp2);
255 tcg_temp_free_i32(tmp2);
256 tcg_temp_free_i32(tmp1);
259 static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
261 tcg_gen_movi_i64(cpu_pc, ctx->pc);
262 gen_excp_1(exception, error_code);
263 return EXIT_NORETURN;
266 static inline ExitStatus gen_invalid(DisasContext *ctx)
268 return gen_excp(ctx, EXCP_OPCDEC, 0);
271 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
273 TCGv_i32 tmp32 = tcg_temp_new_i32();
274 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
275 gen_helper_memory_to_f(t0, tmp32);
276 tcg_temp_free_i32(tmp32);
279 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
281 TCGv tmp = tcg_temp_new();
282 tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
283 gen_helper_memory_to_g(t0, tmp);
287 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
289 TCGv_i32 tmp32 = tcg_temp_new_i32();
290 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
291 gen_helper_memory_to_s(t0, tmp32);
292 tcg_temp_free_i32(tmp32);
295 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
297 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
298 tcg_gen_mov_i64(cpu_lock_addr, t1);
299 tcg_gen_mov_i64(cpu_lock_value, t0);
302 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
304 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
305 tcg_gen_mov_i64(cpu_lock_addr, t1);
306 tcg_gen_mov_i64(cpu_lock_value, t0);
309 static inline void gen_load_mem(DisasContext *ctx,
310 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
312 int ra, int rb, int32_t disp16, bool fp,
317 /* LDQ_U with ra $31 is UNOP. Various other loads are forms of
318 prefetches, which we can treat as nops. No worries about
319 missed exceptions here. */
320 if (unlikely(ra == 31)) {
324 tmp = tcg_temp_new();
325 addr = load_gpr(ctx, rb);
328 tcg_gen_addi_i64(tmp, addr, disp16);
332 tcg_gen_andi_i64(tmp, addr, ~0x7);
336 va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
337 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
342 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
344 TCGv_i32 tmp32 = tcg_temp_new_i32();
345 gen_helper_f_to_memory(tmp32, t0);
346 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
347 tcg_temp_free_i32(tmp32);
350 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
352 TCGv tmp = tcg_temp_new();
353 gen_helper_g_to_memory(tmp, t0);
354 tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
358 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
360 TCGv_i32 tmp32 = tcg_temp_new_i32();
361 gen_helper_s_to_memory(tmp32, t0);
362 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
363 tcg_temp_free_i32(tmp32);
366 static inline void gen_store_mem(DisasContext *ctx,
367 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
369 int ra, int rb, int32_t disp16, bool fp,
374 tmp = tcg_temp_new();
375 addr = load_gpr(ctx, rb);
378 tcg_gen_addi_i64(tmp, addr, disp16);
382 tcg_gen_andi_i64(tmp, addr, ~0x7);
386 va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
387 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
392 static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
393 int32_t disp16, int quad)
398 /* ??? Don't bother storing anything. The user can't tell
399 the difference, since the zero register always reads zero. */
403 #if defined(CONFIG_USER_ONLY)
404 addr = cpu_lock_st_addr;
406 addr = tcg_temp_local_new();
409 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
411 #if defined(CONFIG_USER_ONLY)
412 /* ??? This is handled via a complicated version of compare-and-swap
413 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
414 in TCG so that this isn't necessary. */
415 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
417 /* ??? In system mode we are never multi-threaded, so CAS can be
418 implemented via a non-atomic load-compare-store sequence. */
420 TCGLabel *lab_fail, *lab_done;
423 lab_fail = gen_new_label();
424 lab_done = gen_new_label();
425 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
427 val = tcg_temp_new();
428 tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
429 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
431 tcg_gen_qemu_st_i64(ctx->ir[ra], addr, ctx->mem_idx,
432 quad ? MO_LEQ : MO_LEUL);
433 tcg_gen_movi_i64(ctx->ir[ra], 1);
434 tcg_gen_br(lab_done);
436 gen_set_label(lab_fail);
437 tcg_gen_movi_i64(ctx->ir[ra], 0);
439 gen_set_label(lab_done);
440 tcg_gen_movi_i64(cpu_lock_addr, -1);
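
/* A standalone sketch (illustration only, not part of the translator) of
   the non-atomic load-compare-store sequence emitted above: the store
   succeeds only if the address still matches the one recorded by LDx_L
   and the addressed quadword still holds the value loaded at that time.
   Here *mem stands in for the memory word at addr. */
static inline bool example_store_conditional(uint64_t *mem, uint64_t addr,
                                             uint64_t lock_addr,
                                             uint64_t lock_value,
                                             uint64_t new_value)
{
    if (addr != lock_addr || *mem != lock_value) {
        return false;               /* ra receives 0: the store failed */
    }
    *mem = new_value;               /* ra receives 1: the store succeeded */
    return true;
}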
448 static bool in_superpage(DisasContext *ctx, int64_t addr)
450 return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
452 && ((addr >> 41) & 3) == 2
453 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
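
/* Standalone sketch of the same predicate (illustration only), assuming a
   43-bit virtual address space, i.e. TARGET_VIRT_ADDR_SPACE_BITS == 43:
   bits <42:41> must be 2 and the address must be correctly sign-extended,
   which e.g. the KSEG base 0xfffffc0000000000 satisfies. */
static inline bool example_is_superpage_addr(int64_t addr)
{
    return ((addr >> 41) & 3) == 2 && (addr >> 43) == (addr >> 63);
}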
456 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
458 /* Suppress goto_tb in the case of single-stepping and IO. */
459 if ((ctx->tb->cflags & CF_LAST_IO)
460 || ctx->singlestep_enabled || singlestep) {
463 #ifndef CONFIG_USER_ONLY
464 /* If the destination is in the superpage, the page perms can't change. */
465 if (in_superpage(ctx, dest)) {
468 /* Check for the dest on the same page as the start of the TB. */
469 return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
475 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
477 uint64_t dest = ctx->pc + (disp << 2);
480 tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
483 /* Notice branch-to-next; used to initialize RA with the PC. */
486 } else if (use_goto_tb(ctx, dest)) {
488 tcg_gen_movi_i64(cpu_pc, dest);
489 tcg_gen_exit_tb((uintptr_t)ctx->tb);
492 tcg_gen_movi_i64(cpu_pc, dest);
493 return EXIT_PC_UPDATED;
497 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
498 TCGv cmp, int32_t disp)
500 uint64_t dest = ctx->pc + (disp << 2);
501 TCGLabel *lab_true = gen_new_label();
503 if (use_goto_tb(ctx, dest)) {
504 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
507 tcg_gen_movi_i64(cpu_pc, ctx->pc);
508 tcg_gen_exit_tb((uintptr_t)ctx->tb);
510 gen_set_label(lab_true);
512 tcg_gen_movi_i64(cpu_pc, dest);
513 tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
517 TCGv_i64 z = tcg_const_i64(0);
518 TCGv_i64 d = tcg_const_i64(dest);
519 TCGv_i64 p = tcg_const_i64(ctx->pc);
521 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
523 tcg_temp_free_i64(z);
524 tcg_temp_free_i64(d);
525 tcg_temp_free_i64(p);
526 return EXIT_PC_UPDATED;
530 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
531 int32_t disp, int mask)
536 cmp_tmp = tcg_temp_new();
537 tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
539 cmp_tmp = load_gpr(ctx, ra);
542 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
545 /* Fold -0.0 for comparison with COND. */
547 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
549 uint64_t mzero = 1ull << 63;
554 /* For <= or >, the -0.0 value directly compares the way we want. */
555 tcg_gen_mov_i64(dest, src);
560 /* For == or !=, we can simply mask off the sign bit and compare. */
561 tcg_gen_andi_i64(dest, src, mzero - 1);
566 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
567 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
568 tcg_gen_neg_i64(dest, dest);
569 tcg_gen_and_i64(dest, dest, src);
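
/* Plain-C sketch of the GE/LT case above (illustration only): the IEEE
   -0.0 bit pattern is mapped to +0.0 and every other value is passed
   through unchanged, which is what the setcond/neg/and sequence computes. */
static inline uint64_t example_fold_mzero_ge_lt(uint64_t src)
{
    const uint64_t mzero = 1ull << 63;
    return src == mzero ? 0 : src;
}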
577 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
580 TCGv cmp_tmp = tcg_temp_new();
581 gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
582 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
585 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
590 vb = load_fpr(ctx, rb);
592 gen_fold_mzero(cond, va, load_fpr(ctx, ra));
594 tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
599 #define QUAL_RM_N 0x080 /* Round mode nearest even */
600 #define QUAL_RM_C 0x000 /* Round mode chopped */
601 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
602 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
603 #define QUAL_RM_MASK 0x0c0
605 #define QUAL_U 0x100 /* Underflow enable (fp output) */
606 #define QUAL_V 0x100 /* Overflow enable (int output) */
607 #define QUAL_S 0x400 /* Software completion enable */
608 #define QUAL_I 0x200 /* Inexact detection enable */
610 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
614 fn11 &= QUAL_RM_MASK;
615 if (fn11 == ctx->tb_rm) {
620 tmp = tcg_temp_new_i32();
623 tcg_gen_movi_i32(tmp, float_round_nearest_even);
626 tcg_gen_movi_i32(tmp, float_round_to_zero);
629 tcg_gen_movi_i32(tmp, float_round_down);
632 tcg_gen_ld8u_i32(tmp, cpu_env,
633 offsetof(CPUAlphaState, fpcr_dyn_round));
637 #if defined(CONFIG_SOFTFLOAT_INLINE)
638 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
639 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
640 sets the one field. */
641 tcg_gen_st8_i32(tmp, cpu_env,
642 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
644 gen_helper_setroundmode(tmp);
647 tcg_temp_free_i32(tmp);
650 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
655 if (fn11 == ctx->tb_ftz) {
660 tmp = tcg_temp_new_i32();
662 /* Underflow is enabled, use the FPCR setting. */
663 tcg_gen_ld8u_i32(tmp, cpu_env,
664 offsetof(CPUAlphaState, fpcr_flush_to_zero));
666 /* Underflow is disabled, force flush-to-zero. */
667 tcg_gen_movi_i32(tmp, 1);
670 #if defined(CONFIG_SOFTFLOAT_INLINE)
671 tcg_gen_st8_i32(tmp, cpu_env,
672 offsetof(CPUAlphaState, fp_status.flush_to_zero));
674 gen_helper_setflushzero(tmp);
677 tcg_temp_free_i32(tmp);
680 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
684 if (unlikely(reg == 31)) {
685 val = load_zero(ctx);
688 if ((fn11 & QUAL_S) == 0) {
690 gen_helper_ieee_input_cmp(cpu_env, val);
692 gen_helper_ieee_input(cpu_env, val);
695 #ifndef CONFIG_USER_ONLY
696 /* In system mode, raise exceptions for denormals like real
697 hardware. In user mode, proceed as if the OS completion
698 handler is handling the denormal as per spec. */
699 gen_helper_ieee_input_s(cpu_env, val);
706 static void gen_fp_exc_raise(int rc, int fn11)
708 /* ??? We ought to be able to do something with imprecise exceptions.
709 E.g. notice we're still in the trap shadow of something within the
710 TB and do not generate the code to signal the exception; end the TB
711 when an exception is forced to arrive, either by consumption of a
712 register value or TRAPB or EXCB. */
716 if (!(fn11 & QUAL_U)) {
717 /* Note that QUAL_U == QUAL_V, so ignore either. */
718 ignore |= FPCR_UNF | FPCR_IOV;
720 if (!(fn11 & QUAL_I)) {
723 ign = tcg_const_i32(ignore);
725 /* ??? Pass in the regno of the destination so that the helper can
726 set EXC_MASK, which contains a bitmask of destination registers
727 that have caused arithmetic traps. A simple userspace emulation
728 does not require this. We do need it for a guest kernel's entArith,
729 or if we were to do something clever with imprecise exceptions. */
730 reg = tcg_const_i32(rc + 32);
732 gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
734 gen_helper_fp_exc_raise(cpu_env, ign, reg);
737 tcg_temp_free_i32(reg);
738 tcg_temp_free_i32(ign);
741 static void gen_cvtlq(TCGv vc, TCGv vb)
743 TCGv tmp = tcg_temp_new();
745 /* The arithmetic right shift here, plus the sign-extended mask below
746 yields a sign-extended result without an explicit ext32s_i64. */
747 tcg_gen_sari_i64(tmp, vb, 32);
748 tcg_gen_shri_i64(vc, vb, 29);
749 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
750 tcg_gen_andi_i64(vc, vc, 0x3fffffff);
751 tcg_gen_or_i64(vc, vc, tmp);
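
/* Plain-C sketch of the bit rearrangement above (illustration only): in
   the Alpha FP register format a longword keeps bits <31:30> at <63:62>
   and bits <29:0> at <58:29>; the arithmetic shift preserves the sign, so
   the result comes out already sign-extended. */
static inline int64_t example_cvtlq(uint64_t fpr)
{
    int64_t hi = ((int64_t)fpr >> 32) & (int64_t)(int32_t)0xc0000000;
    int64_t lo = (fpr >> 29) & 0x3fffffff;
    return hi | lo;
}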
756 static void gen_ieee_arith2(DisasContext *ctx,
757 void (*helper)(TCGv, TCGv_ptr, TCGv),
758 int rb, int rc, int fn11)
762 gen_qual_roundmode(ctx, fn11);
763 gen_qual_flushzero(ctx, fn11);
765 vb = gen_ieee_input(ctx, rb, fn11, 0);
766 helper(dest_fpr(ctx, rc), cpu_env, vb);
768 gen_fp_exc_raise(rc, fn11);
771 #define IEEE_ARITH2(name) \
772 static inline void glue(gen_, name)(DisasContext *ctx, \
773 int rb, int rc, int fn11) \
775 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
782 static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
786 /* No need to set flushzero, since we have an integer output. */
787 vb = gen_ieee_input(ctx, rb, fn11, 0);
788 vc = dest_fpr(ctx, rc);
790 /* Almost all integer conversions use chopped rounding;
791 special case that. */
792 if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
793 gen_helper_cvttq_c(vc, cpu_env, vb);
795 gen_qual_roundmode(ctx, fn11);
796 gen_helper_cvttq(vc, cpu_env, vb);
798 gen_fp_exc_raise(rc, fn11);
801 static void gen_ieee_intcvt(DisasContext *ctx,
802 void (*helper)(TCGv, TCGv_ptr, TCGv),
803 int rb, int rc, int fn11)
807 gen_qual_roundmode(ctx, fn11);
808 vb = load_fpr(ctx, rb);
809 vc = dest_fpr(ctx, rc);
811 /* The only exception that can be raised by integer conversion
812 is inexact. Thus we only need to worry about exceptions when
813 inexact handling is requested. */
815 helper(vc, cpu_env, vb);
816 gen_fp_exc_raise(rc, fn11);
818 helper(vc, cpu_env, vb);
822 #define IEEE_INTCVT(name) \
823 static inline void glue(gen_, name)(DisasContext *ctx, \
824 int rb, int rc, int fn11) \
826 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
831 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
833 TCGv vmask = tcg_const_i64(mask);
834 TCGv tmp = tcg_temp_new_i64();
837 tcg_gen_andc_i64(tmp, vmask, va);
839 tcg_gen_and_i64(tmp, va, vmask);
842 tcg_gen_andc_i64(vc, vb, vmask);
843 tcg_gen_or_i64(vc, vc, tmp);
845 tcg_temp_free(vmask);
849 static void gen_ieee_arith3(DisasContext *ctx,
850 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
851 int ra, int rb, int rc, int fn11)
855 gen_qual_roundmode(ctx, fn11);
856 gen_qual_flushzero(ctx, fn11);
858 va = gen_ieee_input(ctx, ra, fn11, 0);
859 vb = gen_ieee_input(ctx, rb, fn11, 0);
860 vc = dest_fpr(ctx, rc);
861 helper(vc, cpu_env, va, vb);
863 gen_fp_exc_raise(rc, fn11);
866 #define IEEE_ARITH3(name) \
867 static inline void glue(gen_, name)(DisasContext *ctx, \
868 int ra, int rb, int rc, int fn11) \
870 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
881 static void gen_ieee_compare(DisasContext *ctx,
882 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
883 int ra, int rb, int rc, int fn11)
887 va = gen_ieee_input(ctx, ra, fn11, 1);
888 vb = gen_ieee_input(ctx, rb, fn11, 1);
889 vc = dest_fpr(ctx, rc);
890 helper(vc, cpu_env, va, vb);
892 gen_fp_exc_raise(rc, fn11);
895 #define IEEE_CMP3(name) \
896 static inline void glue(gen_, name)(DisasContext *ctx, \
897 int ra, int rb, int rc, int fn11) \
899 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
906 static inline uint64_t zapnot_mask(uint8_t lit)
911 for (i = 0; i < 8; ++i) {
912 if ((lit >> i) & 1) {
913 mask |= 0xffull << (i * 8);
919 /* Implement zapnot with an immediate operand, which expands to some
920 form of immediate AND. This is a basic building block in the
921 definition of many of the other byte manipulation instructions. */
922 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
926 tcg_gen_movi_i64(dest, 0);
929 tcg_gen_ext8u_i64(dest, src);
932 tcg_gen_ext16u_i64(dest, src);
935 tcg_gen_ext32u_i64(dest, src);
938 tcg_gen_mov_i64(dest, src);
941 tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
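
/* Worked examples for the mask expansion above (illustration only, not
   referenced by the translator): each set bit of the 8-bit literal keeps
   the corresponding byte of the source, so ZAPNOT with these literals is
   just an AND with the masks below -- matching the special cases in
   gen_zapnoti. */
static inline bool example_zapnot_masks_ok(void)
{
    return zapnot_mask(0x01) == 0x00000000000000ffull   /* low byte  */
        && zapnot_mask(0x03) == 0x000000000000ffffull   /* low word  */
        && zapnot_mask(0x0f) == 0x00000000ffffffffull   /* low long  */
        && zapnot_mask(0xff) == ~(uint64_t)0;            /* all bytes */
}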
946 /* EXTWH, EXTLH, EXTQH */
947 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
948 uint8_t lit, uint8_t byte_mask)
951 tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
953 TCGv tmp = tcg_temp_new();
954 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
955 tcg_gen_neg_i64(tmp, tmp);
956 tcg_gen_andi_i64(tmp, tmp, 0x3f);
957 tcg_gen_shl_i64(vc, va, tmp);
960 gen_zapnoti(vc, vc, byte_mask);
963 /* EXTBL, EXTWL, EXTLL, EXTQL */
964 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
965 uint8_t lit, uint8_t byte_mask)
968 tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
970 TCGv tmp = tcg_temp_new();
971 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
972 tcg_gen_shli_i64(tmp, tmp, 3);
973 tcg_gen_shr_i64(vc, va, tmp);
976 gen_zapnoti(vc, vc, byte_mask);
979 /* INSWH, INSLH, INSQH */
980 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
981 uint8_t lit, uint8_t byte_mask)
983 TCGv tmp = tcg_temp_new();
985 /* The instruction description has us left-shift the byte mask and extract
986 bits <15:8> and apply that zap at the end. This is equivalent to simply
987 performing the zap first and shifting afterward. */
988 gen_zapnoti(tmp, va, byte_mask);
992 if (unlikely(lit == 0)) {
993 tcg_gen_movi_i64(vc, 0);
995 tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
998 TCGv shift = tcg_temp_new();
1000 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1001 portably by splitting the shift into two parts: shift_count-1 and 1.
1002 Arrange for the -1 by using ones-complement instead of
1003 twos-complement in the negation: ~(B * 8) & 63. */
1005 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1006 tcg_gen_not_i64(shift, shift);
1007 tcg_gen_andi_i64(shift, shift, 0x3f);
1009 tcg_gen_shr_i64(vc, tmp, shift);
1010 tcg_gen_shri_i64(vc, vc, 1);
1011 tcg_temp_free(shift);
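
/* Plain-C sketch of the split shift above (illustration only): a shift by
   64 is undefined behaviour in C and unsupported by most hosts, so shift
   by (~(b * 8) & 63) first and then by one more bit.  For (b & 7) == 0
   this amounts to the required 64-bit shift and yields zero. */
static inline uint64_t example_shr_by_byte_count(uint64_t val, unsigned b)
{
    unsigned shift = ~(b * 8) & 63;   /* == 63 - ((b * 8) & 63) */
    return (val >> shift) >> 1;       /* total shift: 64 - ((b * 8) & 63) */
}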
1016 /* INSBL, INSWL, INSLL, INSQL */
1017 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1018 uint8_t lit, uint8_t byte_mask)
1020 TCGv tmp = tcg_temp_new();
1022 /* The instruction description has us left-shift the byte mask
1023 the same number of byte slots as the data and apply the zap
1024 at the end. This is equivalent to simply performing the zap
1025 first and shifting afterward. */
1026 gen_zapnoti(tmp, va, byte_mask);
1029 tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
1031 TCGv shift = tcg_temp_new();
1032 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1033 tcg_gen_shli_i64(shift, shift, 3);
1034 tcg_gen_shl_i64(vc, tmp, shift);
1035 tcg_temp_free(shift);
1040 /* MSKWH, MSKLH, MSKQH */
1041 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1042 uint8_t lit, uint8_t byte_mask)
1045 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
1047 TCGv shift = tcg_temp_new();
1048 TCGv mask = tcg_temp_new();
1050 /* The instruction description is as above, where the byte_mask
1051 is shifted left, and then we extract bits <15:8>. This can be
1052 emulated with a right-shift on the expanded byte mask. This
1053 requires extra care because for an input <2:0> == 0 we need a
1054 shift of 64 bits in order to generate a zero. This is done by
1055 splitting the shift into two parts, the variable shift - 1
1056 followed by a constant 1 shift. The code we expand below is
1057 equivalent to ~(B * 8) & 63. */
1059 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1060 tcg_gen_not_i64(shift, shift);
1061 tcg_gen_andi_i64(shift, shift, 0x3f);
1062 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1063 tcg_gen_shr_i64(mask, mask, shift);
1064 tcg_gen_shri_i64(mask, mask, 1);
1066 tcg_gen_andc_i64(vc, va, mask);
1068 tcg_temp_free(mask);
1069 tcg_temp_free(shift);
1073 /* MSKBL, MSKWL, MSKLL, MSKQL */
1074 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1075 uint8_t lit, uint8_t byte_mask)
1078 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1080 TCGv shift = tcg_temp_new();
1081 TCGv mask = tcg_temp_new();
1083 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1084 tcg_gen_shli_i64(shift, shift, 3);
1085 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1086 tcg_gen_shl_i64(mask, mask, shift);
1088 tcg_gen_andc_i64(vc, va, mask);
1090 tcg_temp_free(mask);
1091 tcg_temp_free(shift);
1095 static void gen_rx(DisasContext *ctx, int ra, int set)
1100 tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
1101 offsetof(CPUAlphaState, intr_flag));
1104 tmp = tcg_const_i32(set);
1105 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
1106 tcg_temp_free_i32(tmp);
1109 static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1111 /* We're emulating OSF/1 PALcode. Many of these are trivial accesses
1112 to internal CPU registers. */
1114 /* Unprivileged PAL call */
1115 if (palcode >= 0x80 && palcode < 0xC0) {
1119 /* No-op inside QEMU. */
1123 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1124 offsetof(CPUAlphaState, unique));
1128 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1129 offsetof(CPUAlphaState, unique));
1138 #ifndef CONFIG_USER_ONLY
1139 /* Privileged PAL code */
1140 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1144 /* No-op inside QEMU. */
1148 /* No-op inside QEMU. */
1152 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1153 offsetof(CPUAlphaState, vptptr));
1157 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1158 offsetof(CPUAlphaState, sysval));
1162 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1163 offsetof(CPUAlphaState, sysval));
1170 /* Note that we already know we're in kernel mode, so we know
1171 that PS only contains the 3 IPL bits. */
1172 tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
1173 offsetof(CPUAlphaState, ps));
1175 /* But make sure to store only the 3 IPL bits from the user. */
1176 tmp = tcg_temp_new();
1177 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
1178 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
1185 tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
1186 offsetof(CPUAlphaState, ps));
1190 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
1191 offsetof(CPUAlphaState, usp));
1195 tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
1196 offsetof(CPUAlphaState, usp));
1200 tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
1201 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
1211 return gen_invalid(ctx);
1214 #ifdef CONFIG_USER_ONLY
1215 return gen_excp(ctx, EXCP_CALL_PAL, palcode);
1218 TCGv tmp = tcg_temp_new();
1219 uint64_t exc_addr = ctx->pc;
1220 uint64_t entry = ctx->palbr;
1222 if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
1225 tcg_gen_movi_i64(tmp, 1);
1226 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
1229 tcg_gen_movi_i64(tmp, exc_addr);
1230 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
1233 entry += (palcode & 0x80
1234 ? 0x2000 + (palcode - 0x80) * 64
1235 : 0x1000 + palcode * 64);
1237 /* Since the destination is running in PALmode, we don't really
1238 need the page permissions check. We'll see the existence of
1239 the page when we create the TB, and we'll flush all TBs if
1240 we change the PAL base register. */
1241 if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
1243 tcg_gen_movi_i64(cpu_pc, entry);
1244 tcg_gen_exit_tb((uintptr_t)ctx->tb);
1245 return EXIT_GOTO_TB;
1247 tcg_gen_movi_i64(cpu_pc, entry);
1248 return EXIT_PC_UPDATED;
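
/* Standalone sketch of the CALL_PAL entry-point computation in
   gen_call_pal above (illustration only): unprivileged functions (0x80
   and up) vector to PAL base + 0x2000, privileged ones to PAL base +
   0x1000, each in 64-byte slots.  E.g. with a hypothetical PAL base of
   0x10000, callsys (0x83) enters at 0x10000 + 0x2000 + 3 * 64 = 0x120c0. */
static inline uint64_t example_pal_entry(uint64_t palbr, unsigned palcode)
{
    return palbr + (palcode & 0x80
                    ? 0x2000 + (palcode - 0x80) * 64
                    : 0x1000 + palcode * 64);
}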
1254 #ifndef CONFIG_USER_ONLY
1256 #define PR_BYTE 0x100000
1257 #define PR_LONG 0x200000
1259 static int cpu_pr_data(int pr)
1262 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1263 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1264 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1265 case 3: return offsetof(CPUAlphaState, trap_arg0);
1266 case 4: return offsetof(CPUAlphaState, trap_arg1);
1267 case 5: return offsetof(CPUAlphaState, trap_arg2);
1268 case 6: return offsetof(CPUAlphaState, exc_addr);
1269 case 7: return offsetof(CPUAlphaState, palbr);
1270 case 8: return offsetof(CPUAlphaState, ptbr);
1271 case 9: return offsetof(CPUAlphaState, vptptr);
1272 case 10: return offsetof(CPUAlphaState, unique);
1273 case 11: return offsetof(CPUAlphaState, sysval);
1274 case 12: return offsetof(CPUAlphaState, usp);
1277 return offsetof(CPUAlphaState, scratch[pr - 40]);
1280 return offsetof(CPUAlphaState, alarm_expire);
1285 static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
1287 void (*helper)(TCGv);
1292 /* Accessing the "non-shadow" general registers. */
1293 regno = regno == 39 ? 25 : regno - 32 + 8;
1294 tcg_gen_mov_i64(va, cpu_std_ir[regno]);
1297 case 250: /* WALLTIME */
1298 helper = gen_helper_get_walltime;
1300 case 249: /* VMTIME */
1301 helper = gen_helper_get_vmtime;
1307 return EXIT_PC_STALE;
1314 /* The basic registers are data only, and unknown registers
1315 are read-zero, write-ignore. */
1316 data = cpu_pr_data(regno);
1318 tcg_gen_movi_i64(va, 0);
1319 } else if (data & PR_BYTE) {
1320 tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
1321 } else if (data & PR_LONG) {
1322 tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
1324 tcg_gen_ld_i64(va, cpu_env, data);
1332 static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
1340 gen_helper_tbia(cpu_env);
1345 gen_helper_tbis(cpu_env, vb);
1350 tmp = tcg_const_i64(1);
1351 tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1352 offsetof(CPUState, halted));
1353 return gen_excp(ctx, EXCP_HLT, 0);
1357 gen_helper_halt(vb);
1358 return EXIT_PC_STALE;
1362 gen_helper_set_alarm(cpu_env, vb);
1367 tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
1368 /* Changing the PAL base register implies un-chaining all of the TBs
1369 that ended with a CALL_PAL. Since the base register usually only
1370 changes during boot, flushing everything works well. */
1371 gen_helper_tb_flush(cpu_env);
1372 return EXIT_PC_STALE;
1375 /* Accessing the "non-shadow" general registers. */
1376 regno = regno == 39 ? 25 : regno - 32 + 8;
1377 tcg_gen_mov_i64(cpu_std_ir[regno], vb);
1381 /* The basic registers are data only, and unknown registers
1382 are read-zero, write-ignore. */
1383 data = cpu_pr_data(regno);
1385 if (data & PR_BYTE) {
1386 tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
1387 } else if (data & PR_LONG) {
1388 tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
1390 tcg_gen_st_i64(vb, cpu_env, data);
1398 #endif /* !USER_ONLY */
1400 #define REQUIRE_NO_LIT \
1407 #define REQUIRE_TB_FLAG(FLAG) \
1409 if ((ctx->tb->flags & (FLAG)) == 0) { \
1414 #define REQUIRE_REG_31(WHICH) \
1416 if (WHICH != 31) { \
1421 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1423 int32_t disp21, disp16, disp12 __attribute__((unused));
1425 uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1426 bool islit, real_islit;
1427 TCGv va, vb, vc, tmp, tmp2;
1431 /* Decode all instruction fields */
1432 opc = extract32(insn, 26, 6);
1433 ra = extract32(insn, 21, 5);
1434 rb = extract32(insn, 16, 5);
1435 rc = extract32(insn, 0, 5);
1436 real_islit = islit = extract32(insn, 12, 1);
1437 lit = extract32(insn, 13, 8);
1439 disp21 = sextract32(insn, 0, 21);
1440 disp16 = sextract32(insn, 0, 16);
1441 disp12 = sextract32(insn, 0, 12);
1443 fn11 = extract32(insn, 5, 11);
1444 fpfn = extract32(insn, 5, 6);
1445 fn7 = extract32(insn, 5, 7);
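
    /* Worked example of the field extraction above (illustration only):
       the operate-format word 0x40220403 decodes as opc = 0x10, ra = 1,
       rb = 2, rc = 3, islit = 0 and fn7 = 0x20, i.e. ADDQ r1, r2, r3. */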
1447 if (rb == 31 && !islit) {
1456 ret = gen_call_pal(ctx, insn & 0x03ffffff);
1482 disp16 = (uint32_t)disp16 << 16;
1486 va = dest_gpr(ctx, ra);
1487 /* It's worth special-casing immediate loads. */
1489 tcg_gen_movi_i64(va, disp16);
1491 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1497 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1498 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1502 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1506 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1507 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1511 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1512 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1516 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1517 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1521 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1525 vc = dest_gpr(ctx, rc);
1526 vb = load_gpr_lit(ctx, rb, lit, islit);
1530 /* Special case ADDL as SEXTL. */
1531 tcg_gen_ext32s_i64(vc, vb);
1535 /* Special case SUBQ as NEGQ. */
1536 tcg_gen_neg_i64(vc, vb);
1541 va = load_gpr(ctx, ra);
1545 tcg_gen_add_i64(vc, va, vb);
1546 tcg_gen_ext32s_i64(vc, vc);
1550 tmp = tcg_temp_new();
1551 tcg_gen_shli_i64(tmp, va, 2);
1552 tcg_gen_add_i64(tmp, tmp, vb);
1553 tcg_gen_ext32s_i64(vc, tmp);
1558 tcg_gen_sub_i64(vc, va, vb);
1559 tcg_gen_ext32s_i64(vc, vc);
1563 tmp = tcg_temp_new();
1564 tcg_gen_shli_i64(tmp, va, 2);
1565 tcg_gen_sub_i64(tmp, tmp, vb);
1566 tcg_gen_ext32s_i64(vc, tmp);
1572 /* Special case 0 >= X as X == 0. */
1573 gen_helper_cmpbe0(vc, vb);
1575 gen_helper_cmpbge(vc, va, vb);
1580 tmp = tcg_temp_new();
1581 tcg_gen_shli_i64(tmp, va, 3);
1582 tcg_gen_add_i64(tmp, tmp, vb);
1583 tcg_gen_ext32s_i64(vc, tmp);
1588 tmp = tcg_temp_new();
1589 tcg_gen_shli_i64(tmp, va, 3);
1590 tcg_gen_sub_i64(tmp, tmp, vb);
1591 tcg_gen_ext32s_i64(vc, tmp);
1596 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1600 tcg_gen_add_i64(vc, va, vb);
1604 tmp = tcg_temp_new();
1605 tcg_gen_shli_i64(tmp, va, 2);
1606 tcg_gen_add_i64(vc, tmp, vb);
1611 tcg_gen_sub_i64(vc, va, vb);
1615 tmp = tcg_temp_new();
1616 tcg_gen_shli_i64(tmp, va, 2);
1617 tcg_gen_sub_i64(vc, tmp, vb);
1622 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1626 tmp = tcg_temp_new();
1627 tcg_gen_shli_i64(tmp, va, 3);
1628 tcg_gen_add_i64(vc, tmp, vb);
1633 tmp = tcg_temp_new();
1634 tcg_gen_shli_i64(tmp, va, 3);
1635 tcg_gen_sub_i64(vc, tmp, vb);
1640 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1644 tmp = tcg_temp_new();
1645 tcg_gen_ext32s_i64(tmp, va);
1646 tcg_gen_ext32s_i64(vc, vb);
1647 tcg_gen_add_i64(tmp, tmp, vc);
1648 tcg_gen_ext32s_i64(vc, tmp);
1649 gen_helper_check_overflow(cpu_env, vc, tmp);
1654 tmp = tcg_temp_new();
1655 tcg_gen_ext32s_i64(tmp, va);
1656 tcg_gen_ext32s_i64(vc, vb);
1657 tcg_gen_sub_i64(tmp, tmp, vc);
1658 tcg_gen_ext32s_i64(vc, tmp);
1659 gen_helper_check_overflow(cpu_env, vc, tmp);
1664 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1668 tmp = tcg_temp_new();
1669 tmp2 = tcg_temp_new();
1670 tcg_gen_eqv_i64(tmp, va, vb);
1671 tcg_gen_mov_i64(tmp2, va);
1672 tcg_gen_add_i64(vc, va, vb);
1673 tcg_gen_xor_i64(tmp2, tmp2, vc);
1674 tcg_gen_and_i64(tmp, tmp, tmp2);
1675 tcg_gen_shri_i64(tmp, tmp, 63);
1676 tcg_gen_movi_i64(tmp2, 0);
1677 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1679 tcg_temp_free(tmp2);
1683 tmp = tcg_temp_new();
1684 tmp2 = tcg_temp_new();
1685 tcg_gen_xor_i64(tmp, va, vb);
1686 tcg_gen_mov_i64(tmp2, va);
1687 tcg_gen_sub_i64(vc, va, vb);
1688 tcg_gen_xor_i64(tmp2, tmp2, vc);
1689 tcg_gen_and_i64(tmp, tmp, tmp2);
1690 tcg_gen_shri_i64(tmp, tmp, 63);
1691 tcg_gen_movi_i64(tmp2, 0);
1692 gen_helper_check_overflow(cpu_env, tmp, tmp2);
1694 tcg_temp_free(tmp2);
1698 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1708 /* Special case BIS as NOP. */
1712 /* Special case BIS as MOV. */
1713 vc = dest_gpr(ctx, rc);
1715 tcg_gen_movi_i64(vc, lit);
1717 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1723 vc = dest_gpr(ctx, rc);
1724 vb = load_gpr_lit(ctx, rb, lit, islit);
1726 if (fn7 == 0x28 && ra == 31) {
1727 /* Special case ORNOT as NOT. */
1728 tcg_gen_not_i64(vc, vb);
1732 va = load_gpr(ctx, ra);
1736 tcg_gen_and_i64(vc, va, vb);
1740 tcg_gen_andc_i64(vc, va, vb);
1744 tmp = tcg_temp_new();
1745 tcg_gen_andi_i64(tmp, va, 1);
1746 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1747 vb, load_gpr(ctx, rc));
1752 tmp = tcg_temp_new();
1753 tcg_gen_andi_i64(tmp, va, 1);
1754 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1755 vb, load_gpr(ctx, rc));
1760 tcg_gen_or_i64(vc, va, vb);
1764 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1765 vb, load_gpr(ctx, rc));
1769 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1770 vb, load_gpr(ctx, rc));
1774 tcg_gen_orc_i64(vc, va, vb);
1778 tcg_gen_xor_i64(vc, va, vb);
1782 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1783 vb, load_gpr(ctx, rc));
1787 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1788 vb, load_gpr(ctx, rc));
1792 tcg_gen_eqv_i64(vc, va, vb);
1798 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
1799 tcg_gen_andi_i64(vc, vb, ~amask);
1804 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1805 vb, load_gpr(ctx, rc));
1809 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1810 vb, load_gpr(ctx, rc));
1815 tcg_gen_movi_i64(vc, ctx->implver);
1823 vc = dest_gpr(ctx, rc);
1824 va = load_gpr(ctx, ra);
1828 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1832 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1836 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1840 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1844 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1848 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1852 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1856 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1860 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1865 gen_zapnoti(vc, va, ~lit);
1867 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1873 gen_zapnoti(vc, va, lit);
1875 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1880 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1885 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1887 tmp = tcg_temp_new();
1888 vb = load_gpr(ctx, rb);
1889 tcg_gen_andi_i64(tmp, vb, 0x3f);
1890 tcg_gen_shr_i64(vc, va, tmp);
1896 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1901 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1903 tmp = tcg_temp_new();
1904 vb = load_gpr(ctx, rb);
1905 tcg_gen_andi_i64(tmp, vb, 0x3f);
1906 tcg_gen_shl_i64(vc, va, tmp);
1912 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1917 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1919 tmp = tcg_temp_new();
1920 vb = load_gpr(ctx, rb);
1921 tcg_gen_andi_i64(tmp, vb, 0x3f);
1922 tcg_gen_sar_i64(vc, va, tmp);
1928 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1932 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1936 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1940 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1944 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1948 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1952 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1956 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1960 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1968 vc = dest_gpr(ctx, rc);
1969 vb = load_gpr_lit(ctx, rb, lit, islit);
1970 va = load_gpr(ctx, ra);
1974 tcg_gen_mul_i64(vc, va, vb);
1975 tcg_gen_ext32s_i64(vc, vc);
1979 tcg_gen_mul_i64(vc, va, vb);
1983 tmp = tcg_temp_new();
1984 tcg_gen_mulu2_i64(tmp, vc, va, vb);
1989 tmp = tcg_temp_new();
1990 tcg_gen_ext32s_i64(tmp, va);
1991 tcg_gen_ext32s_i64(vc, vb);
1992 tcg_gen_mul_i64(tmp, tmp, vc);
1993 tcg_gen_ext32s_i64(vc, tmp);
1994 gen_helper_check_overflow(cpu_env, vc, tmp);
1999 tmp = tcg_temp_new();
2000 tmp2 = tcg_temp_new();
2001 tcg_gen_muls2_i64(vc, tmp, va, vb);
2002 tcg_gen_sari_i64(tmp2, vc, 63);
2003 gen_helper_check_overflow(cpu_env, tmp, tmp2);
2005 tcg_temp_free(tmp2);
2013 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2014 vc = dest_fpr(ctx, rc);
2015 switch (fpfn) { /* fn11 & 0x3F */
2019 t32 = tcg_temp_new_i32();
2020 va = load_gpr(ctx, ra);
2021 tcg_gen_extrl_i64_i32(t32, va);
2022 gen_helper_memory_to_s(vc, t32);
2023 tcg_temp_free_i32(t32);
2028 vb = load_fpr(ctx, rb);
2029 gen_helper_sqrtf(vc, cpu_env, vb);
2034 gen_sqrts(ctx, rb, rc, fn11);
2039 t32 = tcg_temp_new_i32();
2040 va = load_gpr(ctx, ra);
2041 tcg_gen_extrl_i64_i32(t32, va);
2042 gen_helper_memory_to_f(vc, t32);
2043 tcg_temp_free_i32(t32);
2048 va = load_gpr(ctx, ra);
2049 tcg_gen_mov_i64(vc, va);
2054 vb = load_fpr(ctx, rb);
2055 gen_helper_sqrtg(vc, cpu_env, vb);
2060 gen_sqrtt(ctx, rb, rc, fn11);
2068 /* VAX floating point */
2069 /* XXX: rounding mode and trap are ignored (!) */
2070 vc = dest_fpr(ctx, rc);
2071 vb = load_fpr(ctx, rb);
2072 va = load_fpr(ctx, ra);
2073 switch (fpfn) { /* fn11 & 0x3F */
2076 gen_helper_addf(vc, cpu_env, va, vb);
2080 gen_helper_subf(vc, cpu_env, va, vb);
2084 gen_helper_mulf(vc, cpu_env, va, vb);
2088 gen_helper_divf(vc, cpu_env, va, vb);
2096 gen_helper_addg(vc, cpu_env, va, vb);
2100 gen_helper_subg(vc, cpu_env, va, vb);
2104 gen_helper_mulg(vc, cpu_env, va, vb);
2108 gen_helper_divg(vc, cpu_env, va, vb);
2112 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2116 gen_helper_cmpglt(vc, cpu_env, va, vb);
2120 gen_helper_cmpgle(vc, cpu_env, va, vb);
2125 gen_helper_cvtgf(vc, cpu_env, vb);
2134 gen_helper_cvtgq(vc, cpu_env, vb);
2139 gen_helper_cvtqf(vc, cpu_env, vb);
2144 gen_helper_cvtqg(vc, cpu_env, vb);
2152 /* IEEE floating-point */
2153 switch (fpfn) { /* fn11 & 0x3F */
2156 gen_adds(ctx, ra, rb, rc, fn11);
2160 gen_subs(ctx, ra, rb, rc, fn11);
2164 gen_muls(ctx, ra, rb, rc, fn11);
2168 gen_divs(ctx, ra, rb, rc, fn11);
2172 gen_addt(ctx, ra, rb, rc, fn11);
2176 gen_subt(ctx, ra, rb, rc, fn11);
2180 gen_mult(ctx, ra, rb, rc, fn11);
2184 gen_divt(ctx, ra, rb, rc, fn11);
2188 gen_cmptun(ctx, ra, rb, rc, fn11);
2192 gen_cmpteq(ctx, ra, rb, rc, fn11);
2196 gen_cmptlt(ctx, ra, rb, rc, fn11);
2200 gen_cmptle(ctx, ra, rb, rc, fn11);
2204 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2206 gen_cvtst(ctx, rb, rc, fn11);
2209 gen_cvtts(ctx, rb, rc, fn11);
2215 gen_cvttq(ctx, rb, rc, fn11);
2220 gen_cvtqs(ctx, rb, rc, fn11);
2225 gen_cvtqt(ctx, rb, rc, fn11);
2237 vc = dest_fpr(ctx, rc);
2238 vb = load_fpr(ctx, rb);
2244 /* Special case CPYS as FNOP. */
2246 vc = dest_fpr(ctx, rc);
2247 va = load_fpr(ctx, ra);
2249 /* Special case CPYS as FMOV. */
2250 tcg_gen_mov_i64(vc, va);
2252 vb = load_fpr(ctx, rb);
2253 gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2259 vc = dest_fpr(ctx, rc);
2260 vb = load_fpr(ctx, rb);
2261 va = load_fpr(ctx, ra);
2262 gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2266 vc = dest_fpr(ctx, rc);
2267 vb = load_fpr(ctx, rb);
2268 va = load_fpr(ctx, ra);
2269 gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2273 va = load_fpr(ctx, ra);
2274 gen_helper_store_fpcr(cpu_env, va);
2275 if (ctx->tb_rm == QUAL_RM_D) {
2276 /* Re-do the copy of the rounding mode to fp_status
2277 the next time we use dynamic rounding. */
2283 va = dest_fpr(ctx, ra);
2284 gen_helper_load_fpcr(va, cpu_env);
2288 gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2292 gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2296 gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2300 gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2304 gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2308 gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2310 case 0x030: /* CVTQL */
2311 case 0x130: /* CVTQL/V */
2312 case 0x530: /* CVTQL/SV */
2314 vc = dest_fpr(ctx, rc);
2315 vb = load_fpr(ctx, rb);
2316 gen_helper_cvtql(vc, cpu_env, vb);
2317 gen_fp_exc_raise(rc, fn11);
2325 switch ((uint16_t)disp16) {
2352 va = dest_gpr(ctx, ra);
2353 if (ctx->tb->cflags & CF_USE_ICOUNT) {
2355 gen_helper_load_pcc(va, cpu_env);
2357 ret = EXIT_PC_STALE;
2359 gen_helper_load_pcc(va, cpu_env);
2387 /* HW_MFPR (PALcode) */
2388 #ifndef CONFIG_USER_ONLY
2389 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2390 va = dest_gpr(ctx, ra);
2391 ret = gen_mfpr(ctx, va, insn & 0xffff);
2398 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2399 prediction stack action, which of course we don't implement. */
2400 vb = load_gpr(ctx, rb);
2401 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2403 tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
2405 ret = EXIT_PC_UPDATED;
2409 /* HW_LD (PALcode) */
2410 #ifndef CONFIG_USER_ONLY
2411 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2413 TCGv addr = tcg_temp_new();
2414 vb = load_gpr(ctx, rb);
2415 va = dest_gpr(ctx, ra);
2417 tcg_gen_addi_i64(addr, vb, disp12);
2418 switch ((insn >> 12) & 0xF) {
2420 /* Longword physical access (hw_ldl/p) */
2421 gen_helper_ldl_phys(va, cpu_env, addr);
2424 /* Quadword physical access (hw_ldq/p) */
2425 gen_helper_ldq_phys(va, cpu_env, addr);
2428 /* Longword physical access with lock (hw_ldl_l/p) */
2429 gen_helper_ldl_l_phys(va, cpu_env, addr);
2432 /* Quadword physical access with lock (hw_ldq_l/p) */
2433 gen_helper_ldq_l_phys(va, cpu_env, addr);
2436 /* Longword virtual PTE fetch (hw_ldl/v) */
2439 /* Quadword virtual PTE fetch (hw_ldq/v) */
2449 /* Longword virtual access (hw_ldl) */
2452 /* Quadword virtual access (hw_ldq) */
2455 /* Longword virtual access with protection check (hw_ldl/w) */
2456 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2459 /* Quadword virtual access with protection check (hw_ldq/w) */
2460 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2463 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2466 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2469 /* Longword virtual access with alternate access mode and
2470 protection checks (hw_ldl/wa) */
2471 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2474 /* Quadword virtual access with alternate access mode and
2475 protection checks (hw_ldq/wa) */
2476 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2479 tcg_temp_free(addr);
2487 vc = dest_gpr(ctx, rc);
2490 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2492 va = load_fpr(ctx, ra);
2493 tcg_gen_mov_i64(vc, va);
2495 } else if (fn7 == 0x78) {
2497 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2499 t32 = tcg_temp_new_i32();
2500 va = load_fpr(ctx, ra);
2501 gen_helper_s_to_memory(t32, va);
2502 tcg_gen_ext_i32_i64(vc, t32);
2503 tcg_temp_free_i32(t32);
2507 vb = load_gpr_lit(ctx, rb, lit, islit);
2511 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2513 tcg_gen_ext8s_i64(vc, vb);
2517 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2519 tcg_gen_ext16s_i64(vc, vb);
2523 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2526 gen_helper_ctpop(vc, vb);
2530 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2532 va = load_gpr(ctx, ra);
2533 gen_helper_perr(vc, va, vb);
2537 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2540 gen_helper_ctlz(vc, vb);
2544 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2547 gen_helper_cttz(vc, vb);
2551 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2554 gen_helper_unpkbw(vc, vb);
2558 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2561 gen_helper_unpkbl(vc, vb);
2565 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2568 gen_helper_pkwb(vc, vb);
2572 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2575 gen_helper_pklb(vc, vb);
2579 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2580 va = load_gpr(ctx, ra);
2581 gen_helper_minsb8(vc, va, vb);
2585 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2586 va = load_gpr(ctx, ra);
2587 gen_helper_minsw4(vc, va, vb);
2591 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2592 va = load_gpr(ctx, ra);
2593 gen_helper_minub8(vc, va, vb);
2597 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2598 va = load_gpr(ctx, ra);
2599 gen_helper_minuw4(vc, va, vb);
2603 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2604 va = load_gpr(ctx, ra);
2605 gen_helper_maxub8(vc, va, vb);
2609 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2610 va = load_gpr(ctx, ra);
2611 gen_helper_maxuw4(vc, va, vb);
2615 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2616 va = load_gpr(ctx, ra);
2617 gen_helper_maxsb8(vc, va, vb);
2621 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2622 va = load_gpr(ctx, ra);
2623 gen_helper_maxsw4(vc, va, vb);
2631 /* HW_MTPR (PALcode) */
2632 #ifndef CONFIG_USER_ONLY
2633 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2634 vb = load_gpr(ctx, rb);
2635 ret = gen_mtpr(ctx, vb, insn & 0xffff);
2642 /* HW_RET (PALcode) */
2643 #ifndef CONFIG_USER_ONLY
2644 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2646 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2647 address from EXC_ADDR. This turns out to be useful for our
2648 emulation PALcode, so continue to accept it. */
2649 ctx->lit = vb = tcg_temp_new();
2650 tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2652 vb = load_gpr(ctx, rb);
2654 tmp = tcg_temp_new();
2655 tcg_gen_movi_i64(tmp, 0);
2656 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
2657 tcg_gen_movi_i64(cpu_lock_addr, -1);
2658 tcg_gen_andi_i64(tmp, vb, 1);
2659 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
2660 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2661 ret = EXIT_PC_UPDATED;
2668 /* HW_ST (PALcode) */
2669 #ifndef CONFIG_USER_ONLY
2670 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2672 TCGv addr = tcg_temp_new();
2673 va = load_gpr(ctx, ra);
2674 vb = load_gpr(ctx, rb);
2676 tcg_gen_addi_i64(addr, vb, disp12);
2677 switch ((insn >> 12) & 0xF) {
2679 /* Longword physical access */
2680 gen_helper_stl_phys(cpu_env, addr, va);
2683 /* Quadword physical access */
2684 gen_helper_stq_phys(cpu_env, addr, va);
2687 /* Longword physical access with lock */
2688 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2691 /* Quadword physical access with lock */
2692 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2695 /* Longword virtual access */
2698 /* Quadword virtual access */
2719 /* Longword virtual access with alternate access mode */
2722 /* Quadword virtual access with alternate access mode */
2731 tcg_temp_free(addr);
2739 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2743 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2747 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2751 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2755 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2759 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2763 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2767 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2771 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2775 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2779 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2783 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2787 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2791 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2795 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
2799 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
2803 ret = gen_bdirect(ctx, ra, disp21);
2805 case 0x31: /* FBEQ */
2806 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2808 case 0x32: /* FBLT */
2809 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2811 case 0x33: /* FBLE */
2812 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2816 ret = gen_bdirect(ctx, ra, disp21);
2818 case 0x35: /* FBNE */
2819 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2821 case 0x36: /* FBGE */
2822 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2824 case 0x37: /* FBGT */
2825 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2829 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2833 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2837 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2841 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2845 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2849 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2853 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2857 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2860 ret = gen_invalid(ctx);
2867 void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
2869 AlphaCPU *cpu = alpha_env_get_cpu(env);
2870 CPUState *cs = CPU(cpu);
2871 DisasContext ctx, *ctxp = &ctx;
2872 target_ulong pc_start;
2873 target_ulong pc_mask;
2883 ctx.mem_idx = cpu_mmu_index(env, false);
2884 ctx.implver = env->implver;
2885 ctx.singlestep_enabled = cs->singlestep_enabled;
2887 #ifdef CONFIG_USER_ONLY
2888 ctx.ir = cpu_std_ir;
2890 ctx.palbr = env->palbr;
2891 ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2894 /* ??? Every TB begins with unset rounding mode, to be initialized on
2895 the first fp insn of the TB. Alternatively we could define a proper
2896 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2897 to reset the FP_STATUS to that default at the end of any TB that
2898 changes the default. We could even (gasp) dynamically figure out
2899 what default would be most efficient given the running program. */
2901 /* Similarly for flush-to-zero. */
2905 max_insns = tb->cflags & CF_COUNT_MASK;
2906 if (max_insns == 0) {
2907 max_insns = CF_COUNT_MASK;
2909 if (max_insns > TCG_MAX_INSNS) {
2910 max_insns = TCG_MAX_INSNS;
2913 if (in_superpage(&ctx, pc_start)) {
2914 pc_mask = (1ULL << 41) - 1;
2916 pc_mask = ~TARGET_PAGE_MASK;
2921 tcg_gen_insn_start(ctx.pc);
2924 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2925 ret = gen_excp(&ctx, EXCP_DEBUG, 0);
2926 /* The address covered by the breakpoint must be included in
2927 [tb->pc, tb->pc + tb->size) in order for it to be
2928 properly cleared -- thus we increment the PC here so that
2929 the logic setting tb->size below does the right thing. */
2933 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2936 insn = cpu_ldl_code(env, ctx.pc);
2938 TCGV_UNUSED_I64(ctx.zero);
2939 TCGV_UNUSED_I64(ctx.sink);
2940 TCGV_UNUSED_I64(ctx.lit);
2943 ret = translate_one(ctxp, insn);
2945 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
2946 tcg_gen_discard_i64(ctx.sink);
2947 tcg_temp_free(ctx.sink);
2949 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
2950 tcg_temp_free(ctx.zero);
2952 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
2953 tcg_temp_free(ctx.lit);
2956 /* If we reach a page boundary, are single-stepping,
2957 or exhaust instruction count, stop generation. */
2959 && ((ctx.pc & pc_mask) == 0
2960 || tcg_op_buf_full()
2961 || num_insns >= max_insns
2963 || ctx.singlestep_enabled)) {
2964 ret = EXIT_PC_STALE;
2966 } while (ret == NO_EXIT);
2968 if (tb->cflags & CF_LAST_IO) {
2977 tcg_gen_movi_i64(cpu_pc, ctx.pc);
2979 case EXIT_PC_UPDATED:
2980 if (ctx.singlestep_enabled) {
2981 gen_excp_1(EXCP_DEBUG, 0);
2990 gen_tb_end(tb, num_insns);
2992 tb->size = ctx.pc - pc_start;
2993 tb->icount = num_insns;
2996 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2997 qemu_log("IN: %s\n", lookup_symbol(pc_start));
2998 log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
3004 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,