2 * Alpha emulation cpu translation for qemu.
4 * Copyright (c) 2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "disas/disas.h"
22 #include "qemu/host-utils.h"
29 #undef ALPHA_DEBUG_DISAS
30 #define CONFIG_SOFTFLOAT_INLINE
32 #ifdef ALPHA_DEBUG_DISAS
33 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
35 # define LOG_DISAS(...) do { } while (0)
38 typedef struct DisasContext DisasContext;
40 struct TranslationBlock *tb;
44 /* Current rounding mode for this TB. */
46 /* Current flush-to-zero setting for this TB. */
49 /* implver value for this CPU. */
52 /* Temporaries for $31 and $f31 as source and destination. */
55 /* Temporary for immediate constants. */
58 bool singlestep_enabled;
61 /* Return values from translate_one, indicating the state of the TB.
62 Note that zero indicates that we are not exiting the TB. */
67 /* We have emitted one or more goto_tb. No fixup required. */
70 /* We are not using a goto_tb (for whatever reason), but have updated
71 the PC (for whatever reason), so there's no need to do it again on exiting the TB. */
75 /* We are exiting the TB, but have neither emitted a goto_tb, nor
76 updated the PC for the next instruction to be executed. */
79 /* We are ending the TB with a noreturn function call, e.g. longjmp.
80 No following code will be executed. */
84 /* global register indexes */
85 static TCGv_ptr cpu_env;
86 static TCGv cpu_ir[31];
87 static TCGv cpu_fir[31];
89 static TCGv cpu_lock_addr;
90 static TCGv cpu_lock_st_addr;
91 static TCGv cpu_lock_value;
92 static TCGv cpu_unique;
93 #ifndef CONFIG_USER_ONLY
94 static TCGv cpu_sysval;
99 static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
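/* Sizing note for the buffer above: it holds the NUL-terminated names
   "ir0".."ir30" (ten 4-byte plus twenty-one 5-byte strings) followed by
   "fir0".."fir30" (ten 5-byte plus twenty-one 6-byte strings), matching
   the sprintf loop in alpha_translate_init() below.  */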
101 #include "exec/gen-icount.h"
103 void alpha_translate_init(void)
107 static int done_init = 0;
113 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
116 for (i = 0; i < 31; i++) {
117 sprintf(p, "ir%d", i);
118 cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
119 offsetof(CPUAlphaState, ir[i]), p);
120 p += (i < 10) ? 4 : 5;
122 sprintf(p, "fir%d", i);
123 cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
124 offsetof(CPUAlphaState, fir[i]), p);
125 p += (i < 10) ? 5 : 6;
128 cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
129 offsetof(CPUAlphaState, pc), "pc");
131 cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
132 offsetof(CPUAlphaState, lock_addr),
134 cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
135 offsetof(CPUAlphaState, lock_st_addr),
137 cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
138 offsetof(CPUAlphaState, lock_value),
141 cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
142 offsetof(CPUAlphaState, unique), "unique");
143 #ifndef CONFIG_USER_ONLY
144 cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
145 offsetof(CPUAlphaState, sysval), "sysval");
146 cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
147 offsetof(CPUAlphaState, usp), "usp");
153 static TCGv load_zero(DisasContext *ctx)
155 if (TCGV_IS_UNUSED_I64(ctx->zero)) {
156 ctx->zero = tcg_const_local_i64(0);
161 static TCGv dest_sink(DisasContext *ctx)
163 if (TCGV_IS_UNUSED_I64(ctx->sink)) {
164 ctx->sink = tcg_temp_local_new();
169 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
171 if (likely(reg < 31)) {
174 return load_zero(ctx);
178 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
179 uint8_t lit, bool islit)
182 ctx->lit = tcg_const_i64(lit);
184 } else if (likely(reg < 31)) {
187 return load_zero(ctx);
191 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
193 if (likely(reg < 31)) {
196 return dest_sink(ctx);
200 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
202 if (likely(reg < 31)) {
205 return load_zero(ctx);
209 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
211 if (likely(reg < 31)) {
214 return dest_sink(ctx);
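/* Note: architecturally, $31 and $f31 always read as zero and writes to
   them are discarded.  The helpers above implement that by substituting a
   shared zero temporary for reads and a scratch "sink" temporary for
   writes, so the translators below need not special-case register 31
   themselves.  */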
218 static void gen_excp_1(int exception, int error_code)
222 tmp1 = tcg_const_i32(exception);
223 tmp2 = tcg_const_i32(error_code);
224 gen_helper_excp(cpu_env, tmp1, tmp2);
225 tcg_temp_free_i32(tmp2);
226 tcg_temp_free_i32(tmp1);
229 static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
231 tcg_gen_movi_i64(cpu_pc, ctx->pc);
232 gen_excp_1(exception, error_code);
233 return EXIT_NORETURN;
236 static inline ExitStatus gen_invalid(DisasContext *ctx)
238 return gen_excp(ctx, EXCP_OPCDEC, 0);
241 static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
243 TCGv_i32 tmp32 = tcg_temp_new_i32();
244 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
245 gen_helper_memory_to_f(t0, tmp32);
246 tcg_temp_free_i32(tmp32);
249 static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
251 TCGv tmp = tcg_temp_new();
252 tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
253 gen_helper_memory_to_g(t0, tmp);
257 static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
259 TCGv_i32 tmp32 = tcg_temp_new_i32();
260 tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
261 gen_helper_memory_to_s(t0, tmp32);
262 tcg_temp_free_i32(tmp32);
265 static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
267 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
268 tcg_gen_mov_i64(cpu_lock_addr, t1);
269 tcg_gen_mov_i64(cpu_lock_value, t0);
272 static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
274 tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
275 tcg_gen_mov_i64(cpu_lock_addr, t1);
276 tcg_gen_mov_i64(cpu_lock_value, t0);
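/* The two load-locked helpers above record the locked address and the
   value observed in cpu_lock_addr/cpu_lock_value; gen_store_conditional()
   below checks both to decide whether the matching STL_C/STQ_C succeeds.  */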
279 static inline void gen_load_mem(DisasContext *ctx,
280 void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
282 int ra, int rb, int32_t disp16, bool fp,
287 /* LDQ_U with ra $31 is UNOP. Various other loads are forms of
288 prefetches, which we can treat as nops. No worries about
289 missed exceptions here. */
290 if (unlikely(ra == 31)) {
294 tmp = tcg_temp_new();
295 addr = load_gpr(ctx, rb);
298 tcg_gen_addi_i64(tmp, addr, disp16);
302 tcg_gen_andi_i64(tmp, addr, ~0x7);
306 va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
307 tcg_gen_qemu_load(va, addr, ctx->mem_idx);
312 static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
314 TCGv_i32 tmp32 = tcg_temp_new_i32();
315 gen_helper_f_to_memory(tmp32, t0);
316 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
317 tcg_temp_free_i32(tmp32);
320 static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
322 TCGv tmp = tcg_temp_new();
323 gen_helper_g_to_memory(tmp, t0);
324 tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
328 static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
330 TCGv_i32 tmp32 = tcg_temp_new_i32();
331 gen_helper_s_to_memory(tmp32, t0);
332 tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
333 tcg_temp_free_i32(tmp32);
336 static inline void gen_store_mem(DisasContext *ctx,
337 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
339 int ra, int rb, int32_t disp16, bool fp,
344 tmp = tcg_temp_new();
345 addr = load_gpr(ctx, rb);
348 tcg_gen_addi_i64(tmp, addr, disp16);
352 tcg_gen_andi_i64(tmp, addr, ~0x7);
356 va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
357 tcg_gen_qemu_store(va, addr, ctx->mem_idx);
362 static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
363 int32_t disp16, int quad)
368 /* ??? Don't bother storing anything. The user can't tell
369 the difference, since the zero register always reads zero. */
373 #if defined(CONFIG_USER_ONLY)
374 addr = cpu_lock_st_addr;
376 addr = tcg_temp_local_new();
379 tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
381 #if defined(CONFIG_USER_ONLY)
382 /* ??? This is handled via a complicated version of compare-and-swap
383 in the cpu_loop. Hopefully one day we'll have a real CAS opcode
384 in TCG so that this isn't necessary. */
385 return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
387 /* ??? In system mode we are never multi-threaded, so CAS can be
388 implemented via a non-atomic load-compare-store sequence. */
390 int lab_fail, lab_done;
393 lab_fail = gen_new_label();
394 lab_done = gen_new_label();
395 tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
397 val = tcg_temp_new();
398 tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
399 tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);
401 tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
402 quad ? MO_LEQ : MO_LEUL);
403 tcg_gen_movi_i64(cpu_ir[ra], 1);
404 tcg_gen_br(lab_done);
406 gen_set_label(lab_fail);
407 tcg_gen_movi_i64(cpu_ir[ra], 0);
409 gen_set_label(lab_done);
410 tcg_gen_movi_i64(cpu_lock_addr, -1);
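/* In outline, the non-atomic system-mode sequence emitted above is:
       if (addr != lock_addr)    goto fail;
       if (*addr != lock_value)  goto fail;
       *addr = ra;  ra = 1;      goto done;
     fail:  ra = 0;
     done:  lock_addr = -1;      (always drop the reservation)
   This is only a sketch of the generated TCG, not additional emitted
   code.  */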
418 static bool in_superpage(DisasContext *ctx, int64_t addr)
420 return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
422 && ((addr >> 41) & 3) == 2
423 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
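/* For example, assuming the usual 43-bit virtual address space
   (TARGET_VIRT_ADDR_SPACE_BITS == 43), this accepts the sign-extended
   KSEG-style superpage range 0xfffffc0000000000 .. 0xfffffdffffffffff:
   kernel-mode addresses whose bits <42:41> are 2 and whose upper bits
   are a copy of the sign bit.  */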
426 static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
428 /* Suppress goto_tb in the case of single-stepping and IO. */
429 if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
432 /* If the destination is in the superpage, the page perms can't change. */
433 if (in_superpage(ctx, dest)) {
436 /* Check for the dest on the same page as the start of the TB. */
437 return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
440 static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
442 uint64_t dest = ctx->pc + (disp << 2);
445 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
448 /* Notice branch-to-next; used to initialize RA with the PC. */
451 } else if (use_goto_tb(ctx, dest)) {
453 tcg_gen_movi_i64(cpu_pc, dest);
454 tcg_gen_exit_tb((uintptr_t)ctx->tb);
457 tcg_gen_movi_i64(cpu_pc, dest);
458 return EXIT_PC_UPDATED;
462 static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
463 TCGv cmp, int32_t disp)
465 uint64_t dest = ctx->pc + (disp << 2);
466 int lab_true = gen_new_label();
468 if (use_goto_tb(ctx, dest)) {
469 tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
472 tcg_gen_movi_i64(cpu_pc, ctx->pc);
473 tcg_gen_exit_tb((uintptr_t)ctx->tb);
475 gen_set_label(lab_true);
477 tcg_gen_movi_i64(cpu_pc, dest);
478 tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
482 TCGv_i64 z = tcg_const_i64(0);
483 TCGv_i64 d = tcg_const_i64(dest);
484 TCGv_i64 p = tcg_const_i64(ctx->pc);
486 tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
488 tcg_temp_free_i64(z);
489 tcg_temp_free_i64(d);
490 tcg_temp_free_i64(p);
491 return EXIT_PC_UPDATED;
495 static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
496 int32_t disp, int mask)
500 if (unlikely(ra == 31)) {
501 cmp_tmp = tcg_const_i64(0);
503 cmp_tmp = tcg_temp_new();
505 tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
507 tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
511 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
514 /* Fold -0.0 for comparison with COND. */
516 static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
518 uint64_t mzero = 1ull << 63;
523 /* For <= or >, the -0.0 value directly compares the way we want. */
524 tcg_gen_mov_i64(dest, src);
529 /* For == or !=, we can simply mask off the sign bit and compare. */
530 tcg_gen_andi_i64(dest, src, mzero - 1);
535 /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
536 tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
537 tcg_gen_neg_i64(dest, dest);
538 tcg_gen_and_i64(dest, dest, src);
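/* Worked example for the >= / < case above: an input of
   0x8000000000000000 (-0.0) fails the NE comparison, so the generated
   mask is 0 and the result is +0.0; any other input leaves the mask at
   all-ones and passes through unchanged.  The other cases rely on -0.0
   already comparing correctly (<= / >) or on simply clearing the sign
   bit (== / !=).  */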
546 static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
551 if (unlikely(ra == 31)) {
552 /* Very uncommon case, but easier to optimize it to an integer
553 comparison than continuing with the floating point comparison. */
554 return gen_bcond(ctx, cond, ra, disp, 0);
557 cmp_tmp = tcg_temp_new();
558 gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
559 return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
562 static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
566 if (unlikely(rc == 31)) {
570 c1 = tcg_temp_new_i64();
571 if (unlikely(ra == 31)) {
572 tcg_gen_movi_i64(c1, 0);
574 gen_fold_mzero(cond, c1, cpu_fir[ra]);
577 v1 = tcg_const_i64(0);
581 z = tcg_const_i64(0);
583 tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);
585 tcg_temp_free_i64(z);
586 tcg_temp_free_i64(c1);
588 tcg_temp_free_i64(v1);
592 #define QUAL_RM_N 0x080 /* Round mode nearest even */
593 #define QUAL_RM_C 0x000 /* Round mode chopped */
594 #define QUAL_RM_M 0x040 /* Round mode minus infinity */
595 #define QUAL_RM_D 0x0c0 /* Round mode dynamic */
596 #define QUAL_RM_MASK 0x0c0
598 #define QUAL_U 0x100 /* Underflow enable (fp output) */
599 #define QUAL_V 0x100 /* Overflow enable (int output) */
600 #define QUAL_S 0x400 /* Software completion enable */
601 #define QUAL_I 0x200 /* Inexact detection enable */
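/* Example decode: an instruction qualified with /sui sets
   QUAL_S | QUAL_U | QUAL_I (0x700) in fn11, while fn11 & QUAL_RM_MASK
   selects the rounding mode, e.g. QUAL_RM_D (0x0c0) defers to the dynamic
   rounding mode held in the FPCR (see gen_qual_roundmode below).  */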
603 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
607 fn11 &= QUAL_RM_MASK;
608 if (fn11 == ctx->tb_rm) {
613 tmp = tcg_temp_new_i32();
616 tcg_gen_movi_i32(tmp, float_round_nearest_even);
619 tcg_gen_movi_i32(tmp, float_round_to_zero);
622 tcg_gen_movi_i32(tmp, float_round_down);
625 tcg_gen_ld8u_i32(tmp, cpu_env,
626 offsetof(CPUAlphaState, fpcr_dyn_round));
630 #if defined(CONFIG_SOFTFLOAT_INLINE)
631 /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
632 With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
633 sets the one field. */
634 tcg_gen_st8_i32(tmp, cpu_env,
635 offsetof(CPUAlphaState, fp_status.float_rounding_mode));
637 gen_helper_setroundmode(tmp);
640 tcg_temp_free_i32(tmp);
643 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
648 if (fn11 == ctx->tb_ftz) {
653 tmp = tcg_temp_new_i32();
655 /* Underflow is enabled, use the FPCR setting. */
656 tcg_gen_ld8u_i32(tmp, cpu_env,
657 offsetof(CPUAlphaState, fpcr_flush_to_zero));
659 /* Underflow is disabled, force flush-to-zero. */
660 tcg_gen_movi_i32(tmp, 1);
663 #if defined(CONFIG_SOFTFLOAT_INLINE)
664 tcg_gen_st8_i32(tmp, cpu_env,
665 offsetof(CPUAlphaState, fp_status.flush_to_zero));
667 gen_helper_setflushzero(tmp);
670 tcg_temp_free_i32(tmp);
673 static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
677 val = tcg_const_i64(0);
679 if ((fn11 & QUAL_S) == 0) {
681 gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
683 gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
686 val = tcg_temp_new();
687 tcg_gen_mov_i64(val, cpu_fir[reg]);
692 static void gen_fp_exc_clear(void)
694 #if defined(CONFIG_SOFTFLOAT_INLINE)
695 TCGv_i32 zero = tcg_const_i32(0);
696 tcg_gen_st8_i32(zero, cpu_env,
697 offsetof(CPUAlphaState, fp_status.float_exception_flags));
698 tcg_temp_free_i32(zero);
700 gen_helper_fp_exc_clear(cpu_env);
704 static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
706 /* ??? We ought to be able to do something with imprecise exceptions.
707 E.g. notice we're still in the trap shadow of something within the
708 TB and do not generate the code to signal the exception; end the TB
709 when an exception is forced to arrive, either by consumption of a
710 register value or TRAPB or EXCB. */
711 TCGv_i32 exc = tcg_temp_new_i32();
714 #if defined(CONFIG_SOFTFLOAT_INLINE)
715 tcg_gen_ld8u_i32(exc, cpu_env,
716 offsetof(CPUAlphaState, fp_status.float_exception_flags));
718 gen_helper_fp_exc_get(exc, cpu_env);
722 tcg_gen_andi_i32(exc, exc, ~ignore);
725 /* ??? Pass in the regno of the destination so that the helper can
726 set EXC_MASK, which contains a bitmask of destination registers
727 that have caused arithmetic traps. A simple userspace emulation
728 does not require this. We do need it for a guest kernel's entArith,
729 or if we were to do something clever with imprecise exceptions. */
730 reg = tcg_const_i32(rc + 32);
733 gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
735 gen_helper_fp_exc_raise(cpu_env, exc, reg);
738 tcg_temp_free_i32(reg);
739 tcg_temp_free_i32(exc);
742 static inline void gen_fp_exc_raise(int rc, int fn11)
744 gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
747 static void gen_fcvtlq(int rb, int rc)
749 if (unlikely(rc == 31)) {
752 if (unlikely(rb == 31)) {
753 tcg_gen_movi_i64(cpu_fir[rc], 0);
755 TCGv tmp = tcg_temp_new();
757 /* The arithmetic right shift here, plus the sign-extended mask below
758 yields a sign-extended result without an explicit ext32s_i64. */
759 tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
760 tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
761 tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
762 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
763 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
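/* I.e. this reassembles the 32-bit integer from the "longword in FP
   register" layout: source bits <63:62> supply result bits <31:30> (plus
   the sign extension above bit 31), and source bits <58:29> supply result
   bits <29:0>.  */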
769 static void gen_fcvtql(int rb, int rc)
771 if (unlikely(rc == 31)) {
774 if (unlikely(rb == 31)) {
775 tcg_gen_movi_i64(cpu_fir[rc], 0);
777 TCGv tmp = tcg_temp_new();
779 tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
780 tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
781 tcg_gen_shli_i64(tmp, tmp, 32);
782 tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
783 tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
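/* The inverse of gen_fcvtlq above: result bits <63:62> come from source
   bits <31:30>, and result bits <58:29> come from source bits <29:0>.  */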
789 static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
792 int lab = gen_new_label();
793 TCGv tmp = tcg_temp_new();
795 tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
796 tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
797 gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
804 static void gen_ieee_arith2(DisasContext *ctx,
805 void (*helper)(TCGv, TCGv_ptr, TCGv),
806 int rb, int rc, int fn11)
810 /* ??? This is wrong: the instruction is not a nop, it still may raise exceptions. */
812 if (unlikely(rc == 31)) {
816 gen_qual_roundmode(ctx, fn11);
817 gen_qual_flushzero(ctx, fn11);
820 vb = gen_ieee_input(rb, fn11, 0);
821 helper(cpu_fir[rc], cpu_env, vb);
824 gen_fp_exc_raise(rc, fn11);
827 #define IEEE_ARITH2(name) \
828 static inline void glue(gen_f, name)(DisasContext *ctx, \
829 int rb, int rc, int fn11) \
831 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
838 static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
843 /* ??? This is wrong: the instruction is not a nop, it still may raise exceptions. */
845 if (unlikely(rc == 31)) {
849 /* No need to set flushzero, since we have an integer output. */
851 vb = gen_ieee_input(rb, fn11, 0);
853 /* Almost all integer conversions use cropped rounding, and most
854 also do not have integer overflow enabled. Special case that. */
857 gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
859 case QUAL_V | QUAL_RM_C:
860 case QUAL_S | QUAL_V | QUAL_RM_C:
861 ignore = float_flag_inexact;
863 case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
864 gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
867 gen_qual_roundmode(ctx, fn11);
868 gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
869 ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
870 ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
875 gen_fp_exc_raise_ignore(rc, fn11, ignore);
878 static void gen_ieee_intcvt(DisasContext *ctx,
879 void (*helper)(TCGv, TCGv_ptr, TCGv),
880 int rb, int rc, int fn11)
884 /* ??? This is wrong: the instruction is not a nop, it still may raise exceptions. */
886 if (unlikely(rc == 31)) {
890 gen_qual_roundmode(ctx, fn11);
893 vb = tcg_const_i64(0);
898 /* The only exception that can be raised by integer conversion
899 is inexact. Thus we only need to worry about exceptions when
900 inexact handling is requested. */
903 helper(cpu_fir[rc], cpu_env, vb);
904 gen_fp_exc_raise(rc, fn11);
906 helper(cpu_fir[rc], cpu_env, vb);
914 #define IEEE_INTCVT(name) \
915 static inline void glue(gen_f, name)(DisasContext *ctx, \
916 int rb, int rc, int fn11) \
918 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
923 static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
928 if (unlikely(rc == 31)) {
932 vmask = tcg_const_i64(mask);
942 va = tcg_temp_new_i64();
943 tcg_gen_mov_i64(va, cpu_fir[ra]);
945 tcg_gen_andc_i64(va, vmask, va);
947 tcg_gen_and_i64(va, va, vmask);
955 vb = tcg_temp_new_i64();
956 tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
959 switch (za << 1 | zb) {
961 tcg_gen_or_i64(cpu_fir[rc], va, vb);
964 tcg_gen_mov_i64(cpu_fir[rc], va);
967 tcg_gen_mov_i64(cpu_fir[rc], vb);
970 tcg_gen_movi_i64(cpu_fir[rc], 0);
974 tcg_temp_free(vmask);
983 static inline void gen_fcpys(int ra, int rb, int rc)
985 gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
988 static inline void gen_fcpysn(int ra, int rb, int rc)
990 gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
993 static inline void gen_fcpyse(int ra, int rb, int rc)
995 gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
998 static void gen_ieee_arith3(DisasContext *ctx,
999 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
1000 int ra, int rb, int rc, int fn11)
1004 /* ??? This is wrong: the instruction is not a nop, it still may
1005 raise exceptions. */
1006 if (unlikely(rc == 31)) {
1010 gen_qual_roundmode(ctx, fn11);
1011 gen_qual_flushzero(ctx, fn11);
1014 va = gen_ieee_input(ra, fn11, 0);
1015 vb = gen_ieee_input(rb, fn11, 0);
1016 helper(cpu_fir[rc], cpu_env, va, vb);
1020 gen_fp_exc_raise(rc, fn11);
1023 #define IEEE_ARITH3(name) \
1024 static inline void glue(gen_f, name)(DisasContext *ctx, \
1025 int ra, int rb, int rc, int fn11) \
1027 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1038 static void gen_ieee_compare(DisasContext *ctx,
1039 void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
1040 int ra, int rb, int rc, int fn11)
1044 /* ??? This is wrong: the instruction is not a nop, it still may
1045 raise exceptions. */
1046 if (unlikely(rc == 31)) {
1052 va = gen_ieee_input(ra, fn11, 1);
1053 vb = gen_ieee_input(rb, fn11, 1);
1054 helper(cpu_fir[rc], cpu_env, va, vb);
1058 gen_fp_exc_raise(rc, fn11);
1061 #define IEEE_CMP3(name) \
1062 static inline void glue(gen_f, name)(DisasContext *ctx, \
1063 int ra, int rb, int rc, int fn11) \
1065 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1072 static inline uint64_t zapnot_mask(uint8_t lit)
1077 for (i = 0; i < 8; ++i) {
1078 if ((lit >> i) & 1) {
1079 mask |= 0xffull << (i * 8);
1085 /* Implement zapnot with an immediate operand, which expands to some
1086 form of immediate AND. This is a basic building block in the
1087 definition of many of the other byte manipulation instructions. */
1088 static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
1092 tcg_gen_movi_i64(dest, 0);
1095 tcg_gen_ext8u_i64(dest, src);
1098 tcg_gen_ext16u_i64(dest, src);
1101 tcg_gen_ext32u_i64(dest, src);
1104 tcg_gen_mov_i64(dest, src);
1107 tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
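/* For example, zapnot_mask(0x0f) == 0x00000000ffffffffull, so ZAPNOT with
   a literal of 0x0f is exactly the 32-bit zero-extension special-cased
   above; ZAP with literal L is handled by the callers as ZAPNOT with
   literal ~L.  */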
1112 /* EXTWH, EXTLH, EXTQH */
1113 static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1114 uint8_t lit, uint8_t byte_mask)
1117 tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
1119 TCGv tmp = tcg_temp_new();
1120 tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
1121 tcg_gen_neg_i64(tmp, tmp);
1122 tcg_gen_andi_i64(tmp, tmp, 0x3f);
1123 tcg_gen_shl_i64(vc, va, tmp);
1126 gen_zapnoti(vc, vc, byte_mask);
1129 /* EXTBL, EXTWL, EXTLL, EXTQL */
1130 static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1131 uint8_t lit, uint8_t byte_mask)
1134 tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
1136 TCGv tmp = tcg_temp_new();
1137 tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
1138 tcg_gen_shli_i64(tmp, tmp, 3);
1139 tcg_gen_shr_i64(vc, va, tmp);
1142 gen_zapnoti(vc, vc, byte_mask);
1145 /* INSWH, INSLH, INSQH */
1146 static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1147 uint8_t lit, uint8_t byte_mask)
1149 TCGv tmp = tcg_temp_new();
1151 /* The instruction description has us left-shift the byte mask and extract
1152 bits <15:8> and apply that zap at the end. This is equivalent to simply
1153 performing the zap first and shifting afterward. */
1154 gen_zapnoti(tmp, va, byte_mask);
1158 if (unlikely(lit == 0)) {
1159 tcg_gen_movi_i64(vc, 0);
1161 tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
1164 TCGv shift = tcg_temp_new();
1166 /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
1167 portably by splitting the shift into two parts: shift_count-1 and 1.
1168 Arrange for the -1 by using ones-complement instead of
1169 twos-complement in the negation: ~(B * 8) & 63. */
1171 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1172 tcg_gen_not_i64(shift, shift);
1173 tcg_gen_andi_i64(shift, shift, 0x3f);
1175 tcg_gen_shr_i64(vc, tmp, shift);
1176 tcg_gen_shri_i64(vc, vc, 1);
1177 tcg_temp_free(shift);
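/* Shift-splitting example: for (B & 7) == 2 the variable shift is
   ~(2 * 8) & 63 == 47, and the extra constant shift of 1 gives the
   required total of 48 == 64 - 16; for (B & 7) == 0 the two shifts sum
   to 64 and the result is the architecturally required zero, without
   relying on an out-of-range 64-bit shift count.  */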
1182 /* INSBL, INSWL, INSLL, INSQL */
1183 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1184 uint8_t lit, uint8_t byte_mask)
1186 TCGv tmp = tcg_temp_new();
1188 /* The instruction description has us left-shift the byte mask
1189 the same number of byte slots as the data and apply the zap
1190 at the end. This is equivalent to simply performing the zap
1191 first and shifting afterward. */
1192 gen_zapnoti(tmp, va, byte_mask);
1195 tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
1197 TCGv shift = tcg_temp_new();
1198 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1199 tcg_gen_shli_i64(shift, shift, 3);
1200 tcg_gen_shl_i64(vc, tmp, shift);
1201 tcg_temp_free(shift);
1206 /* MSKWH, MSKLH, MSKQH */
1207 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1208 uint8_t lit, uint8_t byte_mask)
1211 gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
1213 TCGv shift = tcg_temp_new();
1214 TCGv mask = tcg_temp_new();
1216 /* The instruction description is as above, where the byte_mask
1217 is shifted left, and then we extract bits <15:8>. This can be
1218 emulated with a right-shift on the expanded byte mask. This
1219 requires extra care because for an input <2:0> == 0 we need a
1220 shift of 64 bits in order to generate a zero. This is done by
1221 splitting the shift into two parts, the variable shift - 1
1222 followed by a constant 1 shift. The code we expand below is
1223 equivalent to ~(B * 8) & 63. */
1225 tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1226 tcg_gen_not_i64(shift, shift);
1227 tcg_gen_andi_i64(shift, shift, 0x3f);
1228 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1229 tcg_gen_shr_i64(mask, mask, shift);
1230 tcg_gen_shri_i64(mask, mask, 1);
1232 tcg_gen_andc_i64(vc, va, mask);
1234 tcg_temp_free(mask);
1235 tcg_temp_free(shift);
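/* As with INSxH above, the two-part right shift turns a byte offset of 0
   into a full 64-bit shift, so the generated mask (and hence the cleared
   byte range) collapses to zero instead of being undefined.  */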
1239 /* MSKBL, MSKWL, MSKLL, MSKQL */
1240 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1241 uint8_t lit, uint8_t byte_mask)
1244 gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1246 TCGv shift = tcg_temp_new();
1247 TCGv mask = tcg_temp_new();
1249 tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1250 tcg_gen_shli_i64(shift, shift, 3);
1251 tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1252 tcg_gen_shl_i64(mask, mask, shift);
1254 tcg_gen_andc_i64(vc, va, mask);
1256 tcg_temp_free(mask);
1257 tcg_temp_free(shift);
1261 #define MVIOP2(name) \
1262 static inline void glue(gen_, name)(int rb, int rc) \
1264 if (unlikely(rc == 31)) \
1266 if (unlikely(rb == 31)) \
1267 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1269 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1276 static void gen_rx(int ra, int set)
1281 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
1284 tmp = tcg_const_i32(set);
1285 tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
1286 tcg_temp_free_i32(tmp);
1289 static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
1291 /* We're emulating OSF/1 PALcode. Many of these are trivial accesses
1292 to internal cpu registers. */
1294 /* Unprivileged PAL call */
1295 if (palcode >= 0x80 && palcode < 0xC0) {
1299 /* No-op inside QEMU. */
1303 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
1307 tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
1316 #ifndef CONFIG_USER_ONLY
1317 /* Privileged PAL code */
1318 if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
1322 /* No-op inside QEMU. */
1326 /* No-op inside QEMU. */
1330 tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
1334 tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
1338 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
1345 /* Note that we already know we're in kernel mode, so we know
1346 that PS only contains the 3 IPL bits. */
1347 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
1349 /* But make sure to store only the 3 IPL bits from the user. */
1350 tmp = tcg_temp_new();
1351 tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
1352 tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
1359 tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
1363 tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
1367 tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
1371 tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
1372 -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
1382 return gen_invalid(ctx);
1385 #ifdef CONFIG_USER_ONLY
1386 return gen_excp(ctx, EXCP_CALL_PAL, palcode);
1389 TCGv pc = tcg_const_i64(ctx->pc);
1390 TCGv entry = tcg_const_i64(palcode & 0x80
1391 ? 0x2000 + (palcode - 0x80) * 64
1392 : 0x1000 + palcode * 64);
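/* Entry-point layout assumed for our PALcode image: privileged calls
   (< 0x40) vector to 0x1000 + palcode * 64 and unprivileged calls
   (0x80..0xbf) to 0x2000 + (palcode - 0x80) * 64, presumably relative to
   the PAL base; e.g. CALL_PAL 0x83 (callsys) enters at offset 0x20c0.  */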
1394 gen_helper_call_pal(cpu_env, pc, entry);
1396 tcg_temp_free(entry);
1399 /* Since the destination is running in PALmode, we don't really
1400 need the page permissions check. We'll see the existence of
1401 the page when we create the TB, and we'll flush all TBs if
1402 we change the PAL base register. */
1403 if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
1405 tcg_gen_exit_tb((uintptr_t)ctx->tb);
1406 return EXIT_GOTO_TB;
1409 return EXIT_PC_UPDATED;
1414 #ifndef CONFIG_USER_ONLY
1416 #define PR_BYTE 0x100000
1417 #define PR_LONG 0x200000
1419 static int cpu_pr_data(int pr)
1422 case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
1423 case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
1424 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1425 case 3: return offsetof(CPUAlphaState, trap_arg0);
1426 case 4: return offsetof(CPUAlphaState, trap_arg1);
1427 case 5: return offsetof(CPUAlphaState, trap_arg2);
1428 case 6: return offsetof(CPUAlphaState, exc_addr);
1429 case 7: return offsetof(CPUAlphaState, palbr);
1430 case 8: return offsetof(CPUAlphaState, ptbr);
1431 case 9: return offsetof(CPUAlphaState, vptptr);
1432 case 10: return offsetof(CPUAlphaState, unique);
1433 case 11: return offsetof(CPUAlphaState, sysval);
1434 case 12: return offsetof(CPUAlphaState, usp);
1437 return offsetof(CPUAlphaState, shadow[pr - 32]);
1439 return offsetof(CPUAlphaState, scratch[pr - 40]);
1442 return offsetof(CPUAlphaState, alarm_expire);
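/* Note the encoding used above: the low bits are the offsetof() the field
   within CPUAlphaState, and PR_BYTE/PR_LONG flag fields narrower than 64
   bits so that gen_mfpr/gen_mtpr below can pick the matching ld8u/ld32s
   or st8/st32 access.  */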
1447 static ExitStatus gen_mfpr(int ra, int regno)
1449 int data = cpu_pr_data(regno);
1451 /* In our emulated PALcode, these processor registers have no
1452 side effects from reading. */
1457 /* Special help for VMTIME and WALLTIME. */
1458 if (regno == 250 || regno == 249) {
1459 void (*helper)(TCGv) = gen_helper_get_walltime;
1461 helper = gen_helper_get_vmtime;
1467 return EXIT_PC_STALE;
1474 /* The basic registers are data only, and unknown registers
1475 are read-zero, write-ignore. */
1477 tcg_gen_movi_i64(cpu_ir[ra], 0);
1478 } else if (data & PR_BYTE) {
1479 tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
1480 } else if (data & PR_LONG) {
1481 tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
1483 tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
1488 static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
1494 tmp = tcg_const_i64(0);
1502 gen_helper_tbia(cpu_env);
1507 gen_helper_tbis(cpu_env, tmp);
1512 tmp = tcg_const_i64(1);
1513 tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
1514 offsetof(CPUState, halted));
1515 return gen_excp(ctx, EXCP_HLT, 0);
1519 gen_helper_halt(tmp);
1520 return EXIT_PC_STALE;
1524 gen_helper_set_alarm(cpu_env, tmp);
1529 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
1530 /* Changing the PAL base register implies un-chaining all of the TBs
1531 that ended with a CALL_PAL. Since the base register usually only
1532 changes during boot, flushing everything works well. */
1533 gen_helper_tb_flush(cpu_env);
1534 return EXIT_PC_STALE;
1537 /* The basic registers are data only, and unknown registers
1538 are read-zero, write-ignore. */
1539 data = cpu_pr_data(regno);
1541 if (data & PR_BYTE) {
1542 tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
1543 } else if (data & PR_LONG) {
1544 tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
1546 tcg_gen_st_i64(tmp, cpu_env, data);
1558 #endif /* !USER_ONLY */
1560 #define REQUIRE_TB_FLAG(FLAG) \
1562 if ((ctx->tb->flags & (FLAG)) == 0) { \
1567 #define REQUIRE_REG_31(WHICH) \
1569 if (WHICH != 31) { \
1574 static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
1577 int32_t disp21, disp16;
1578 #ifndef CONFIG_USER_ONLY
1582 uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1584 TCGv va, vb, vc, tmp;
1588 /* Decode all instruction fields */
1590 ra = (insn >> 21) & 0x1F;
1591 rb = (insn >> 16) & 0x1F;
1593 islit = (insn >> 12) & 1;
1594 if (rb == 31 && !islit) {
1598 lit = (insn >> 13) & 0xFF;
1600 palcode = insn & 0x03FFFFFF;
1601 disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
1602 disp16 = (int16_t)(insn & 0x0000FFFF);
1603 #ifndef CONFIG_USER_ONLY
1604 disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
1606 fn11 = (insn >> 5) & 0x000007FF;
1608 fn7 = (insn >> 5) & 0x0000007F;
1609 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1610 opc, ra, rb, rc, disp16);
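/* Field layout recap (Alpha instruction formats): opc is insn<31:26>,
   ra is insn<25:21>, rb is insn<20:16>, rc is insn<4:0>; memory formats
   carry a signed 16-bit displacement in insn<15:0> (HW_LD/HW_ST use a
   signed 12-bit one in insn<11:0>), branch formats a signed 21-bit
   displacement in insn<20:0>, operate formats an 8-bit literal in
   insn<20:13> selected by insn<12>, and CALL_PAL uses insn<25:0>.  */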
1616 ret = gen_call_pal(ctx, palcode);
1642 disp16 = (uint32_t)disp16 << 16;
1646 va = dest_gpr(ctx, ra);
1647 /* It's worth special-casing immediate loads. */
1649 tcg_gen_movi_i64(va, disp16);
1651 tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1657 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1658 gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1662 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1666 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1667 gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1671 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1672 gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1676 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
1677 gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1681 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1685 vc = dest_gpr(ctx, rc);
1686 vb = load_gpr_lit(ctx, rb, lit, islit);
1690 /* Special case ADDL as SEXTL. */
1691 tcg_gen_ext32s_i64(vc, vb);
1695 /* Special case SUBQ as NEGQ. */
1696 tcg_gen_neg_i64(vc, vb);
1701 va = load_gpr(ctx, ra);
1705 tcg_gen_add_i64(vc, va, vb);
1706 tcg_gen_ext32s_i64(vc, vc);
1710 tmp = tcg_temp_new();
1711 tcg_gen_shli_i64(tmp, va, 2);
1712 tcg_gen_add_i64(tmp, tmp, vb);
1713 tcg_gen_ext32s_i64(vc, tmp);
1718 tcg_gen_sub_i64(vc, va, vb);
1719 tcg_gen_ext32s_i64(vc, vc);
1723 tmp = tcg_temp_new();
1724 tcg_gen_shli_i64(tmp, va, 2);
1725 tcg_gen_sub_i64(tmp, tmp, vb);
1726 tcg_gen_ext32s_i64(vc, tmp);
1731 gen_helper_cmpbge(vc, va, vb);
1735 tmp = tcg_temp_new();
1736 tcg_gen_shli_i64(tmp, va, 3);
1737 tcg_gen_add_i64(tmp, tmp, vb);
1738 tcg_gen_ext32s_i64(vc, tmp);
1743 tmp = tcg_temp_new();
1744 tcg_gen_shli_i64(tmp, va, 3);
1745 tcg_gen_sub_i64(tmp, tmp, vb);
1746 tcg_gen_ext32s_i64(vc, tmp);
1751 tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1755 tcg_gen_add_i64(vc, va, vb);
1759 tmp = tcg_temp_new();
1760 tcg_gen_shli_i64(tmp, va, 2);
1761 tcg_gen_add_i64(vc, tmp, vb);
1766 tcg_gen_sub_i64(vc, va, vb);
1770 tmp = tcg_temp_new();
1771 tcg_gen_shli_i64(tmp, va, 2);
1772 tcg_gen_sub_i64(vc, tmp, vb);
1777 tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1781 tmp = tcg_temp_new();
1782 tcg_gen_shli_i64(tmp, va, 3);
1783 tcg_gen_add_i64(vc, tmp, vb);
1788 tmp = tcg_temp_new();
1789 tcg_gen_shli_i64(tmp, va, 3);
1790 tcg_gen_sub_i64(vc, tmp, vb);
1795 tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1799 gen_helper_addlv(vc, cpu_env, va, vb);
1803 gen_helper_sublv(vc, cpu_env, va, vb);
1807 tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1811 gen_helper_addqv(vc, cpu_env, va, vb);
1815 gen_helper_subqv(vc, cpu_env, va, vb);
1819 tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1829 /* Special case BIS as NOP. */
1833 /* Special case BIS as MOV. */
1834 vc = dest_gpr(ctx, rc);
1836 tcg_gen_movi_i64(vc, lit);
1838 tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1844 vc = dest_gpr(ctx, rc);
1845 vb = load_gpr_lit(ctx, rb, lit, islit);
1847 if (fn7 == 0x28 && ra == 31) {
1848 /* Special case ORNOT as NOT. */
1849 tcg_gen_not_i64(vc, vb);
1853 va = load_gpr(ctx, ra);
1857 tcg_gen_and_i64(vc, va, vb);
1861 tcg_gen_andc_i64(vc, va, vb);
1865 tmp = tcg_temp_new();
1866 tcg_gen_andi_i64(tmp, va, 1);
1867 tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1868 vb, load_gpr(ctx, rc));
1873 tmp = tcg_temp_new();
1874 tcg_gen_andi_i64(tmp, va, 1);
1875 tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1876 vb, load_gpr(ctx, rc));
1881 tcg_gen_or_i64(vc, va, vb);
1885 tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1886 vb, load_gpr(ctx, rc));
1890 tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1891 vb, load_gpr(ctx, rc));
1895 tcg_gen_orc_i64(vc, va, vb);
1899 tcg_gen_xor_i64(vc, va, vb);
1903 tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1904 vb, load_gpr(ctx, rc));
1908 tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1909 vb, load_gpr(ctx, rc));
1913 tcg_gen_eqv_i64(vc, va, vb);
1919 uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
1920 tcg_gen_andi_i64(vc, vb, ~amask);
1925 tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1926 vb, load_gpr(ctx, rc));
1930 tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1931 vb, load_gpr(ctx, rc));
1936 tcg_gen_movi_i64(vc, ctx->implver);
1944 vc = dest_gpr(ctx, rc);
1945 va = load_gpr(ctx, ra);
1949 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1953 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1957 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1961 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1965 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1969 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1973 gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1977 gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1981 gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1986 gen_zapnoti(vc, va, ~lit);
1988 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1994 gen_zapnoti(vc, va, lit);
1996 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
2001 gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
2006 tcg_gen_shri_i64(vc, va, lit & 0x3f);
2008 tmp = tcg_temp_new();
2009 vb = load_gpr(ctx, rb);
2010 tcg_gen_andi_i64(tmp, vb, 0x3f);
2011 tcg_gen_shr_i64(vc, va, tmp);
2017 gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
2022 tcg_gen_shli_i64(vc, va, lit & 0x3f);
2024 tmp = tcg_temp_new();
2025 vb = load_gpr(ctx, rb);
2026 tcg_gen_andi_i64(tmp, vb, 0x3f);
2027 tcg_gen_shl_i64(vc, va, tmp);
2033 gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
2038 tcg_gen_sari_i64(vc, va, lit & 0x3f);
2040 tmp = tcg_temp_new();
2041 vb = load_gpr(ctx, rb);
2042 tcg_gen_andi_i64(tmp, vb, 0x3f);
2043 tcg_gen_sar_i64(vc, va, tmp);
2049 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
2053 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
2057 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
2061 gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
2065 gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
2069 gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
2073 gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
2077 gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
2081 gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
2089 vc = dest_gpr(ctx, rc);
2090 vb = load_gpr_lit(ctx, rb, lit, islit);
2091 va = load_gpr(ctx, ra);
2095 tcg_gen_mul_i64(vc, va, vb);
2096 tcg_gen_ext32s_i64(vc, vc);
2100 tcg_gen_mul_i64(vc, va, vb);
2104 tmp = tcg_temp_new();
2105 tcg_gen_mulu2_i64(tmp, vc, va, vb);
2110 gen_helper_mullv(vc, cpu_env, va, vb);
2114 gen_helper_mulqv(vc, cpu_env, va, vb);
2122 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2123 vc = dest_fpr(ctx, rc);
2124 switch (fpfn) { /* fn11 & 0x3F */
2128 t32 = tcg_temp_new_i32();
2129 va = load_gpr(ctx, ra);
2130 tcg_gen_trunc_i64_i32(t32, va);
2131 gen_helper_memory_to_s(vc, t32);
2132 tcg_temp_free_i32(t32);
2137 vb = load_fpr(ctx, rb);
2138 gen_helper_sqrtf(vc, cpu_env, vb);
2143 gen_fsqrts(ctx, rb, rc, fn11);
2148 t32 = tcg_temp_new_i32();
2149 va = load_gpr(ctx, ra);
2150 tcg_gen_trunc_i64_i32(t32, va);
2151 gen_helper_memory_to_f(vc, t32);
2152 tcg_temp_free_i32(t32);
2157 va = load_gpr(ctx, ra);
2158 tcg_gen_mov_i64(vc, va);
2163 vb = load_fpr(ctx, rb);
2164 gen_helper_sqrtg(vc, cpu_env, vb);
2169 gen_fsqrtt(ctx, rb, rc, fn11);
2177 /* VAX floating point */
2178 /* XXX: rounding mode and trap are ignored (!) */
2179 vc = dest_fpr(ctx, rc);
2180 vb = load_fpr(ctx, rb);
2181 va = load_fpr(ctx, ra);
2182 switch (fpfn) { /* fn11 & 0x3F */
2185 gen_helper_addf(vc, cpu_env, va, vb);
2189 gen_helper_subf(vc, cpu_env, va, vb);
2193 gen_helper_mulf(vc, cpu_env, va, vb);
2197 gen_helper_divf(vc, cpu_env, va, vb);
2205 gen_helper_addg(vc, cpu_env, va, vb);
2209 gen_helper_subg(vc, cpu_env, va, vb);
2213 gen_helper_mulg(vc, cpu_env, va, vb);
2217 gen_helper_divg(vc, cpu_env, va, vb);
2221 gen_helper_cmpgeq(vc, cpu_env, va, vb);
2225 gen_helper_cmpglt(vc, cpu_env, va, vb);
2229 gen_helper_cmpgle(vc, cpu_env, va, vb);
2234 gen_helper_cvtgf(vc, cpu_env, vb);
2243 gen_helper_cvtgq(vc, cpu_env, vb);
2248 gen_helper_cvtqf(vc, cpu_env, vb);
2253 gen_helper_cvtqg(vc, cpu_env, vb);
2261 /* IEEE floating-point */
2262 switch (fpfn) { /* fn11 & 0x3F */
2265 gen_fadds(ctx, ra, rb, rc, fn11);
2269 gen_fsubs(ctx, ra, rb, rc, fn11);
2273 gen_fmuls(ctx, ra, rb, rc, fn11);
2277 gen_fdivs(ctx, ra, rb, rc, fn11);
2281 gen_faddt(ctx, ra, rb, rc, fn11);
2285 gen_fsubt(ctx, ra, rb, rc, fn11);
2289 gen_fmult(ctx, ra, rb, rc, fn11);
2293 gen_fdivt(ctx, ra, rb, rc, fn11);
2297 gen_fcmptun(ctx, ra, rb, rc, fn11);
2301 gen_fcmpteq(ctx, ra, rb, rc, fn11);
2305 gen_fcmptlt(ctx, ra, rb, rc, fn11);
2309 gen_fcmptle(ctx, ra, rb, rc, fn11);
2313 if (fn11 == 0x2AC || fn11 == 0x6AC) {
2315 gen_fcvtst(ctx, rb, rc, fn11);
2318 gen_fcvtts(ctx, rb, rc, fn11);
2324 gen_fcvttq(ctx, rb, rc, fn11);
2329 gen_fcvtqs(ctx, rb, rc, fn11);
2334 gen_fcvtqt(ctx, rb, rc, fn11);
2351 /* Special case CPYS as FNOP. */
2352 } else if (ra == rb) {
2353 vc = dest_fpr(ctx, rc);
2354 /* Special case CPYS as FMOV. */
2356 tcg_gen_movi_i64(vc, 0);
2358 va = load_fpr(ctx, ra);
2359 tcg_gen_mov_i64(vc, va);
2362 gen_fcpys(ra, rb, rc);
2367 gen_fcpysn(ra, rb, rc);
2371 gen_fcpyse(ra, rb, rc);
2375 va = load_fpr(ctx, ra);
2376 gen_helper_store_fpcr(cpu_env, va);
2380 va = dest_fpr(ctx, ra);
2381 gen_helper_load_fpcr(va, cpu_env);
2385 gen_fcmov(TCG_COND_EQ, ra, rb, rc);
2389 gen_fcmov(TCG_COND_NE, ra, rb, rc);
2393 gen_fcmov(TCG_COND_LT, ra, rb, rc);
2397 gen_fcmov(TCG_COND_GE, ra, rb, rc);
2401 gen_fcmov(TCG_COND_LE, ra, rb, rc);
2405 gen_fcmov(TCG_COND_GT, ra, rb, rc);
2417 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2418 /v doesn't do. The only thing I can think of is that /sv is a
2419 valid instruction merely for completeness in the ISA. */
2420 gen_fcvtql_v(ctx, rb, rc);
2428 switch ((uint16_t)disp16) {
2455 va = dest_gpr(ctx, ra);
2458 gen_helper_load_pcc(va, cpu_env);
2460 ret = EXIT_PC_STALE;
2462 gen_helper_load_pcc(va, cpu_env);
2486 /* HW_MFPR (PALcode) */
2487 #ifndef CONFIG_USER_ONLY
2488 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2489 return gen_mfpr(ra, insn & 0xffff);
2495 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2496 prediction stack action, which of course we don't implement. */
2497 vb = load_gpr(ctx, rb);
2498 tcg_gen_andi_i64(cpu_pc, vb, ~3);
2500 tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
2502 ret = EXIT_PC_UPDATED;
2506 /* HW_LD (PALcode) */
2507 #ifndef CONFIG_USER_ONLY
2508 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2510 TCGv addr = tcg_temp_new();
2511 vb = load_gpr(ctx, rb);
2512 va = dest_gpr(ctx, ra);
2514 tcg_gen_addi_i64(addr, vb, disp12);
2515 switch ((insn >> 12) & 0xF) {
2517 /* Longword physical access (hw_ldl/p) */
2518 gen_helper_ldl_phys(va, cpu_env, addr);
2521 /* Quadword physical access (hw_ldq/p) */
2522 gen_helper_ldq_phys(va, cpu_env, addr);
2525 /* Longword physical access with lock (hw_ldl_l/p) */
2526 gen_helper_ldl_l_phys(va, cpu_env, addr);
2529 /* Quadword physical access with lock (hw_ldq_l/p) */
2530 gen_helper_ldq_l_phys(va, cpu_env, addr);
2533 /* Longword virtual PTE fetch (hw_ldl/v) */
2536 /* Quadword virtual PTE fetch (hw_ldq/v) */
2540 /* Invalid */
2543 /* Invalid */
2546 /* Longword virtual access (hw_ldl) */
2549 /* Quadword virtual access (hw_ldq) */
2552 /* Longword virtual access with protection check (hw_ldl/w) */
2553 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2556 /* Quadword virtual access with protection check (hw_ldq/w) */
2557 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2560 /* Longword virtual access with alt access mode (hw_ldl/a) */
2563 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2566 /* Longword virtual access with alternate access mode and
2567 protection checks (hw_ldl/wa) */
2568 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2571 /* Quadword virtual access with alternate access mode and
2572 protection checks (hw_ldq/wa) */
2573 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2576 tcg_temp_free(addr);
2584 vc = dest_gpr(ctx, rc);
2587 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2589 va = load_fpr(ctx, ra);
2590 tcg_gen_mov_i64(vc, va);
2592 } else if (fn7 == 0x78) {
2594 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
2596 t32 = tcg_temp_new_i32();
2597 va = load_fpr(ctx, ra);
2598 gen_helper_s_to_memory(t32, va);
2599 tcg_gen_ext_i32_i64(vc, t32);
2600 tcg_temp_free_i32(t32);
2604 vb = load_gpr_lit(ctx, rb, lit, islit);
2608 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2610 tcg_gen_ext8s_i64(vc, vb);
2614 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
2616 tcg_gen_ext16s_i64(vc, vb);
2620 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2622 gen_helper_ctpop(vc, vb);
2626 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2627 va = load_gpr(ctx, ra);
2628 gen_helper_perr(vc, va, vb);
2632 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2634 gen_helper_ctlz(vc, vb);
2638 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
2640 gen_helper_cttz(vc, vb);
2644 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2650 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2656 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2662 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2668 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2669 va = load_gpr(ctx, ra);
2670 gen_helper_minsb8(vc, va, vb);
2674 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2675 va = load_gpr(ctx, ra);
2676 gen_helper_minsw4(vc, va, vb);
2680 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2681 va = load_gpr(ctx, ra);
2682 gen_helper_minub8(vc, va, vb);
2686 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2687 va = load_gpr(ctx, ra);
2688 gen_helper_minuw4(vc, va, vb);
2692 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2693 va = load_gpr(ctx, ra);
2694 gen_helper_maxub8(vc, va, vb);
2698 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2699 va = load_gpr(ctx, ra);
2700 gen_helper_maxuw4(vc, va, vb);
2704 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2705 va = load_gpr(ctx, ra);
2706 gen_helper_maxsb8(vc, va, vb);
2710 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
2711 va = load_gpr(ctx, ra);
2712 gen_helper_maxsw4(vc, va, vb);
2720 /* HW_MTPR (PALcode) */
2721 #ifndef CONFIG_USER_ONLY
2722 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2723 return gen_mtpr(ctx, rb, insn & 0xffff);
2729 /* HW_RET (PALcode) */
2730 #ifndef CONFIG_USER_ONLY
2731 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2733 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2734 address from EXC_ADDR. This turns out to be useful for our
2735 emulation PALcode, so continue to accept it. */
2736 tmp = tcg_temp_new();
2737 tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
2738 gen_helper_hw_ret(cpu_env, tmp);
2741 gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
2743 ret = EXIT_PC_UPDATED;
2750 /* HW_ST (PALcode) */
2751 #ifndef CONFIG_USER_ONLY
2752 REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
2754 TCGv addr = tcg_temp_new();
2755 va = load_gpr(ctx, ra);
2756 vb = load_gpr(ctx, rb);
2758 tcg_gen_addi_i64(addr, vb, disp12);
2759 switch ((insn >> 12) & 0xF) {
2761 /* Longword physical access */
2762 gen_helper_stl_phys(cpu_env, addr, va);
2765 /* Quadword physical access */
2766 gen_helper_stq_phys(cpu_env, addr, va);
2769 /* Longword physical access with lock */
2770 gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2773 /* Quadword physical access with lock */
2774 gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
2777 /* Longword virtual access */
2780 /* Quadword virtual access */
2801 /* Longword virtual access with alternate access mode */
2804 /* Quadword virtual access with alternate access mode */
2813 tcg_temp_free(addr);
2821 gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2825 gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2829 gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2833 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2837 gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2841 gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2845 gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2849 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2853 gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2857 gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2861 gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2865 gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2869 gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2873 gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2877 ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
2881 ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
2885 ret = gen_bdirect(ctx, ra, disp21);
2887 case 0x31: /* FBEQ */
2888 ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2890 case 0x32: /* FBLT */
2891 ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2893 case 0x33: /* FBLE */
2894 ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2898 ret = gen_bdirect(ctx, ra, disp21);
2900 case 0x35: /* FBNE */
2901 ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2903 case 0x36: /* FBGE */
2904 ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2906 case 0x37: /* FBGT */
2907 ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2911 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2915 ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2919 ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2923 ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2927 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2931 ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2935 ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2939 ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2942 ret = gen_invalid(ctx);
2949 static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
2950 TranslationBlock *tb,
2953 CPUState *cs = CPU(cpu);
2954 CPUAlphaState *env = &cpu->env;
2955 DisasContext ctx, *ctxp = &ctx;
2956 target_ulong pc_start;
2957 target_ulong pc_mask;
2959 uint16_t *gen_opc_end;
2967 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2971 ctx.mem_idx = cpu_mmu_index(env);
2972 ctx.implver = env->implver;
2973 ctx.singlestep_enabled = cs->singlestep_enabled;
2975 /* ??? Every TB begins with unset rounding mode, to be initialized on
2976 the first fp insn of the TB. Alternatively we could define a proper
2977 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2978 to reset the FP_STATUS to that default at the end of any TB that
2979 changes the default. We could even (gasp) dynamically figure out
2980 what default would be most efficient given the running program. */
2982 /* Similarly for flush-to-zero. */
2986 max_insns = tb->cflags & CF_COUNT_MASK;
2987 if (max_insns == 0) {
2988 max_insns = CF_COUNT_MASK;
2991 if (in_superpage(&ctx, pc_start)) {
2992 pc_mask = (1ULL << 41) - 1;
2994 pc_mask = ~TARGET_PAGE_MASK;
2999 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
3000 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
3001 if (bp->pc == ctx.pc) {
3002 gen_excp(&ctx, EXCP_DEBUG, 0);
3008 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3012 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3014 tcg_ctx.gen_opc_pc[lj] = ctx.pc;
3015 tcg_ctx.gen_opc_instr_start[lj] = 1;
3016 tcg_ctx.gen_opc_icount[lj] = num_insns;
3018 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
3021 insn = cpu_ldl_code(env, ctx.pc);
3024 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
3025 tcg_gen_debug_insn_start(ctx.pc);
3028 TCGV_UNUSED_I64(ctx.zero);
3029 TCGV_UNUSED_I64(ctx.sink);
3030 TCGV_UNUSED_I64(ctx.lit);
3033 ret = translate_one(ctxp, insn);
3035 if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
3036 tcg_gen_discard_i64(ctx.sink);
3037 tcg_temp_free(ctx.sink);
3039 if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
3040 tcg_temp_free(ctx.zero);
3042 if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
3043 tcg_temp_free(ctx.lit);
3046 /* If we reach a page boundary, are single-stepping,
3047 or exhaust instruction count, stop generation. */
3049 && ((ctx.pc & pc_mask) == 0
3050 || tcg_ctx.gen_opc_ptr >= gen_opc_end
3051 || num_insns >= max_insns
3053 || ctx.singlestep_enabled)) {
3054 ret = EXIT_PC_STALE;
3056 } while (ret == NO_EXIT);
3058 if (tb->cflags & CF_LAST_IO) {
3067 tcg_gen_movi_i64(cpu_pc, ctx.pc);
3069 case EXIT_PC_UPDATED:
3070 if (ctx.singlestep_enabled) {
3071 gen_excp_1(EXCP_DEBUG, 0);
3080 gen_tb_end(tb, num_insns);
3081 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
3083 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3086 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3088 tb->size = ctx.pc - pc_start;
3089 tb->icount = num_insns;
3093 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
3094 qemu_log("IN: %s\n", lookup_symbol(pc_start));
3095 log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
3101 void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
3103 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
3106 void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
3108 gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
3111 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
3113 env->pc = tcg_ctx.gen_opc_pc[pc_pos];