/*
   SPARC translation

   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "asi.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
/* global register indexes */
static TCGv_env cpu_env;
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    bool fpu_enabled;
    bool address_mask_32bit;
    bool singlestep;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
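
/* For illustration: GET_FIELD counts bits from the MSB, so
   GET_FIELD(insn, 3, 6) extracts the four bits insn<28:25>, while
   GET_FIELD_SP uses the manuals' numbering (bit 0 is the LSB), so
   GET_FIELD_SP(insn, 25, 27) extracts insn<27:25> directly.  */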
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
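
/* V9 encodes the upper floating point registers (%f32 and above) by
   setting the low bit of the 5-bit register field; DFPREG/QFPREG fold
   that bit back in as bit 5 to recover a linear register number.  */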
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}

#define IS_IMM (insn & (1 << 13))
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
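
/* Temporaries handed out by get_temp_i32/get_temp_tl are tracked in the
   DisasContext so that they can be freed in one sweep after the current
   instruction has been translated, rather than at each use site.  */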
static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
{
    dst = QFPREG(dst);

    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);
}
#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2];
}

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
{
    src = QFPREG(src);
    return cpu_fpr[src / 2 + 1];
}

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
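
/* With the V9 address-mask (PSTATE.AM) bit in effect, generated
   addresses are truncated to 32 bits before the access is made.  */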
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return get_temp_tl(dc);
    }
}
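
/* %g0 is hard-wired to zero: reads of register 0 return a fresh zero
   temporary and writes to it are simply dropped, so callers never need
   to special-case it.  */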
static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
                               target_ulong npc)
{
    if (unlikely(s->singlestep)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
           (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}

static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
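
/* Both helpers recover the 32-bit carry of a previously emitted
   operation from the saved cc_* values: for an add, carry out is
   (dst < src) unsigned; for a subtract, borrow is (src1 < src2).  */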
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
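
/* MULScc performs one step of a 32x32 multiply: the low bit of %y
   decides whether src2 participates in the add, src1 is shifted right
   by one with the old (N ^ V) entering at the top, and the bit shifted
   out of src1 enters the top of %y.  */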
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
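
/* UMUL/SMUL produce the full 64-bit product: the low 32 bits land in
   the destination register and the high 32 bits in %y, which is what
   the cpu_y assignments above implement for both target widths.  */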
// 1:
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z:
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V):
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V:
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z:
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C:
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V:
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0:
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N:
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z:
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V)):
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V):
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z):
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C:
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N:
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V:
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
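
/* Each gen_op_eval_* helper above computes one icc/xcc condition as a
   0/1 value from a PSR image; e.g. bg = !(Z | (N ^ V)) is obtained by
   evaluating ble and inverting the result.  */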
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
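
/* Every fbr condition above is a boolean combination of the two fcc
   bits at fcc_offset; e.g. "unordered or equal" (fbue) is
   !(FCC0 ^ FCC1), matching the encodings 0 (=) and 3 (unordered).  */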
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
static void gen_exception(DisasContext *dc, int which)
{
    TCGv_i32 t;

    save_state(dc);
    t = tcg_const_i32(which);
    gen_helper_raise_exception(cpu_env, t);
    tcg_temp_free_i32(t);
    dc->is_br = 1;
}

static void gen_check_align(TCGv addr, int mask)
{
    TCGv_i32 r_mask = tcg_const_i32(mask);
    gen_helper_check_align(cpu_env, addr, r_mask);
    tcg_temp_free_i32(r_mask);
}
static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7:  /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
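
/* gen_compare picks the cheapest usable representation of a condition:
   a direct setcond on cc_dst after a logic op, a signed/unsigned
   compare of the saved operands after a subcc, or, failing that,
   materializing the flags and testing the boolean produced by the
   gen_op_eval_* helpers.  */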
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;
    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;
    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}
#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;
    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
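
/* The V9 branch-on-register conditions (BRZ, BRLZ, ...) compare a
   register directly against zero; the table holds the inverted TCG
   condition, which gen_compare_reg undoes with tcg_invert_cond.  */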
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
        break;
    }
}
#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, cpu_env);
}
#endif
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, TCGMemOp memop)
{
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
}

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
{
    TCGv m1 = tcg_const_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
    tcg_temp_free(m1);
}
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    TCGMemOp memop;
} DisasASI;

static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
{
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (IS_IMM) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (IS_IMM) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:       /* Bypass */
        case ASI_REAL_IO:    /* Bypass, non-cacheable */
        case ASI_REAL_L:     /* Bypass LE */
        case ASI_REAL_IO_L:  /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }

        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

    return (DisasASI){ type, asi, mem_idx, memop };
}
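
/* get_asi classifies an ASI into a handling strategy: GET_ASI_DIRECT
   becomes an ordinary qemu_ld/st with the chosen mem_idx, the twin,
   block and short forms get special-cased code in the callers below,
   and anything else falls back to the out-of-line ld_asi/st_asi
   helpers.  */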
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, TCGMemOp memop)
{
    DisasASI da = get_asi(dc, insn, memop);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for stda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
        break;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    case GET_ASI_BCOPY:
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_const_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }

            tcg_temp_free(saddr);
            tcg_temp_free(daddr);
            tcg_temp_free(four);
            tcg_temp_free_i32(tmp);
        }
        break;
#endif
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
                tcg_temp_free_i64(t64);
            }
#endif
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);

            /* A write to a TLB register may alter page maps.  End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
                         TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                        int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUL);
    TCGv oldv;

    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop);
        gen_store_gpr(dc, rd, oldv);
        tcg_temp_free(oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
{
    DisasASI da = get_asi(dc, insn, MO_UB);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_ldstub(dc, dst, addr, da.mem_idx);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
#ifdef TARGET_SPARC64
static void gen_ldf_asi(DisasContext *dc, TCGv addr,
                        int insn, int size, int rd)
{
    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
    TCGv_i32 d32;
    TCGv_i64 d64;

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        switch (size) {
        case 4:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
            gen_store_fpr_F(dc, rd, d32);
            break;
        case 8:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            break;
        case 16:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
            tcg_gen_addi_tl(addr, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            tcg_temp_free_i64(d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (size == 8 && (rd & 7) == 0) {
            TCGMemOp memop;
            TCGv eight;
            int i;

            gen_address_mask(dc, addr);

            /* The first operation checks required alignment.  */
            memop = da.memop | MO_ALIGN_64;
            eight = tcg_const_tl(8);
            for (i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
                                    da.mem_idx, memop);
                if (i == 7) {
                    break;
                }
                tcg_gen_add_tl(addr, addr, eight);
                memop = da.memop;
            }
            tcg_temp_free(eight);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (size == 8) {
            gen_address_mask(dc, addr);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(da.memop);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case 4:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                tcg_temp_free_i64(d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case 8:
                gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
                break;
            case 16:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
                tcg_gen_addi_tl(addr, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                tcg_temp_free_i64(d64);
                break;
            default:
                g_assert_not_reached();
            }
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }
}
static void gen_stf_asi(DisasContext *dc, TCGv addr,
                        int insn, int size, int rd)
{
    DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
    TCGv_i32 d32;

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        switch (size) {
        case 4:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
            break;
        case 8:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_4);
            break;
        case 16:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
                                da.memop | MO_ALIGN_16);
            tcg_gen_addi_tl(addr, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (size == 8 && (rd & 7) == 0) {
            TCGMemOp memop;
            TCGv eight;
            int i;

            gen_address_mask(dc, addr);

            /* The first operation checks required alignment.  */
            memop = da.memop | MO_ALIGN_64;
            eight = tcg_const_tl(8);
            for (i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
                                    da.mem_idx, memop);
                if (i == 7) {
                    break;
                }
                tcg_gen_add_tl(addr, addr, eight);
                memop = da.memop;
            }
            tcg_temp_free(eight);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (size == 8) {
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for ldfa/lddfa/ldqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEQ);
    TCGv_i64 hi = gen_dest_gpr(dc, rd);
    TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_address_mask(dc, addr);
            tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr32_i64(lo, hi, tmp);
            } else {
                tcg_gen_extr32_i64(hi, lo, tmp);
            }
            tcg_temp_free_i64(tmp);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(da.memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
            tcg_temp_free_i32(r_asi);
            tcg_temp_free_i32(r_mop);

            /* See above.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr32_i64(lo, hi, tmp);
            } else {
                tcg_gen_extr32_i64(hi, lo, tmp);
            }
            tcg_temp_free_i64(tmp);
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
            tcg_temp_free_i64(t64);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(da.memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
            tcg_temp_free_i64(t64);
        }
        break;
    }
}
static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEQ);
    TCGv oldv;

    switch (da.type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop);
        gen_store_gpr(dc, rd, oldv);
        tcg_temp_free(oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
#elif !defined(CONFIG_USER_ONLY)
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
       whereby "rd + 1" elicits "error: array subscript is above array".
       Since we have already asserted that rd is even, the semantics
       are unchanged.  */
    TCGv lo = gen_dest_gpr(dc, rd | 1);
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv_i64 t64 = tcg_temp_new_i64();
    DisasASI da = get_asi(dc, insn, MO_TEQ);

    switch (da.type) {
    case GET_ASI_EXCP:
        tcg_temp_free_i64(t64);
        return;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_Q);

            save_state(dc);
            gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }

    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd | 1, lo);
    gen_store_gpr(dc, rd, hi);
}
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);

    switch (da.type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
        break;
    case GET_ASI_BFILL:
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_const_tl(8);
            int i;

            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }

            tcg_temp_free(d_addr);
            tcg_temp_free(eight);
        }
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_const_i32(da.asi);
            TCGv_i32 r_mop = tcg_const_i32(MO_Q);

            save_state(dc);
            gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
            tcg_temp_free_i32(r_mop);
            tcg_temp_free_i32(r_asi);
        }
        break;
    }

    tcg_temp_free_i64(t64);
}
#endif
2887 static TCGv get_src1(DisasContext *dc, unsigned int insn)
2889 unsigned int rs1 = GET_FIELD(insn, 13, 17);
2890 return gen_load_gpr(dc, rs1);
2893 static TCGv get_src2(DisasContext *dc, unsigned int insn)
2895 if (IS_IMM) { /* immediate */
2896 target_long simm = GET_FIELDs(insn, 19, 31);
2897 TCGv t = get_temp_tl(dc);
2898 tcg_gen_movi_tl(t, simm);
2900 } else { /* register */
2901 unsigned int rs2 = GET_FIELD(insn, 27, 31);
2902 return gen_load_gpr(dc, rs2);
2906 #ifdef TARGET_SPARC64
2907 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2909 TCGv_i32 c32, zero, dst, s1, s2;
2911 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2912 or fold the comparison down to 32 bits and use movcond_i32. Choose
2913 the latter. */
2914 c32 = tcg_temp_new_i32();
2916 tcg_gen_extrl_i64_i32(c32, cmp->c1);
2918 TCGv_i64 c64 = tcg_temp_new_i64();
2919 tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2920 tcg_gen_extrl_i64_i32(c32, c64);
2921 tcg_temp_free_i64(c64);
2924 s1 = gen_load_fpr_F(dc, rs);
2925 s2 = gen_load_fpr_F(dc, rd);
2926 dst = gen_dest_fpr_F(dc);
2927 zero = tcg_const_i32(0);
2929 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2931 tcg_temp_free_i32(c32);
2932 tcg_temp_free_i32(zero);
2933 gen_store_fpr_F(dc, rd, dst);
2936 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2938 TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2939 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
2940 gen_load_fpr_D(dc, rs),
2941 gen_load_fpr_D(dc, rd));
2942 gen_store_fpr_D(dc, rd, dst);
2945 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2947 int qd = QFPREG(rd);
2948 int qs = QFPREG(rs);
2950 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
2951 cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2952 tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
2953 cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2955 gen_update_fprs_dirty(dc, qd);
2958 #ifndef CONFIG_USER_ONLY
2959 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
2961 TCGv_i32 r_tl = tcg_temp_new_i32();
2963 /* load env->tl into r_tl */
2964 tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
2966 /* tl = [0 ... MAXTL_MASK], which assumes MAXTL_MASK is a power of 2 minus 1 */
2967 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2969 /* calculate offset to current trap state from env->ts, reuse r_tl */
2970 tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
2971 tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
2973 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2975 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2976 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2977 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2978 tcg_temp_free_ptr(r_tl_tmp);
2981 tcg_temp_free_i32(r_tl);
2985 static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
2986 int width, bool cc, bool left)
2988 TCGv lo1, lo2, t1, t2;
2989 uint64_t amask, tabl, tabr;
2990 int shift, imask, omask;
2993 tcg_gen_mov_tl(cpu_cc_src, s1);
2994 tcg_gen_mov_tl(cpu_cc_src2, s2);
2995 tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
2996 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
2997 dc->cc_op = CC_OP_SUB;
3000 /* Theory of operation: there are two tables, left and right (not to
3001 be confused with the left and right versions of the opcode). These
3002 are indexed by the low 3 bits of the inputs. To make things "easy",
3003 these tables are loaded into two constants, TABL and TABR below.
3004 The operation index = (input & imask) << shift calculates the index
3005 into the constant, while val = (table >> index) & omask calculates
3006 the value we're looking for. */
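/* Worked example (illustrative; assumes the width-8 parameters
   imask = 7, shift = 3, omask = 0xff set up by the elided code):
   for a left edge8 with (s1 & 7) == 2, index = 2 << 3 = 16 and
   (tabl >> 16) & 0xff == 0xfc with the first TABL below.  */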
3013 tabl = 0x80c0e0f0f8fcfeffULL;
3014 tabr = 0xff7f3f1f0f070301ULL;
3016 tabl = 0x0103070f1f3f7fffULL;
3017 tabr = 0xfffefcf8f0e0c080ULL;
3037 tabl = (2 << 2) | 3;
3038 tabr = (3 << 2) | 1;
3040 tabl = (1 << 2) | 3;
3041 tabr = (3 << 2) | 2;
3048 lo1 = tcg_temp_new();
3049 lo2 = tcg_temp_new();
3050 tcg_gen_andi_tl(lo1, s1, imask);
3051 tcg_gen_andi_tl(lo2, s2, imask);
3052 tcg_gen_shli_tl(lo1, lo1, shift);
3053 tcg_gen_shli_tl(lo2, lo2, shift);
3055 t1 = tcg_const_tl(tabl);
3056 t2 = tcg_const_tl(tabr);
3057 tcg_gen_shr_tl(lo1, t1, lo1);
3058 tcg_gen_shr_tl(lo2, t2, lo2);
3059 tcg_gen_andi_tl(dst, lo1, omask);
3060 tcg_gen_andi_tl(lo2, lo2, omask);
3064 amask &= 0xffffffffULL;
3066 tcg_gen_andi_tl(s1, s1, amask);
3067 tcg_gen_andi_tl(s2, s2, amask);
3069 /* We want to compute
3070 dst = (s1 == s2 ? lo1 : lo1 & lo2).
3071 We've already done dst = lo1, so this reduces to
3072 dst &= (s1 == s2 ? -1 : lo2),
3073 which the code below implements as
3074 lo2 |= -(s1 == s2);
3075 dst &= lo2. */
3077 tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
3078 tcg_gen_neg_tl(t1, t1);
3079 tcg_gen_or_tl(lo2, lo2, t1);
3080 tcg_gen_and_tl(dst, dst, lo2);
3088 static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
3090 TCGv tmp = tcg_temp_new();
3092 tcg_gen_add_tl(tmp, s1, s2);
3093 tcg_gen_andi_tl(dst, tmp, -8);
3094 if (left) {
3095 tcg_gen_neg_tl(tmp, tmp);
3096 }
3097 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
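/* Net effect: dst = (s1 + s2) & ~7, with the byte offset (s1 + s2) & 7
   latched into GSR.align for a subsequent faligndata; the "left"
   (alignaddrl) form latches the negated offset instead.  */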
3102 static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
3106 t1 = tcg_temp_new();
3107 t2 = tcg_temp_new();
3108 shift = tcg_temp_new();
3110 tcg_gen_andi_tl(shift, gsr, 7);
3111 tcg_gen_shli_tl(shift, shift, 3);
3112 tcg_gen_shl_tl(t1, s1, shift);
3114 /* A shift of 64 does not produce 0 in TCG. Divide this into a
3115 shift of (up to 63) followed by a constant shift of 1. */
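/* (The shift amount here is a multiple of 8 in [0, 56], so
   shift ^ 63 == 63 - shift, and the xori plus the extra shri below
   add up to a logical right shift of 64 - shift.)  */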
3116 tcg_gen_xori_tl(shift, shift, 63);
3117 tcg_gen_shr_tl(t2, s2, shift);
3118 tcg_gen_shri_tl(t2, t2, 1);
3120 tcg_gen_or_tl(dst, t1, t2);
3124 tcg_temp_free(shift);
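/* Illustration: with GSR.align == 3 this computes
   dst = (s1 << 24) | (s2 >> 40), i.e. bytes 3..7 of s1 followed by
   bytes 0..2 of s2 in big-endian byte numbering.  */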
3128 #define CHECK_IU_FEATURE(dc, FEATURE) \
3129 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3130 goto illegal_insn;
3131 #define CHECK_FPU_FEATURE(dc, FEATURE) \
3132 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
3133 goto nfpu_insn;
3135 /* before an instruction, dc->pc must be static (known at translate time) */
3136 static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
3138 unsigned int opc, rs1, rs2, rd;
3139 TCGv cpu_src1, cpu_src2;
3140 TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
3141 TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
3144 opc = GET_FIELD(insn, 0, 1);
3145 rd = GET_FIELD(insn, 2, 6);
3148 case 0: /* branches/sethi */
3150 unsigned int xop = GET_FIELD(insn, 7, 9);
3153 #ifdef TARGET_SPARC64
3154 case 0x1: /* V9 BPcc */
3158 target = GET_FIELD_SP(insn, 0, 18);
3159 target = sign_extend(target, 19);
3161 cc = GET_FIELD_SP(insn, 20, 21);
3163 do_branch(dc, target, insn, 0);
3165 do_branch(dc, target, insn, 1);
3170 case 0x3: /* V9 BPr */
3172 target = GET_FIELD_SP(insn, 0, 13) |
3173 (GET_FIELD_SP(insn, 20, 21) << 14);
3174 target = sign_extend(target, 16);
3176 cpu_src1 = get_src1(dc, insn);
3177 do_branch_reg(dc, target, insn, cpu_src1);
3180 case 0x5: /* V9 FBPcc */
3182 int cc = GET_FIELD_SP(insn, 20, 21);
3183 if (gen_trap_ifnofpu(dc)) {
3186 target = GET_FIELD_SP(insn, 0, 18);
3187 target = sign_extend(target, 19);
3189 do_fbranch(dc, target, insn, cc);
3193 case 0x7: /* CBN+x */
3198 case 0x2: /* BN+x */
3200 target = GET_FIELD(insn, 10, 31);
3201 target = sign_extend(target, 22);
3203 do_branch(dc, target, insn, 0);
3206 case 0x6: /* FBN+x */
3208 if (gen_trap_ifnofpu(dc)) {
3211 target = GET_FIELD(insn, 10, 31);
3212 target = sign_extend(target, 22);
3214 do_fbranch(dc, target, insn, 0);
3217 case 0x4: /* SETHI */
3218 /* Special-case %g0 because that's the canonical nop. */
3220 uint32_t value = GET_FIELD(insn, 10, 31);
3221 TCGv t = gen_dest_gpr(dc, rd);
3222 tcg_gen_movi_tl(t, value << 10);
3223 gen_store_gpr(dc, rd, t);
3226 case 0x0: /* UNIMPL */
3235 target_long target = GET_FIELDs(insn, 2, 31) << 2;
3236 TCGv o7 = gen_dest_gpr(dc, 15);
3238 tcg_gen_movi_tl(o7, dc->pc);
3239 gen_store_gpr(dc, 15, o7);
3242 #ifdef TARGET_SPARC64
3243 if (unlikely(AM_CHECK(dc))) {
3244 target &= 0xffffffffULL;
3250 case 2: /* FPU & Logical Operations */
3252 unsigned int xop = GET_FIELD(insn, 7, 12);
3253 TCGv cpu_dst = get_temp_tl(dc);
3256 if (xop == 0x3a) { /* generate trap */
3257 int cond = GET_FIELD(insn, 3, 6);
3259 TCGLabel *l1 = NULL;
3270 /* Conditional trap. */
3272 #ifdef TARGET_SPARC64
3274 int cc = GET_FIELD_SP(insn, 11, 12);
3276 gen_compare(&cmp, 0, cond, dc);
3277 } else if (cc == 2) {
3278 gen_compare(&cmp, 1, cond, dc);
3283 gen_compare(&cmp, 0, cond, dc);
3285 l1 = gen_new_label();
3286 tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
3287 cmp.c1, cmp.c2, l1);
3291 mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
3292 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
3294 /* Don't use the normal temporaries, as they may well have
3295 gone out of scope with the branch above. While we're
3296 doing that we might as well pre-truncate to 32-bit. */
3297 trap = tcg_temp_new_i32();
3299 rs1 = GET_FIELD_SP(insn, 14, 18);
3301 rs2 = GET_FIELD_SP(insn, 0, 7);
3303 tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
3304 /* Signal that the trap value is fully constant. */
3307 TCGv t1 = gen_load_gpr(dc, rs1);
3308 tcg_gen_trunc_tl_i32(trap, t1);
3309 tcg_gen_addi_i32(trap, trap, rs2);
3313 rs2 = GET_FIELD_SP(insn, 0, 4);
3314 t1 = gen_load_gpr(dc, rs1);
3315 t2 = gen_load_gpr(dc, rs2);
3316 tcg_gen_add_tl(t1, t1, t2);
3317 tcg_gen_trunc_tl_i32(trap, t1);
3320 tcg_gen_andi_i32(trap, trap, mask);
3321 tcg_gen_addi_i32(trap, trap, TT_TRAP);
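/* The resulting vector is TT_TRAP + (tn & mask): 7 bits of trap number
   on V8, 8 bits when the hypervisor mask was selected above.  */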
3324 gen_helper_raise_exception(cpu_env, trap);
3325 tcg_temp_free_i32(trap);
3328 /* An unconditional trap ends the TB. */
3332 /* A conditional trap falls through to the next insn. */
3336 } else if (xop == 0x28) {
3337 rs1 = GET_FIELD(insn, 13, 17);
3340 #ifndef TARGET_SPARC64
3341 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3342 manual, rdy on the
3343 microSPARC II */
3344 case 0x0f: /* stbar in the SPARCv8 manual,
3345 rdy on the microSPARC II */
3346 case 0x10 ... 0x1f: /* implementation-dependent in the
3347 SPARCv8 manual, rdy on the
3348 microSPARC II */
3350 if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
3351 TCGv t = gen_dest_gpr(dc, rd);
3352 /* Read Asr17 for a Leon3 monoprocessor */
3353 tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
3354 gen_store_gpr(dc, rd, t);
3358 gen_store_gpr(dc, rd, cpu_y);
3360 #ifdef TARGET_SPARC64
3361 case 0x2: /* V9 rdccr */
3363 gen_helper_rdccr(cpu_dst, cpu_env);
3364 gen_store_gpr(dc, rd, cpu_dst);
3366 case 0x3: /* V9 rdasi */
3367 tcg_gen_movi_tl(cpu_dst, dc->asi);
3368 gen_store_gpr(dc, rd, cpu_dst);
3370 case 0x4: /* V9 rdtick */
3375 r_tickptr = tcg_temp_new_ptr();
3376 r_const = tcg_const_i32(dc->mem_idx);
3377 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3378 offsetof(CPUSPARCState, tick));
3379 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3381 tcg_temp_free_ptr(r_tickptr);
3382 tcg_temp_free_i32(r_const);
3383 gen_store_gpr(dc, rd, cpu_dst);
3386 case 0x5: /* V9 rdpc */
3388 TCGv t = gen_dest_gpr(dc, rd);
3389 if (unlikely(AM_CHECK(dc))) {
3390 tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
3392 tcg_gen_movi_tl(t, dc->pc);
3394 gen_store_gpr(dc, rd, t);
3397 case 0x6: /* V9 rdfprs */
3398 tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
3399 gen_store_gpr(dc, rd, cpu_dst);
3401 case 0xf: /* V9 membar */
3402 break; /* no effect */
3403 case 0x13: /* Graphics Status */
3404 if (gen_trap_ifnofpu(dc)) {
3407 gen_store_gpr(dc, rd, cpu_gsr);
3409 case 0x16: /* Softint */
3410 tcg_gen_ld32s_tl(cpu_dst, cpu_env,
3411 offsetof(CPUSPARCState, softint));
3412 gen_store_gpr(dc, rd, cpu_dst);
3414 case 0x17: /* Tick compare */
3415 gen_store_gpr(dc, rd, cpu_tick_cmpr);
3417 case 0x18: /* System tick */
3422 r_tickptr = tcg_temp_new_ptr();
3423 r_const = tcg_const_i32(dc->mem_idx);
3424 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3425 offsetof(CPUSPARCState, stick));
3426 gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
3428 tcg_temp_free_ptr(r_tickptr);
3429 tcg_temp_free_i32(r_const);
3430 gen_store_gpr(dc, rd, cpu_dst);
3433 case 0x19: /* System tick compare */
3434 gen_store_gpr(dc, rd, cpu_stick_cmpr);
3436 case 0x1a: /* UltraSPARC-T1 Strand status */
3437 /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
3438 * this ASR as impl. dep
3439 */
3440 CHECK_IU_FEATURE(dc, HYPV);
3442 TCGv t = gen_dest_gpr(dc, rd);
3443 tcg_gen_movi_tl(t, 1UL);
3444 gen_store_gpr(dc, rd, t);
3447 case 0x10: /* Performance Control */
3448 case 0x11: /* Performance Instrumentation Counter */
3449 case 0x12: /* Dispatch Control */
3450 case 0x14: /* Softint set, WO */
3451 case 0x15: /* Softint clear, WO */
3456 #if !defined(CONFIG_USER_ONLY)
3457 } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
3458 #ifndef TARGET_SPARC64
3459 if (!supervisor(dc)) {
3463 gen_helper_rdpsr(cpu_dst, cpu_env);
3465 CHECK_IU_FEATURE(dc, HYPV);
3466 if (!hypervisor(dc))
3468 rs1 = GET_FIELD(insn, 13, 17);
3471 tcg_gen_ld_i64(cpu_dst, cpu_env,
3472 offsetof(CPUSPARCState, hpstate));
3475 // gen_op_rdhtstate();
3478 tcg_gen_mov_tl(cpu_dst, cpu_hintp);
3481 tcg_gen_mov_tl(cpu_dst, cpu_htba);
3484 tcg_gen_mov_tl(cpu_dst, cpu_hver);
3486 case 31: // hstick_cmpr
3487 tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
3493 gen_store_gpr(dc, rd, cpu_dst);
3495 } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
3496 if (!supervisor(dc)) {
3499 cpu_tmp0 = get_temp_tl(dc);
3500 #ifdef TARGET_SPARC64
3501 rs1 = GET_FIELD(insn, 13, 17);
3507 r_tsptr = tcg_temp_new_ptr();
3508 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3509 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3510 offsetof(trap_state, tpc));
3511 tcg_temp_free_ptr(r_tsptr);
3518 r_tsptr = tcg_temp_new_ptr();
3519 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3520 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3521 offsetof(trap_state, tnpc));
3522 tcg_temp_free_ptr(r_tsptr);
3529 r_tsptr = tcg_temp_new_ptr();
3530 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3531 tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
3532 offsetof(trap_state, tstate));
3533 tcg_temp_free_ptr(r_tsptr);
3538 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3540 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3541 tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
3542 offsetof(trap_state, tt));
3543 tcg_temp_free_ptr(r_tsptr);
3551 r_tickptr = tcg_temp_new_ptr();
3552 r_const = tcg_const_i32(dc->mem_idx);
3553 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3554 offsetof(CPUSPARCState, tick));
3555 gen_helper_tick_get_count(cpu_tmp0, cpu_env,
3556 r_tickptr, r_const);
3557 tcg_temp_free_ptr(r_tickptr);
3558 tcg_temp_free_i32(r_const);
3562 tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
3565 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3566 offsetof(CPUSPARCState, pstate));
3569 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3570 offsetof(CPUSPARCState, tl));
3573 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3574 offsetof(CPUSPARCState, psrpil));
3577 gen_helper_rdcwp(cpu_tmp0, cpu_env);
3580 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3581 offsetof(CPUSPARCState, cansave));
3583 case 11: // canrestore
3584 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3585 offsetof(CPUSPARCState, canrestore));
3587 case 12: // cleanwin
3588 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3589 offsetof(CPUSPARCState, cleanwin));
3591 case 13: // otherwin
3592 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3593 offsetof(CPUSPARCState, otherwin));
3596 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3597 offsetof(CPUSPARCState, wstate));
3599 case 16: // UA2005 gl
3600 CHECK_IU_FEATURE(dc, GL);
3601 tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
3602 offsetof(CPUSPARCState, gl));
3604 case 26: // UA2005 strand status
3605 CHECK_IU_FEATURE(dc, HYPV);
3606 if (!hypervisor(dc))
3608 tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
3611 tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
3618 tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
3620 gen_store_gpr(dc, rd, cpu_tmp0);
3622 } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
3623 #ifdef TARGET_SPARC64
3624 gen_helper_flushw(cpu_env);
3626 if (!supervisor(dc))
3628 gen_store_gpr(dc, rd, cpu_tbr);
3632 } else if (xop == 0x34) { /* FPU Operations */
3633 if (gen_trap_ifnofpu(dc)) {
3636 gen_op_clear_ieee_excp_and_FTT();
3637 rs1 = GET_FIELD(insn, 13, 17);
3638 rs2 = GET_FIELD(insn, 27, 31);
3639 xop = GET_FIELD(insn, 18, 26);
3642 case 0x1: /* fmovs */
3643 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
3644 gen_store_fpr_F(dc, rd, cpu_src1_32);
3646 case 0x5: /* fnegs */
3647 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
3649 case 0x9: /* fabss */
3650 gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
3652 case 0x29: /* fsqrts */
3653 CHECK_FPU_FEATURE(dc, FSQRT);
3654 gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
3656 case 0x2a: /* fsqrtd */
3657 CHECK_FPU_FEATURE(dc, FSQRT);
3658 gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
3660 case 0x2b: /* fsqrtq */
3661 CHECK_FPU_FEATURE(dc, FLOAT128);
3662 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
3664 case 0x41: /* fadds */
3665 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
3667 case 0x42: /* faddd */
3668 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
3670 case 0x43: /* faddq */
3671 CHECK_FPU_FEATURE(dc, FLOAT128);
3672 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3674 case 0x45: /* fsubs */
3675 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3677 case 0x46: /* fsubd */
3678 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3680 case 0x47: /* fsubq */
3681 CHECK_FPU_FEATURE(dc, FLOAT128);
3682 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3684 case 0x49: /* fmuls */
3685 CHECK_FPU_FEATURE(dc, FMUL);
3686 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3688 case 0x4a: /* fmuld */
3689 CHECK_FPU_FEATURE(dc, FMUL);
3690 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3692 case 0x4b: /* fmulq */
3693 CHECK_FPU_FEATURE(dc, FLOAT128);
3694 CHECK_FPU_FEATURE(dc, FMUL);
3695 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3697 case 0x4d: /* fdivs */
3698 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3700 case 0x4e: /* fdivd */
3701 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3703 case 0x4f: /* fdivq */
3704 CHECK_FPU_FEATURE(dc, FLOAT128);
3705 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3707 case 0x69: /* fsmuld */
3708 CHECK_FPU_FEATURE(dc, FSMULD);
3709 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3711 case 0x6e: /* fdmulq */
3712 CHECK_FPU_FEATURE(dc, FLOAT128);
3713 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3715 case 0xc4: /* fitos */
3716 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3718 case 0xc6: /* fdtos */
3719 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3721 case 0xc7: /* fqtos */
3722 CHECK_FPU_FEATURE(dc, FLOAT128);
3723 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3725 case 0xc8: /* fitod */
3726 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3728 case 0xc9: /* fstod */
3729 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3731 case 0xcb: /* fqtod */
3732 CHECK_FPU_FEATURE(dc, FLOAT128);
3733 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3735 case 0xcc: /* fitoq */
3736 CHECK_FPU_FEATURE(dc, FLOAT128);
3737 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3739 case 0xcd: /* fstoq */
3740 CHECK_FPU_FEATURE(dc, FLOAT128);
3741 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3743 case 0xce: /* fdtoq */
3744 CHECK_FPU_FEATURE(dc, FLOAT128);
3745 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3747 case 0xd1: /* fstoi */
3748 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3750 case 0xd2: /* fdtoi */
3751 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3753 case 0xd3: /* fqtoi */
3754 CHECK_FPU_FEATURE(dc, FLOAT128);
3755 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
3757 #ifdef TARGET_SPARC64
3758 case 0x2: /* V9 fmovd */
3759 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3760 gen_store_fpr_D(dc, rd, cpu_src1_64);
3762 case 0x3: /* V9 fmovq */
3763 CHECK_FPU_FEATURE(dc, FLOAT128);
3764 gen_move_Q(dc, rd, rs2);
3766 case 0x6: /* V9 fnegd */
3767 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3769 case 0x7: /* V9 fnegq */
3770 CHECK_FPU_FEATURE(dc, FLOAT128);
3771 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3773 case 0xa: /* V9 fabsd */
3774 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3776 case 0xb: /* V9 fabsq */
3777 CHECK_FPU_FEATURE(dc, FLOAT128);
3778 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3780 case 0x81: /* V9 fstox */
3781 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3783 case 0x82: /* V9 fdtox */
3784 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3786 case 0x83: /* V9 fqtox */
3787 CHECK_FPU_FEATURE(dc, FLOAT128);
3788 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3790 case 0x84: /* V9 fxtos */
3791 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3793 case 0x88: /* V9 fxtod */
3794 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3796 case 0x8c: /* V9 fxtoq */
3797 CHECK_FPU_FEATURE(dc, FLOAT128);
3798 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3804 } else if (xop == 0x35) { /* FPU Operations */
3805 #ifdef TARGET_SPARC64
3808 if (gen_trap_ifnofpu(dc)) {
3811 gen_op_clear_ieee_excp_and_FTT();
3812 rs1 = GET_FIELD(insn, 13, 17);
3813 rs2 = GET_FIELD(insn, 27, 31);
3814 xop = GET_FIELD(insn, 18, 26);
3816 #ifdef TARGET_SPARC64
3817 #define FMOVR(sz) \
3818 do { \
3819 DisasCompare cmp; \
3820 cond = GET_FIELD_SP(insn, 10, 12); \
3821 cpu_src1 = get_src1(dc, insn); \
3822 gen_compare_reg(&cmp, cond, cpu_src1); \
3823 gen_fmov##sz(dc, &cmp, rd, rs2); \
3824 free_compare(&cmp); \
3825 } while (0)
3827 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3830 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3833 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3834 CHECK_FPU_FEATURE(dc, FLOAT128);
3841 #ifdef TARGET_SPARC64
3842 #define FMOVCC(fcc, sz) \
3843 do { \
3844 DisasCompare cmp; \
3845 cond = GET_FIELD_SP(insn, 14, 17); \
3846 gen_fcompare(&cmp, fcc, cond); \
3847 gen_fmov##sz(dc, &cmp, rd, rs2); \
3848 free_compare(&cmp); \
3849 } while (0)
3851 case 0x001: /* V9 fmovscc %fcc0 */
3854 case 0x002: /* V9 fmovdcc %fcc0 */
3857 case 0x003: /* V9 fmovqcc %fcc0 */
3858 CHECK_FPU_FEATURE(dc, FLOAT128);
3861 case 0x041: /* V9 fmovscc %fcc1 */
3864 case 0x042: /* V9 fmovdcc %fcc1 */
3867 case 0x043: /* V9 fmovqcc %fcc1 */
3868 CHECK_FPU_FEATURE(dc, FLOAT128);
3871 case 0x081: /* V9 fmovscc %fcc2 */
3874 case 0x082: /* V9 fmovdcc %fcc2 */
3877 case 0x083: /* V9 fmovqcc %fcc2 */
3878 CHECK_FPU_FEATURE(dc, FLOAT128);
3881 case 0x0c1: /* V9 fmovscc %fcc3 */
3884 case 0x0c2: /* V9 fmovdcc %fcc3 */
3887 case 0x0c3: /* V9 fmovqcc %fcc3 */
3888 CHECK_FPU_FEATURE(dc, FLOAT128);
3892 #define FMOVCC(xcc, sz) \
3893 do { \
3894 DisasCompare cmp; \
3895 cond = GET_FIELD_SP(insn, 14, 17); \
3896 gen_compare(&cmp, xcc, cond, dc); \
3897 gen_fmov##sz(dc, &cmp, rd, rs2); \
3898 free_compare(&cmp); \
3899 } while (0)
3901 case 0x101: /* V9 fmovscc %icc */
3904 case 0x102: /* V9 fmovdcc %icc */
3907 case 0x103: /* V9 fmovqcc %icc */
3908 CHECK_FPU_FEATURE(dc, FLOAT128);
3911 case 0x181: /* V9 fmovscc %xcc */
3914 case 0x182: /* V9 fmovdcc %xcc */
3917 case 0x183: /* V9 fmovqcc %xcc */
3918 CHECK_FPU_FEATURE(dc, FLOAT128);
3923 case 0x51: /* fcmps, V9 %fcc */
3924 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3925 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3926 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3928 case 0x52: /* fcmpd, V9 %fcc */
3929 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3930 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3931 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3933 case 0x53: /* fcmpq, V9 %fcc */
3934 CHECK_FPU_FEATURE(dc, FLOAT128);
3935 gen_op_load_fpr_QT0(QFPREG(rs1));
3936 gen_op_load_fpr_QT1(QFPREG(rs2));
3937 gen_op_fcmpq(rd & 3);
3939 case 0x55: /* fcmpes, V9 %fcc */
3940 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3941 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3942 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3944 case 0x56: /* fcmped, V9 %fcc */
3945 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3946 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3947 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3949 case 0x57: /* fcmpeq, V9 %fcc */
3950 CHECK_FPU_FEATURE(dc, FLOAT128);
3951 gen_op_load_fpr_QT0(QFPREG(rs1));
3952 gen_op_load_fpr_QT1(QFPREG(rs2));
3953 gen_op_fcmpeq(rd & 3);
3958 } else if (xop == 0x2) {
3959 TCGv dst = gen_dest_gpr(dc, rd);
3960 rs1 = GET_FIELD(insn, 13, 17);
3962 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3963 if (IS_IMM) { /* immediate */
3964 simm = GET_FIELDs(insn, 19, 31);
3965 tcg_gen_movi_tl(dst, simm);
3966 gen_store_gpr(dc, rd, dst);
3967 } else { /* register */
3968 rs2 = GET_FIELD(insn, 27, 31);
3970 tcg_gen_movi_tl(dst, 0);
3971 gen_store_gpr(dc, rd, dst);
3973 cpu_src2 = gen_load_gpr(dc, rs2);
3974 gen_store_gpr(dc, rd, cpu_src2);
3978 cpu_src1 = get_src1(dc, insn);
3979 if (IS_IMM) { /* immediate */
3980 simm = GET_FIELDs(insn, 19, 31);
3981 tcg_gen_ori_tl(dst, cpu_src1, simm);
3982 gen_store_gpr(dc, rd, dst);
3983 } else { /* register */
3984 rs2 = GET_FIELD(insn, 27, 31);
3986 /* mov shortcut: or x, %g0, y -> mov x, y */
3987 gen_store_gpr(dc, rd, cpu_src1);
3989 cpu_src2 = gen_load_gpr(dc, rs2);
3990 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3991 gen_store_gpr(dc, rd, dst);
3995 #ifdef TARGET_SPARC64
3996 } else if (xop == 0x25) { /* sll, V9 sllx */
3997 cpu_src1 = get_src1(dc, insn);
3998 if (IS_IMM) { /* immediate */
3999 simm = GET_FIELDs(insn, 20, 31);
4000 if (insn & (1 << 12)) {
4001 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
4003 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
4005 } else { /* register */
4006 rs2 = GET_FIELD(insn, 27, 31);
4007 cpu_src2 = gen_load_gpr(dc, rs2);
4008 cpu_tmp0 = get_temp_tl(dc);
4009 if (insn & (1 << 12)) {
4010 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4012 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4014 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
4016 gen_store_gpr(dc, rd, cpu_dst);
4017 } else if (xop == 0x26) { /* srl, V9 srlx */
4018 cpu_src1 = get_src1(dc, insn);
4019 if (IS_IMM) { /* immediate */
4020 simm = GET_FIELDs(insn, 20, 31);
4021 if (insn & (1 << 12)) {
4022 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
4024 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4025 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
4027 } else { /* register */
4028 rs2 = GET_FIELD(insn, 27, 31);
4029 cpu_src2 = gen_load_gpr(dc, rs2);
4030 cpu_tmp0 = get_temp_tl(dc);
4031 if (insn & (1 << 12)) {
4032 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4033 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
4035 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4036 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
4037 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
4040 gen_store_gpr(dc, rd, cpu_dst);
4041 } else if (xop == 0x27) { /* sra, V9 srax */
4042 cpu_src1 = get_src1(dc, insn);
4043 if (IS_IMM) { /* immediate */
4044 simm = GET_FIELDs(insn, 20, 31);
4045 if (insn & (1 << 12)) {
4046 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
4048 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4049 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
4051 } else { /* register */
4052 rs2 = GET_FIELD(insn, 27, 31);
4053 cpu_src2 = gen_load_gpr(dc, rs2);
4054 cpu_tmp0 = get_temp_tl(dc);
4055 if (insn & (1 << 12)) {
4056 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
4057 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
4059 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
4060 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
4061 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
4064 gen_store_gpr(dc, rd, cpu_dst);
4066 } else if (xop < 0x36) {
4068 cpu_src1 = get_src1(dc, insn);
4069 cpu_src2 = get_src2(dc, insn);
4070 switch (xop & ~0x10) {
4073 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4074 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4075 dc->cc_op = CC_OP_ADD;
4077 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4081 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
4083 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4084 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4085 dc->cc_op = CC_OP_LOGIC;
4089 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
4091 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4092 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4093 dc->cc_op = CC_OP_LOGIC;
4097 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
4099 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4100 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4101 dc->cc_op = CC_OP_LOGIC;
4106 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4107 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
4108 dc->cc_op = CC_OP_SUB;
4110 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
4113 case 0x5: /* andn */
4114 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
4116 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4117 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4118 dc->cc_op = CC_OP_LOGIC;
4122 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
4124 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4125 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4126 dc->cc_op = CC_OP_LOGIC;
4129 case 0x7: /* xorn */
4130 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
4132 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4133 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4134 dc->cc_op = CC_OP_LOGIC;
4137 case 0x8: /* addx, V9 addc */
4138 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4141 #ifdef TARGET_SPARC64
4142 case 0x9: /* V9 mulx */
4143 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
4146 case 0xa: /* umul */
4147 CHECK_IU_FEATURE(dc, MUL);
4148 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
4150 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4151 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4152 dc->cc_op = CC_OP_LOGIC;
4155 case 0xb: /* smul */
4156 CHECK_IU_FEATURE(dc, MUL);
4157 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
4159 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
4160 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
4161 dc->cc_op = CC_OP_LOGIC;
4164 case 0xc: /* subx, V9 subc */
4165 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
4168 #ifdef TARGET_SPARC64
4169 case 0xd: /* V9 udivx */
4170 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4173 case 0xe: /* udiv */
4174 CHECK_IU_FEATURE(dc, DIV);
4176 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
4178 dc->cc_op = CC_OP_DIV;
4180 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
4184 case 0xf: /* sdiv */
4185 CHECK_IU_FEATURE(dc, DIV);
4187 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
4189 dc->cc_op = CC_OP_DIV;
4191 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
4198 gen_store_gpr(dc, rd, cpu_dst);
4200 cpu_src1 = get_src1(dc, insn);
4201 cpu_src2 = get_src2(dc, insn);
4203 case 0x20: /* taddcc */
4204 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
4205 gen_store_gpr(dc, rd, cpu_dst);
4206 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
4207 dc->cc_op = CC_OP_TADD;
4209 case 0x21: /* tsubcc */
4210 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
4211 gen_store_gpr(dc, rd, cpu_dst);
4212 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
4213 dc->cc_op = CC_OP_TSUB;
4215 case 0x22: /* taddcctv */
4216 gen_helper_taddcctv(cpu_dst, cpu_env,
4217 cpu_src1, cpu_src2);
4218 gen_store_gpr(dc, rd, cpu_dst);
4219 dc->cc_op = CC_OP_TADDTV;
4221 case 0x23: /* tsubcctv */
4222 gen_helper_tsubcctv(cpu_dst, cpu_env,
4223 cpu_src1, cpu_src2);
4224 gen_store_gpr(dc, rd, cpu_dst);
4225 dc->cc_op = CC_OP_TSUBTV;
4227 case 0x24: /* mulscc */
4229 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
4230 gen_store_gpr(dc, rd, cpu_dst);
4231 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
4232 dc->cc_op = CC_OP_ADD;
4234 #ifndef TARGET_SPARC64
4235 case 0x25: /* sll */
4236 if (IS_IMM) { /* immediate */
4237 simm = GET_FIELDs(insn, 20, 31);
4238 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
4239 } else { /* register */
4240 cpu_tmp0 = get_temp_tl(dc);
4241 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4242 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
4244 gen_store_gpr(dc, rd, cpu_dst);
4246 case 0x26: /* srl */
4247 if (IS_IMM) { /* immediate */
4248 simm = GET_FIELDs(insn, 20, 31);
4249 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
4250 } else { /* register */
4251 cpu_tmp0 = get_temp_tl(dc);
4252 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4253 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
4255 gen_store_gpr(dc, rd, cpu_dst);
4257 case 0x27: /* sra */
4258 if (IS_IMM) { /* immediate */
4259 simm = GET_FIELDs(insn, 20, 31);
4260 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
4261 } else { /* register */
4262 cpu_tmp0 = get_temp_tl(dc);
4263 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
4264 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
4266 gen_store_gpr(dc, rd, cpu_dst);
4271 cpu_tmp0 = get_temp_tl(dc);
4274 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4275 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
4277 #ifndef TARGET_SPARC64
4278 case 0x01 ... 0x0f: /* undefined in the
4282 case 0x10 ... 0x1f: /* implementation-dependent
4286 if ((rd == 0x13) && (dc->def->features &
4287 CPU_FEATURE_POWERDOWN)) {
4288 /* LEON3 power-down */
4290 gen_helper_power_down(cpu_env);
4294 case 0x2: /* V9 wrccr */
4295 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4296 gen_helper_wrccr(cpu_env, cpu_tmp0);
4297 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4298 dc->cc_op = CC_OP_FLAGS;
4300 case 0x3: /* V9 wrasi */
4301 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4302 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
4303 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4304 offsetof(CPUSPARCState, asi));
4305 /* End TB to notice changed ASI. */
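/* (dc->asi was sampled at translation time, so code translated under
   the old %asi value must not keep executing.)  */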
4311 case 0x6: /* V9 wrfprs */
4312 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4313 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
4320 case 0xf: /* V9 sir, nop if user */
4321 #if !defined(CONFIG_USER_ONLY)
4322 if (supervisor(dc)) {
4327 case 0x13: /* Graphics Status */
4328 if (gen_trap_ifnofpu(dc)) {
4331 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
4333 case 0x14: /* Softint set */
4334 if (!supervisor(dc))
4336 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4337 gen_helper_set_softint(cpu_env, cpu_tmp0);
4339 case 0x15: /* Softint clear */
4340 if (!supervisor(dc))
4342 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4343 gen_helper_clear_softint(cpu_env, cpu_tmp0);
4345 case 0x16: /* Softint write */
4346 if (!supervisor(dc))
4348 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4349 gen_helper_write_softint(cpu_env, cpu_tmp0);
4351 case 0x17: /* Tick compare */
4352 #if !defined(CONFIG_USER_ONLY)
4353 if (!supervisor(dc))
4359 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
4361 r_tickptr = tcg_temp_new_ptr();
4362 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4363 offsetof(CPUSPARCState, tick));
4364 gen_helper_tick_set_limit(r_tickptr,
4366 tcg_temp_free_ptr(r_tickptr);
4369 case 0x18: /* System tick */
4370 #if !defined(CONFIG_USER_ONLY)
4371 if (!supervisor(dc))
4377 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
4379 r_tickptr = tcg_temp_new_ptr();
4380 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4381 offsetof(CPUSPARCState, stick));
4382 gen_helper_tick_set_count(r_tickptr,
4384 tcg_temp_free_ptr(r_tickptr);
4387 case 0x19: /* System tick compare */
4388 #if !defined(CONFIG_USER_ONLY)
4389 if (!supervisor(dc))
4395 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
4397 r_tickptr = tcg_temp_new_ptr();
4398 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4399 offsetof(CPUSPARCState, stick));
4400 gen_helper_tick_set_limit(r_tickptr,
4402 tcg_temp_free_ptr(r_tickptr);
4406 case 0x10: /* Performance Control */
4407 case 0x11: /* Performance Instrumentation
4409 case 0x12: /* Dispatch Control */
4416 #if !defined(CONFIG_USER_ONLY)
4417 case 0x31: /* wrpsr, V9 saved, restored */
4419 if (!supervisor(dc))
4421 #ifdef TARGET_SPARC64
4424 gen_helper_saved(cpu_env);
4427 gen_helper_restored(cpu_env);
4429 case 2: /* UA2005 allclean */
4430 case 3: /* UA2005 otherw */
4431 case 4: /* UA2005 normalw */
4432 case 5: /* UA2005 invalw */
4438 cpu_tmp0 = get_temp_tl(dc);
4439 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4440 gen_helper_wrpsr(cpu_env, cpu_tmp0);
4441 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
4442 dc->cc_op = CC_OP_FLAGS;
4450 case 0x32: /* wrwim, V9 wrpr */
4452 if (!supervisor(dc))
4454 cpu_tmp0 = get_temp_tl(dc);
4455 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4456 #ifdef TARGET_SPARC64
4462 r_tsptr = tcg_temp_new_ptr();
4463 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4464 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4465 offsetof(trap_state, tpc));
4466 tcg_temp_free_ptr(r_tsptr);
4473 r_tsptr = tcg_temp_new_ptr();
4474 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4475 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4476 offsetof(trap_state, tnpc));
4477 tcg_temp_free_ptr(r_tsptr);
4484 r_tsptr = tcg_temp_new_ptr();
4485 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4486 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
4487 offsetof(trap_state,
4489 tcg_temp_free_ptr(r_tsptr);
4496 r_tsptr = tcg_temp_new_ptr();
4497 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
4498 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
4499 offsetof(trap_state, tt));
4500 tcg_temp_free_ptr(r_tsptr);
4507 r_tickptr = tcg_temp_new_ptr();
4508 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4509 offsetof(CPUSPARCState, tick));
4510 gen_helper_tick_set_count(r_tickptr,
4512 tcg_temp_free_ptr(r_tickptr);
4516 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
4520 gen_helper_wrpstate(cpu_env, cpu_tmp0);
4521 dc->npc = DYNAMIC_PC;
4525 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4526 offsetof(CPUSPARCState, tl));
4527 dc->npc = DYNAMIC_PC;
4530 gen_helper_wrpil(cpu_env, cpu_tmp0);
4533 gen_helper_wrcwp(cpu_env, cpu_tmp0);
4536 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4537 offsetof(CPUSPARCState,
4540 case 11: // canrestore
4541 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4542 offsetof(CPUSPARCState,
4545 case 12: // cleanwin
4546 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4547 offsetof(CPUSPARCState,
4550 case 13: // otherwin
4551 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4552 offsetof(CPUSPARCState,
4556 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
4557 offsetof(CPUSPARCState,
4560 case 16: // UA2005 gl
4561 CHECK_IU_FEATURE(dc, GL);
4562 gen_helper_wrgl(cpu_env, cpu_tmp0);
4564 case 26: // UA2005 strand status
4565 CHECK_IU_FEATURE(dc, HYPV);
4566 if (!hypervisor(dc))
4568 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
4574 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
4575 if (dc->def->nwindows != 32) {
4576 tcg_gen_andi_tl(cpu_wim, cpu_wim,
4577 (1 << dc->def->nwindows) - 1);
4582 case 0x33: /* wrtbr, UA2005 wrhpr */
4584 #ifndef TARGET_SPARC64
4585 if (!supervisor(dc))
4587 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
4589 CHECK_IU_FEATURE(dc, HYPV);
4590 if (!hypervisor(dc))
4592 cpu_tmp0 = get_temp_tl(dc);
4593 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
4596 tcg_gen_st_i64(cpu_tmp0, cpu_env,
4597 offsetof(CPUSPARCState,
4605 // XXX gen_op_wrhtstate();
4608 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
4611 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
4613 case 31: // hstick_cmpr
4617 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
4618 r_tickptr = tcg_temp_new_ptr();
4619 tcg_gen_ld_ptr(r_tickptr, cpu_env,
4620 offsetof(CPUSPARCState, hstick));
4621 gen_helper_tick_set_limit(r_tickptr,
4623 tcg_temp_free_ptr(r_tickptr);
4626 case 6: // hver readonly
4634 #ifdef TARGET_SPARC64
4635 case 0x2c: /* V9 movcc */
4637 int cc = GET_FIELD_SP(insn, 11, 12);
4638 int cond = GET_FIELD_SP(insn, 14, 17);
4642 if (insn & (1 << 18)) {
4644 gen_compare(&cmp, 0, cond, dc);
4645 } else if (cc == 2) {
4646 gen_compare(&cmp, 1, cond, dc);
4651 gen_fcompare(&cmp, cc, cond);
4654 /* The get_src2 above loaded the normal 13-bit
4655 immediate field, not the 11-bit field we have
4656 in movcc. But it did handle the reg case. */
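/* The 11-bit field is sign-extended, so e.g. an all-ones field (0x7ff)
   encodes -1.  */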
4658 simm = GET_FIELD_SPs(insn, 0, 10);
4659 tcg_gen_movi_tl(cpu_src2, simm);
4662 dst = gen_load_gpr(dc, rd);
4663 tcg_gen_movcond_tl(cmp.cond, dst,
4667 gen_store_gpr(dc, rd, dst);
4670 case 0x2d: /* V9 sdivx */
4671 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
4672 gen_store_gpr(dc, rd, cpu_dst);
4674 case 0x2e: /* V9 popc */
4675 tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
4676 gen_store_gpr(dc, rd, cpu_dst);
4678 case 0x2f: /* V9 movr */
4680 int cond = GET_FIELD_SP(insn, 10, 12);
4684 gen_compare_reg(&cmp, cond, cpu_src1);
4686 /* The get_src2 above loaded the normal 13-bit
4687 immediate field, not the 10-bit field we have
4688 in movr. But it did handle the reg case. */
4690 simm = GET_FIELD_SPs(insn, 0, 9);
4691 tcg_gen_movi_tl(cpu_src2, simm);
4694 dst = gen_load_gpr(dc, rd);
4695 tcg_gen_movcond_tl(cmp.cond, dst,
4699 gen_store_gpr(dc, rd, dst);
4707 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4708 #ifdef TARGET_SPARC64
4709 int opf = GET_FIELD_SP(insn, 5, 13);
4710 rs1 = GET_FIELD(insn, 13, 17);
4711 rs2 = GET_FIELD(insn, 27, 31);
4712 if (gen_trap_ifnofpu(dc)) {
4717 case 0x000: /* VIS I edge8cc */
4718 CHECK_FPU_FEATURE(dc, VIS1);
4719 cpu_src1 = gen_load_gpr(dc, rs1);
4720 cpu_src2 = gen_load_gpr(dc, rs2);
4721 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4722 gen_store_gpr(dc, rd, cpu_dst);
4724 case 0x001: /* VIS II edge8n */
4725 CHECK_FPU_FEATURE(dc, VIS2);
4726 cpu_src1 = gen_load_gpr(dc, rs1);
4727 cpu_src2 = gen_load_gpr(dc, rs2);
4728 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4729 gen_store_gpr(dc, rd, cpu_dst);
4731 case 0x002: /* VIS I edge8lcc */
4732 CHECK_FPU_FEATURE(dc, VIS1);
4733 cpu_src1 = gen_load_gpr(dc, rs1);
4734 cpu_src2 = gen_load_gpr(dc, rs2);
4735 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4736 gen_store_gpr(dc, rd, cpu_dst);
4738 case 0x003: /* VIS II edge8ln */
4739 CHECK_FPU_FEATURE(dc, VIS2);
4740 cpu_src1 = gen_load_gpr(dc, rs1);
4741 cpu_src2 = gen_load_gpr(dc, rs2);
4742 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4743 gen_store_gpr(dc, rd, cpu_dst);
4745 case 0x004: /* VIS I edge16cc */
4746 CHECK_FPU_FEATURE(dc, VIS1);
4747 cpu_src1 = gen_load_gpr(dc, rs1);
4748 cpu_src2 = gen_load_gpr(dc, rs2);
4749 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4750 gen_store_gpr(dc, rd, cpu_dst);
4752 case 0x005: /* VIS II edge16n */
4753 CHECK_FPU_FEATURE(dc, VIS2);
4754 cpu_src1 = gen_load_gpr(dc, rs1);
4755 cpu_src2 = gen_load_gpr(dc, rs2);
4756 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4757 gen_store_gpr(dc, rd, cpu_dst);
4759 case 0x006: /* VIS I edge16lcc */
4760 CHECK_FPU_FEATURE(dc, VIS1);
4761 cpu_src1 = gen_load_gpr(dc, rs1);
4762 cpu_src2 = gen_load_gpr(dc, rs2);
4763 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4764 gen_store_gpr(dc, rd, cpu_dst);
4766 case 0x007: /* VIS II edge16ln */
4767 CHECK_FPU_FEATURE(dc, VIS2);
4768 cpu_src1 = gen_load_gpr(dc, rs1);
4769 cpu_src2 = gen_load_gpr(dc, rs2);
4770 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4771 gen_store_gpr(dc, rd, cpu_dst);
4773 case 0x008: /* VIS I edge32cc */
4774 CHECK_FPU_FEATURE(dc, VIS1);
4775 cpu_src1 = gen_load_gpr(dc, rs1);
4776 cpu_src2 = gen_load_gpr(dc, rs2);
4777 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4778 gen_store_gpr(dc, rd, cpu_dst);
4780 case 0x009: /* VIS II edge32n */
4781 CHECK_FPU_FEATURE(dc, VIS2);
4782 cpu_src1 = gen_load_gpr(dc, rs1);
4783 cpu_src2 = gen_load_gpr(dc, rs2);
4784 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4785 gen_store_gpr(dc, rd, cpu_dst);
4787 case 0x00a: /* VIS I edge32lcc */
4788 CHECK_FPU_FEATURE(dc, VIS1);
4789 cpu_src1 = gen_load_gpr(dc, rs1);
4790 cpu_src2 = gen_load_gpr(dc, rs2);
4791 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4792 gen_store_gpr(dc, rd, cpu_dst);
4794 case 0x00b: /* VIS II edge32ln */
4795 CHECK_FPU_FEATURE(dc, VIS2);
4796 cpu_src1 = gen_load_gpr(dc, rs1);
4797 cpu_src2 = gen_load_gpr(dc, rs2);
4798 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4799 gen_store_gpr(dc, rd, cpu_dst);
4801 case 0x010: /* VIS I array8 */
4802 CHECK_FPU_FEATURE(dc, VIS1);
4803 cpu_src1 = gen_load_gpr(dc, rs1);
4804 cpu_src2 = gen_load_gpr(dc, rs2);
4805 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4806 gen_store_gpr(dc, rd, cpu_dst);
4808 case 0x012: /* VIS I array16 */
4809 CHECK_FPU_FEATURE(dc, VIS1);
4810 cpu_src1 = gen_load_gpr(dc, rs1);
4811 cpu_src2 = gen_load_gpr(dc, rs2);
4812 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4813 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4814 gen_store_gpr(dc, rd, cpu_dst);
4816 case 0x014: /* VIS I array32 */
4817 CHECK_FPU_FEATURE(dc, VIS1);
4818 cpu_src1 = gen_load_gpr(dc, rs1);
4819 cpu_src2 = gen_load_gpr(dc, rs2);
4820 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4821 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4822 gen_store_gpr(dc, rd, cpu_dst);
4824 case 0x018: /* VIS I alignaddr */
4825 CHECK_FPU_FEATURE(dc, VIS1);
4826 cpu_src1 = gen_load_gpr(dc, rs1);
4827 cpu_src2 = gen_load_gpr(dc, rs2);
4828 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4829 gen_store_gpr(dc, rd, cpu_dst);
4831 case 0x01a: /* VIS I alignaddrl */
4832 CHECK_FPU_FEATURE(dc, VIS1);
4833 cpu_src1 = gen_load_gpr(dc, rs1);
4834 cpu_src2 = gen_load_gpr(dc, rs2);
4835 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4836 gen_store_gpr(dc, rd, cpu_dst);
4838 case 0x019: /* VIS II bmask */
4839 CHECK_FPU_FEATURE(dc, VIS2);
4840 cpu_src1 = gen_load_gpr(dc, rs1);
4841 cpu_src2 = gen_load_gpr(dc, rs2);
4842 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4843 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
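/* The low 32 bits of the sum are also latched into the upper half of
   GSR -- the bmask field that a later bshuffle consumes.  */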
4844 gen_store_gpr(dc, rd, cpu_dst);
4846 case 0x020: /* VIS I fcmple16 */
4847 CHECK_FPU_FEATURE(dc, VIS1);
4848 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4849 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4850 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4851 gen_store_gpr(dc, rd, cpu_dst);
4853 case 0x022: /* VIS I fcmpne16 */
4854 CHECK_FPU_FEATURE(dc, VIS1);
4855 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4856 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4857 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4858 gen_store_gpr(dc, rd, cpu_dst);
4860 case 0x024: /* VIS I fcmple32 */
4861 CHECK_FPU_FEATURE(dc, VIS1);
4862 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4863 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4864 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4865 gen_store_gpr(dc, rd, cpu_dst);
4867 case 0x026: /* VIS I fcmpne32 */
4868 CHECK_FPU_FEATURE(dc, VIS1);
4869 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4870 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4871 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4872 gen_store_gpr(dc, rd, cpu_dst);
4874 case 0x028: /* VIS I fcmpgt16 */
4875 CHECK_FPU_FEATURE(dc, VIS1);
4876 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4877 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4878 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4879 gen_store_gpr(dc, rd, cpu_dst);
4881 case 0x02a: /* VIS I fcmpeq16 */
4882 CHECK_FPU_FEATURE(dc, VIS1);
4883 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4884 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4885 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4886 gen_store_gpr(dc, rd, cpu_dst);
4888 case 0x02c: /* VIS I fcmpgt32 */
4889 CHECK_FPU_FEATURE(dc, VIS1);
4890 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4891 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4892 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4893 gen_store_gpr(dc, rd, cpu_dst);
4895 case 0x02e: /* VIS I fcmpeq32 */
4896 CHECK_FPU_FEATURE(dc, VIS1);
4897 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4898 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4899 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4900 gen_store_gpr(dc, rd, cpu_dst);
4902 case 0x031: /* VIS I fmul8x16 */
4903 CHECK_FPU_FEATURE(dc, VIS1);
4904 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4906 case 0x033: /* VIS I fmul8x16au */
4907 CHECK_FPU_FEATURE(dc, VIS1);
4908 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4910 case 0x035: /* VIS I fmul8x16al */
4911 CHECK_FPU_FEATURE(dc, VIS1);
4912 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4914 case 0x036: /* VIS I fmul8sux16 */
4915 CHECK_FPU_FEATURE(dc, VIS1);
4916 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4918 case 0x037: /* VIS I fmul8ulx16 */
4919 CHECK_FPU_FEATURE(dc, VIS1);
4920 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4922 case 0x038: /* VIS I fmuld8sux16 */
4923 CHECK_FPU_FEATURE(dc, VIS1);
4924 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4926 case 0x039: /* VIS I fmuld8ulx16 */
4927 CHECK_FPU_FEATURE(dc, VIS1);
4928 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4930 case 0x03a: /* VIS I fpack32 */
4931 CHECK_FPU_FEATURE(dc, VIS1);
4932 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4934 case 0x03b: /* VIS I fpack16 */
4935 CHECK_FPU_FEATURE(dc, VIS1);
4936 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4937 cpu_dst_32 = gen_dest_fpr_F(dc);
4938 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4939 gen_store_fpr_F(dc, rd, cpu_dst_32);
4941 case 0x03d: /* VIS I fpackfix */
4942 CHECK_FPU_FEATURE(dc, VIS1);
4943 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4944 cpu_dst_32 = gen_dest_fpr_F(dc);
4945 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4946 gen_store_fpr_F(dc, rd, cpu_dst_32);
4948 case 0x03e: /* VIS I pdist */
4949 CHECK_FPU_FEATURE(dc, VIS1);
4950 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4952 case 0x048: /* VIS I faligndata */
4953 CHECK_FPU_FEATURE(dc, VIS1);
4954 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4956 case 0x04b: /* VIS I fpmerge */
4957 CHECK_FPU_FEATURE(dc, VIS1);
4958 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4960 case 0x04c: /* VIS II bshuffle */
4961 CHECK_FPU_FEATURE(dc, VIS2);
4962 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4964 case 0x04d: /* VIS I fexpand */
4965 CHECK_FPU_FEATURE(dc, VIS1);
4966 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4968 case 0x050: /* VIS I fpadd16 */
4969 CHECK_FPU_FEATURE(dc, VIS1);
4970 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4972 case 0x051: /* VIS I fpadd16s */
4973 CHECK_FPU_FEATURE(dc, VIS1);
4974 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4976 case 0x052: /* VIS I fpadd32 */
4977 CHECK_FPU_FEATURE(dc, VIS1);
4978 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4980 case 0x053: /* VIS I fpadd32s */
4981 CHECK_FPU_FEATURE(dc, VIS1);
4982 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4984 case 0x054: /* VIS I fpsub16 */
4985 CHECK_FPU_FEATURE(dc, VIS1);
4986 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4988 case 0x055: /* VIS I fpsub16s */
4989 CHECK_FPU_FEATURE(dc, VIS1);
4990 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4992 case 0x056: /* VIS I fpsub32 */
4993 CHECK_FPU_FEATURE(dc, VIS1);
4994 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4996 case 0x057: /* VIS I fpsub32s */
4997 CHECK_FPU_FEATURE(dc, VIS1);
4998 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
5000 case 0x060: /* VIS I fzero */
5001 CHECK_FPU_FEATURE(dc, VIS1);
5002 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5003 tcg_gen_movi_i64(cpu_dst_64, 0);
5004 gen_store_fpr_D(dc, rd, cpu_dst_64);
5006 case 0x061: /* VIS I fzeros */
5007 CHECK_FPU_FEATURE(dc, VIS1);
5008 cpu_dst_32 = gen_dest_fpr_F(dc);
5009 tcg_gen_movi_i32(cpu_dst_32, 0);
5010 gen_store_fpr_F(dc, rd, cpu_dst_32);
5012 case 0x062: /* VIS I fnor */
5013 CHECK_FPU_FEATURE(dc, VIS1);
5014 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
5016 case 0x063: /* VIS I fnors */
5017 CHECK_FPU_FEATURE(dc, VIS1);
5018 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
5020 case 0x064: /* VIS I fandnot2 */
5021 CHECK_FPU_FEATURE(dc, VIS1);
5022 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
5024 case 0x065: /* VIS I fandnot2s */
5025 CHECK_FPU_FEATURE(dc, VIS1);
5026 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
5028 case 0x066: /* VIS I fnot2 */
5029 CHECK_FPU_FEATURE(dc, VIS1);
5030 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
5032 case 0x067: /* VIS I fnot2s */
5033 CHECK_FPU_FEATURE(dc, VIS1);
5034 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
5036 case 0x068: /* VIS I fandnot1 */
5037 CHECK_FPU_FEATURE(dc, VIS1);
5038 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
5040 case 0x069: /* VIS I fandnot1s */
5041 CHECK_FPU_FEATURE(dc, VIS1);
5042 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
5044 case 0x06a: /* VIS I fnot1 */
5045 CHECK_FPU_FEATURE(dc, VIS1);
5046 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
5048 case 0x06b: /* VIS I fnot1s */
5049 CHECK_FPU_FEATURE(dc, VIS1);
5050 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
5052 case 0x06c: /* VIS I fxor */
5053 CHECK_FPU_FEATURE(dc, VIS1);
5054 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
5056 case 0x06d: /* VIS I fxors */
5057 CHECK_FPU_FEATURE(dc, VIS1);
5058 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
5060 case 0x06e: /* VIS I fnand */
5061 CHECK_FPU_FEATURE(dc, VIS1);
5062 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
5064 case 0x06f: /* VIS I fnands */
5065 CHECK_FPU_FEATURE(dc, VIS1);
5066 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
5068 case 0x070: /* VIS I fand */
5069 CHECK_FPU_FEATURE(dc, VIS1);
5070 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
5072 case 0x071: /* VIS I fands */
5073 CHECK_FPU_FEATURE(dc, VIS1);
5074 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
5076 case 0x072: /* VIS I fxnor */
5077 CHECK_FPU_FEATURE(dc, VIS1);
5078 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
5080 case 0x073: /* VIS I fxnors */
5081 CHECK_FPU_FEATURE(dc, VIS1);
5082 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
5084 case 0x074: /* VIS I fsrc1 */
5085 CHECK_FPU_FEATURE(dc, VIS1);
5086 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
5087 gen_store_fpr_D(dc, rd, cpu_src1_64);
5089 case 0x075: /* VIS I fsrc1s */
5090 CHECK_FPU_FEATURE(dc, VIS1);
5091 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
5092 gen_store_fpr_F(dc, rd, cpu_src1_32);
5094 case 0x076: /* VIS I fornot2 */
5095 CHECK_FPU_FEATURE(dc, VIS1);
5096 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
5098 case 0x077: /* VIS I fornot2s */
5099 CHECK_FPU_FEATURE(dc, VIS1);
5100 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
5102 case 0x078: /* VIS I fsrc2 */
5103 CHECK_FPU_FEATURE(dc, VIS1);
5104 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
5105 gen_store_fpr_D(dc, rd, cpu_src1_64);
5107 case 0x079: /* VIS I fsrc2s */
5108 CHECK_FPU_FEATURE(dc, VIS1);
5109 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
5110 gen_store_fpr_F(dc, rd, cpu_src1_32);
5112 case 0x07a: /* VIS I fornot1 */
5113 CHECK_FPU_FEATURE(dc, VIS1);
5114 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
5116 case 0x07b: /* VIS I fornot1s */
5117 CHECK_FPU_FEATURE(dc, VIS1);
5118 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
5120 case 0x07c: /* VIS I for */
5121 CHECK_FPU_FEATURE(dc, VIS1);
5122 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
5124 case 0x07d: /* VIS I fors */
5125 CHECK_FPU_FEATURE(dc, VIS1);
5126 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
5128 case 0x07e: /* VIS I fone */
5129 CHECK_FPU_FEATURE(dc, VIS1);
5130 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
5131 tcg_gen_movi_i64(cpu_dst_64, -1);
5132 gen_store_fpr_D(dc, rd, cpu_dst_64);
5134 case 0x07f: /* VIS I fones */
5135 CHECK_FPU_FEATURE(dc, VIS1);
5136 cpu_dst_32 = gen_dest_fpr_F(dc);
5137 tcg_gen_movi_i32(cpu_dst_32, -1);
5138 gen_store_fpr_F(dc, rd, cpu_dst_32);
5140 case 0x080: /* VIS I shutdown */
5141 case 0x081: /* VIS II siam */
5150 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
5151 #ifdef TARGET_SPARC64
5156 #ifdef TARGET_SPARC64
5157 } else if (xop == 0x39) { /* V9 return */
5159 cpu_src1 = get_src1(dc, insn);
5160 cpu_tmp0 = get_temp_tl(dc);
5161 if (IS_IMM) { /* immediate */
5162 simm = GET_FIELDs(insn, 19, 31);
5163 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5164 } else { /* register */
5165 rs2 = GET_FIELD(insn, 27, 31);
5167 cpu_src2 = gen_load_gpr(dc, rs2);
5168 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5170 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5173 gen_helper_restore(cpu_env);
5175 gen_check_align(cpu_tmp0, 3);
5176 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5177 dc->npc = DYNAMIC_PC;
5181 cpu_src1 = get_src1(dc, insn);
5182 cpu_tmp0 = get_temp_tl(dc);
5183 if (IS_IMM) { /* immediate */
5184 simm = GET_FIELDs(insn, 19, 31);
5185 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
5186 } else { /* register */
5187 rs2 = GET_FIELD(insn, 27, 31);
5189 cpu_src2 = gen_load_gpr(dc, rs2);
5190 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
5192 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
5196 case 0x38: /* jmpl */
5198 TCGv t = gen_dest_gpr(dc, rd);
5199 tcg_gen_movi_tl(t, dc->pc);
5200 gen_store_gpr(dc, rd, t);
5203 gen_check_align(cpu_tmp0, 3);
5204 gen_address_mask(dc, cpu_tmp0);
5205 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5206 dc->npc = DYNAMIC_PC;
5209 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5210 case 0x39: /* rett, V9 return */
5212 if (!supervisor(dc))
5215 gen_check_align(cpu_tmp0, 3);
5216 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
5217 dc->npc = DYNAMIC_PC;
5218 gen_helper_rett(cpu_env);
5222 case 0x3b: /* flush */
5223 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
5227 case 0x3c: /* save */
5228 gen_helper_save(cpu_env);
5229 gen_store_gpr(dc, rd, cpu_tmp0);
5231 case 0x3d: /* restore */
5232 gen_helper_restore(cpu_env);
5233 gen_store_gpr(dc, rd, cpu_tmp0);
5235 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5236 case 0x3e: /* V9 done/retry */
5240 if (!supervisor(dc))
5242 dc->npc = DYNAMIC_PC;
5243 dc->pc = DYNAMIC_PC;
5244 gen_helper_done(cpu_env);
5247 if (!supervisor(dc))
5249 dc->npc = DYNAMIC_PC;
5250 dc->pc = DYNAMIC_PC;
5251 gen_helper_retry(cpu_env);
5266 case 3: /* load/store instructions */
5268 unsigned int xop = GET_FIELD(insn, 7, 12);
5269 /* ??? gen_address_mask prevents us from using a source
5270 register directly. Always generate a temporary. */
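/* (gen_address_mask may truncate the address to 32 bits when the V9
   PSTATE.AM bit is in effect -- compare the AM_CHECK uses above -- so
   it has to operate on a value we are free to clobber.)  */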
5271 TCGv cpu_addr = get_temp_tl(dc);
5273 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
5274 if (xop == 0x3c || xop == 0x3e) {
5275 /* V9 casa/casxa: no offset */
5276 } else if (IS_IMM) { /* immediate */
5277 simm = GET_FIELDs(insn, 19, 31);
5279 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
5281 } else { /* register */
5282 rs2 = GET_FIELD(insn, 27, 31);
5284 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
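                /* Integer loads leave their result in cpu_val, which is
                   written back to r[rd] by gen_store_gpr() after the
                   switch; cases that write their destination themselves
                   (ldda, FP/ASI loads, prefetch) jump to skip_move.  */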
                case 0x0:       /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1:       /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x2:       /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
                    break;
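                /* ldd loads into an even/odd register pair: the word at
                   EA goes to r[rd], the word at EA + 4 to r[rd + 1]; an
                   odd rd is an illegal instruction.  */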
                case 0x3:       /* ldd, load double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i64 t64;

                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_temp_free_i64(t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
                case 0x9:       /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xa:       /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xd:       /* ldstub, atomically load byte and
                                   store 0xff */
                    gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0f:      /* swap, atomically swap register with
                                   memory */
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
                             dc->mem_idx, MO_TEUL);
                    break;
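                /* The "alternate" variants below access an explicit
                   address space: the ASI comes from the instruction's
                   asi field in the register form or, on V9, from the
                   %asi register in the immediate form.  */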
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:      /* lda, V9 lduwa, load word alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x11:      /* lduba, load unsigned byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x12:      /* lduha, load unsigned halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x13:      /* ldda, load double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_ldda_asi(dc, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19:      /* ldsba, load signed byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
                    break;
                case 0x1a:      /* ldsha, load signed halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
                    break;
                case 0x1d:      /* ldstuba, atomic load-store byte
                                   alternate */
                    gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
                    break;
                case 0x1f:      /* swapa, atomically swap reg with
                                   alternate memory */
                    CHECK_IU_FEATURE(dc, SWAP);
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
                    break;
#endif
#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw, load signed word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x0b: /* V9 ldx, load 64-bit word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x18: /* V9 ldswa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
                    break;
                case 0x1b: /* V9 ldxa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(dc, rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(dc, DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(dc, QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
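                /* FP loads.  gen_trap_ifnofpu() emits an fp_disabled
                   trap and returns true when the FPU is disabled for
                   this translation block.  */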
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x20:      /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21:      /* ldfsr, V9 ldxfsr */
                    /* Loading the FSR has side effects (rounding mode,
                       trap enables), so it goes through a helper rather
                       than a plain store.  */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEQ);
                        gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
                        tcg_temp_free_i64(t64);
                        break;
                    }
#endif
                    cpu_dst_32 = get_temp_i32(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
                    break;
                case 0x22:      /* ldqf, load quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEQ | MO_ALIGN_4);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
                                        MO_TEQ | MO_ALIGN_4);
                    gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
                    tcg_temp_free_i64(cpu_src1_64);
                    tcg_temp_free_i64(cpu_src2_64);
                    break;
                case 0x23:      /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
                                        MO_TEQ | MO_ALIGN_4);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
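                /* Integer stores: here r[rd] is a source rather than a
                   destination, hence gen_load_gpr().  */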
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1)
                        goto illegal_insn;
                    else {
                        TCGv_i64 t64;
                        TCGv lo;

                        gen_address_mask(dc, cpu_addr);
                        lo = gen_load_gpr(dc, rd + 1);
                        t64 = tcg_temp_new_i64();
                        /* concatenate the pair and store it as a single
                           64-bit access: r[rd] to the lower address,
                           r[rd + 1] to the higher one */
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
                        tcg_temp_free_i64(t64);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x15: /* stba, store byte alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x16: /* stha, store halfword alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x17: /* stda, store double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0x1e: /* V9 stxa */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x24: /* stf, store fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
                    tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL);
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
                    {
#ifdef TARGET_SPARC64
                        gen_address_mask(dc, cpu_addr);
                        if (rd == 1) {
                            tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
                            break;
                        }
#endif
                        tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
                    }
                    break;
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    /* ??? While stqf only requires 4-byte alignment, it is
                       legal for the cpu to signal the unaligned exception.
                       The OS trap handler is then required to fix it up.
                       For qemu, this avoids having to probe the second page
                       before performing the first write.  */
                    cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
                                        dc->mem_idx, MO_TEQ | MO_ALIGN_16);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
                                        dc->mem_idx, MO_TEQ);
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc))
                        goto priv_insn;
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27: /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEQ | MO_ALIGN_4);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 4, rd);
                    break;
                case 0x36: /* V9 stqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    break;
                case 0x37: /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e: /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
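                /* casa: compare-and-swap.  The word at [rs1] is compared
                   with r[rs2]; if equal it is replaced by r[rd], and
                   r[rd] always receives the old memory value.  */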
                case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
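    /* Exception exits.  Each label below raises the corresponding trap
       and then falls through to egress, where the temporaries allocated
       for this instruction are freed.  */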
 jmp_insn:
    goto egress;
 illegal_insn:
    gen_exception(dc, TT_ILL_INSN);
    goto egress;
 unimp_flush:
    gen_exception(dc, TT_UNIMP_FLUSH);
    goto egress;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    gen_exception(dc, TT_PRIV_INSN);
    goto egress;
#endif
 nfpu_insn:
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    gen_exception(dc, TT_NCP_INSN);
    goto egress;
#endif
 egress:
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
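/* Translate one block of guest code into TCG ops.  Translation stops at
   the first branch, at a page boundary, or when the maximum instruction
   count for the TB is reached.  */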
void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
{
    SPARCCPU *cpu = sparc_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong pc_start, last_pc;
    DisasContext dc1, *dc = &dc1;
    int num_insns;
    int max_insns;
    unsigned int insn;

    memset(dc, 0, sizeof(DisasContext));
    dc->tb = tb;
    pc_start = tb->pc;
    dc->pc = pc_start;
    last_pc = dc->pc;
    dc->npc = (target_ulong) tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
    dc->def = env->def;
    dc->fpu_enabled = tb_fpu_enabled(tb->flags);
    dc->address_mask_32bit = tb_am_enabled(tb->flags);
    dc->singlestep = (cs->singlestep_enabled || singlestep);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
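        /* Record the (pc, npc) pair for this instruction; on an
           exception, restore_state_to_opc() recovers the precise state
           from it.  For a conditional delayed branch, npc is not a
           constant, so the taken target is encoded with the JUMP_PC
           marker instead.  */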
        if (dc->npc & JUMP_PC) {
            assert(dc->jump_pc[1] == dc->pc + 4);
            tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
        } else {
            tcg_gen_insn_start(dc->pc, dc->npc);
        }
        num_insns++;
        last_pc = dc->pc;
        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            if (dc->pc != pc_start) {
                save_state(dc);
            }
            gen_helper_debug(cpu_env);
            tcg_gen_exit_tb(0);
            dc->is_br = 1;
            goto exit_gen_loop;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        insn = cpu_ldl_code(env, dc->pc);

        disas_sparc_insn(dc, insn);

        if (dc->is_br)
            break;
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4))
            break;
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while (!tcg_op_buf_full() &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);
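    /* TB epilogue.  If both pc and npc are static, the TB can be
       chained directly to its successor; otherwise flush the known
       state and return to the main loop.  */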
 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);
    tb->size = last_pc + 4 - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
}
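/* One-time initialization: allocate the TCG globals through which the
   translator accesses the SPARC CPU state.  Each global is a named
   alias for a field of CPUSPARCState (or of the current register
   window, via regwptr).  */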
void gen_intermediate_code_init(CPUSPARCState *env)
{
    static int inited;
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /* one name per double-precision register pair, hence even numbers */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#else
        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
          "hstick_cmpr" },
        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
#ifndef CONFIG_USER_ONLY
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
#endif
    };
    unsigned int i;

    /* init various static tables */
    if (inited) {
        return;
    }
    inited = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
    }
    /* %g0 reads as zero, so no TCG global is allocated for it */
    TCGV_UNUSED(cpu_regs[0]);
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* the windowed registers are reached through regwptr, which tracks
       the current register window */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
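/* Called on an exception or TB invalidation to rebuild pc/npc from the
   (pc, npc) pair recorded by tcg_gen_insn_start() at translation
   time.  */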
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}