/*
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#define DYNAMIC_PC  1 /* dynamic pc value */
#define JUMP_PC     2 /* dynamic pc value which takes only two values
                         according to jump_pc[T2] */
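/* Illustration (added; not in the original source): SPARC has delayed
   branches, so the translator tracks both pc and npc.  After a conditional
   branch whose outcome is unknown at translate time, npc is set to JUMP_PC
   and the two possible successor addresses are kept in dc->jump_pc[0]
   (taken) and dc->jump_pc[1] (not taken); e.g. for "bne label" at 0x1000,
   jump_pc[0] = label and jump_pc[1] = 0x1008.  */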
/* global register indexes */
static TCGv_ptr cpu_env, cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc, cpu_gregs[8];
static TCGv cpu_y;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_tbr;
#endif
static TCGv cpu_cond;
#ifdef TARGET_SPARC64
static TCGv_i32 cpu_xcc, cpu_asi, cpu_fprs;
static TCGv cpu_gsr;
static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
static TCGv_i32 cpu_softint;
#else
static TCGv cpu_wim;
#endif
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

#include "exec/gen-icount.h"
typedef struct DisasContext {
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int is_br;
    int mem_idx;
    int fpu_enabled;
    int address_mask_32bit;
    int singlestep;
    uint32_t cc_op;  /* current CC operation */
    struct TranslationBlock *tb;
    sparc_def_t *def;
    TCGv_i32 t32[3];
    TCGv ttl[5];
    int n_t32;
    int n_ttl;
} DisasContext;

typedef struct {
    TCGCond cond;
    bool is_bool;
    bool g1, g2;
    TCGv c1, c2;
} DisasCompare;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
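/* Worked example (added): GET_FIELD counts bits from the MSB, so
   GET_FIELD(insn, 0, 1) extracts insn<31:30>, the 2-bit "op" field.
   GET_FIELD_SP counts from the LSB as in the SPARC manuals, so
   GET_FIELD_SP(insn, 20, 21) extracts insn<21:20>.  For insn = 0x81c3e008
   ("retl", i.e. jmpl %o7+8, %g0), GET_FIELD(insn, 0, 1) == 2.  */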
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
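/* Worked example (added): V9 encodes double/quad FP registers above %f31
   by setting bit 0 of the 5-bit register field, so DFPREG(1) == 32, i.e.
   field value 1 names %f32.  On 32-bit SPARC the low bit is simply
   masked off.  */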
#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (x << len) >> len;
}
#define IS_IMM (insn & (1<<13))
static inline TCGv_i32 get_temp_i32(DisasContext *dc)
{
    TCGv_i32 t;
    assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
    dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
    return t;
}

static inline TCGv get_temp_tl(DisasContext *dc)
{
    TCGv t;
    assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
    dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
    return t;
}
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
#if TCG_TARGET_REG_BITS == 32
    if (src & 1) {
        return TCGV_LOW(cpu_fpr[src / 2]);
    } else {
        return TCGV_HIGH(cpu_fpr[src / 2]);
    }
#else
    if (src & 1) {
        return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
    } else {
        TCGv_i32 ret = get_temp_i32(dc);
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
        tcg_gen_extrl_i64_i32(ret, t);
        tcg_temp_free_i64(t);

        return ret;
    }
#endif
}
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
#if TCG_TARGET_REG_BITS == 32
    if (dst & 1) {
        tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
    } else {
        tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
    }
#else
    TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
#endif
    gen_update_fprs_dirty(dst);
}
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return get_temp_i32(dc);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
#ifdef TARGET_SPARC64
static void gen_move_Q(unsigned int rd, unsigned int rs)
{
    rd = QFPREG(rd);
    rs = QFPREG(rs);

    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(rd);
}
#endif
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) 0
#endif
#else
#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
#endif
#endif

#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(dc) ((dc)->address_mask_32bit)
#else
#define AM_CHECK(dc) (1)
#endif
#endif
static inline void gen_address_mask(DisasContext *dc, TCGv addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
#endif
}
static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        TCGv t = get_temp_tl(dc);
        if (reg == 0) {
            tcg_gen_movi_tl(t, 0);
        } else {
            tcg_gen_ld_tl(t, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
        return t;
    } else {
        return cpu_gregs[reg];
    }
}

static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        if (reg < 8) {
            tcg_gen_mov_tl(cpu_gregs[reg], v);
        } else {
            tcg_gen_st_tl(v, cpu_regwptr, (reg - 8) * sizeof(target_ulong));
        }
    }
}

static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg == 0 || reg >= 8) {
        return get_temp_tl(dc);
    } else {
        return cpu_gregs[reg];
    }
}
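/* Illustration (added): %g0 reads as zero and stores to it are discarded
   (gen_dest_gpr hands back a scratch temp for it), %g1-%g7 live in fixed
   TCG globals, and the windowed registers (%o, %l, %i) are reached through
   cpu_regwptr, which points into the current register window; e.g.
   gen_load_gpr(dc, 9) emits a load of regwptr[1], i.e. %o1.  */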
static inline void gen_goto_tb(DisasContext *s, int tb_num,
                               target_ulong pc, target_ulong npc)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
        !s->singlestep) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(0);
    }
}
static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
{
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
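/* Worked example (added): for a 32-bit unsigned add r = a + b, the carry
   out is exactly (r < a): e.g. a = 0xffffffff, b = 2 wraps to r = 1, and
   1 < 0xffffffff, so carry = 1.  That is why comparing cpu_cc_dst against
   cpu_cc_src recovers the carry of the previous add without re-doing it.  */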
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

#if TARGET_LONG_BITS == 64
    tcg_temp_free_i32(cc_src1_32);
    tcg_temp_free_i32(cc_src2_32);
#endif

    return carry_32;
}
static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        if (update_cc) {
            gen_op_add_cc(dst, src1, src2);
        } else {
            tcg_gen_add_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               an ADD2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the add that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto add_done;
        }
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 add_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
        dc->cc_op = CC_OP_ADDX;
    }
}
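/* Sketch of the emulated semantics (added; plain C, not TCG):
       uint32_t addx(uint32_t a, uint32_t b, int c) { return a + b + c; }
   where c is the icc carry bit.  The switch above only chooses the
   cheapest way to materialize c, given which kind of instruction last
   set the condition codes.  */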
static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
                            TCGv src2, int update_cc)
{
    TCGv_i32 carry_32;
    TCGv carry;

    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        if (update_cc) {
            gen_op_sub_cc(dst, src1, src2);
        } else {
            tcg_gen_sub_tl(dst, src1, src2);
        }
        return;

    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        carry_32 = gen_add32_carry32();
        break;

    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        if (TARGET_LONG_BITS == 32) {
            /* We can re-use the host's hardware carry generation by using
               a SUB2 opcode.  We discard the low part of the output.
               Ideally we'd combine this operation with the sub that
               generated the carry in the first place.  */
            carry = tcg_temp_new();
            tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
            tcg_temp_free(carry);
            goto sub_done;
        }
        carry_32 = gen_sub32_carry32();
        break;

    default:
        /* We need external help to produce the carry.  */
        carry_32 = tcg_temp_new_i32();
        gen_helper_compute_C_icc(carry_32, cpu_env);
        break;
    }

#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_temp_free_i32(carry_32);
#if TARGET_LONG_BITS == 64
    tcg_temp_free(carry);
#endif

 sub_done:
    if (update_cc) {
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
        tcg_gen_mov_tl(cpu_cc_dst, dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
        dc->cc_op = CC_OP_SUBX;
    }
}
static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_const_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);
    tcg_temp_free(zero);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
    tcg_gen_shli_tl(r_temp, r_temp, 31);
    tcg_gen_shri_tl(t0, cpu_y, 1);
    tcg_gen_andi_tl(t0, t0, 0x7fffffff);
    tcg_gen_or_tl(t0, t0, r_temp);
    tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);

    // b1 = N ^ V;
    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);
    tcg_temp_free(r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
    tcg_temp_free(t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
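/* Sketch of one MULScc step (added; simplified from the V8 manual), using
   flags from the previous step:
       addend = (Y & 1) ? rs2 : 0;
       Y      = ((rs1 & 1) << 31) | (Y >> 1);
       rd     = (((N ^ V) << 31) | (rs1 >> 1)) + addend;   // sets icc
   Executing 32 consecutive MULScc steps, with WRY/RDY around them,
   implements a 32x32-bit multiply.  */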
static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);

    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
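/* Reference semantics (added sketch; plain C):
       uint64_t r = (uint64_t)(uint32_t)a * (uint32_t)b;   // UMUL
       int64_t  r = (int64_t)(int32_t)a * (int32_t)b;      // SMUL
       Y = r >> 32;  rd = r;
   On sparc64 only the low 32 bits of each 64-bit source participate,
   hence the explicit ext32u/ext32s above.  */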
// 1:
static inline void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// Z
static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
}

// Z | (N ^ V)
static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// N ^ V
static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C | Z
static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// C
static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
}

// V
static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
}

// 0:
static inline void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}

// N
static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
}

// !Z
static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(Z | (N ^ V))
static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(N ^ V)
static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !(C | Z)
static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
{
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !C
static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !N
static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !V
static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
{
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
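/* Worked example (added): after "subcc %g1, %g2" with %g1 = -3 and
   %g2 = 5, the result -8 sets N=1, V=0, Z=0, C=0.  So "bl" (N ^ V = 1)
   is taken while "blu"/"bcs" (C = 1) is not: signed and unsigned
   orderings come from different flag combinations, as encoded in the
   evaluators above.  */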
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered
*/
static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
// !0: FCC0 | FCC1
static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 2: FCC0 ^ FCC1
static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 1 or 3: FCC0
static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 2 or 3: FCC1
static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_temp_free(t0);
}

// 3: FCC0 & FCC1
static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_temp_free(t0);
}

// 0: !(FCC0 | FCC1)
static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 3: !(FCC0 ^ FCC1)
static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 2: !FCC0
static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// 0 or 1: !FCC1
static inline void gen_op_eval_fble(TCGv dst, TCGv src,
                                    unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
                                     unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}

// !3: !(FCC0 & FCC1)
static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
                                   unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
    tcg_temp_free(t0);
}
static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
                               target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}

static void gen_branch_a(DisasContext *dc, target_ulong pc1)
{
    TCGLabel *l1 = gen_new_label();
    target_ulong npc = dc->npc;

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);

    gen_goto_tb(dc, 0, npc, pc1);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, npc + 4, npc + 8);

    dc->is_br = 1;
}
static void gen_branch_n(DisasContext *dc, target_ulong pc1)
{
    target_ulong npc = dc->npc;

    if (likely(npc != DYNAMIC_PC)) {
        dc->pc = npc;
        dc->jump_pc[0] = pc1;
        dc->jump_pc[1] = npc + 4;
        dc->npc = JUMP_PC;
    } else {
        TCGv t, z;

        tcg_gen_mov_tl(cpu_pc, cpu_npc);

        tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
        t = tcg_const_tl(pc1);
        z = tcg_const_tl(0);
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
        tcg_temp_free(t);
        tcg_temp_free(z);

        dc->pc = DYNAMIC_PC;
    }
}
static inline void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
    TCGv zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

    tcg_temp_free(npc0);
    tcg_temp_free(npc1);
    tcg_temp_free(zero);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static inline void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    }
}

static inline void save_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC;
    } else if (dc->npc != DYNAMIC_PC) {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static inline void update_psr(DisasContext *dc)
{
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(cpu_env);
    }
}

static inline void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static inline void gen_mov_pc_npc(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else if (dc->npc == DYNAMIC_PC) {
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC;
    } else {
        dc->pc = dc->npc;
    }
}

static inline void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
static void free_compare(DisasCompare *cmp)
{
    if (!cmp->g1) {
        tcg_temp_free(cmp->c1);
    }
    if (!cmp->g2) {
        tcg_temp_free(cmp->c2);
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    static int logic_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_ALWAYS,
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
    };

    TCGv_i32 r_src;
    TCGv r_dst;

#ifdef TARGET_SPARC64
    if (xcc) {
        r_src = cpu_xcc;
    } else {
        r_src = cpu_psr;
    }
#else
    r_src = cpu_psr;
#endif

    switch (dc->cc_op) {
    case CC_OP_LOGIC:
        cmp->cond = logic_cond[cond];
    do_compare_dst_0:
        cmp->is_bool = false;
        cmp->g2 = false;
        cmp->c2 = tcg_const_tl(0);
#ifdef TARGET_SPARC64
        if (!xcc) {
            cmp->g1 = false;
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            break;
        }
#endif
        cmp->g1 = true;
        cmp->c1 = cpu_cc_dst;
        break;

    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */
            goto do_dynamic;

        default:
            cmp->cond = subcc_cond[cond];
            cmp->is_bool = false;
#ifdef TARGET_SPARC64
            if (!xcc) {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->g1 = cmp->g2 = false;
                cmp->c1 = tcg_temp_new();
                cmp->c2 = tcg_temp_new();
                tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
                tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
                break;
            }
#endif
            cmp->g1 = cmp->g2 = true;
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;
            break;
        }
        break;

    default:
    do_dynamic:
        gen_helper_compute_psr(cpu_env);
        dc->cc_op = CC_OP_FLAGS;
        /* FALLTHRU */

    case CC_OP_FLAGS:
        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->g1 = cmp->g2 = false;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_const_tl(0);

        switch (cond) {
        case 0x0:
            gen_op_eval_bn(r_dst);
            break;
        case 0x1:
            gen_op_eval_be(r_dst, r_src);
            break;
        case 0x2:
            gen_op_eval_ble(r_dst, r_src);
            break;
        case 0x3:
            gen_op_eval_bl(r_dst, r_src);
            break;
        case 0x4:
            gen_op_eval_bleu(r_dst, r_src);
            break;
        case 0x5:
            gen_op_eval_bcs(r_dst, r_src);
            break;
        case 0x6:
            gen_op_eval_bneg(r_dst, r_src);
            break;
        case 0x7:
            gen_op_eval_bvs(r_dst, r_src);
            break;
        case 0x8:
            gen_op_eval_ba(r_dst);
            break;
        case 0x9:
            gen_op_eval_bne(r_dst, r_src);
            break;
        case 0xa:
            gen_op_eval_bg(r_dst, r_src);
            break;
        case 0xb:
            gen_op_eval_bge(r_dst, r_src);
            break;
        case 0xc:
            gen_op_eval_bgu(r_dst, r_src);
            break;
        case 0xd:
            gen_op_eval_bcc(r_dst, r_src);
            break;
        case 0xe:
            gen_op_eval_bpos(r_dst, r_src);
            break;
        case 0xf:
            gen_op_eval_bvc(r_dst, r_src);
            break;
        }
        break;
    }
}
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->g1 = cmp->g2 = false;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_const_tl(0);

    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
                     DisasContext *dc)
{
    DisasCompare cmp;

    gen_compare(&cmp, cc, cond, dc);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
{
    DisasCompare cmp;

    gen_fcompare(&cmp, cc, cond);

    /* The interface is to return a boolean in r_dst.  */
    if (cmp.is_bool) {
        tcg_gen_mov_tl(r_dst, cmp.c1);
    } else {
        tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
    }

    free_compare(&cmp);
}

#ifdef TARGET_SPARC64
// Inverted logic
static const int gen_tcg_cond_reg[8] = {
    -1,
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    -1,
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->g1 = true;
    cmp->g2 = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_const_tl(0);
}

static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
{
    DisasCompare cmp;

    gen_compare_reg(&cmp, cond, r_src);

    /* The interface is to return a boolean in r_dst.  */
    tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);

    free_compare(&cmp);
}
#endif
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_cond(cpu_cond, cc, cond, dc);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
{
    unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

#ifdef TARGET_SPARC64
    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
#endif
    if (cond == 0x0) {
        /* unconditional not taken */
        if (a) {
            dc->pc = dc->npc + 4;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = dc->pc + 4;
        }
    } else if (cond == 0x8) {
        /* unconditional taken */
        if (a) {
            dc->pc = target;
            dc->npc = dc->pc + 4;
        } else {
            dc->pc = dc->npc;
            dc->npc = target;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
        }
    } else {
        flush_cond(dc);
        gen_fcond(cpu_cond, cc, cond);
        if (a) {
            gen_branch_a(dc, target);
        } else {
            gen_branch_n(dc, target);
        }
    }
}
#ifdef TARGET_SPARC64
static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
                          TCGv r_reg)
{
    unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
    target_ulong target = dc->pc + offset;

    if (unlikely(AM_CHECK(dc))) {
        target &= 0xffffffffULL;
    }
    flush_cond(dc);
    gen_cond_reg(cpu_cond, cond, r_reg);
    if (a) {
        gen_branch_a(dc, target);
    } else {
        gen_branch_n(dc, target);
    }
}
static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_env);
        break;
    }
}

static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_env, r_rs1, r_rs2);
        break;
    }
}

static inline void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_env);
        break;
    }
}
#else

static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_env);
}

static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_env, r_rs1, r_rs2);
}

static inline void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_env);
}
#endif
static inline void gen_op_fpexception_im(int fsr_flags)
{
    TCGv_i32 r_const;

    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    r_const = tcg_const_i32(TT_FP_EXCP);
    gen_helper_raise_exception(cpu_env, r_const);
    tcg_temp_free_i32(r_const);
}
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        TCGv_i32 r_const;

        save_state(dc);
        r_const = tcg_const_i32(TT_NFPU_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
        return 1;
    }
#endif
    return 0;
}
static inline void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif

static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                  void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                                   void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
#endif

static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT0(QFPREG(rs1));
    gen_op_load_fpr_QT1(QFPREG(rs2));

    gen(cpu_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
                               void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);

    gen(cpu_env, src1, src2);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

#ifdef TARGET_SPARC64
static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif

static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env, src);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env, src);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i32, TCGv_ptr))
{
    TCGv_i32 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_F(dc);

    gen(dst, cpu_env);

    gen_store_fpr_F(dc, rd, dst);
}

static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
                              void (*gen)(TCGv_i64, TCGv_ptr))
{
    TCGv_i64 dst;

    gen_op_load_fpr_QT1(QFPREG(rs));
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_env);

    gen_store_fpr_D(dc, rd, dst);
}

static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i32))
{
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}

static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
                                 void (*gen)(TCGv_ptr, TCGv_i64))
{
    TCGv_i64 src;

    src = gen_load_fpr_D(dc, rs);

    gen(cpu_env, src);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(QFPREG(rd));
}
/* asi moves */
#ifdef TARGET_SPARC64
static inline TCGv_i32 gen_get_asi(int insn, TCGv r_addr)
{
    int asi;
    TCGv_i32 r_asi;

    if (IS_IMM) {
        r_asi = tcg_temp_new_i32();
        tcg_gen_mov_i32(r_asi, cpu_asi);
    } else {
        asi = GET_FIELD(insn, 19, 26);
        r_asi = tcg_const_i32(asi);
    }
    return r_asi;
}

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_ldf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stf_asi(TCGv addr, int insn, int size, int rd)
{
    TCGv_i32 r_asi, r_size, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(size);
    r_rd = tcg_const_i32(rd);
    gen_helper_stf_asi(cpu_env, addr, r_asi, r_size, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_rd;

    r_asi = gen_get_asi(insn, addr);
    r_rd = tcg_const_i32(rd);
    gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd);
    tcg_temp_free_i32(r_rd);
    tcg_temp_free_i32(r_asi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = gen_get_asi(insn, addr);
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_casx_asi(DisasContext *dc, TCGv addr,
                                TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
    TCGv_i32 r_asi = gen_get_asi(insn, addr);

    gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}
#elif !defined(CONFIG_USER_ONLY)

static inline void gen_ld_asi(TCGv dst, TCGv addr, int insn, int size,
                              int sign)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 t64 = tcg_temp_new_i64();

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    r_sign = tcg_const_i32(sign);
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_st_asi(TCGv src, TCGv addr, int insn, int size)
{
    TCGv_i32 r_asi, r_size;
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t64, src);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(size);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}

static inline void gen_swap_asi(TCGv dst, TCGv src, TCGv addr, int insn)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv_i64 r_val, t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(4);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    r_val = tcg_temp_new_i64();
    tcg_gen_extu_tl_i64(r_val, src);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i64(r_val);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_gen_trunc_i64_tl(dst, t64);
    tcg_temp_free_i64(t64);
}

static inline void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size, r_sign;
    TCGv t;
    TCGv_i64 t64;

    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    r_sign = tcg_const_i32(0);
    t64 = tcg_temp_new_i64();
    gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_size, r_sign);
    tcg_temp_free_i32(r_sign);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);

    t = gen_dest_gpr(dc, rd + 1);
    tcg_gen_trunc_i64_tl(t, t64);
    gen_store_gpr(dc, rd + 1, t);

    tcg_gen_shri_i64(t64, t64, 32);
    tcg_gen_trunc_i64_tl(hi, t64);
    tcg_temp_free_i64(t64);
    gen_store_gpr(dc, rd, hi);
}

static inline void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                                int insn, int rd)
{
    TCGv_i32 r_asi, r_size;
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(8);
    gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(t64);
}
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static inline void gen_cas_asi(DisasContext *dc, TCGv addr,
                               TCGv val2, int insn, int rd)
{
    TCGv val1 = gen_load_gpr(dc, rd);
    TCGv dst = gen_dest_gpr(dc, rd);
#ifdef TARGET_SPARC64
    TCGv_i32 r_asi = gen_get_asi(insn, addr);
#else
    TCGv_i32 r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
#endif

    gen_helper_cas_asi(dst, cpu_env, addr, val1, val2, r_asi);
    tcg_temp_free_i32(r_asi);
    gen_store_gpr(dc, rd, dst);
}

static inline void gen_ldstub_asi(TCGv dst, TCGv addr, int insn)
{
    TCGv_i64 r_val;
    TCGv_i32 r_asi, r_size;

    gen_ld_asi(dst, addr, insn, 1, 0);

    r_val = tcg_const_i64(0xffULL);
    r_asi = tcg_const_i32(GET_FIELD(insn, 19, 26));
    r_size = tcg_const_i32(1);
    gen_helper_st_asi(cpu_env, addr, r_val, r_asi, r_size);
    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
    tcg_temp_free_i64(r_val);
}
#endif
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}

static TCGv get_src2(DisasContext *dc, unsigned int insn)
{
    if (IS_IMM) { /* immediate */
        target_long simm = GET_FIELDs(insn, 19, 31);
        TCGv t = get_temp_tl(dc);
        tcg_gen_movi_tl(t, simm);
        return t;
    } else { /* register */
        unsigned int rs2 = GET_FIELD(insn, 27, 31);
        return gen_load_gpr(dc, rs2);
    }
}
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
        tcg_temp_free_i64(c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    tcg_temp_free_i32(c32);
    tcg_temp_free_i32(zero);
    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(qd);
}
#ifndef CONFIG_USER_ONLY
static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_ptr cpu_env)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
        tcg_temp_free_ptr(r_tl_tmp);
    }

    tcg_temp_free_i32(r_tl);
}
#endif
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    TCGv lo1, lo2, t1, t2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  */
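    /* Worked example (added): for width 8 with left = 1, an input whose
       low three bits are 5 gives index = 5 << 3 = 40, and
       val = (0x80c0e0f0f8fcfeffULL >> 40) & 0xff = 0xe0: the edge mask
       for an access that starts 5 bytes into an 8-byte line.  */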
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xf8ec;
        }
        break;
    case 32:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    default:
        abort();
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    t1 = tcg_const_tl(tabl);
    t2 = tcg_const_tl(tabr);
    tcg_gen_shr_tl(lo1, t1, lo1);
    tcg_gen_shr_tl(lo2, t2, lo2);
    tcg_gen_andi_tl(dst, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask = -8;
    if (AM_CHECK(dc)) {
        amask &= 0xffffffffULL;
    }
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* We want to compute
        dst = (s1 == s2 ? lo1 : lo1 & lo2).
       We've already done dst = lo1, so this reduces to
        dst &= (s1 == s2 ? -1 : lo2)
       Which we perform by
        lo2 |= -(s1 == s2)
        dst &= lo2
    */
    tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
    tcg_gen_neg_tl(t1, t1);
    tcg_gen_or_tl(lo2, lo2, t1);
    tcg_gen_and_tl(dst, dst, lo2);

    tcg_temp_free(lo1);
    tcg_temp_free(lo2);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);

    tcg_temp_free(tmp);
}
static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.  */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);

    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(shift);
}
#endif
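/* Worked example (added): the desired result is
       dst = (s1 << (align * 8)) | (s2 >> (64 - align * 8))
   but when align == 0 the right-shift count would be 64, which TCG leaves
   undefined.  Since shift is a multiple of 8 in 0..56, shift ^ 63 equals
   63 - shift, so s2 >> (shift ^ 63) >> 1 keeps every count in 0..63; for
   align 0 that is (s2 >> 63) >> 1 == 0, as required.  */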
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
/* before an instruction, dc->pc must be static */
static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
{
    unsigned int opc, rs1, rs2, rd;
    TCGv cpu_src1, cpu_src2;
    TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
    TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
    target_long simm;

    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);

    switch (opc) {
    case 0:                     /* branches/sethi */
        {
            unsigned int xop = GET_FIELD(insn, 7, 9);
            int32_t target;
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x1:           /* V9 BPcc */
                {
                    int cc;

                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    cc = GET_FIELD_SP(insn, 20, 21);
                    if (cc == 0)
                        do_branch(dc, target, insn, 0);
                    else if (cc == 2)
                        do_branch(dc, target, insn, 1);
                    else
                        goto illegal_insn;
                    goto jmp_insn;
                }
            case 0x3:           /* V9 BPr */
                {
                    target = GET_FIELD_SP(insn, 0, 13) |
                        (GET_FIELD_SP(insn, 20, 21) << 14);
                    target = sign_extend(target, 16);
                    target <<= 2;
                    cpu_src1 = get_src1(dc, insn);
                    do_branch_reg(dc, target, insn, cpu_src1);
                    goto jmp_insn;
                }
            case 0x5:           /* V9 FBPcc */
                {
                    int cc = GET_FIELD_SP(insn, 20, 21);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    do_fbranch(dc, target, insn, cc);
                    goto jmp_insn;
                }
#else
            case 0x7:           /* CBN+x */
                {
                    goto ncp_insn;
                }
#endif
            case 0x2:           /* BN+x */
                {
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_branch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x6:           /* FBN+x */
                {
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_fbranch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x4:           /* SETHI */
                /* Special-case %g0 because that's the canonical nop.  */
                if (rd) {
                    uint32_t value = GET_FIELD(insn, 10, 31);
                    TCGv t = gen_dest_gpr(dc, rd);
                    tcg_gen_movi_tl(t, value << 10);
                    gen_store_gpr(dc, rd, t);
                }
                break;
            case 0x0:           /* UNIMPL */
            default:
                goto illegal_insn;
            }
            break;
        }
        break;
    case 1:                     /* CALL */
        {
            target_long target = GET_FIELDs(insn, 2, 31) << 2;
            TCGv o7 = gen_dest_gpr(dc, 15);

            tcg_gen_movi_tl(o7, dc->pc);
            gen_store_gpr(dc, 15, o7);
            target += dc->pc;
            gen_mov_pc_npc(dc);
#ifdef TARGET_SPARC64
            if (unlikely(AM_CHECK(dc))) {
                target &= 0xffffffffULL;
            }
#endif
            dc->npc = target;
        }
        goto jmp_insn;
    case 2:                     /* FPU & Logical Operations */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            TCGv cpu_dst = get_temp_tl(dc);
            TCGv cpu_tmp0;

            if (xop == 0x3a) {  /* generate trap */
                int cond = GET_FIELD(insn, 3, 6);
                TCGv_i32 trap;
                TCGLabel *l1 = NULL;
                int mask;

                if (cond == 0) {
                    /* Trap never.  */
                    break;
                }

                save_state(dc);

                if (cond != 8) {
                    /* Conditional trap.  */
                    DisasCompare cmp;
#ifdef TARGET_SPARC64
                    /* V9 icc/xcc */
                    int cc = GET_FIELD_SP(insn, 11, 12);
                    if (cc == 0) {
                        gen_compare(&cmp, 0, cond, dc);
                    } else if (cc == 2) {
                        gen_compare(&cmp, 1, cond, dc);
                    } else {
                        goto illegal_insn;
                    }
#else
                    gen_compare(&cmp, 0, cond, dc);
#endif
                    l1 = gen_new_label();
                    tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
                                      cmp.c1, cmp.c2, l1);
                    free_compare(&cmp);
                }

                mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                        ? UA2005_HTRAP_MASK : V8_TRAP_MASK);

                /* Don't use the normal temporaries, as they may well have
                   gone out of scope with the branch above.  While we're
                   doing that we might as well pre-truncate to 32-bit.  */
                trap = tcg_temp_new_i32();

                rs1 = GET_FIELD_SP(insn, 14, 18);
                if (IS_IMM) {
                    rs2 = GET_FIELD_SP(insn, 0, 6);
                    if (rs1 == 0) {
                        tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
                        /* Signal that the trap value is fully constant.  */
                        mask = 0;
                    } else {
                        TCGv t1 = gen_load_gpr(dc, rs1);
                        tcg_gen_trunc_tl_i32(trap, t1);
                        tcg_gen_addi_i32(trap, trap, rs2);
                    }
                } else {
                    TCGv t1, t2;
                    rs2 = GET_FIELD_SP(insn, 0, 4);
                    t1 = gen_load_gpr(dc, rs1);
                    t2 = gen_load_gpr(dc, rs2);
                    tcg_gen_add_tl(t1, t1, t2);
                    tcg_gen_trunc_tl_i32(trap, t1);
                }
                if (mask != 0) {
                    tcg_gen_andi_i32(trap, trap, mask);
                    tcg_gen_addi_i32(trap, trap, TT_TRAP);
                }

                gen_helper_raise_exception(cpu_env, trap);
                tcg_temp_free_i32(trap);

                if (cond == 8) {
                    /* An unconditional trap ends the TB.  */
                    dc->is_br = 1;
                    goto jmp_insn;
                } else {
                    /* A conditional trap falls through to the next insn.  */
                    gen_set_label(l1);
                    break;
                }
            } else if (xop == 0x28) {
                rs1 = GET_FIELD(insn, 13, 17);
                switch(rs1) {
                case 0: /* rdy */
#ifndef TARGET_SPARC64
                case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                       manual, rdy on the microSPARC
                                       II */
                case 0x0f:          /* stbar in the SPARCv8 manual,
                                       rdy on the microSPARC II */
                case 0x10 ... 0x1f: /* implementation-dependent in the
                                       SPARCv8 manual, rdy on the
                                       microSPARC II */
                    /* Read Asr17 */
                    if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
                        TCGv t = gen_dest_gpr(dc, rd);
                        /* Read Asr17 for a Leon3 monoprocessor */
                        tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
                        gen_store_gpr(dc, rd, t);
                        break;
                    }
#endif
                    gen_store_gpr(dc, rd, cpu_y);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 rdccr */
                    update_psr(dc);
                    gen_helper_rdccr(cpu_dst, cpu_env);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x3: /* V9 rdasi */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x4: /* V9 rdtick */
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x5: /* V9 rdpc */
                    {
                        TCGv t = gen_dest_gpr(dc, rd);
                        if (unlikely(AM_CHECK(dc))) {
                            tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
                        } else {
                            tcg_gen_movi_tl(t, dc->pc);
                        }
                        gen_store_gpr(dc, rd, t);
                    }
                    break;
                case 0x6: /* V9 rdfprs */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0xf: /* V9 membar */
                    break; /* no effect */
                case 0x13: /* Graphics Status */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_store_gpr(dc, rd, cpu_gsr);
                    break;
                case 0x16: /* Softint */
                    tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
                    gen_store_gpr(dc, rd, cpu_dst);
                    break;
                case 0x17: /* Tick compare */
                    gen_store_gpr(dc, rd, cpu_tick_cmpr);
                    break;
                case 0x18: /* System tick */
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, stick));
                        gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
                                                  r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                        gen_store_gpr(dc, rd, cpu_dst);
                    }
                    break;
                case 0x19: /* System tick compare */
                    gen_store_gpr(dc, rd, cpu_stick_cmpr);
                    break;
                case 0x10: /* Performance Control */
                case 0x11: /* Performance Instrumentation Counter */
                case 0x12: /* Dispatch Control */
                case 0x14: /* Softint set, WO */
                case 0x15: /* Softint clear, WO */
#endif
                default:
                    goto illegal_insn;
                }
#if !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                update_psr(dc);
                gen_helper_rdpsr(cpu_dst, cpu_env);
#else
                CHECK_IU_FEATURE(dc, HYPV);
                if (!hypervisor(dc))
                    goto priv_insn;
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // hpstate
                    // gen_op_rdhpstate();
                    break;
                case 1: // htstate
                    // gen_op_rdhtstate();
                    break;
                case 3: // hintp
                    tcg_gen_mov_tl(cpu_dst, cpu_hintp);
                    break;
                case 5: // htba
                    tcg_gen_mov_tl(cpu_dst, cpu_htba);
                    break;
                case 6: // hver
                    tcg_gen_mov_tl(cpu_dst, cpu_hver);
                    break;
                case 31: // hstick_cmpr
                    tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
                    break;
                default:
                    goto illegal_insn;
                }
#endif
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
                if (!supervisor(dc)) {
                    goto priv_insn;
                }
                cpu_tmp0 = get_temp_tl(dc);
#ifdef TARGET_SPARC64
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // tpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 1: // tnpc
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tnpc));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 2: // tstate
                    {
                        TCGv_ptr r_tsptr;

                        r_tsptr = tcg_temp_new_ptr();
                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
                                      offsetof(trap_state, tstate));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 3: // tt
                    {
                        TCGv_ptr r_tsptr = tcg_temp_new_ptr();

                        gen_load_trap_state_at_tl(r_tsptr, cpu_env);
                        tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
                                         offsetof(trap_state, tt));
                        tcg_temp_free_ptr(r_tsptr);
                    }
                    break;
                case 4: // tick
                    {
                        TCGv_ptr r_tickptr;
                        TCGv_i32 r_const;

                        r_tickptr = tcg_temp_new_ptr();
                        r_const = tcg_const_i32(dc->mem_idx);
                        tcg_gen_ld_ptr(r_tickptr, cpu_env,
                                       offsetof(CPUSPARCState, tick));
                        gen_helper_tick_get_count(cpu_tmp0, cpu_env,
                                                  r_tickptr, r_const);
                        tcg_temp_free_ptr(r_tickptr);
                        tcg_temp_free_i32(r_const);
                    }
                    break;
                case 5: // tba
                    tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
                    break;
                case 6: // pstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, pstate));
                    break;
                case 7: // tl
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, tl));
                    break;
                case 8: // pil
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, psrpil));
                    break;
                case 9: // cwp
                    gen_helper_rdcwp(cpu_tmp0, cpu_env);
                    break;
                case 10: // cansave
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cansave));
                    break;
                case 11: // canrestore
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, canrestore));
                    break;
                case 12: // cleanwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, cleanwin));
                    break;
                case 13: // otherwin
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, otherwin));
                    break;
                case 14: // wstate
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, wstate));
                    break;
                case 16: // UA2005 gl
                    CHECK_IU_FEATURE(dc, GL);
                    tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
                                     offsetof(CPUSPARCState, gl));
                    break;
                case 26: // UA2005 strand status
                    CHECK_IU_FEATURE(dc, HYPV);
                    if (!hypervisor(dc))
                        goto priv_insn;
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
                    break;
                case 31: // ver
                    tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
                    break;
                case 15: // fq
                default:
                    goto illegal_insn;
                }
#else
                tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
                gen_store_gpr(dc, rd, cpu_tmp0);
                break;
            } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
                save_state(dc);
                gen_helper_flushw(cpu_env);
#else
                if (!supervisor(dc))
                    goto priv_insn;
                gen_store_gpr(dc, rd, cpu_tbr);
#endif
                break;
#endif
            } else if (xop == 0x34) {   /* FPU Operations */
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);
                save_state(dc);

                switch (xop) {
                case 0x1: /* fmovs */
                    cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                    gen_store_fpr_F(dc, rd, cpu_src1_32);
                    break;
                case 0x5: /* fnegs */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
                    break;
                case 0x9: /* fabss */
                    gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
                    break;
                case 0x29: /* fsqrts */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
                    break;
                case 0x2a: /* fsqrtd */
                    CHECK_FPU_FEATURE(dc, FSQRT);
                    gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
                    break;
                case 0x2b: /* fsqrtq */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
2988 gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
2990 case 0x41: /* fadds */
2991 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
2993 case 0x42: /* faddd */
2994 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
2996 case 0x43: /* faddq */
2997 CHECK_FPU_FEATURE(dc, FLOAT128);
2998 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
3000 case 0x45: /* fsubs */
3001 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
3003 case 0x46: /* fsubd */
3004 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
3006 case 0x47: /* fsubq */
3007 CHECK_FPU_FEATURE(dc, FLOAT128);
3008 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
3010 case 0x49: /* fmuls */
3011 CHECK_FPU_FEATURE(dc, FMUL);
3012 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
3014 case 0x4a: /* fmuld */
3015 CHECK_FPU_FEATURE(dc, FMUL);
3016 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
3018 case 0x4b: /* fmulq */
3019 CHECK_FPU_FEATURE(dc, FLOAT128);
3020 CHECK_FPU_FEATURE(dc, FMUL);
3021 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
3023 case 0x4d: /* fdivs */
3024 gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
3026 case 0x4e: /* fdivd */
3027 gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
3029 case 0x4f: /* fdivq */
3030 CHECK_FPU_FEATURE(dc, FLOAT128);
3031 gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
3033 case 0x69: /* fsmuld */
3034 CHECK_FPU_FEATURE(dc, FSMULD);
3035 gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
3037 case 0x6e: /* fdmulq */
3038 CHECK_FPU_FEATURE(dc, FLOAT128);
3039 gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
3041 case 0xc4: /* fitos */
3042 gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
3044 case 0xc6: /* fdtos */
3045 gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
3047 case 0xc7: /* fqtos */
3048 CHECK_FPU_FEATURE(dc, FLOAT128);
3049 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
3051 case 0xc8: /* fitod */
3052 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
3054 case 0xc9: /* fstod */
3055 gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
3057 case 0xcb: /* fqtod */
3058 CHECK_FPU_FEATURE(dc, FLOAT128);
3059 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
3061 case 0xcc: /* fitoq */
3062 CHECK_FPU_FEATURE(dc, FLOAT128);
3063 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
3065 case 0xcd: /* fstoq */
3066 CHECK_FPU_FEATURE(dc, FLOAT128);
3067 gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
3069 case 0xce: /* fdtoq */
3070 CHECK_FPU_FEATURE(dc, FLOAT128);
3071 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
3073 case 0xd1: /* fstoi */
3074 gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
3076 case 0xd2: /* fdtoi */
3077 gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
3079 case 0xd3: /* fqtoi */
3080 CHECK_FPU_FEATURE(dc, FLOAT128);
3081 gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
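/* Naming convention inferred from the wrappers above: the size letters
   give the result width followed by the source widths -- F single,
   D double, Q quad -- so gen_fop_DFF produces a double from two singles
   (fsmuld) and gen_fop_FQ narrows a quad to a single (fqtos, fqtoi).
   The gen_ne_fop_* variants appear to be for operations that cannot
   raise IEEE exceptions (moves, negate, absolute value). */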
3083 #ifdef TARGET_SPARC64
3084 case 0x2: /* V9 fmovd */
3085 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
3086 gen_store_fpr_D(dc, rd, cpu_src1_64);
3088 case 0x3: /* V9 fmovq */
3089 CHECK_FPU_FEATURE(dc, FLOAT128);
3090 gen_move_Q(rd, rs2);
3092 case 0x6: /* V9 fnegd */
3093 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
3095 case 0x7: /* V9 fnegq */
3096 CHECK_FPU_FEATURE(dc, FLOAT128);
3097 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
3099 case 0xa: /* V9 fabsd */
3100 gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
3102 case 0xb: /* V9 fabsq */
3103 CHECK_FPU_FEATURE(dc, FLOAT128);
3104 gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
3106 case 0x81: /* V9 fstox */
3107 gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
3109 case 0x82: /* V9 fdtox */
3110 gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
3112 case 0x83: /* V9 fqtox */
3113 CHECK_FPU_FEATURE(dc, FLOAT128);
3114 gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
3116 case 0x84: /* V9 fxtos */
3117 gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
3119 case 0x88: /* V9 fxtod */
3120 gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
3122 case 0x8c: /* V9 fxtoq */
3123 CHECK_FPU_FEATURE(dc, FLOAT128);
3124 gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
3130 } else if (xop == 0x35) { /* FPU Operations */
3131 #ifdef TARGET_SPARC64
3134 if (gen_trap_ifnofpu(dc)) {
3137 gen_op_clear_ieee_excp_and_FTT();
3138 rs1 = GET_FIELD(insn, 13, 17);
3139 rs2 = GET_FIELD(insn, 27, 31);
3140 xop = GET_FIELD(insn, 18, 26);
3143 #ifdef TARGET_SPARC64
3147 cond = GET_FIELD_SP(insn, 10, 12); \
3148 cpu_src1 = get_src1(dc, insn); \
3149 gen_compare_reg(&cmp, cond, cpu_src1); \
3150 gen_fmov##sz(dc, &cmp, rd, rs2); \
3151 free_compare(&cmp); \
3154 if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
3157 } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
3160 } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
3161 CHECK_FPU_FEATURE(dc, FLOAT128);
3168 #ifdef TARGET_SPARC64
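/* Opcode pattern for the fmovcc cases below, inferred from the case
   values: bits 8:6 of xop select the condition source -- 0x000, 0x040,
   0x080, 0x0c0 for %fcc0..%fcc3, 0x100 for %icc, 0x180 for %xcc --
   while the low bits pick the size: 1 single, 2 double, 3 quad. */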
3169 #define FMOVCC(fcc, sz) \
3172 cond = GET_FIELD_SP(insn, 14, 17); \
3173 gen_fcompare(&cmp, fcc, cond); \
3174 gen_fmov##sz(dc, &cmp, rd, rs2); \
3175 free_compare(&cmp); \
3178 case 0x001: /* V9 fmovscc %fcc0 */
3181 case 0x002: /* V9 fmovdcc %fcc0 */
3184 case 0x003: /* V9 fmovqcc %fcc0 */
3185 CHECK_FPU_FEATURE(dc, FLOAT128);
3188 case 0x041: /* V9 fmovscc %fcc1 */
3191 case 0x042: /* V9 fmovdcc %fcc1 */
3194 case 0x043: /* V9 fmovqcc %fcc1 */
3195 CHECK_FPU_FEATURE(dc, FLOAT128);
3198 case 0x081: /* V9 fmovscc %fcc2 */
3201 case 0x082: /* V9 fmovdcc %fcc2 */
3204 case 0x083: /* V9 fmovqcc %fcc2 */
3205 CHECK_FPU_FEATURE(dc, FLOAT128);
3208 case 0x0c1: /* V9 fmovscc %fcc3 */
3211 case 0x0c2: /* V9 fmovdcc %fcc3 */
3214 case 0x0c3: /* V9 fmovqcc %fcc3 */
3215 CHECK_FPU_FEATURE(dc, FLOAT128);
3219 #define FMOVCC(xcc, sz) \
3222 cond = GET_FIELD_SP(insn, 14, 17); \
3223 gen_compare(&cmp, xcc, cond, dc); \
3224 gen_fmov##sz(dc, &cmp, rd, rs2); \
3225 free_compare(&cmp); \
3228 case 0x101: /* V9 fmovscc %icc */
3231 case 0x102: /* V9 fmovdcc %icc */
3234 case 0x103: /* V9 fmovqcc %icc */
3235 CHECK_FPU_FEATURE(dc, FLOAT128);
3238 case 0x181: /* V9 fmovscc %xcc */
3241 case 0x182: /* V9 fmovdcc %xcc */
3244 case 0x183: /* V9 fmovqcc %xcc */
3245 CHECK_FPU_FEATURE(dc, FLOAT128);
3250 case 0x51: /* fcmps, V9 %fcc */
3251 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3252 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3253 gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
3255 case 0x52: /* fcmpd, V9 %fcc */
3256 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3257 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3258 gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
3260 case 0x53: /* fcmpq, V9 %fcc */
3261 CHECK_FPU_FEATURE(dc, FLOAT128);
3262 gen_op_load_fpr_QT0(QFPREG(rs1));
3263 gen_op_load_fpr_QT1(QFPREG(rs2));
3264 gen_op_fcmpq(rd & 3);
3266 case 0x55: /* fcmpes, V9 %fcc */
3267 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
3268 cpu_src2_32 = gen_load_fpr_F(dc, rs2);
3269 gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
3271 case 0x56: /* fcmped, V9 %fcc */
3272 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
3273 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
3274 gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
3276 case 0x57: /* fcmpeq, V9 %fcc */
3277 CHECK_FPU_FEATURE(dc, FLOAT128);
3278 gen_op_load_fpr_QT0(QFPREG(rs1));
3279 gen_op_load_fpr_QT1(QFPREG(rs2));
3280 gen_op_fcmpeq(rd & 3);
3285 } else if (xop == 0x2) {
3286 TCGv dst = gen_dest_gpr(dc, rd);
3287 rs1 = GET_FIELD(insn, 13, 17);
3289 /* clr/mov shortcut: or %g0, x, y -> mov x, y */
3290 if (IS_IMM) { /* immediate */
3291 simm = GET_FIELDs(insn, 19, 31);
3292 tcg_gen_movi_tl(dst, simm);
3293 gen_store_gpr(dc, rd, dst);
3294 } else { /* register */
3295 rs2 = GET_FIELD(insn, 27, 31);
3297 tcg_gen_movi_tl(dst, 0);
3298 gen_store_gpr(dc, rd, dst);
3300 cpu_src2 = gen_load_gpr(dc, rs2);
3301 gen_store_gpr(dc, rd, cpu_src2);
3305 cpu_src1 = get_src1(dc, insn);
3306 if (IS_IMM) { /* immediate */
3307 simm = GET_FIELDs(insn, 19, 31);
3308 tcg_gen_ori_tl(dst, cpu_src1, simm);
3309 gen_store_gpr(dc, rd, dst);
3310 } else { /* register */
3311 rs2 = GET_FIELD(insn, 27, 31);
3313 /* mov shortcut: or x, %g0, y -> mov x, y */
3314 gen_store_gpr(dc, rd, cpu_src1);
3316 cpu_src2 = gen_load_gpr(dc, rs2);
3317 tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
3318 gen_store_gpr(dc, rd, dst);
3322 #ifdef TARGET_SPARC64
3323 } else if (xop == 0x25) { /* sll, V9 sllx */
3324 cpu_src1 = get_src1(dc, insn);
3325 if (IS_IMM) { /* immediate */
3326 simm = GET_FIELDs(insn, 20, 31);
3327 if (insn & (1 << 12)) {
3328 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
3330 tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
3332 } else { /* register */
3333 rs2 = GET_FIELD(insn, 27, 31);
3334 cpu_src2 = gen_load_gpr(dc, rs2);
3335 cpu_tmp0 = get_temp_tl(dc);
3336 if (insn & (1 << 12)) {
3337 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3339 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3341 tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
3343 gen_store_gpr(dc, rd, cpu_dst);
3344 } else if (xop == 0x26) { /* srl, V9 srlx */
3345 cpu_src1 = get_src1(dc, insn);
3346 if (IS_IMM) { /* immediate */
3347 simm = GET_FIELDs(insn, 20, 31);
3348 if (insn & (1 << 12)) {
3349 tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
3351 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3352 tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
3354 } else { /* register */
3355 rs2 = GET_FIELD(insn, 27, 31);
3356 cpu_src2 = gen_load_gpr(dc, rs2);
3357 cpu_tmp0 = get_temp_tl(dc);
3358 if (insn & (1 << 12)) {
3359 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3360 tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
3362 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3363 tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
3364 tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
3367 gen_store_gpr(dc, rd, cpu_dst);
3368 } else if (xop == 0x27) { /* sra, V9 srax */
3369 cpu_src1 = get_src1(dc, insn);
3370 if (IS_IMM) { /* immediate */
3371 simm = GET_FIELDs(insn, 20, 31);
3372 if (insn & (1 << 12)) {
3373 tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
3375 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3376 tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
3378 } else { /* register */
3379 rs2 = GET_FIELD(insn, 27, 31);
3380 cpu_src2 = gen_load_gpr(dc, rs2);
3381 cpu_tmp0 = get_temp_tl(dc);
3382 if (insn & (1 << 12)) {
3383 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
3384 tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
3386 tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
3387 tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
3388 tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
3391 gen_store_gpr(dc, rd, cpu_dst);
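/* Shift-count sketch: insn bit 12 selects the 64-bit forms (sllx, srlx,
   srax) with a 6-bit count; the 32-bit forms mask the count with 0x1f
   and first reduce the source to 32 bits -- zero-extended for srl,
   sign-extended for sra -- e.g. srl of 0xffffffff80000000 by 4 yields
   0x08000000. */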
3393 } else if (xop < 0x36) {
3395 cpu_src1 = get_src1(dc, insn);
3396 cpu_src2 = get_src2(dc, insn);
3397 switch (xop & ~0x10) {
3400 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3401 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3402 dc->cc_op = CC_OP_ADD;
3404 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
3408 tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
3410 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3411 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3412 dc->cc_op = CC_OP_LOGIC;
3416 tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
3418 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3419 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3420 dc->cc_op = CC_OP_LOGIC;
3424 tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
3426 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3427 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3428 dc->cc_op = CC_OP_LOGIC;
3433 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3434 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3435 dc->cc_op = CC_OP_SUB;
3437 tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
3440 case 0x5: /* andn */
3441 tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
3443 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3444 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3445 dc->cc_op = CC_OP_LOGIC;
3449 tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
3451 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3452 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3453 dc->cc_op = CC_OP_LOGIC;
3456 case 0x7: /* xorn */
3457 tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
3459 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3460 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3461 dc->cc_op = CC_OP_LOGIC;
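/* Lazy condition codes, in sketch form: the cc-setting cases above do
   not compute the PSR icc bits here; they park the result in cpu_cc_dst
   and record the operation kind in cpu_cc_op / dc->cc_op, so that a
   later consumer (a branch or rdpsr) can derive N/Z/V/C on demand. */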
3464 case 0x8: /* addx, V9 addc */
3465 gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3468 #ifdef TARGET_SPARC64
3469 case 0x9: /* V9 mulx */
3470 tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
3473 case 0xa: /* umul */
3474 CHECK_IU_FEATURE(dc, MUL);
3475 gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
3477 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3478 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3479 dc->cc_op = CC_OP_LOGIC;
3482 case 0xb: /* smul */
3483 CHECK_IU_FEATURE(dc, MUL);
3484 gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
3486 tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
3487 tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
3488 dc->cc_op = CC_OP_LOGIC;
3491 case 0xc: /* subx, V9 subc */
3492 gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
3495 #ifdef TARGET_SPARC64
3496 case 0xd: /* V9 udivx */
3497 gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3500 case 0xe: /* udiv */
3501 CHECK_IU_FEATURE(dc, DIV);
3503 gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
3505 dc->cc_op = CC_OP_DIV;
3507 gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
3511 case 0xf: /* sdiv */
3512 CHECK_IU_FEATURE(dc, DIV);
3514 gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
3516 dc->cc_op = CC_OP_DIV;
3518 gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
3525 gen_store_gpr(dc, rd, cpu_dst);
3527 cpu_src1 = get_src1(dc, insn);
3528 cpu_src2 = get_src2(dc, insn);
3530 case 0x20: /* taddcc */
3531 gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
3532 gen_store_gpr(dc, rd, cpu_dst);
3533 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
3534 dc->cc_op = CC_OP_TADD;
3536 case 0x21: /* tsubcc */
3537 gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
3538 gen_store_gpr(dc, rd, cpu_dst);
3539 tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
3540 dc->cc_op = CC_OP_TSUB;
3542 case 0x22: /* taddcctv */
3543 gen_helper_taddcctv(cpu_dst, cpu_env,
3544 cpu_src1, cpu_src2);
3545 gen_store_gpr(dc, rd, cpu_dst);
3546 dc->cc_op = CC_OP_TADDTV;
3548 case 0x23: /* tsubcctv */
3549 gen_helper_tsubcctv(cpu_dst, cpu_env,
3550 cpu_src1, cpu_src2);
3551 gen_store_gpr(dc, rd, cpu_dst);
3552 dc->cc_op = CC_OP_TSUBTV;
3554 case 0x24: /* mulscc */
3556 gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
3557 gen_store_gpr(dc, rd, cpu_dst);
3558 tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
3559 dc->cc_op = CC_OP_ADD;
3561 #ifndef TARGET_SPARC64
3562 case 0x25: /* sll */
3563 if (IS_IMM) { /* immediate */
3564 simm = GET_FIELDs(insn, 20, 31);
3565 tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
3566 } else { /* register */
3567 cpu_tmp0 = get_temp_tl(dc);
3568 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3569 tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
3571 gen_store_gpr(dc, rd, cpu_dst);
3573 case 0x26: /* srl */
3574 if (IS_IMM) { /* immediate */
3575 simm = GET_FIELDs(insn, 20, 31);
3576 tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
3577 } else { /* register */
3578 cpu_tmp0 = get_temp_tl(dc);
3579 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3580 tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
3582 gen_store_gpr(dc, rd, cpu_dst);
3584 case 0x27: /* sra */
3585 if (IS_IMM) { /* immediate */
3586 simm = GET_FIELDs(insn, 20, 31);
3587 tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
3588 } else { /* register */
3589 cpu_tmp0 = get_temp_tl(dc);
3590 tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
3591 tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
3593 gen_store_gpr(dc, rd, cpu_dst);
3598 cpu_tmp0 = get_temp_tl(dc);
3601 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3602 tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
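/* Why the xor: the SPARC wr instructions are defined as writing
   rs1 ^ reg_or_imm to the destination state register, which is why every
   wr-style case here starts with tcg_gen_xor_tl; e.g. "wr %g0, 7, %y"
   stores 0 ^ 7 == 7 into %y. */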
3604 #ifndef TARGET_SPARC64
3605 case 0x01 ... 0x0f: /* undefined in the
3609 case 0x10 ... 0x1f: /* implementation-dependent
3613 if ((rd == 0x13) && (dc->def->features &
3614 CPU_FEATURE_POWERDOWN)) {
3615 /* LEON3 power-down */
3617 gen_helper_power_down(cpu_env);
3621 case 0x2: /* V9 wrccr */
3622 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3623 gen_helper_wrccr(cpu_env, cpu_tmp0);
3624 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3625 dc->cc_op = CC_OP_FLAGS;
3627 case 0x3: /* V9 wrasi */
3628 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3629 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
3630 tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
3632 case 0x6: /* V9 wrfprs */
3633 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3634 tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
3640 case 0xf: /* V9 sir, nop if user */
3641 #if !defined(CONFIG_USER_ONLY)
3642 if (supervisor(dc)) {
3647 case 0x13: /* Graphics Status */
3648 if (gen_trap_ifnofpu(dc)) {
3651 tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
3653 case 0x14: /* Softint set */
3654 if (!supervisor(dc))
3656 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3657 gen_helper_set_softint(cpu_env, cpu_tmp0);
3659 case 0x15: /* Softint clear */
3660 if (!supervisor(dc))
3662 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3663 gen_helper_clear_softint(cpu_env, cpu_tmp0);
3665 case 0x16: /* Softint write */
3666 if (!supervisor(dc))
3668 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3669 gen_helper_write_softint(cpu_env, cpu_tmp0);
3671 case 0x17: /* Tick compare */
3672 #if !defined(CONFIG_USER_ONLY)
3673 if (!supervisor(dc))
3679 tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
3681 r_tickptr = tcg_temp_new_ptr();
3682 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3683 offsetof(CPUSPARCState, tick));
3684 gen_helper_tick_set_limit(r_tickptr,
3686 tcg_temp_free_ptr(r_tickptr);
3689 case 0x18: /* System tick */
3690 #if !defined(CONFIG_USER_ONLY)
3691 if (!supervisor(dc))
3697 tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
3699 r_tickptr = tcg_temp_new_ptr();
3700 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3701 offsetof(CPUSPARCState, stick));
3702 gen_helper_tick_set_count(r_tickptr,
3704 tcg_temp_free_ptr(r_tickptr);
3707 case 0x19: /* System tick compare */
3708 #if !defined(CONFIG_USER_ONLY)
3709 if (!supervisor(dc))
3715 tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
3717 r_tickptr = tcg_temp_new_ptr();
3718 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3719 offsetof(CPUSPARCState, stick));
3720 gen_helper_tick_set_limit(r_tickptr,
3722 tcg_temp_free_ptr(r_tickptr);
3726 case 0x10: /* Performance Control */
3727 case 0x11: /* Performance Instrumentation
3729 case 0x12: /* Dispatch Control */
3736 #if !defined(CONFIG_USER_ONLY)
3737 case 0x31: /* wrpsr, V9 saved, restored */
3739 if (!supervisor(dc))
3741 #ifdef TARGET_SPARC64
3744 gen_helper_saved(cpu_env);
3747 gen_helper_restored(cpu_env);
3749 case 2: /* UA2005 allclean */
3750 case 3: /* UA2005 otherw */
3751 case 4: /* UA2005 normalw */
3752 case 5: /* UA2005 invalw */
3758 cpu_tmp0 = get_temp_tl(dc);
3759 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3760 gen_helper_wrpsr(cpu_env, cpu_tmp0);
3761 tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
3762 dc->cc_op = CC_OP_FLAGS;
3770 case 0x32: /* wrwim, V9 wrpr */
3772 if (!supervisor(dc))
3774 cpu_tmp0 = get_temp_tl(dc);
3775 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3776 #ifdef TARGET_SPARC64
3782 r_tsptr = tcg_temp_new_ptr();
3783 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3784 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3785 offsetof(trap_state, tpc));
3786 tcg_temp_free_ptr(r_tsptr);
3793 r_tsptr = tcg_temp_new_ptr();
3794 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3795 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3796 offsetof(trap_state, tnpc));
3797 tcg_temp_free_ptr(r_tsptr);
3804 r_tsptr = tcg_temp_new_ptr();
3805 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3806 tcg_gen_st_tl(cpu_tmp0, r_tsptr,
3807 offsetof(trap_state,
3809 tcg_temp_free_ptr(r_tsptr);
3816 r_tsptr = tcg_temp_new_ptr();
3817 gen_load_trap_state_at_tl(r_tsptr, cpu_env);
3818 tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
3819 offsetof(trap_state, tt));
3820 tcg_temp_free_ptr(r_tsptr);
3827 r_tickptr = tcg_temp_new_ptr();
3828 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3829 offsetof(CPUSPARCState, tick));
3830 gen_helper_tick_set_count(r_tickptr,
3832 tcg_temp_free_ptr(r_tickptr);
3836 tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
3840 gen_helper_wrpstate(cpu_env, cpu_tmp0);
3841 dc->npc = DYNAMIC_PC;
3845 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3846 offsetof(CPUSPARCState, tl));
3847 dc->npc = DYNAMIC_PC;
3850 gen_helper_wrpil(cpu_env, cpu_tmp0);
3853 gen_helper_wrcwp(cpu_env, cpu_tmp0);
3856 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3857 offsetof(CPUSPARCState,
3860 case 11: // canrestore
3861 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3862 offsetof(CPUSPARCState,
3865 case 12: // cleanwin
3866 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3867 offsetof(CPUSPARCState,
3870 case 13: // otherwin
3871 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3872 offsetof(CPUSPARCState,
3876 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3877 offsetof(CPUSPARCState,
3880 case 16: // UA2005 gl
3881 CHECK_IU_FEATURE(dc, GL);
3882 tcg_gen_st32_tl(cpu_tmp0, cpu_env,
3883 offsetof(CPUSPARCState, gl));
3885 case 26: // UA2005 strand status
3886 CHECK_IU_FEATURE(dc, HYPV);
3887 if (!hypervisor(dc))
3889 tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
3895 tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
3896 if (dc->def->nwindows != 32) {
3897 tcg_gen_andi_tl(cpu_wim, cpu_wim,
3898 (1 << dc->def->nwindows) - 1);
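/* e.g. with 8 register windows only WIM bits 7:0 are writable:
   (1 << 8) - 1 == 0xff. */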
3903 case 0x33: /* wrtbr, UA2005 wrhpr */
3905 #ifndef TARGET_SPARC64
3906 if (!supervisor(dc))
3908 tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
3910 CHECK_IU_FEATURE(dc, HYPV);
3911 if (!hypervisor(dc))
3913 cpu_tmp0 = get_temp_tl(dc);
3914 tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
3917 // XXX gen_op_wrhpstate();
3924 // XXX gen_op_wrhtstate();
3927 tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
3930 tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
3932 case 31: // hstick_cmpr
3936 tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
3937 r_tickptr = tcg_temp_new_ptr();
3938 tcg_gen_ld_ptr(r_tickptr, cpu_env,
3939 offsetof(CPUSPARCState, hstick));
3940 gen_helper_tick_set_limit(r_tickptr,
3942 tcg_temp_free_ptr(r_tickptr);
3945 case 6: // hver readonly
3953 #ifdef TARGET_SPARC64
3954 case 0x2c: /* V9 movcc */
3956 int cc = GET_FIELD_SP(insn, 11, 12);
3957 int cond = GET_FIELD_SP(insn, 14, 17);
3961 if (insn & (1 << 18)) {
3963 gen_compare(&cmp, 0, cond, dc);
3964 } else if (cc == 2) {
3965 gen_compare(&cmp, 1, cond, dc);
3970 gen_fcompare(&cmp, cc, cond);
3973 /* The get_src2 above loaded the normal 13-bit
3974 immediate field, not the 11-bit field we have
3975 in movcc. But it did handle the reg case. */
3977 simm = GET_FIELD_SPs(insn, 0, 10);
3978 tcg_gen_movi_tl(cpu_src2, simm);
3981 dst = gen_load_gpr(dc, rd);
3982 tcg_gen_movcond_tl(cmp.cond, dst,
3986 gen_store_gpr(dc, rd, dst);
3989 case 0x2d: /* V9 sdivx */
3990 gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
3991 gen_store_gpr(dc, rd, cpu_dst);
3993 case 0x2e: /* V9 popc */
3994 gen_helper_popc(cpu_dst, cpu_src2);
3995 gen_store_gpr(dc, rd, cpu_dst);
3997 case 0x2f: /* V9 movr */
3999 int cond = GET_FIELD_SP(insn, 10, 12);
4003 gen_compare_reg(&cmp, cond, cpu_src1);
4005 /* The get_src2 above loaded the normal 13-bit
4006 immediate field, not the 10-bit field we have
4007 in movr. But it did handle the reg case. */
4009 simm = GET_FIELD_SPs(insn, 0, 9);
4010 tcg_gen_movi_tl(cpu_src2, simm);
4013 dst = gen_load_gpr(dc, rd);
4014 tcg_gen_movcond_tl(cmp.cond, dst,
4018 gen_store_gpr(dc, rd, dst);
4026 } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4027 #ifdef TARGET_SPARC64
4028 int opf = GET_FIELD_SP(insn, 5, 13);
4029 rs1 = GET_FIELD(insn, 13, 17);
4030 rs2 = GET_FIELD(insn, 27, 31);
4031 if (gen_trap_ifnofpu(dc)) {
4036 case 0x000: /* VIS I edge8cc */
4037 CHECK_FPU_FEATURE(dc, VIS1);
4038 cpu_src1 = gen_load_gpr(dc, rs1);
4039 cpu_src2 = gen_load_gpr(dc, rs2);
4040 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
4041 gen_store_gpr(dc, rd, cpu_dst);
4043 case 0x001: /* VIS II edge8n */
4044 CHECK_FPU_FEATURE(dc, VIS2);
4045 cpu_src1 = gen_load_gpr(dc, rs1);
4046 cpu_src2 = gen_load_gpr(dc, rs2);
4047 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
4048 gen_store_gpr(dc, rd, cpu_dst);
4050 case 0x002: /* VIS I edge8lcc */
4051 CHECK_FPU_FEATURE(dc, VIS1);
4052 cpu_src1 = gen_load_gpr(dc, rs1);
4053 cpu_src2 = gen_load_gpr(dc, rs2);
4054 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
4055 gen_store_gpr(dc, rd, cpu_dst);
4057 case 0x003: /* VIS II edge8ln */
4058 CHECK_FPU_FEATURE(dc, VIS2);
4059 cpu_src1 = gen_load_gpr(dc, rs1);
4060 cpu_src2 = gen_load_gpr(dc, rs2);
4061 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
4062 gen_store_gpr(dc, rd, cpu_dst);
4064 case 0x004: /* VIS I edge16cc */
4065 CHECK_FPU_FEATURE(dc, VIS1);
4066 cpu_src1 = gen_load_gpr(dc, rs1);
4067 cpu_src2 = gen_load_gpr(dc, rs2);
4068 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
4069 gen_store_gpr(dc, rd, cpu_dst);
4071 case 0x005: /* VIS II edge16n */
4072 CHECK_FPU_FEATURE(dc, VIS2);
4073 cpu_src1 = gen_load_gpr(dc, rs1);
4074 cpu_src2 = gen_load_gpr(dc, rs2);
4075 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
4076 gen_store_gpr(dc, rd, cpu_dst);
4078 case 0x006: /* VIS I edge16lcc */
4079 CHECK_FPU_FEATURE(dc, VIS1);
4080 cpu_src1 = gen_load_gpr(dc, rs1);
4081 cpu_src2 = gen_load_gpr(dc, rs2);
4082 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
4083 gen_store_gpr(dc, rd, cpu_dst);
4085 case 0x007: /* VIS II edge16ln */
4086 CHECK_FPU_FEATURE(dc, VIS2);
4087 cpu_src1 = gen_load_gpr(dc, rs1);
4088 cpu_src2 = gen_load_gpr(dc, rs2);
4089 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
4090 gen_store_gpr(dc, rd, cpu_dst);
4092 case 0x008: /* VIS I edge32cc */
4093 CHECK_FPU_FEATURE(dc, VIS1);
4094 cpu_src1 = gen_load_gpr(dc, rs1);
4095 cpu_src2 = gen_load_gpr(dc, rs2);
4096 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
4097 gen_store_gpr(dc, rd, cpu_dst);
4099 case 0x009: /* VIS II edge32n */
4100 CHECK_FPU_FEATURE(dc, VIS2);
4101 cpu_src1 = gen_load_gpr(dc, rs1);
4102 cpu_src2 = gen_load_gpr(dc, rs2);
4103 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
4104 gen_store_gpr(dc, rd, cpu_dst);
4106 case 0x00a: /* VIS I edge32lcc */
4107 CHECK_FPU_FEATURE(dc, VIS1);
4108 cpu_src1 = gen_load_gpr(dc, rs1);
4109 cpu_src2 = gen_load_gpr(dc, rs2);
4110 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
4111 gen_store_gpr(dc, rd, cpu_dst);
4113 case 0x00b: /* VIS II edge32ln */
4114 CHECK_FPU_FEATURE(dc, VIS2);
4115 cpu_src1 = gen_load_gpr(dc, rs1);
4116 cpu_src2 = gen_load_gpr(dc, rs2);
4117 gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
4118 gen_store_gpr(dc, rd, cpu_dst);
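/* Parameter pattern for the twelve edge ops above: gen_edge(dc, dst, s1,
   s2, N, cc, l), where N is the element width (8/16/32), cc = 1 when the
   instruction also sets the integer condition codes, and l = 1 for the
   little-endian "l" variants -- e.g. edge16ln is (16, 0, 1). */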
4120 case 0x010: /* VIS I array8 */
4121 CHECK_FPU_FEATURE(dc, VIS1);
4122 cpu_src1 = gen_load_gpr(dc, rs1);
4123 cpu_src2 = gen_load_gpr(dc, rs2);
4124 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4125 gen_store_gpr(dc, rd, cpu_dst);
4127 case 0x012: /* VIS I array16 */
4128 CHECK_FPU_FEATURE(dc, VIS1);
4129 cpu_src1 = gen_load_gpr(dc, rs1);
4130 cpu_src2 = gen_load_gpr(dc, rs2);
4131 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4132 tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
4133 gen_store_gpr(dc, rd, cpu_dst);
4135 case 0x014: /* VIS I array32 */
4136 CHECK_FPU_FEATURE(dc, VIS1);
4137 cpu_src1 = gen_load_gpr(dc, rs1);
4138 cpu_src2 = gen_load_gpr(dc, rs2);
4139 gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
4140 tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
4141 gen_store_gpr(dc, rd, cpu_dst);
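/* array16 and array32 reuse the array8 helper and simply scale the
   resulting element address by the element size: << 1 for 2-byte and
   << 2 for 4-byte elements. */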
4143 case 0x018: /* VIS I alignaddr */
4144 CHECK_FPU_FEATURE(dc, VIS1);
4145 cpu_src1 = gen_load_gpr(dc, rs1);
4146 cpu_src2 = gen_load_gpr(dc, rs2);
4147 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
4148 gen_store_gpr(dc, rd, cpu_dst);
4150 case 0x01a: /* VIS I alignaddrl */
4151 CHECK_FPU_FEATURE(dc, VIS1);
4152 cpu_src1 = gen_load_gpr(dc, rs1);
4153 cpu_src2 = gen_load_gpr(dc, rs2);
4154 gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
4155 gen_store_gpr(dc, rd, cpu_dst);
4157 case 0x019: /* VIS II bmask */
4158 CHECK_FPU_FEATURE(dc, VIS2);
4159 cpu_src1 = gen_load_gpr(dc, rs1);
4160 cpu_src2 = gen_load_gpr(dc, rs2);
4161 tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
4162 tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
4163 gen_store_gpr(dc, rd, cpu_dst);
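/* bmask both returns the sum in rd and latches it into bits 63:32 of
   %gsr (the deposit above); the bshuffle case further below then reads
   it back from %gsr as its byte-selector mask. */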
4165 case 0x020: /* VIS I fcmple16 */
4166 CHECK_FPU_FEATURE(dc, VIS1);
4167 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4168 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4169 gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
4170 gen_store_gpr(dc, rd, cpu_dst);
4172 case 0x022: /* VIS I fcmpne16 */
4173 CHECK_FPU_FEATURE(dc, VIS1);
4174 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4175 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4176 gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
4177 gen_store_gpr(dc, rd, cpu_dst);
4179 case 0x024: /* VIS I fcmple32 */
4180 CHECK_FPU_FEATURE(dc, VIS1);
4181 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4182 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4183 gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
4184 gen_store_gpr(dc, rd, cpu_dst);
4186 case 0x026: /* VIS I fcmpne32 */
4187 CHECK_FPU_FEATURE(dc, VIS1);
4188 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4189 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4190 gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
4191 gen_store_gpr(dc, rd, cpu_dst);
4193 case 0x028: /* VIS I fcmpgt16 */
4194 CHECK_FPU_FEATURE(dc, VIS1);
4195 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4196 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4197 gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
4198 gen_store_gpr(dc, rd, cpu_dst);
4200 case 0x02a: /* VIS I fcmpeq16 */
4201 CHECK_FPU_FEATURE(dc, VIS1);
4202 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4203 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4204 gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
4205 gen_store_gpr(dc, rd, cpu_dst);
4207 case 0x02c: /* VIS I fcmpgt32 */
4208 CHECK_FPU_FEATURE(dc, VIS1);
4209 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4210 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4211 gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
4212 gen_store_gpr(dc, rd, cpu_dst);
4214 case 0x02e: /* VIS I fcmpeq32 */
4215 CHECK_FPU_FEATURE(dc, VIS1);
4216 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4217 cpu_src2_64 = gen_load_fpr_D(dc, rs2);
4218 gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
4219 gen_store_gpr(dc, rd, cpu_dst);
4221 case 0x031: /* VIS I fmul8x16 */
4222 CHECK_FPU_FEATURE(dc, VIS1);
4223 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
4225 case 0x033: /* VIS I fmul8x16au */
4226 CHECK_FPU_FEATURE(dc, VIS1);
4227 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
4229 case 0x035: /* VIS I fmul8x16al */
4230 CHECK_FPU_FEATURE(dc, VIS1);
4231 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
4233 case 0x036: /* VIS I fmul8sux16 */
4234 CHECK_FPU_FEATURE(dc, VIS1);
4235 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
4237 case 0x037: /* VIS I fmul8ulx16 */
4238 CHECK_FPU_FEATURE(dc, VIS1);
4239 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
4241 case 0x038: /* VIS I fmuld8sux16 */
4242 CHECK_FPU_FEATURE(dc, VIS1);
4243 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
4245 case 0x039: /* VIS I fmuld8ulx16 */
4246 CHECK_FPU_FEATURE(dc, VIS1);
4247 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
4249 case 0x03a: /* VIS I fpack32 */
4250 CHECK_FPU_FEATURE(dc, VIS1);
4251 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
4253 case 0x03b: /* VIS I fpack16 */
4254 CHECK_FPU_FEATURE(dc, VIS1);
4255 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4256 cpu_dst_32 = gen_dest_fpr_F(dc);
4257 gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
4258 gen_store_fpr_F(dc, rd, cpu_dst_32);
4260 case 0x03d: /* VIS I fpackfix */
4261 CHECK_FPU_FEATURE(dc, VIS1);
4262 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4263 cpu_dst_32 = gen_dest_fpr_F(dc);
4264 gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
4265 gen_store_fpr_F(dc, rd, cpu_dst_32);
4267 case 0x03e: /* VIS I pdist */
4268 CHECK_FPU_FEATURE(dc, VIS1);
4269 gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
4271 case 0x048: /* VIS I faligndata */
4272 CHECK_FPU_FEATURE(dc, VIS1);
4273 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
4275 case 0x04b: /* VIS I fpmerge */
4276 CHECK_FPU_FEATURE(dc, VIS1);
4277 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
4279 case 0x04c: /* VIS II bshuffle */
4280 CHECK_FPU_FEATURE(dc, VIS2);
4281 gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
4283 case 0x04d: /* VIS I fexpand */
4284 CHECK_FPU_FEATURE(dc, VIS1);
4285 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
4287 case 0x050: /* VIS I fpadd16 */
4288 CHECK_FPU_FEATURE(dc, VIS1);
4289 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
4291 case 0x051: /* VIS I fpadd16s */
4292 CHECK_FPU_FEATURE(dc, VIS1);
4293 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
4295 case 0x052: /* VIS I fpadd32 */
4296 CHECK_FPU_FEATURE(dc, VIS1);
4297 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
4299 case 0x053: /* VIS I fpadd32s */
4300 CHECK_FPU_FEATURE(dc, VIS1);
4301 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
4303 case 0x054: /* VIS I fpsub16 */
4304 CHECK_FPU_FEATURE(dc, VIS1);
4305 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
4307 case 0x055: /* VIS I fpsub16s */
4308 CHECK_FPU_FEATURE(dc, VIS1);
4309 gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
4311 case 0x056: /* VIS I fpsub32 */
4312 CHECK_FPU_FEATURE(dc, VIS1);
4313 gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
4315 case 0x057: /* VIS I fpsub32s */
4316 CHECK_FPU_FEATURE(dc, VIS1);
4317 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
4319 case 0x060: /* VIS I fzero */
4320 CHECK_FPU_FEATURE(dc, VIS1);
4321 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4322 tcg_gen_movi_i64(cpu_dst_64, 0);
4323 gen_store_fpr_D(dc, rd, cpu_dst_64);
4325 case 0x061: /* VIS I fzeros */
4326 CHECK_FPU_FEATURE(dc, VIS1);
4327 cpu_dst_32 = gen_dest_fpr_F(dc);
4328 tcg_gen_movi_i32(cpu_dst_32, 0);
4329 gen_store_fpr_F(dc, rd, cpu_dst_32);
4331 case 0x062: /* VIS I fnor */
4332 CHECK_FPU_FEATURE(dc, VIS1);
4333 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
4335 case 0x063: /* VIS I fnors */
4336 CHECK_FPU_FEATURE(dc, VIS1);
4337 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
4339 case 0x064: /* VIS I fandnot2 */
4340 CHECK_FPU_FEATURE(dc, VIS1);
4341 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
4343 case 0x065: /* VIS I fandnot2s */
4344 CHECK_FPU_FEATURE(dc, VIS1);
4345 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
4347 case 0x066: /* VIS I fnot2 */
4348 CHECK_FPU_FEATURE(dc, VIS1);
4349 gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
4351 case 0x067: /* VIS I fnot2s */
4352 CHECK_FPU_FEATURE(dc, VIS1);
4353 gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
4355 case 0x068: /* VIS I fandnot1 */
4356 CHECK_FPU_FEATURE(dc, VIS1);
4357 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
4359 case 0x069: /* VIS I fandnot1s */
4360 CHECK_FPU_FEATURE(dc, VIS1);
4361 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
4363 case 0x06a: /* VIS I fnot1 */
4364 CHECK_FPU_FEATURE(dc, VIS1);
4365 gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
4367 case 0x06b: /* VIS I fnot1s */
4368 CHECK_FPU_FEATURE(dc, VIS1);
4369 gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
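/* Operand-order trick: tcg_gen_andc computes a & ~b, so fandnot2 passes
   (rs1, rs2) to get rs1 & ~rs2 while fandnot1 swaps them to get
   rs2 & ~rs1; fornot1/fornot2 below play the same game with orc. */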
4371 case 0x06c: /* VIS I fxor */
4372 CHECK_FPU_FEATURE(dc, VIS1);
4373 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
4375 case 0x06d: /* VIS I fxors */
4376 CHECK_FPU_FEATURE(dc, VIS1);
4377 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
4379 case 0x06e: /* VIS I fnand */
4380 CHECK_FPU_FEATURE(dc, VIS1);
4381 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
4383 case 0x06f: /* VIS I fnands */
4384 CHECK_FPU_FEATURE(dc, VIS1);
4385 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
4387 case 0x070: /* VIS I fand */
4388 CHECK_FPU_FEATURE(dc, VIS1);
4389 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
4391 case 0x071: /* VIS I fands */
4392 CHECK_FPU_FEATURE(dc, VIS1);
4393 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
4395 case 0x072: /* VIS I fxnor */
4396 CHECK_FPU_FEATURE(dc, VIS1);
4397 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
4399 case 0x073: /* VIS I fxnors */
4400 CHECK_FPU_FEATURE(dc, VIS1);
4401 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
4403 case 0x074: /* VIS I fsrc1 */
4404 CHECK_FPU_FEATURE(dc, VIS1);
4405 cpu_src1_64 = gen_load_fpr_D(dc, rs1);
4406 gen_store_fpr_D(dc, rd, cpu_src1_64);
4408 case 0x075: /* VIS I fsrc1s */
4409 CHECK_FPU_FEATURE(dc, VIS1);
4410 cpu_src1_32 = gen_load_fpr_F(dc, rs1);
4411 gen_store_fpr_F(dc, rd, cpu_src1_32);
4413 case 0x076: /* VIS I fornot2 */
4414 CHECK_FPU_FEATURE(dc, VIS1);
4415 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
4417 case 0x077: /* VIS I fornot2s */
4418 CHECK_FPU_FEATURE(dc, VIS1);
4419 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
4421 case 0x078: /* VIS I fsrc2 */
4422 CHECK_FPU_FEATURE(dc, VIS1);
4423 cpu_src1_64 = gen_load_fpr_D(dc, rs2);
4424 gen_store_fpr_D(dc, rd, cpu_src1_64);
4426 case 0x079: /* VIS I fsrc2s */
4427 CHECK_FPU_FEATURE(dc, VIS1);
4428 cpu_src1_32 = gen_load_fpr_F(dc, rs2);
4429 gen_store_fpr_F(dc, rd, cpu_src1_32);
4431 case 0x07a: /* VIS I fornot1 */
4432 CHECK_FPU_FEATURE(dc, VIS1);
4433 gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
4435 case 0x07b: /* VIS I fornot1s */
4436 CHECK_FPU_FEATURE(dc, VIS1);
4437 gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
4439 case 0x07c: /* VIS I for */
4440 CHECK_FPU_FEATURE(dc, VIS1);
4441 gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
4443 case 0x07d: /* VIS I fors */
4444 CHECK_FPU_FEATURE(dc, VIS1);
4445 gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
4447 case 0x07e: /* VIS I fone */
4448 CHECK_FPU_FEATURE(dc, VIS1);
4449 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4450 tcg_gen_movi_i64(cpu_dst_64, -1);
4451 gen_store_fpr_D(dc, rd, cpu_dst_64);
4453 case 0x07f: /* VIS I fones */
4454 CHECK_FPU_FEATURE(dc, VIS1);
4455 cpu_dst_32 = gen_dest_fpr_F(dc);
4456 tcg_gen_movi_i32(cpu_dst_32, -1);
4457 gen_store_fpr_F(dc, rd, cpu_dst_32);
4459 case 0x080: /* VIS I shutdown */
4460 case 0x081: /* VIS II siam */
4469 } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
4470 #ifdef TARGET_SPARC64
4475 #ifdef TARGET_SPARC64
4476 } else if (xop == 0x39) { /* V9 return */
4480 cpu_src1 = get_src1(dc, insn);
4481 cpu_tmp0 = get_temp_tl(dc);
4482 if (IS_IMM) { /* immediate */
4483 simm = GET_FIELDs(insn, 19, 31);
4484 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4485 } else { /* register */
4486 rs2 = GET_FIELD(insn, 27, 31);
4488 cpu_src2 = gen_load_gpr(dc, rs2);
4489 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4491 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4494 gen_helper_restore(cpu_env);
4496 r_const = tcg_const_i32(3);
4497 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4498 tcg_temp_free_i32(r_const);
4499 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4500 dc->npc = DYNAMIC_PC;
4504 cpu_src1 = get_src1(dc, insn);
4505 cpu_tmp0 = get_temp_tl(dc);
4506 if (IS_IMM) { /* immediate */
4507 simm = GET_FIELDs(insn, 19, 31);
4508 tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
4509 } else { /* register */
4510 rs2 = GET_FIELD(insn, 27, 31);
4512 cpu_src2 = gen_load_gpr(dc, rs2);
4513 tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
4515 tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
4519 case 0x38: /* jmpl */
4524 t = gen_dest_gpr(dc, rd);
4525 tcg_gen_movi_tl(t, dc->pc);
4526 gen_store_gpr(dc, rd, t);
4528 r_const = tcg_const_i32(3);
4529 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4530 tcg_temp_free_i32(r_const);
4531 gen_address_mask(dc, cpu_tmp0);
4532 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4533 dc->npc = DYNAMIC_PC;
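/* jmpl linkage sketch: rd receives the address of the jmpl itself
   (dc->pc above), the mask value 3 handed to check_align enforces 4-byte
   alignment of the target, and npc goes dynamic because the destination
   is a run-time register value. */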
4536 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4537 case 0x39: /* rett, V9 return */
4541 if (!supervisor(dc))
4544 r_const = tcg_const_i32(3);
4545 gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
4546 tcg_temp_free_i32(r_const);
4547 tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
4548 dc->npc = DYNAMIC_PC;
4549 gen_helper_rett(cpu_env);
4553 case 0x3b: /* flush */
4554 if (!((dc)->def->features & CPU_FEATURE_FLUSH))
4558 case 0x3c: /* save */
4560 gen_helper_save(cpu_env);
4561 gen_store_gpr(dc, rd, cpu_tmp0);
4563 case 0x3d: /* restore */
4565 gen_helper_restore(cpu_env);
4566 gen_store_gpr(dc, rd, cpu_tmp0);
4568 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4569 case 0x3e: /* V9 done/retry */
4573 if (!supervisor(dc))
4575 dc->npc = DYNAMIC_PC;
4576 dc->pc = DYNAMIC_PC;
4577 gen_helper_done(cpu_env);
4580 if (!supervisor(dc))
4582 dc->npc = DYNAMIC_PC;
4583 dc->pc = DYNAMIC_PC;
4584 gen_helper_retry(cpu_env);
4599 case 3: /* load/store instructions */
4601 unsigned int xop = GET_FIELD(insn, 7, 12);
4602 /* ??? gen_address_mask prevents us from using a source
4603 register directly. Always generate a temporary. */
4604 TCGv cpu_addr = get_temp_tl(dc);
4606 tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
4607 if (xop == 0x3c || xop == 0x3e) {
4608 /* V9 casa/casxa: no offset */
4609 } else if (IS_IMM) { /* immediate */
4610 simm = GET_FIELDs(insn, 19, 31);
4612 tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
4614 } else { /* register */
4615 rs2 = GET_FIELD(insn, 27, 31);
4617 tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
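/* Effective-address sketch: EA = rs1 + sign_ext(simm13) when insn bit 13
   is set, otherwise EA = rs1 + rs2; casa/casxa skip the offset and use
   the rs1 value alone. */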
4620 if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
4621 (xop > 0x17 && xop <= 0x1d) ||
4622 (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
4623 TCGv cpu_val = gen_dest_gpr(dc, rd);
4626 case 0x0: /* ld, V9 lduw, load unsigned word */
4627 gen_address_mask(dc, cpu_addr);
4628 tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
4630 case 0x1: /* ldub, load unsigned byte */
4631 gen_address_mask(dc, cpu_addr);
4632 tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
4634 case 0x2: /* lduh, load unsigned halfword */
4635 gen_address_mask(dc, cpu_addr);
4636 tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
4638 case 0x3: /* ldd, load double word */
4646 r_const = tcg_const_i32(7);
4647 /* XXX remove alignment check */
4648 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4649 tcg_temp_free_i32(r_const);
4650 gen_address_mask(dc, cpu_addr);
4651 t64 = tcg_temp_new_i64();
4652 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4653 tcg_gen_trunc_i64_tl(cpu_val, t64);
4654 tcg_gen_ext32u_tl(cpu_val, cpu_val);
4655 gen_store_gpr(dc, rd + 1, cpu_val);
4656 tcg_gen_shri_i64(t64, t64, 32);
4657 tcg_gen_trunc_i64_tl(cpu_val, t64);
4658 tcg_temp_free_i64(t64);
4659 tcg_gen_ext32u_tl(cpu_val, cpu_val);
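/* ldd splits one aligned 64-bit load: the low word was just stored to
   the odd register rd + 1, and the high word is left in cpu_val for the
   common gen_store_gpr to the even register rd at the end of this
   block. */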
4662 case 0x9: /* ldsb, load signed byte */
4663 gen_address_mask(dc, cpu_addr);
4664 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4666 case 0xa: /* ldsh, load signed halfword */
4667 gen_address_mask(dc, cpu_addr);
4668 tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
4670 case 0xd: /* ldstub -- XXX: should be atomic */
4674 gen_address_mask(dc, cpu_addr);
4675 tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
4676 r_const = tcg_const_tl(0xff);
4677 tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
4678 tcg_temp_free(r_const);
4682 /* swap, swap register with memory. XXX: should also be atomic */
4684 TCGv t0 = get_temp_tl(dc);
4685 CHECK_IU_FEATURE(dc, SWAP);
4686 cpu_src1 = gen_load_gpr(dc, rd);
4687 gen_address_mask(dc, cpu_addr);
4688 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4689 tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
4690 tcg_gen_mov_tl(cpu_val, t0);
4693 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4694 case 0x10: /* lda, V9 lduwa, load word alternate */
4695 #ifndef TARGET_SPARC64
4698 if (!supervisor(dc))
4702 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
4704 case 0x11: /* lduba, load unsigned byte alternate */
4705 #ifndef TARGET_SPARC64
4708 if (!supervisor(dc))
4712 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
4714 case 0x12: /* lduha, load unsigned halfword alternate */
4715 #ifndef TARGET_SPARC64
4718 if (!supervisor(dc))
4722 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
4724 case 0x13: /* ldda, load double word alternate */
4725 #ifndef TARGET_SPARC64
4728 if (!supervisor(dc))
4734 gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
4736 case 0x19: /* ldsba, load signed byte alternate */
4737 #ifndef TARGET_SPARC64
4740 if (!supervisor(dc))
4744 gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
4746 case 0x1a: /* ldsha, load signed halfword alternate */
4747 #ifndef TARGET_SPARC64
4750 if (!supervisor(dc))
4754 gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
4756 case 0x1d: /* ldstuba -- XXX: should be atomic */
4757 #ifndef TARGET_SPARC64
4760 if (!supervisor(dc))
4764 gen_ldstub_asi(cpu_val, cpu_addr, insn);
4766 case 0x1f: /* swapa, swap reg with alt. memory. XXX: should also be atomic */
4768 CHECK_IU_FEATURE(dc, SWAP);
4769 #ifndef TARGET_SPARC64
4772 if (!supervisor(dc))
4776 cpu_src1 = gen_load_gpr(dc, rd);
4777 gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
4780 #ifndef TARGET_SPARC64
4781 case 0x30: /* ldc */
4782 case 0x31: /* ldcsr */
4783 case 0x33: /* lddc */
4787 #ifdef TARGET_SPARC64
4788 case 0x08: /* V9 ldsw */
4789 gen_address_mask(dc, cpu_addr);
4790 tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
4792 case 0x0b: /* V9 ldx */
4793 gen_address_mask(dc, cpu_addr);
4794 tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
4796 case 0x18: /* V9 ldswa */
4798 gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
4800 case 0x1b: /* V9 ldxa */
4802 gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
4804 case 0x2d: /* V9 prefetch, no effect */
4806 case 0x30: /* V9 ldfa */
4807 if (gen_trap_ifnofpu(dc)) {
4811 gen_ldf_asi(cpu_addr, insn, 4, rd);
4812 gen_update_fprs_dirty(rd);
4814 case 0x33: /* V9 lddfa */
4815 if (gen_trap_ifnofpu(dc)) {
4819 gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
4820 gen_update_fprs_dirty(DFPREG(rd));
4822 case 0x3d: /* V9 prefetcha, no effect */
4824 case 0x32: /* V9 ldqfa */
4825 CHECK_FPU_FEATURE(dc, FLOAT128);
4826 if (gen_trap_ifnofpu(dc)) {
4830 gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
4831 gen_update_fprs_dirty(QFPREG(rd));
4837 gen_store_gpr(dc, rd, cpu_val);
4838 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4841 } else if (xop >= 0x20 && xop < 0x24) {
4844 if (gen_trap_ifnofpu(dc)) {
4849 case 0x20: /* ldf, load fpreg */
4850 gen_address_mask(dc, cpu_addr);
4851 t0 = get_temp_tl(dc);
4852 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4853 cpu_dst_32 = gen_dest_fpr_F(dc);
4854 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4855 gen_store_fpr_F(dc, rd, cpu_dst_32);
4857 case 0x21: /* ldfsr, V9 ldxfsr */
4858 #ifdef TARGET_SPARC64
4859 gen_address_mask(dc, cpu_addr);
4861 TCGv_i64 t64 = tcg_temp_new_i64();
4862 tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
4863 gen_helper_ldxfsr(cpu_env, t64);
4864 tcg_temp_free_i64(t64);
4868 cpu_dst_32 = get_temp_i32(dc);
4869 t0 = get_temp_tl(dc);
4870 tcg_gen_qemu_ld32u(t0, cpu_addr, dc->mem_idx);
4871 tcg_gen_trunc_tl_i32(cpu_dst_32, t0);
4872 gen_helper_ldfsr(cpu_env, cpu_dst_32);
4874 case 0x22: /* ldqf, load quad fpreg */
4878 CHECK_FPU_FEATURE(dc, FLOAT128);
4879 r_const = tcg_const_i32(dc->mem_idx);
4880 gen_address_mask(dc, cpu_addr);
4881 gen_helper_ldqf(cpu_env, cpu_addr, r_const);
4882 tcg_temp_free_i32(r_const);
4883 gen_op_store_QT0_fpr(QFPREG(rd));
4884 gen_update_fprs_dirty(QFPREG(rd));
4887 case 0x23: /* lddf, load double fpreg */
4888 gen_address_mask(dc, cpu_addr);
4889 cpu_dst_64 = gen_dest_fpr_D(dc, rd);
4890 tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
4891 gen_store_fpr_D(dc, rd, cpu_dst_64);
4896 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
4897 xop == 0xe || xop == 0x1e) {
4898 TCGv cpu_val = gen_load_gpr(dc, rd);
4901 case 0x4: /* st, store word */
4902 gen_address_mask(dc, cpu_addr);
4903 tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
4905 case 0x5: /* stb, store byte */
4906 gen_address_mask(dc, cpu_addr);
4907 tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
4909 case 0x6: /* sth, store halfword */
4910 gen_address_mask(dc, cpu_addr);
4911 tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
4913 case 0x7: /* std, store double word */
4922 gen_address_mask(dc, cpu_addr);
4923 r_const = tcg_const_i32(7);
4924 /* XXX remove alignment check */
4925 gen_helper_check_align(cpu_env, cpu_addr, r_const);
4926 tcg_temp_free_i32(r_const);
4927 lo = gen_load_gpr(dc, rd + 1);
4929 t64 = tcg_temp_new_i64();
4930 tcg_gen_concat_tl_i64(t64, lo, cpu_val);
4931 tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
4932 tcg_temp_free_i64(t64);
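/* std mirrors ldd: rd supplies the high word, rd + 1 the low word, and
   tcg_gen_concat_tl_i64 fuses them into a single aligned 8-byte
   store. */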
4935 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4936 case 0x14: /* sta, V9 stwa, store word alternate */
4937 #ifndef TARGET_SPARC64
4940 if (!supervisor(dc))
4944 gen_st_asi(cpu_val, cpu_addr, insn, 4);
4945 dc->npc = DYNAMIC_PC;
4947 case 0x15: /* stba, store byte alternate */
4948 #ifndef TARGET_SPARC64
4951 if (!supervisor(dc))
4955 gen_st_asi(cpu_val, cpu_addr, insn, 1);
4956 dc->npc = DYNAMIC_PC;
4958 case 0x16: /* stha, store halfword alternate */
4959 #ifndef TARGET_SPARC64
4962 if (!supervisor(dc))
4966 gen_st_asi(cpu_val, cpu_addr, insn, 2);
4967 dc->npc = DYNAMIC_PC;
4969 case 0x17: /* stda, store double word alternate */
4970 #ifndef TARGET_SPARC64
4973 if (!supervisor(dc))
4980 gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
4984 #ifdef TARGET_SPARC64
4985 case 0x0e: /* V9 stx */
4986 gen_address_mask(dc, cpu_addr);
4987 tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
4989 case 0x1e: /* V9 stxa */
4991 gen_st_asi(cpu_val, cpu_addr, insn, 8);
4992 dc->npc = DYNAMIC_PC;
4998 } else if (xop > 0x23 && xop < 0x28) {
4999 if (gen_trap_ifnofpu(dc)) {
5004 case 0x24: /* stf, store fpreg */
5006 TCGv t = get_temp_tl(dc);
5007 gen_address_mask(dc, cpu_addr);
5008 cpu_src1_32 = gen_load_fpr_F(dc, rd);
5009 tcg_gen_ext_i32_tl(t, cpu_src1_32);
5010 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5013 case 0x25: /* stfsr, V9 stxfsr */
5015 TCGv t = get_temp_tl(dc);
5017 tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
5018 #ifdef TARGET_SPARC64
5019 gen_address_mask(dc, cpu_addr);
5021 tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
5025 tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
5029 #ifdef TARGET_SPARC64
5030 /* V9 stqf, store quad fpreg */
5034 CHECK_FPU_FEATURE(dc, FLOAT128);
5035 gen_op_load_fpr_QT0(QFPREG(rd));
5036 r_const = tcg_const_i32(dc->mem_idx);
5037 gen_address_mask(dc, cpu_addr);
5038 gen_helper_stqf(cpu_env, cpu_addr, r_const);
5039 tcg_temp_free_i32(r_const);
5042 #else /* !TARGET_SPARC64 */
5043 /* stdfq, store floating point queue */
5044 #if defined(CONFIG_USER_ONLY)
5047 if (!supervisor(dc))
5049 if (gen_trap_ifnofpu(dc)) {
5055 case 0x27: /* stdf, store double fpreg */
5056 gen_address_mask(dc, cpu_addr);
5057 cpu_src1_64 = gen_load_fpr_D(dc, rd);
5058 tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
5063 } else if (xop > 0x33 && xop < 0x3f) {
5066 #ifdef TARGET_SPARC64
5067 case 0x34: /* V9 stfa */
5068 if (gen_trap_ifnofpu(dc)) {
5071 gen_stf_asi(cpu_addr, insn, 4, rd);
5073 case 0x36: /* V9 stqfa */
5077 CHECK_FPU_FEATURE(dc, FLOAT128);
5078 if (gen_trap_ifnofpu(dc)) {
5081 r_const = tcg_const_i32(7);
5082 gen_helper_check_align(cpu_env, cpu_addr, r_const);
5083 tcg_temp_free_i32(r_const);
5084 gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
5087 case 0x37: /* V9 stdfa */
5088 if (gen_trap_ifnofpu(dc)) {
5091 gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
5093 case 0x3e: /* V9 casxa */
5094 rs2 = GET_FIELD(insn, 27, 31);
5095 cpu_src2 = gen_load_gpr(dc, rs2);
5096 gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
5099 case 0x34: /* stc */
5100 case 0x35: /* stcsr */
5101 case 0x36: /* stdcq */
5102 case 0x37: /* stdc */
5105 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5106 case 0x3c: /* V9 or LEON3 casa */
5107 #ifndef TARGET_SPARC64
5108 CHECK_IU_FEATURE(dc, CASA);
5112 /* LEON3 allows CASA from user space with ASI 0xa */
5113 if ((GET_FIELD(insn, 19, 26) != 0xa) && !supervisor(dc)) {
5117 rs2 = GET_FIELD(insn, 27, 31);
5118 cpu_src2 = gen_load_gpr(dc, rs2);
5119 gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
5131 /* default case for non-jump instructions */
5132 if (dc->npc == DYNAMIC_PC) {
5133 dc->pc = DYNAMIC_PC;
5135 } else if (dc->npc == JUMP_PC) {
5136 /* we can do a static jump */
5137 gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
5141 dc->npc = dc->npc + 4;
5150 r_const = tcg_const_i32(TT_ILL_INSN);
5151 gen_helper_raise_exception(cpu_env, r_const);
5152 tcg_temp_free_i32(r_const);
5161 r_const = tcg_const_i32(TT_UNIMP_FLUSH);
5162 gen_helper_raise_exception(cpu_env, r_const);
5163 tcg_temp_free_i32(r_const);
5167 #if !defined(CONFIG_USER_ONLY)
5173 r_const = tcg_const_i32(TT_PRIV_INSN);
5174 gen_helper_raise_exception(cpu_env, r_const);
5175 tcg_temp_free_i32(r_const);
5182 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
5185 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5188 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
5192 #ifndef TARGET_SPARC64
5198 r_const = tcg_const_i32(TT_NCP_INSN);
5199 gen_helper_raise_exception(cpu_env, r_const);
5200 tcg_temp_free_i32(r_const);
5206 if (dc->n_t32 != 0) {
5208 for (i = dc->n_t32 - 1; i >= 0; --i) {
5209 tcg_temp_free_i32(dc->t32[i]);
5213 if (dc->n_ttl != 0) {
5215 for (i = dc->n_ttl - 1; i >= 0; --i) {
5216 tcg_temp_free(dc->ttl[i]);
5222 void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
5224 SPARCCPU *cpu = sparc_env_get_cpu(env);
5225 CPUState *cs = CPU(cpu);
5226 target_ulong pc_start, last_pc;
5227 DisasContext dc1, *dc = &dc1;
5232 memset(dc, 0, sizeof(DisasContext));
5237 dc->npc = (target_ulong) tb->cs_base;
5238 dc->cc_op = CC_OP_DYNAMIC;
5239 dc->mem_idx = cpu_mmu_index(env, false);
5241 dc->fpu_enabled = tb_fpu_enabled(tb->flags);
5242 dc->address_mask_32bit = tb_am_enabled(tb->flags);
5243 dc->singlestep = (cs->singlestep_enabled || singlestep);
5246 max_insns = tb->cflags & CF_COUNT_MASK;
5247 if (max_insns == 0) {
5248 max_insns = CF_COUNT_MASK;
5250 if (max_insns > TCG_MAX_INSNS) {
5251 max_insns = TCG_MAX_INSNS;
5256 if (dc->npc & JUMP_PC) {
5257 assert(dc->jump_pc[1] == dc->pc + 4);
5258 tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
5260 tcg_gen_insn_start(dc->pc, dc->npc);
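/* Encoding sketch: when npc is two-valued (a conditional branch with a
   delay slot) there is no single npc to record, so the insn_start above
   stores jump_pc[0] with the JUMP_PC bit or'ed in; restore code can
   reconstruct both targets because jump_pc[1] is always pc + 4, as the
   assertion checks. */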
5265 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
5266 if (dc->pc != pc_start) {
5269 gen_helper_debug(cpu_env);
5275 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5279 insn = cpu_ldl_code(env, dc->pc);
5281 disas_sparc_insn(dc, insn);
        /* if the next PC is different, we abort now */
        if (dc->pc != (last_pc + 4)) {
            break;
        }
        /* if we reach a page boundary, we stop generation so that the
           PC of a TT_TFAULT exception is always in the right page */
        if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0) {
            break;
        }
        /* if single step mode, we generate only one instruction and
           generate an exception */
        if (dc->singlestep) {
            break;
        }
    } while (!tcg_op_buf_full() &&
             (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
             num_insns < max_insns);
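    /* No more guest instructions are translated past this point; emit the
       code that ends the TB. */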
 exit_gen_loop:
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }
    if (!dc->is_br) {
        if (dc->pc != DYNAMIC_PC &&
            (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
        } else {
            if (dc->pc != DYNAMIC_PC) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
            }
            save_npc(dc);
            tcg_gen_exit_tb(0);
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = last_pc + 4 - pc_start;
    tb->icount = num_insns;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
        qemu_log("\n");
    }
#endif
}
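/* Register the TCG globals that back the SPARC architectural state.
   Called once at start-up; subsequent calls return immediately. */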
void gen_intermediate_code_init(CPUSPARCState *env)
{
    static int inited;
    static const char * const gregnames[8] = {
        NULL, // g0 not used
        "g1", "g2", "g3", "g4", "g5", "g6", "g7",
    };
    static const char * const fregnames[32] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };
    unsigned int i;

    /* init various static tables */
    if (inited) {
        return;
    }
    inited = 1;
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");
#ifdef TARGET_SPARC64
    cpu_xcc = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, xcc),
                                     "xcc");
    cpu_asi = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, asi),
                                     "asi");
    cpu_fprs = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, fprs),
                                      "fprs");
    cpu_gsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, gsr),
                                 "gsr");
    cpu_tick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                       offsetof(CPUSPARCState, tick_cmpr),
                                       "tick_cmpr");
    cpu_stick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                        offsetof(CPUSPARCState, stick_cmpr),
                                        "stick_cmpr");
    cpu_hstick_cmpr = tcg_global_mem_new(TCG_AREG0,
                                         offsetof(CPUSPARCState, hstick_cmpr),
                                         "hstick_cmpr");
    cpu_hintp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hintp),
                                   "hintp");
    cpu_htba = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, htba),
                                  "htba");
    cpu_hver = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, hver),
                                  "hver");
    cpu_ssr = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUSPARCState, ssr), "ssr");
    cpu_ver = tcg_global_mem_new(TCG_AREG0,
                                 offsetof(CPUSPARCState, version), "ver");
    cpu_softint = tcg_global_mem_new_i32(TCG_AREG0,
                                         offsetof(CPUSPARCState, softint),
                                         "softint");
#else
    cpu_wim = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, wim),
                                 "wim");
#endif
    cpu_cond = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cond),
                                  "cond");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0,
                                     offsetof(CPUSPARCState, cc_src2),
                                     "cc_src2");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, cc_dst),
                                    "cc_dst");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, cc_op),
                                       "cc_op");
    cpu_psr = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUSPARCState, psr),
                                     "psr");
    cpu_fsr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, fsr),
                                 "fsr");
    cpu_pc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, pc),
                                "pc");
    cpu_npc = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, npc),
                                 "npc");
    cpu_y = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, y), "y");
#ifndef CONFIG_USER_ONLY
    cpu_tbr = tcg_global_mem_new(TCG_AREG0, offsetof(CPUSPARCState, tbr),
                                 "tbr");
#endif
    for (i = 1; i < 8; i++) {
        cpu_gregs[i] = tcg_global_mem_new(TCG_AREG0,
                                          offsetof(CPUSPARCState, gregs[i]),
                                          gregnames[i]);
    }
    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
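/* Rebuild the architectural PC and NPC from the (pc, npc) pair recorded for
   each instruction by tcg_gen_insn_start() during translation. */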
void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}