2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
62 /* Define some temporary registers. T2 is used for constant generation. */
63 #define TCG_REG_T1 TCG_REG_G1
64 #define TCG_REG_T2 TCG_REG_O7
66 #ifdef CONFIG_USE_GUEST_BASE
67 # define TCG_GUEST_BASE_REG TCG_REG_I5
69 # define TCG_GUEST_BASE_REG TCG_REG_G0
72 static const int tcg_target_reg_alloc_order[] = {
102 static const int tcg_target_call_iarg_regs[6] = {
111 static const int tcg_target_call_oarg_regs[] = {
118 static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
120 return (val << ((sizeof(tcg_target_long) * 8 - bits))
121 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
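/* Annotation (not part of the original source): the shift pair relies on the
   arithmetic right shift of the signed tcg_target_long to sign-extend VAL
   from bit BITS-1, so the test accepts exactly the signed BITS-bit
   immediates.  For example, with bits == 13 the accepted range is
   -4096..4095: check_fit_tl(4095, 13) and check_fit_tl(-4096, 13) hold,
   while check_fit_tl(4096, 13) does not. */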
124 static inline int check_fit_i32(uint32_t val, unsigned int bits)
126 return ((val << (32 - bits)) >> (32 - bits)) == val;
129 static void patch_reloc(uint8_t *code_ptr, int type,
130 tcg_target_long value, tcg_target_long addend)
135 if (value != (uint32_t)value)
137 *(uint32_t *)code_ptr = value;
139 case R_SPARC_WDISP22:
140 value -= (long)code_ptr;
142 if (!check_fit_tl(value, 22))
144 *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value;
146 case R_SPARC_WDISP19:
147 value -= (long)code_ptr;
149 if (!check_fit_tl(value, 19))
151 *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x7ffff) | value;
158 /* parse target specific constraints */
159 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
166 ct->ct |= TCG_CT_REG;
167 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
169 case 'L': /* qemu_ld/st constraint */
170 ct->ct |= TCG_CT_REG;
171 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
173 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
174 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
175 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
178 ct->ct |= TCG_CT_CONST_S11;
181 ct->ct |= TCG_CT_CONST_S13;
184 ct->ct |= TCG_CT_CONST_ZERO;
194 /* test if a constant matches the constraint */
195 static inline int tcg_target_const_match(tcg_target_long val,
196 const TCGArgConstraint *arg_ct)
200 if (ct & TCG_CT_CONST) {
202 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
204 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
206 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
213 #define INSN_OP(x) ((x) << 30)
214 #define INSN_OP2(x) ((x) << 22)
215 #define INSN_OP3(x) ((x) << 19)
216 #define INSN_OPF(x) ((x) << 5)
217 #define INSN_RD(x) ((x) << 25)
218 #define INSN_RS1(x) ((x) << 14)
219 #define INSN_RS2(x) (x)
220 #define INSN_ASI(x) ((x) << 5)
222 #define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
223 #define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
224 #define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
225 #define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)
227 #define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
244 #define BA (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))
246 #define MOVCC_ICC (1 << 18)
247 #define MOVCC_XCC (1 << 18 | 1 << 12)
249 #define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
250 #define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
251 #define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
252 #define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
253 #define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
254 #define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
255 #define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
256 #define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
257 #define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
258 #define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
259 #define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
260 #define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
261 #define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
262 #define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
263 #define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
264 #define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
265 #define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
266 #define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
267 #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
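/* Annotation (worked example, not part of the original source): the emitters
   below OR these templates with the register fields, so for instance
   "add %g1, %g2, %g3" is ARITH_ADD | INSN_RD(3) | INSN_RS1(1) | INSN_RS2(2)
   = 0x80000000 | 0x06000000 | 0x00004000 | 0x2 = 0x86004002. */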
269 #define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
270 #define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
271 #define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
273 #define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
274 #define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
275 #define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
277 #define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
278 #define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
279 #define JMPL (INSN_OP(2) | INSN_OP3(0x38))
280 #define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
281 #define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
282 #define SETHI (INSN_OP(0) | INSN_OP2(0x4))
283 #define CALL INSN_OP(1)
284 #define LDUB (INSN_OP(3) | INSN_OP3(0x01))
285 #define LDSB (INSN_OP(3) | INSN_OP3(0x09))
286 #define LDUH (INSN_OP(3) | INSN_OP3(0x02))
287 #define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
288 #define LDUW (INSN_OP(3) | INSN_OP3(0x00))
289 #define LDSW (INSN_OP(3) | INSN_OP3(0x08))
290 #define LDX (INSN_OP(3) | INSN_OP3(0x0b))
291 #define STB (INSN_OP(3) | INSN_OP3(0x05))
292 #define STH (INSN_OP(3) | INSN_OP3(0x06))
293 #define STW (INSN_OP(3) | INSN_OP3(0x04))
294 #define STX (INSN_OP(3) | INSN_OP3(0x0e))
295 #define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
296 #define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
297 #define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
298 #define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
299 #define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
300 #define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
301 #define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
302 #define STBA (INSN_OP(3) | INSN_OP3(0x15))
303 #define STHA (INSN_OP(3) | INSN_OP3(0x16))
304 #define STWA (INSN_OP(3) | INSN_OP3(0x14))
305 #define STXA (INSN_OP(3) | INSN_OP3(0x1e))
307 #ifndef ASI_PRIMARY_LITTLE
308 #define ASI_PRIMARY_LITTLE 0x88
311 #define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
312 #define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
313 #define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
314 #define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
315 #define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))
317 #define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
318 #define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
319 #define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
321 static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
324 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
328 static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
329 uint32_t offset, int op)
331 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
335 static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
336 int val2, int val2const, int op)
338 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
339 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
342 static inline void tcg_out_mov(TCGContext *s, TCGType type,
343 TCGReg ret, TCGReg arg)
346 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
350 static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
352 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
355 static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
357 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
360 static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
362 if (check_fit_tl(arg, 13))
363 tcg_out_movi_imm13(s, ret, arg);
365 tcg_out_sethi(s, ret, arg);
367 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
371 static inline void tcg_out_movi(TCGContext *s, TCGType type,
372 TCGReg ret, tcg_target_long arg)
374 /* All 32-bit constants, as well as 64-bit constants with
375 no high bits set go through movi_imm32. */
376 if (TCG_TARGET_REG_BITS == 32
377 || type == TCG_TYPE_I32
378 || (arg & ~(tcg_target_long)0xffffffff) == 0) {
379 tcg_out_movi_imm32(s, ret, arg);
380 } else if (check_fit_tl(arg, 13)) {
381 /* A 13-bit constant sign-extended to 64 bits. */
382 tcg_out_movi_imm13(s, ret, arg);
383 } else if (check_fit_tl(arg, 32)) {
384 /* A 32-bit constant sign-extended to 64 bits. */
385 tcg_out_sethi(s, ret, ~arg);
386 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
388 tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
389 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
390 tcg_out_movi_imm32(s, TCG_REG_T2, arg);
391 tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
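/* Annotation (illustrative expansion, not part of the original source): a
   full 64-bit constant such as 0x123456789abcdef0 takes the final branch
   above and is built in at most six instructions, roughly:
       sethi %hi(0x12345678), ret
       or    ret, 0x278, ret
       sllx  ret, 32, ret
       sethi %hi(0x9abcdef0), %o7    ! %o7 is TCG_REG_T2
       or    %o7, 0x2f0, %o7
       or    ret, %o7, ret */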
395 static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
398 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
401 static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
404 if (check_fit_tl(offset, 13)) {
405 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
408 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
409 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
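/* Annotation (illustrative, not part of the original source): offsets outside
   the signed 13-bit range take the second path, e.g. a word load at offset
   0x2000 becomes
       sethi %hi(0x2000), %g1        ! %g1 is TCG_REG_T1
       ld    [addr + %g1], ret
   since "ld [addr + 0x2000], ret" cannot be encoded in one instruction. */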
413 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
414 TCGReg arg1, tcg_target_long arg2)
416 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
419 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
420 TCGReg arg1, tcg_target_long arg2)
422 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
425 static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
428 if (!check_fit_tl(arg, 10)) {
429 tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
431 tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
434 static inline void tcg_out_sety(TCGContext *s, int rs)
436 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
439 static inline void tcg_out_rdy(TCGContext *s, int rd)
441 tcg_out32(s, RDY | INSN_RD(rd));
444 static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
447 if (check_fit_tl(val, 13))
448 tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
450 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
451 tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
456 static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
460 if (check_fit_tl(val, 13))
461 tcg_out_arithi(s, rd, rs, val, ARITH_AND);
463 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
464 tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
469 static void tcg_out_div32(TCGContext *s, int rd, int rs1,
470 int val2, int val2const, int uns)
472 /* Load Y with the sign/zero extension of RS1 to 64 bits. */
474 tcg_out_sety(s, TCG_REG_G0);
476 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
477 tcg_out_sety(s, TCG_REG_T1);
480 tcg_out_arithc(s, rd, rs1, val2, val2const,
481 uns ? ARITH_UDIV : ARITH_SDIV);
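/* Annotation (not part of the original source): the V8 32-bit divides take a
   64-bit dividend from Y:rs1, so the code above emits roughly
       wr  %g0, %g0, %y  ;  udiv rs1, val2, rd            for unsigned, and
       sra rs1, 31, %g1  ;  wr %g0, %g1, %y  ;  sdiv rs1, val2, rd
   for signed, loading Y with zero or with the sign bits of rs1. */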
484 static inline void tcg_out_nop(TCGContext *s)
486 tcg_out_sethi(s, TCG_REG_G0, 0);
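/* Annotation: SETHI with rd = %g0 and a zero immediate is the architectural
   SPARC nop, so the word emitted here is 0x01000000. */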
489 static void tcg_out_branch_i32(TCGContext *s, int opc, int label_index)
491 TCGLabel *l = &s->labels[label_index];
495 off22 = INSN_OFF22(l->u.value - (unsigned long)s->code_ptr);
497 /* Make sure to preserve destinations during retranslation. */
498 off22 = *(uint32_t *)s->code_ptr & INSN_OFF22(-1);
499 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP22, label_index, 0);
501 tcg_out32(s, INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x2) | off22);
504 #if TCG_TARGET_REG_BITS == 64
505 static void tcg_out_branch_i64(TCGContext *s, int opc, int label_index)
507 TCGLabel *l = &s->labels[label_index];
511 off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
513 /* Make sure to preserve destinations during retranslation. */
514 off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
515 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label_index, 0);
517 tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x1) |
518 (0x5 << 19) | off19));
522 static const uint8_t tcg_cond_to_bcond[] = {
523 [TCG_COND_EQ] = COND_E,
524 [TCG_COND_NE] = COND_NE,
525 [TCG_COND_LT] = COND_L,
526 [TCG_COND_GE] = COND_GE,
527 [TCG_COND_LE] = COND_LE,
528 [TCG_COND_GT] = COND_G,
529 [TCG_COND_LTU] = COND_CS,
530 [TCG_COND_GEU] = COND_CC,
531 [TCG_COND_LEU] = COND_LEU,
532 [TCG_COND_GTU] = COND_GU,
535 static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
537 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
540 static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond,
541 TCGArg arg1, TCGArg arg2, int const_arg2,
544 tcg_out_cmp(s, arg1, arg2, const_arg2);
545 tcg_out_branch_i32(s, tcg_cond_to_bcond[cond], label_index);
549 static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGArg ret,
550 TCGArg v1, int v1const)
552 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
553 | INSN_RS1(tcg_cond_to_bcond[cond])
554 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
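/* Annotation (not part of the original source): in the V9 MOVcc encoding the
   4-bit condition field sits in bits 17:14, overlapping the usual rs1 field,
   which is why INSN_RS1() is reused above to place the condition; the
   MOVCC_ICC/MOVCC_XCC masks supply the cc2/cc1/cc0 bits that select %icc or
   %xcc. */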
557 static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
558 TCGArg c1, TCGArg c2, int c2const,
559 TCGArg v1, int v1const)
561 tcg_out_cmp(s, c1, c2, c2const);
562 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
565 #if TCG_TARGET_REG_BITS == 64
566 static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond,
567 TCGArg arg1, TCGArg arg2, int const_arg2,
570 tcg_out_cmp(s, arg1, arg2, const_arg2);
571 tcg_out_branch_i64(s, tcg_cond_to_bcond[cond], label_index);
575 static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
576 TCGArg c1, TCGArg c2, int c2const,
577 TCGArg v1, int v1const)
579 tcg_out_cmp(s, c1, c2, c2const);
580 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
583 static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
584 TCGArg al, TCGArg ah,
585 TCGArg bl, int blconst,
586 TCGArg bh, int bhconst, int label_dest)
588 int cc, label_next = gen_new_label();
590 tcg_out_cmp(s, ah, bh, bhconst);
592 /* Note that we fill one of the delay slots with the second compare. */
595 tcg_out_branch_i32(s, COND_NE, label_next);
596 tcg_out_cmp(s, al, bl, blconst);
597 tcg_out_branch_i32(s, COND_E, label_dest);
601 tcg_out_branch_i32(s, COND_NE, label_dest);
602 tcg_out_cmp(s, al, bl, blconst);
603 tcg_out_branch_i32(s, COND_NE, label_dest);
607 cc = tcg_cond_to_bcond[tcg_high_cond(cond)];
608 tcg_out_branch_i32(s, cc, label_dest);
610 tcg_out_branch_i32(s, COND_NE, label_next);
611 tcg_out_cmp(s, al, bl, blconst);
612 cc = tcg_cond_to_bcond[tcg_unsigned_cond(cond)];
613 tcg_out_branch_i32(s, cc, label_dest);
618 tcg_out_label(s, label_next, s->code_ptr);
622 static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
623 TCGArg c1, TCGArg c2, int c2const)
625 /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
629 /* The result of the comparison is in the carry bit. */
634 /* For equality, we can transform to inequality vs zero. */
636 tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
638 c1 = TCG_REG_G0, c2 = ret, c2const = 0;
639 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
644 /* If we don't need to load a constant into a register, we can
645 swap the operands on GTU/LEU. There's no benefit to loading
646 the constant into a temporary register. */
647 if (!c2const || c2 == 0) {
652 cond = tcg_swap_cond(cond);
658 tcg_out_cmp(s, c1, c2, c2const);
659 tcg_out_movi_imm13(s, ret, 0);
660 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
664 tcg_out_cmp(s, c1, c2, c2const);
665 if (cond == TCG_COND_LTU) {
666 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
668 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
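/* Annotation (worked case, not part of the original source): after
   "subcc c1, c2, %g0" the icc carry bit is set exactly when c1 < c2 unsigned.
   "addx %g0, 0, ret" then computes 0 + 0 + C, i.e. ret = (c1 < c2), while
   "subx %g0, -1, ret" computes 0 - (-1) - C = 1 - C, i.e. ret = (c1 >= c2). */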
672 #if TCG_TARGET_REG_BITS == 64
673 static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
674 TCGArg c1, TCGArg c2, int c2const)
676 tcg_out_cmp(s, c1, c2, c2const);
677 tcg_out_movi_imm13(s, ret, 0);
678 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
681 static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
682 TCGArg al, TCGArg ah,
683 TCGArg bl, int blconst,
684 TCGArg bh, int bhconst)
686 int tmp = TCG_REG_T1;
688 /* Note that the low parts are fully consumed before tmp is set. */
689 if (ret != ah && (bhconst || ret != bh)) {
696 tcg_out_setcond_i32(s, cond, tmp, al, bl, blconst);
697 tcg_out_cmp(s, ah, bh, bhconst);
698 tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
699 tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, cond == TCG_COND_NE, 1);
703 /* <= : ah < bh | (ah == bh && al <= bl) */
704 tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), tmp, al, bl, blconst);
705 tcg_out_cmp(s, ah, bh, bhconst);
706 tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
707 tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, 0, 1);
708 tcg_out_movcc(s, tcg_high_cond(cond), MOVCC_ICC, ret, 1, 1);
713 static void tcg_out_addsub2(TCGContext *s, TCGArg rl, TCGArg rh,
714 TCGArg al, TCGArg ah, TCGArg bl, int blconst,
715 TCGArg bh, int bhconst, int opl, int oph)
717 TCGArg tmp = TCG_REG_T1;
719 /* Note that the low parts are fully consumed before tmp is set. */
720 if (rl != ah && (bhconst || rl != bh)) {
724 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
725 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
726 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
730 /* Generate global QEMU prologue and epilogue code */
731 static void tcg_target_qemu_prologue(TCGContext *s)
733 int tmp_buf_size, frame_size;
735 /* The TCG temp buffer is at the top of the frame, immediately
736 below the frame pointer. */
737 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
738 tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
741 /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
742 otherwise the minimal frame usable by callees. */
743 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
744 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
745 frame_size += TCG_TARGET_STACK_ALIGN - 1;
746 frame_size &= -TCG_TARGET_STACK_ALIGN;
747 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
748 INSN_IMM13(-frame_size));
750 #ifdef CONFIG_USE_GUEST_BASE
751 if (GUEST_BASE != 0) {
752 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
753 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
757 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
758 INSN_RS2(TCG_REG_G0));
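/* Annotation (not part of the original source): SAVE both allocates the frame
   and rotates the register window, so the two arguments the caller passed in
   %o0/%o1 (the CPU env pointer and the address of the generated code to run)
   are visible here as %i0/%i1, which is why the jump above goes through
   TCG_REG_I1. */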
762 /* No epilogue required. We issue ret + restore directly in the TB. */
765 #if defined(CONFIG_SOFTMMU)
767 #include "../../softmmu_defs.h"
769 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr, int mmu_idx) */
771 static const void * const qemu_ld_helpers[4] = {
778 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
779 uintxx_t val, int mmu_idx) */
780 static const void * const qemu_st_helpers[4] = {
787 /* Perform the TLB load and compare.
790 ADDRLO_IDX contains the index into ARGS of the low part of the
791 address; the high part of the address is at ADDRLO_IDX+1.
793 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
795 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
796 This should be offsetof addr_read or addr_write.
798 The result of the TLB comparison is in %[ix]cc. The sanitized address
799 is in the returned register, maybe %o0. The TLB addend is in %o1. */
801 static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
802 int s_bits, const TCGArg *args, int which)
804 const int addrlo = args[addrlo_idx];
805 const int r0 = TCG_REG_O0;
806 const int r1 = TCG_REG_O1;
807 const int r2 = TCG_REG_O2;
811 if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
812 /* Assemble the 64-bit address in R0. */
813 tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
814 tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
815 tcg_out_arith(s, r0, r0, r1, ARITH_OR);
818 /* Shift the page number down to tlb-entry. */
819 tcg_out_arithi(s, r1, addrlo,
820 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
822 /* Mask out the page offset, except for the required alignment. */
823 tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
825 /* Compute tlb index, modulo tlb size. */
826 tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
828 /* Relative to the current ENV. */
829 tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);
831 /* Find a base address that can load both tlb comparator and addend. */
832 tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
833 if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
834 tcg_out_addi(s, r1, tlb_ofs);
838 /* Load the tlb comparator and the addend. */
839 tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
840 tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));
842 /* subcc arg0, arg2, %g0 */
843 tcg_out_cmp(s, r0, r2, 0);
845 /* If the guest address must be zero-extended, do so now. */
846 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
847 tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
852 #endif /* CONFIG_SOFTMMU */
854 static const int qemu_ld_opc[8] = {
855 #ifdef TARGET_WORDS_BIGENDIAN
856 LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
858 LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
862 static const int qemu_st_opc[4] = {
863 #ifdef TARGET_WORDS_BIGENDIAN
866 STB, STH_LE, STW_LE, STX_LE
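/* Annotation (not part of the original source): SIZEOP below indexes these
   tables as (log2 of the access size) | (4 for sign-extending loads), e.g.
   0 = ld8u, 1|4 = ld16s, 3 = ld64; stores use only the low two bits. */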
870 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
872 int addrlo_idx = 1, datalo, datahi, addr_reg;
873 #if defined(CONFIG_SOFTMMU)
874 int memi_idx, memi, s_bits, n;
875 uint32_t *label_ptr[2];
878 datahi = datalo = args[0];
879 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
884 #if defined(CONFIG_SOFTMMU)
885 memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
886 memi = args[memi_idx];
889 addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
890 offsetof(CPUTLBEntry, addr_read));
892 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
895 /* bne,pn %[xi]cc, label0 */
896 label_ptr[0] = (uint32_t *)s->code_ptr;
897 tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_NE, 0) | INSN_OP2(0x1)
898 | ((TARGET_LONG_BITS == 64) << 21)));
901 /* Load all 64 bits into an O/G register. */
902 reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
903 tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
905 /* Move the two 32-bit pieces into the destination registers. */
906 tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
907 if (reg64 != datalo) {
908 tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
912 label_ptr[1] = (uint32_t *)s->code_ptr;
913 tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x1)
914 | (1 << 29) | (1 << 19)));
916 /* The fast path is exactly one insn. Thus we can perform the
917 entire TLB Hit in the (annulled) delay slot of the branch
918 over the TLB Miss case. */
920 /* beq,a,pt %[xi]cc, label0 */
922 label_ptr[1] = (uint32_t *)s->code_ptr;
923 tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
924 | ((TARGET_LONG_BITS == 64) << 21)
925 | (1 << 29) | (1 << 19)));
927 tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
933 *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
934 (unsigned long)label_ptr[0]);
937 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
938 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
939 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
940 args[addrlo_idx + 1]);
942 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
945 /* qemu_ld_helper[s_bits](arg0, arg1) */
946 tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
947 - (tcg_target_ulong)s->code_ptr) >> 2)
950 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);
952 n = tcg_target_call_oarg_regs[0];
953 /* datalo = sign_extend(arg0) */
956 /* Recall that SRA sign extends from bit 31 through bit 63. */
957 tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
958 tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
961 tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
962 tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
965 tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
968 if (TCG_TARGET_REG_BITS == 32) {
969 tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
970 tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
979 tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
983 *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
984 (unsigned long)label_ptr[1]);
986 addr_reg = args[addrlo_idx];
987 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
988 tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
989 addr_reg = TCG_REG_T1;
991 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
992 int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
994 tcg_out_ldst_rr(s, reg64, addr_reg,
995 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
996 qemu_ld_opc[sizeop]);
998 tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
999 if (reg64 != datalo) {
1000 tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
1003 tcg_out_ldst_rr(s, datalo, addr_reg,
1004 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1005 qemu_ld_opc[sizeop]);
1007 #endif /* CONFIG_SOFTMMU */
1010 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
1012 int addrlo_idx = 1, datalo, datahi, addr_reg;
1013 #if defined(CONFIG_SOFTMMU)
1014 int memi_idx, memi, n, datafull;
1015 uint32_t *label_ptr;
1018 datahi = datalo = args[0];
1019 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
1024 #if defined(CONFIG_SOFTMMU)
1025 memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
1026 memi = args[memi_idx];
1028 addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
1029 offsetof(CPUTLBEntry, addr_write));
1032 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
1033 /* Reconstruct the full 64-bit value. */
1034 tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
1035 tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
1036 tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
1037 datafull = TCG_REG_O2;
1040 /* The fast path is exactly one insn. Thus we can perform the entire
1041 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
1042 /* beq,a,pt %[xi]cc, label0 */
1043 label_ptr = (uint32_t *)s->code_ptr;
1044 tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
1045 | ((TARGET_LONG_BITS == 64) << 21)
1046 | (1 << 29) | (1 << 19)));
1048 tcg_out_ldst_rr(s, datafull, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);
1053 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
1054 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1055 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
1056 args[addrlo_idx + 1]);
1058 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
1060 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
1061 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
1063 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);
1065 /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
1066 tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
1067 - (tcg_target_ulong)s->code_ptr) >> 2)
1070 tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);
1072 *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
1073 (unsigned long)label_ptr);
1075 addr_reg = args[addrlo_idx];
1076 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
1077 tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
1078 addr_reg = TCG_REG_T1;
1080 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
1081 tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
1082 tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
1083 tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
1084 datalo = TCG_REG_O2;
1086 tcg_out_ldst_rr(s, datalo, addr_reg,
1087 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1088 qemu_st_opc[sizeop]);
1089 #endif /* CONFIG_SOFTMMU */
1092 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
1093 const int *const_args)
1098 case INDEX_op_exit_tb:
1099 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
1100 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
1102 tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
1103 INSN_RS2(TCG_REG_G0));
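/* Annotation (not part of the original source): this is the classic
   "ret; restore" return: the jmpl through %i7 returns to the caller of the
   prologue, the RESTORE in its delay slot unwinds the register window, and
   the TB return value placed in %i0 above is seen by the caller as %o0, i.e.
   as the return value of tcg_qemu_tb_exec(). */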
1105 case INDEX_op_goto_tb:
1106 if (s->tb_jmp_offset) {
1107 /* direct jump method */
1108 uint32_t old_insn = *(uint32_t *)s->code_ptr;
1109 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1110 /* Make sure to preserve links during retranslation. */
1111 tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
1113 /* indirect jump method */
1114 tcg_out_ld_ptr(s, TCG_REG_T1,
1115 (tcg_target_long)(s->tb_next + args[0]));
1116 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
1117 INSN_RS2(TCG_REG_G0));
1120 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1123 if (const_args[0]) {
1124 tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
1125 - (tcg_target_ulong)s->code_ptr) >> 2)
1128 tcg_out_ld_ptr(s, TCG_REG_T1,
1129 (tcg_target_long)(s->tb_next + args[0]));
1130 tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
1131 INSN_RS2(TCG_REG_G0));
1137 tcg_out_branch_i32(s, COND_A, args[0]);
1140 case INDEX_op_movi_i32:
1141 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1144 #if TCG_TARGET_REG_BITS == 64
1145 #define OP_32_64(x) \
1146 glue(glue(case INDEX_op_, x), _i32): \
1147 glue(glue(case INDEX_op_, x), _i64)
1149 #define OP_32_64(x) \
1150 glue(glue(case INDEX_op_, x), _i32)
1153 tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
1156 tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
1159 tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
1162 tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
1164 case INDEX_op_ld_i32:
1165 #if TCG_TARGET_REG_BITS == 64
1166 case INDEX_op_ld32u_i64:
1168 tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
1171 tcg_out_ldst(s, args[0], args[1], args[2], STB);
1174 tcg_out_ldst(s, args[0], args[1], args[2], STH);
1176 case INDEX_op_st_i32:
1177 #if TCG_TARGET_REG_BITS == 64
1178 case INDEX_op_st32_i64:
1180 tcg_out_ldst(s, args[0], args[1], args[2], STW);
1203 case INDEX_op_shl_i32:
1206 /* Limit immediate shift count lest we create an illegal insn. */
1207 tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
1209 case INDEX_op_shr_i32:
1212 case INDEX_op_sar_i32:
1215 case INDEX_op_mul_i32:
1226 case INDEX_op_div_i32:
1227 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
1229 case INDEX_op_divu_i32:
1230 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
1233 case INDEX_op_rem_i32:
1234 case INDEX_op_remu_i32:
1235 tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
1236 opc == INDEX_op_remu_i32);
1237 tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
1239 tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
1242 case INDEX_op_brcond_i32:
1243 tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
1246 case INDEX_op_setcond_i32:
1247 tcg_out_setcond_i32(s, args[3], args[0], args[1],
1248 args[2], const_args[2]);
1250 case INDEX_op_movcond_i32:
1251 tcg_out_movcond_i32(s, args[5], args[0], args[1],
1252 args[2], const_args[2], args[3], const_args[3]);
1255 #if TCG_TARGET_REG_BITS == 32
1256 case INDEX_op_brcond2_i32:
1257 tcg_out_brcond2_i32(s, args[4], args[0], args[1],
1258 args[2], const_args[2],
1259 args[3], const_args[3], args[5]);
1261 case INDEX_op_setcond2_i32:
1262 tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
1263 args[3], const_args[3],
1264 args[4], const_args[4]);
1266 case INDEX_op_add2_i32:
1267 tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
1268 args[4], const_args[4], args[5], const_args[5],
1269 ARITH_ADDCC, ARITH_ADDX);
1271 case INDEX_op_sub2_i32:
1272 tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
1273 args[4], const_args[4], args[5], const_args[5],
1274 ARITH_SUBCC, ARITH_SUBX);
1276 case INDEX_op_mulu2_i32:
1277 tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
1279 tcg_out_rdy(s, args[1]);
1283 case INDEX_op_qemu_ld8u:
1284 tcg_out_qemu_ld(s, args, 0);
1286 case INDEX_op_qemu_ld8s:
1287 tcg_out_qemu_ld(s, args, 0 | 4);
1289 case INDEX_op_qemu_ld16u:
1290 tcg_out_qemu_ld(s, args, 1);
1292 case INDEX_op_qemu_ld16s:
1293 tcg_out_qemu_ld(s, args, 1 | 4);
1295 case INDEX_op_qemu_ld32:
1296 #if TCG_TARGET_REG_BITS == 64
1297 case INDEX_op_qemu_ld32u:
1299 tcg_out_qemu_ld(s, args, 2);
1301 #if TCG_TARGET_REG_BITS == 64
1302 case INDEX_op_qemu_ld32s:
1303 tcg_out_qemu_ld(s, args, 2 | 4);
1306 case INDEX_op_qemu_ld64:
1307 tcg_out_qemu_ld(s, args, 3);
1309 case INDEX_op_qemu_st8:
1310 tcg_out_qemu_st(s, args, 0);
1312 case INDEX_op_qemu_st16:
1313 tcg_out_qemu_st(s, args, 1);
1315 case INDEX_op_qemu_st32:
1316 tcg_out_qemu_st(s, args, 2);
1318 case INDEX_op_qemu_st64:
1319 tcg_out_qemu_st(s, args, 3);
1322 #if TCG_TARGET_REG_BITS == 64
1323 case INDEX_op_movi_i64:
1324 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1326 case INDEX_op_ld32s_i64:
1327 tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
1329 case INDEX_op_ld_i64:
1330 tcg_out_ldst(s, args[0], args[1], args[2], LDX);
1332 case INDEX_op_st_i64:
1333 tcg_out_ldst(s, args[0], args[1], args[2], STX);
1335 case INDEX_op_shl_i64:
1338 /* Limit immediate shift count lest we create an illegal insn. */
1339 tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
1341 case INDEX_op_shr_i64:
1344 case INDEX_op_sar_i64:
1347 case INDEX_op_mul_i64:
1350 case INDEX_op_div_i64:
1353 case INDEX_op_divu_i64:
1356 case INDEX_op_rem_i64:
1357 case INDEX_op_remu_i64:
1358 tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
1359 opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
1360 tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
1362 tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
1364 case INDEX_op_ext32s_i64:
1365 if (const_args[1]) {
1366 tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
1368 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
1371 case INDEX_op_ext32u_i64:
1372 if (const_args[1]) {
1373 tcg_out_movi_imm32(s, args[0], args[1]);
1375 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
1379 case INDEX_op_brcond_i64:
1380 tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
1383 case INDEX_op_setcond_i64:
1384 tcg_out_setcond_i64(s, args[3], args[0], args[1],
1385 args[2], const_args[2]);
1387 case INDEX_op_movcond_i64:
1388 tcg_out_movcond_i64(s, args[5], args[0], args[1],
1389 args[2], const_args[2], args[3], const_args[3]);
1393 tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
1397 tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
1401 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1406 static const TCGTargetOpDef sparc_op_defs[] = {
1407 { INDEX_op_exit_tb, { } },
1408 { INDEX_op_goto_tb, { } },
1409 { INDEX_op_call, { "ri" } },
1410 { INDEX_op_br, { } },
1412 { INDEX_op_mov_i32, { "r", "r" } },
1413 { INDEX_op_movi_i32, { "r" } },
1414 { INDEX_op_ld8u_i32, { "r", "r" } },
1415 { INDEX_op_ld8s_i32, { "r", "r" } },
1416 { INDEX_op_ld16u_i32, { "r", "r" } },
1417 { INDEX_op_ld16s_i32, { "r", "r" } },
1418 { INDEX_op_ld_i32, { "r", "r" } },
1419 { INDEX_op_st8_i32, { "rZ", "r" } },
1420 { INDEX_op_st16_i32, { "rZ", "r" } },
1421 { INDEX_op_st_i32, { "rZ", "r" } },
1423 { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
1424 { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
1425 { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
1426 { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
1427 { INDEX_op_rem_i32, { "r", "rZ", "rJ" } },
1428 { INDEX_op_remu_i32, { "r", "rZ", "rJ" } },
1429 { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
1430 { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
1431 { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
1432 { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
1433 { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
1434 { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },
1436 { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
1437 { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
1438 { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },
1440 { INDEX_op_neg_i32, { "r", "rJ" } },
1441 { INDEX_op_not_i32, { "r", "rJ" } },
1443 { INDEX_op_brcond_i32, { "rZ", "rJ" } },
1444 { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
1445 { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },
1447 #if TCG_TARGET_REG_BITS == 32
1448 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
1449 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rJ", "rJ" } },
1450 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1451 { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1452 { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
1455 #if TCG_TARGET_REG_BITS == 64
1456 { INDEX_op_mov_i64, { "r", "r" } },
1457 { INDEX_op_movi_i64, { "r" } },
1458 { INDEX_op_ld8u_i64, { "r", "r" } },
1459 { INDEX_op_ld8s_i64, { "r", "r" } },
1460 { INDEX_op_ld16u_i64, { "r", "r" } },
1461 { INDEX_op_ld16s_i64, { "r", "r" } },
1462 { INDEX_op_ld32u_i64, { "r", "r" } },
1463 { INDEX_op_ld32s_i64, { "r", "r" } },
1464 { INDEX_op_ld_i64, { "r", "r" } },
1465 { INDEX_op_st8_i64, { "rZ", "r" } },
1466 { INDEX_op_st16_i64, { "rZ", "r" } },
1467 { INDEX_op_st32_i64, { "rZ", "r" } },
1468 { INDEX_op_st_i64, { "rZ", "r" } },
1470 { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
1471 { INDEX_op_mul_i64, { "r", "rZ", "rJ" } },
1472 { INDEX_op_div_i64, { "r", "rZ", "rJ" } },
1473 { INDEX_op_divu_i64, { "r", "rZ", "rJ" } },
1474 { INDEX_op_rem_i64, { "r", "rZ", "rJ" } },
1475 { INDEX_op_remu_i64, { "r", "rZ", "rJ" } },
1476 { INDEX_op_sub_i64, { "r", "rZ", "rJ" } },
1477 { INDEX_op_and_i64, { "r", "rZ", "rJ" } },
1478 { INDEX_op_andc_i64, { "r", "rZ", "rJ" } },
1479 { INDEX_op_or_i64, { "r", "rZ", "rJ" } },
1480 { INDEX_op_orc_i64, { "r", "rZ", "rJ" } },
1481 { INDEX_op_xor_i64, { "r", "rZ", "rJ" } },
1483 { INDEX_op_shl_i64, { "r", "rZ", "rJ" } },
1484 { INDEX_op_shr_i64, { "r", "rZ", "rJ" } },
1485 { INDEX_op_sar_i64, { "r", "rZ", "rJ" } },
1487 { INDEX_op_neg_i64, { "r", "rJ" } },
1488 { INDEX_op_not_i64, { "r", "rJ" } },
1490 { INDEX_op_ext32s_i64, { "r", "ri" } },
1491 { INDEX_op_ext32u_i64, { "r", "ri" } },
1493 { INDEX_op_brcond_i64, { "rZ", "rJ" } },
1494 { INDEX_op_setcond_i64, { "r", "rZ", "rJ" } },
1495 { INDEX_op_movcond_i64, { "r", "rZ", "rJ", "rI", "0" } },
1498 #if TCG_TARGET_REG_BITS == 64
1499 { INDEX_op_qemu_ld8u, { "r", "L" } },
1500 { INDEX_op_qemu_ld8s, { "r", "L" } },
1501 { INDEX_op_qemu_ld16u, { "r", "L" } },
1502 { INDEX_op_qemu_ld16s, { "r", "L" } },
1503 { INDEX_op_qemu_ld32, { "r", "L" } },
1504 { INDEX_op_qemu_ld32u, { "r", "L" } },
1505 { INDEX_op_qemu_ld32s, { "r", "L" } },
1506 { INDEX_op_qemu_ld64, { "r", "L" } },
1508 { INDEX_op_qemu_st8, { "L", "L" } },
1509 { INDEX_op_qemu_st16, { "L", "L" } },
1510 { INDEX_op_qemu_st32, { "L", "L" } },
1511 { INDEX_op_qemu_st64, { "L", "L" } },
1512 #elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
1513 { INDEX_op_qemu_ld8u, { "r", "L" } },
1514 { INDEX_op_qemu_ld8s, { "r", "L" } },
1515 { INDEX_op_qemu_ld16u, { "r", "L" } },
1516 { INDEX_op_qemu_ld16s, { "r", "L" } },
1517 { INDEX_op_qemu_ld32, { "r", "L" } },
1518 { INDEX_op_qemu_ld64, { "r", "r", "L" } },
1520 { INDEX_op_qemu_st8, { "L", "L" } },
1521 { INDEX_op_qemu_st16, { "L", "L" } },
1522 { INDEX_op_qemu_st32, { "L", "L" } },
1523 { INDEX_op_qemu_st64, { "L", "L", "L" } },
1525 { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
1526 { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
1527 { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
1528 { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
1529 { INDEX_op_qemu_ld32, { "r", "L", "L" } },
1530 { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },
1532 { INDEX_op_qemu_st8, { "L", "L", "L" } },
1533 { INDEX_op_qemu_st16, { "L", "L", "L" } },
1534 { INDEX_op_qemu_st32, { "L", "L", "L" } },
1535 { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
1541 static void tcg_target_init(TCGContext *s)
1543 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
1544 #if TCG_TARGET_REG_BITS == 64
1545 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
1547 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
1563 tcg_regset_clear(s->reserved_regs);
1564 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1565 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1566 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1567 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1568 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1569 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1570 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1571 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1573 tcg_add_target_add_op_defs(sparc_op_defs);
1576 #if TCG_TARGET_REG_BITS == 64
1577 # define ELF_HOST_MACHINE EM_SPARCV9
1579 # define ELF_HOST_MACHINE EM_SPARC32PLUS
1580 # define ELF_HOST_FLAGS EF_SPARC_32PLUS
1584 uint32_t len __attribute__((aligned((sizeof(void *)))));
1587 char augmentation[1];
1590 uint8_t return_column;
1594 uint32_t len __attribute__((aligned((sizeof(void *)))));
1595 uint32_t cie_offset;
1596 tcg_target_long func_start __attribute__((packed));
1597 tcg_target_long func_len __attribute__((packed));
1598 uint8_t def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
1600 uint8_t ret_save[3];
1608 static DebugFrame debug_frame = {
1609 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1612 .cie.code_align = 1,
1613 .cie.data_align = -sizeof(void *) & 0x7f,
1614 .cie.return_column = 15, /* o7 */
1616 .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
1618 #if TCG_TARGET_REG_BITS == 64
1619 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1620 (2047 & 0x7f) | 0x80, (2047 >> 7)
1622 13, 30 /* DW_CFA_def_cfa_register i6 */
1625 .fde.win_save = 0x2d, /* DW_CFA_GNU_window_save */
1626 .fde.ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
1629 void tcg_register_jit(void *buf, size_t buf_size)
1631 debug_frame.fde.func_start = (tcg_target_long) buf;
1632 debug_frame.fde.func_len = buf_size;
1634 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1637 void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1639 uint32_t *ptr = (uint32_t *)jmp_addr;
1640 tcg_target_long disp = (tcg_target_long)(addr - jmp_addr) >> 2;
1642 /* We can reach the entire address space for 32-bit. For 64-bit
1643 the code_gen_buffer can't be larger than 2GB. */
1644 if (TCG_TARGET_REG_BITS == 64 && !check_fit_tl(disp, 30)) {
1648 *ptr = CALL | (disp & 0x3fffffff);
1649 flush_icache_range(jmp_addr, jmp_addr + 4);
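/* Annotation (worked value, not part of the original source): the patched
   word is a CALL whose low 30 bits hold the word displacement, so e.g. a
   jump 64 bytes forward gives disp = 16 and the word
   0x40000000 | 16 = 0x40000010. */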