/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
24
d4a9eb1f 25#ifndef NDEBUG
8289b279
BS
26static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 "%g0",
28 "%g1",
29 "%g2",
30 "%g3",
31 "%g4",
32 "%g5",
33 "%g6",
34 "%g7",
35 "%o0",
36 "%o1",
37 "%o2",
38 "%o3",
39 "%o4",
40 "%o5",
41 "%o6",
42 "%o7",
43 "%l0",
44 "%l1",
45 "%l2",
46 "%l3",
47 "%l4",
48 "%l5",
49 "%l6",
50 "%l7",
51 "%i0",
52 "%i1",
53 "%i2",
54 "%i3",
55 "%i4",
56 "%i5",
57 "%i6",
58 "%i7",
59};
d4a9eb1f 60#endif
8289b279 61
375816f8
RH
/* Define some temporary registers.  T2 is used for constant generation. */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif
e141ab52 71
0954d0d9 72static const int tcg_target_reg_alloc_order[] = {
8289b279
BS
73 TCG_REG_L0,
74 TCG_REG_L1,
75 TCG_REG_L2,
76 TCG_REG_L3,
77 TCG_REG_L4,
78 TCG_REG_L5,
79 TCG_REG_L6,
80 TCG_REG_L7,
26adfb75 81
8289b279
BS
82 TCG_REG_I0,
83 TCG_REG_I1,
84 TCG_REG_I2,
85 TCG_REG_I3,
86 TCG_REG_I4,
375816f8 87 TCG_REG_I5,
26adfb75
RH
88
89 TCG_REG_G2,
90 TCG_REG_G3,
91 TCG_REG_G4,
92 TCG_REG_G5,
93
94 TCG_REG_O0,
95 TCG_REG_O1,
96 TCG_REG_O2,
97 TCG_REG_O3,
98 TCG_REG_O4,
99 TCG_REG_O5,
8289b279
BS
100};
101
102static const int tcg_target_call_iarg_regs[6] = {
103 TCG_REG_O0,
104 TCG_REG_O1,
105 TCG_REG_O2,
106 TCG_REG_O3,
107 TCG_REG_O4,
108 TCG_REG_O5,
109};
110
26a74ae3 111static const int tcg_target_call_oarg_regs[] = {
8289b279 112 TCG_REG_O0,
e141ab52
BS
113 TCG_REG_O1,
114 TCG_REG_O2,
115 TCG_REG_O3,
8289b279
BS
116};
117
57e49b40 118static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
f5ef6aac 119{
57e49b40
BS
120 return (val << ((sizeof(tcg_target_long) * 8 - bits))
121 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
122}
123
/* Return true if all bits of VAL above the low BITS bits are zero,
   i.e. VAL fits in a BITS-bit unsigned field.  */
static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    uint32_t keep = 0xffffffffu >> (32 - bits);
    return (val & keep) == val;
}
128
8289b279 129static void patch_reloc(uint8_t *code_ptr, int type,
f54b3f92 130 tcg_target_long value, tcg_target_long addend)
8289b279 131{
f54b3f92 132 value += addend;
8289b279
BS
133 switch (type) {
134 case R_SPARC_32:
135 if (value != (uint32_t)value)
136 tcg_abort();
137 *(uint32_t *)code_ptr = value;
138 break;
f5ef6aac
BS
139 case R_SPARC_WDISP22:
140 value -= (long)code_ptr;
141 value >>= 2;
57e49b40 142 if (!check_fit_tl(value, 22))
f5ef6aac
BS
143 tcg_abort();
144 *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value;
145 break;
1da92db2
BS
146 case R_SPARC_WDISP19:
147 value -= (long)code_ptr;
148 value >>= 2;
149 if (!check_fit_tl(value, 19))
150 tcg_abort();
151 *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x7ffff) | value;
152 break;
8289b279
BS
153 default:
154 tcg_abort();
155 }
156}
157
8289b279
BS
158/* parse target specific constraints */
159static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
160{
161 const char *ct_str;
162
163 ct_str = *pct_str;
164 switch (ct_str[0]) {
165 case 'r':
5e143c43
RH
166 ct->ct |= TCG_CT_REG;
167 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
168 break;
8289b279
BS
169 case 'L': /* qemu_ld/st constraint */
170 ct->ct |= TCG_CT_REG;
171 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
53c37487
BS
172 // Helper args
173 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
174 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
175 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
8289b279
BS
176 break;
177 case 'I':
178 ct->ct |= TCG_CT_CONST_S11;
179 break;
180 case 'J':
181 ct->ct |= TCG_CT_CONST_S13;
182 break;
89269f6c
RH
183 case 'Z':
184 ct->ct |= TCG_CT_CONST_ZERO;
185 break;
8289b279
BS
186 default:
187 return -1;
188 }
189 ct_str++;
190 *pct_str = ct_str;
191 return 0;
192}
193
8289b279
BS
194/* test if a constant matches the constraint */
195static inline int tcg_target_const_match(tcg_target_long val,
196 const TCGArgConstraint *arg_ct)
197{
89269f6c 198 int ct = arg_ct->ct;
8289b279 199
89269f6c
RH
200 if (ct & TCG_CT_CONST) {
201 return 1;
202 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
8289b279 203 return 1;
89269f6c 204 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
8289b279 205 return 1;
89269f6c 206 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
8289b279 207 return 1;
89269f6c 208 } else {
8289b279 209 return 0;
89269f6c 210 }
8289b279
BS
211}
212
/* SPARC instruction-word field encoders.  */
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)

/* Branch condition field; A is the annul bit.  */
#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))

/* MOVcc condition-code selectors.  */
#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

/* Arithmetic and logical opcodes.  */
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))

/* Shifts; the X forms set the 64-bit shift-count bit.  */
#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)

/* Loads and stores.  */
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))

/* Alternate-space loads/stores, used for byte-swapped access.  */
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

/* Little-endian access forms via the primary-little ASI.  */
#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
320
26cc915c
BS
321static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
322 int op)
323{
324 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
325 INSN_RS2(rs2));
326}
327
6f41b777
BS
328static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
329 uint32_t offset, int op)
26cc915c
BS
330{
331 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
332 INSN_IMM13(offset));
333}
334
ba225198
RH
335static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
336 int val2, int val2const, int op)
337{
338 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
339 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
340}
341
2a534aff
RH
342static inline void tcg_out_mov(TCGContext *s, TCGType type,
343 TCGReg ret, TCGReg arg)
8289b279 344{
dda73c78
RH
345 if (ret != arg) {
346 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
347 }
26cc915c
BS
348}
349
350static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
351{
352 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
8289b279
BS
353}
354
b101234a
BS
355static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
356{
357 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
358}
359
360static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
8289b279 361{
4a09aa89 362 if (check_fit_tl(arg, 13))
b101234a 363 tcg_out_movi_imm13(s, ret, arg);
8289b279 364 else {
26cc915c 365 tcg_out_sethi(s, ret, arg);
8289b279 366 if (arg & 0x3ff)
b101234a 367 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
8289b279
BS
368 }
369}
370
b101234a 371static inline void tcg_out_movi(TCGContext *s, TCGType type,
2a534aff 372 TCGReg ret, tcg_target_long arg)
b101234a 373{
43172207
RH
374 /* All 32-bit constants, as well as 64-bit constants with
375 no high bits set go through movi_imm32. */
376 if (TCG_TARGET_REG_BITS == 32
377 || type == TCG_TYPE_I32
378 || (arg & ~(tcg_target_long)0xffffffff) == 0) {
379 tcg_out_movi_imm32(s, ret, arg);
380 } else if (check_fit_tl(arg, 13)) {
381 /* A 13-bit constant sign-extended to 64-bits. */
382 tcg_out_movi_imm13(s, ret, arg);
383 } else if (check_fit_tl(arg, 32)) {
384 /* A 32-bit constant sign-extended to 64-bits. */
385 tcg_out_sethi(s, ret, ~arg);
386 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
387 } else {
375816f8
RH
388 tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
389 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
390 tcg_out_movi_imm32(s, TCG_REG_T2, arg);
391 tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
6f41b777 392 }
b101234a
BS
393}
394
a0ce341a
RH
395static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
396 int a2, int op)
8289b279 397{
a0ce341a 398 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
8289b279
BS
399}
400
a0ce341a
RH
401static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
402 int offset, int op)
8289b279 403{
a0ce341a 404 if (check_fit_tl(offset, 13)) {
8289b279
BS
405 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
406 INSN_IMM13(offset));
a0ce341a 407 } else {
375816f8
RH
408 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
409 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
cf7c2ca5 410 }
8289b279
BS
411}
412
2a534aff
RH
413static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
414 TCGReg arg1, tcg_target_long arg2)
8289b279 415{
a0ce341a 416 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
8289b279
BS
417}
418
2a534aff
RH
419static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
420 TCGReg arg1, tcg_target_long arg2)
8289b279 421{
a0ce341a
RH
422 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
423}
424
425static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
426 tcg_target_long arg)
427{
428 if (!check_fit_tl(arg, 10)) {
429 tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
430 }
431 tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
8289b279
BS
432}
433
583d1215 434static inline void tcg_out_sety(TCGContext *s, int rs)
8289b279 435{
583d1215 436 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
8289b279
BS
437}
438
7a3766f3
RH
439static inline void tcg_out_rdy(TCGContext *s, int rd)
440{
441 tcg_out32(s, RDY | INSN_RD(rd));
442}
443
8289b279
BS
444static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
445{
446 if (val != 0) {
57e49b40 447 if (check_fit_tl(val, 13))
8289b279 448 tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
f5ef6aac 449 else {
375816f8
RH
450 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
451 tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
f5ef6aac 452 }
8289b279
BS
453 }
454}
455
a0ce341a
RH
456static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
457 tcg_target_long val)
53c37487
BS
458{
459 if (val != 0) {
460 if (check_fit_tl(val, 13))
a0ce341a 461 tcg_out_arithi(s, rd, rs, val, ARITH_AND);
53c37487 462 else {
375816f8
RH
463 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
464 tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
53c37487
BS
465 }
466 }
467}
468
583d1215
RH
469static void tcg_out_div32(TCGContext *s, int rd, int rs1,
470 int val2, int val2const, int uns)
471{
472 /* Load Y with the sign/zero extension of RS1 to 64-bits. */
473 if (uns) {
474 tcg_out_sety(s, TCG_REG_G0);
475 } else {
375816f8
RH
476 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
477 tcg_out_sety(s, TCG_REG_T1);
583d1215
RH
478 }
479
480 tcg_out_arithc(s, rd, rs1, val2, val2const,
481 uns ? ARITH_UDIV : ARITH_SDIV);
482}
483
8289b279
BS
484static inline void tcg_out_nop(TCGContext *s)
485{
26cc915c 486 tcg_out_sethi(s, TCG_REG_G0, 0);
8289b279
BS
487}
488
1da92db2 489static void tcg_out_branch_i32(TCGContext *s, int opc, int label_index)
cf7c2ca5 490{
cf7c2ca5 491 TCGLabel *l = &s->labels[label_index];
f4bf0b91 492 uint32_t off22;
cf7c2ca5
BS
493
494 if (l->has_value) {
f4bf0b91 495 off22 = INSN_OFF22(l->u.value - (unsigned long)s->code_ptr);
f5ef6aac 496 } else {
f4bf0b91
RH
497 /* Make sure to preserve destinations during retranslation. */
498 off22 = *(uint32_t *)s->code_ptr & INSN_OFF22(-1);
f5ef6aac 499 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP22, label_index, 0);
f5ef6aac 500 }
f4bf0b91 501 tcg_out32(s, INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x2) | off22);
cf7c2ca5
BS
502}
503
#if TCG_TARGET_REG_BITS == 64
/* Emit a BPcc branch on %xcc to LABEL_INDEX with condition OPC.  */
static void tcg_out_branch_i64(TCGContext *s, int opc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t off19;

    if (l->has_value) {
        off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
    } else {
        /* Preserve the existing displacement across retranslation.  */
        off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label_index, 0);
    }
    /* (0x5 << 19) selects the %xcc condition codes with prediction.  */
    tcg_out32(s, INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x1)
              | (0x5 << 19) | off19);
}
#endif
521
0aed257f 522static const uint8_t tcg_cond_to_bcond[] = {
cf7c2ca5
BS
523 [TCG_COND_EQ] = COND_E,
524 [TCG_COND_NE] = COND_NE,
525 [TCG_COND_LT] = COND_L,
526 [TCG_COND_GE] = COND_GE,
527 [TCG_COND_LE] = COND_LE,
528 [TCG_COND_GT] = COND_G,
529 [TCG_COND_LTU] = COND_CS,
530 [TCG_COND_GEU] = COND_CC,
531 [TCG_COND_LEU] = COND_LEU,
532 [TCG_COND_GTU] = COND_GU,
533};
534
56f4927e
RH
535static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
536{
ba225198 537 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
56f4927e
RH
538}
539
8a56e840 540static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond,
1da92db2
BS
541 TCGArg arg1, TCGArg arg2, int const_arg2,
542 int label_index)
cf7c2ca5 543{
56f4927e 544 tcg_out_cmp(s, arg1, arg2, const_arg2);
1da92db2 545 tcg_out_branch_i32(s, tcg_cond_to_bcond[cond], label_index);
cf7c2ca5
BS
546 tcg_out_nop(s);
547}
548
ded37f0d
RH
549static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGArg ret,
550 TCGArg v1, int v1const)
551{
552 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
553 | INSN_RS1(tcg_cond_to_bcond[cond])
554 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
555}
556
557static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
558 TCGArg c1, TCGArg c2, int c2const,
559 TCGArg v1, int v1const)
560{
561 tcg_out_cmp(s, c1, c2, c2const);
562 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
563}
564
a212ea75 565#if TCG_TARGET_REG_BITS == 64
8a56e840 566static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond,
1da92db2
BS
567 TCGArg arg1, TCGArg arg2, int const_arg2,
568 int label_index)
569{
56f4927e 570 tcg_out_cmp(s, arg1, arg2, const_arg2);
1da92db2
BS
571 tcg_out_branch_i64(s, tcg_cond_to_bcond[cond], label_index);
572 tcg_out_nop(s);
573}
ded37f0d
RH
574
575static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
576 TCGArg c1, TCGArg c2, int c2const,
577 TCGArg v1, int v1const)
578{
579 tcg_out_cmp(s, c1, c2, c2const);
580 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
581}
56f4927e 582#else
8a56e840 583static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
56f4927e
RH
584 TCGArg al, TCGArg ah,
585 TCGArg bl, int blconst,
586 TCGArg bh, int bhconst, int label_dest)
587{
588 int cc, label_next = gen_new_label();
589
590 tcg_out_cmp(s, ah, bh, bhconst);
591
592 /* Note that we fill one of the delay slots with the second compare. */
593 switch (cond) {
594 case TCG_COND_EQ:
24c7f754 595 tcg_out_branch_i32(s, COND_NE, label_next);
56f4927e 596 tcg_out_cmp(s, al, bl, blconst);
24c7f754 597 tcg_out_branch_i32(s, COND_E, label_dest);
56f4927e
RH
598 break;
599
600 case TCG_COND_NE:
24c7f754 601 tcg_out_branch_i32(s, COND_NE, label_dest);
56f4927e 602 tcg_out_cmp(s, al, bl, blconst);
24c7f754 603 tcg_out_branch_i32(s, COND_NE, label_dest);
56f4927e
RH
604 break;
605
606 default:
24c7f754 607 cc = tcg_cond_to_bcond[tcg_high_cond(cond)];
56f4927e
RH
608 tcg_out_branch_i32(s, cc, label_dest);
609 tcg_out_nop(s);
24c7f754 610 tcg_out_branch_i32(s, COND_NE, label_next);
56f4927e 611 tcg_out_cmp(s, al, bl, blconst);
24c7f754 612 cc = tcg_cond_to_bcond[tcg_unsigned_cond(cond)];
56f4927e
RH
613 tcg_out_branch_i32(s, cc, label_dest);
614 break;
615 }
616 tcg_out_nop(s);
617
9d6fca70 618 tcg_out_label(s, label_next, s->code_ptr);
56f4927e 619}
1da92db2
BS
620#endif
621
8a56e840 622static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
623 TCGArg c1, TCGArg c2, int c2const)
624{
dbfe80e1
RH
625 /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
626 switch (cond) {
7d458a75
RH
627 case TCG_COND_LTU:
628 case TCG_COND_GEU:
629 /* The result of the comparison is in the carry bit. */
630 break;
631
dbfe80e1
RH
632 case TCG_COND_EQ:
633 case TCG_COND_NE:
7d458a75 634 /* For equality, we can transform to inequality vs zero. */
dbfe80e1
RH
635 if (c2 != 0) {
636 tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
637 }
638 c1 = TCG_REG_G0, c2 = ret, c2const = 0;
7d458a75 639 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
dbfe80e1
RH
640 break;
641
642 case TCG_COND_GTU:
dbfe80e1 643 case TCG_COND_LEU:
7d458a75
RH
644 /* If we don't need to load a constant into a register, we can
645 swap the operands on GTU/LEU. There's no benefit to loading
646 the constant into a temporary register. */
647 if (!c2const || c2 == 0) {
648 TCGArg t = c1;
649 c1 = c2;
650 c2 = t;
651 c2const = 0;
652 cond = tcg_swap_cond(cond);
653 break;
654 }
655 /* FALLTHRU */
dbfe80e1
RH
656
657 default:
658 tcg_out_cmp(s, c1, c2, c2const);
dbfe80e1 659 tcg_out_movi_imm13(s, ret, 0);
ded37f0d 660 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
dbfe80e1
RH
661 return;
662 }
663
664 tcg_out_cmp(s, c1, c2, c2const);
665 if (cond == TCG_COND_LTU) {
666 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
667 } else {
668 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
669 }
670}
671
672#if TCG_TARGET_REG_BITS == 64
8a56e840 673static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
674 TCGArg c1, TCGArg c2, int c2const)
675{
676 tcg_out_cmp(s, c1, c2, c2const);
677 tcg_out_movi_imm13(s, ret, 0);
ded37f0d 678 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
dbfe80e1
RH
679}
680#else
8a56e840 681static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
682 TCGArg al, TCGArg ah,
683 TCGArg bl, int blconst,
684 TCGArg bh, int bhconst)
685{
dda73c78
RH
686 int tmp = TCG_REG_T1;
687
688 /* Note that the low parts are fully consumed before tmp is set. */
689 if (ret != ah && (bhconst || ret != bh)) {
690 tmp = ret;
691 }
dbfe80e1
RH
692
693 switch (cond) {
694 case TCG_COND_EQ:
dbfe80e1 695 case TCG_COND_NE:
dda73c78
RH
696 tcg_out_setcond_i32(s, cond, tmp, al, bl, blconst);
697 tcg_out_cmp(s, ah, bh, bhconst);
698 tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
699 tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, cond == TCG_COND_NE, 1);
dbfe80e1
RH
700 break;
701
702 default:
dda73c78
RH
703 /* <= : ah < bh | (ah == bh && al <= bl) */
704 tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), tmp, al, bl, blconst);
dbfe80e1 705 tcg_out_cmp(s, ah, bh, bhconst);
dda73c78
RH
706 tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
707 tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, 0, 1);
708 tcg_out_movcc(s, tcg_high_cond(cond), MOVCC_ICC, ret, 1, 1);
dbfe80e1
RH
709 break;
710 }
711}
4ec28e25
RH
712
713static void tcg_out_addsub2(TCGContext *s, TCGArg rl, TCGArg rh,
714 TCGArg al, TCGArg ah, TCGArg bl, int blconst,
715 TCGArg bh, int bhconst, int opl, int oph)
716{
717 TCGArg tmp = TCG_REG_T1;
718
719 /* Note that the low parts are fully consumed before tmp is set. */
720 if (rl != ah && (bhconst || rl != bh)) {
721 tmp = rl;
722 }
723
724 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
725 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
726 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
727}
dbfe80e1
RH
728#endif
729
7d551702 730/* Generate global QEMU prologue and epilogue code */
e4d58b41 731static void tcg_target_qemu_prologue(TCGContext *s)
b3db8758 732{
4c3204cb
RH
733 int tmp_buf_size, frame_size;
734
735 /* The TCG temp buffer is at the top of the frame, immediately
736 below the frame pointer. */
737 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
738 tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
739 tmp_buf_size);
740
741 /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
742 otherwise the minimal frame usable by callees. */
743 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
744 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
745 frame_size += TCG_TARGET_STACK_ALIGN - 1;
746 frame_size &= -TCG_TARGET_STACK_ALIGN;
b3db8758 747 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
4c3204cb 748 INSN_IMM13(-frame_size));
c6f7e4fb
RH
749
750#ifdef CONFIG_USE_GUEST_BASE
751 if (GUEST_BASE != 0) {
752 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
753 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
754 }
755#endif
756
cea5f9a2 757 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
7d551702 758 INSN_RS2(TCG_REG_G0));
0c554161
RH
759 /* delay slot */
760 tcg_out_nop(s);
4c3204cb
RH
761
762 /* No epilogue required. We issue ret + restore directly in the TB. */
b3db8758
BS
763}
764
f5ef6aac 765#if defined(CONFIG_SOFTMMU)
f5ef6aac 766
79383c9c 767#include "../../softmmu_defs.h"
f5ef6aac 768
e141ab52
BS
769/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
770 int mmu_idx) */
771static const void * const qemu_ld_helpers[4] = {
772 helper_ldb_mmu,
773 helper_ldw_mmu,
774 helper_ldl_mmu,
775 helper_ldq_mmu,
776};
777
778/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
779 uintxx_t val, int mmu_idx) */
780static const void * const qemu_st_helpers[4] = {
781 helper_stb_mmu,
782 helper_stw_mmu,
783 helper_stl_mmu,
784 helper_stq_mmu,
785};
f5ef6aac 786
a0ce341a 787/* Perform the TLB load and compare.
bffe1431 788
a0ce341a
RH
789 Inputs:
790 ADDRLO_IDX contains the index into ARGS of the low part of the
791 address; the high part of the address is at ADDR_LOW_IDX+1.
792
793 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
794
795 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
796 This should be offsetof addr_read or addr_write.
797
798 The result of the TLB comparison is in %[ix]cc. The sanitized address
799 is in the returned register, maybe %o0. The TLB addend is in %o1. */
800
801static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
802 int s_bits, const TCGArg *args, int which)
803{
804 const int addrlo = args[addrlo_idx];
805 const int r0 = TCG_REG_O0;
806 const int r1 = TCG_REG_O1;
807 const int r2 = TCG_REG_O2;
808 int addr = addrlo;
809 int tlb_ofs;
810
811 if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
812 /* Assemble the 64-bit address in R0. */
813 tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
814 tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
815 tcg_out_arith(s, r0, r0, r1, ARITH_OR);
816 }
817
818 /* Shift the page number down to tlb-entry. */
819 tcg_out_arithi(s, r1, addrlo,
820 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
821
822 /* Mask out the page offset, except for the required alignment. */
823 tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
824
825 /* Compute tlb index, modulo tlb size. */
826 tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
827
828 /* Relative to the current ENV. */
829 tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);
830
831 /* Find a base address that can load both tlb comparator and addend. */
832 tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
833 if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
834 tcg_out_addi(s, r1, tlb_ofs);
835 tlb_ofs = 0;
836 }
837
838 /* Load the tlb comparator and the addend. */
839 tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
840 tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));
841
842 /* subcc arg0, arg2, %g0 */
843 tcg_out_cmp(s, r0, r2, 0);
844
845 /* If the guest address must be zero-extended, do so now. */
846 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
847 tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
848 return r0;
849 }
850 return addrlo;
851}
852#endif /* CONFIG_SOFTMMU */
853
854static const int qemu_ld_opc[8] = {
855#ifdef TARGET_WORDS_BIGENDIAN
856 LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
65850a02 857#else
a0ce341a 858 LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
65850a02 859#endif
a0ce341a 860};
9d0efc88 861
a0ce341a
RH
862static const int qemu_st_opc[4] = {
863#ifdef TARGET_WORDS_BIGENDIAN
864 STB, STH, STW, STX
bffe1431 865#else
a0ce341a 866 STB, STH_LE, STW_LE, STX_LE
bffe1431 867#endif
a0ce341a 868};
bffe1431 869
a0ce341a 870static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
f5ef6aac 871{
a0ce341a 872 int addrlo_idx = 1, datalo, datahi, addr_reg;
f5ef6aac 873#if defined(CONFIG_SOFTMMU)
a0ce341a
RH
874 int memi_idx, memi, s_bits, n;
875 uint32_t *label_ptr[2];
f5ef6aac
BS
876#endif
877
a0ce341a
RH
878 datahi = datalo = args[0];
879 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
880 datahi = args[1];
881 addrlo_idx = 2;
882 }
f5ef6aac 883
f5ef6aac 884#if defined(CONFIG_SOFTMMU)
a0ce341a
RH
885 memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
886 memi = args[memi_idx];
887 s_bits = sizeop & 3;
888
889 addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
890 offsetof(CPUTLBEntry, addr_read));
891
892 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
893 int reg64;
894
895 /* bne,pn %[xi]cc, label0 */
896 label_ptr[0] = (uint32_t *)s->code_ptr;
897 tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_NE, 0) | INSN_OP2(0x1)
898 | ((TARGET_LONG_BITS == 64) << 21)));
899
900 /* TLB Hit. */
901 /* Load all 64-bits into an O/G register. */
902 reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
903 tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
904
905 /* Move the two 32-bit pieces into the destination registers. */
906 tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
907 if (reg64 != datalo) {
908 tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
909 }
f5ef6aac 910
a0ce341a
RH
911 /* b,a,pt label1 */
912 label_ptr[1] = (uint32_t *)s->code_ptr;
913 tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x1)
914 | (1 << 29) | (1 << 19)));
915 } else {
916 /* The fast path is exactly one insn. Thus we can perform the
917 entire TLB Hit in the (annulled) delay slot of the branch
918 over the TLB Miss case. */
919
920 /* beq,a,pt %[xi]cc, label0 */
921 label_ptr[0] = NULL;
922 label_ptr[1] = (uint32_t *)s->code_ptr;
923 tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
924 | ((TARGET_LONG_BITS == 64) << 21)
925 | (1 << 29) | (1 << 19)));
926 /* delay slot */
927 tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
928 }
53c37487 929
a0ce341a 930 /* TLB Miss. */
f5ef6aac 931
a0ce341a
RH
932 if (label_ptr[0]) {
933 *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
934 (unsigned long)label_ptr[0]);
935 }
936 n = 0;
937 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
938 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
939 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
940 args[addrlo_idx + 1]);
941 }
942 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
943 args[addrlo_idx]);
f5ef6aac 944
53c37487 945 /* qemu_ld_helper[s_bits](arg0, arg1) */
f5ef6aac
BS
946 tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
947 - (tcg_target_ulong)s->code_ptr) >> 2)
948 & 0x3fffffff));
a0ce341a
RH
949 /* delay slot */
950 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);
951
a0ce341a
RH
952 n = tcg_target_call_oarg_regs[0];
953 /* datalo = sign_extend(arg0) */
954 switch (sizeop) {
f5ef6aac 955 case 0 | 4:
a0ce341a
RH
956 /* Recall that SRA sign extends from bit 31 through bit 63. */
957 tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
958 tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
f5ef6aac
BS
959 break;
960 case 1 | 4:
a0ce341a
RH
961 tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
962 tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
f5ef6aac
BS
963 break;
964 case 2 | 4:
a0ce341a 965 tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
f5ef6aac 966 break;
a0ce341a
RH
967 case 3:
968 if (TCG_TARGET_REG_BITS == 32) {
969 tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
970 tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
971 break;
972 }
973 /* FALLTHRU */
f5ef6aac
BS
974 case 0:
975 case 1:
976 case 2:
f5ef6aac
BS
977 default:
978 /* mov */
a0ce341a 979 tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
f5ef6aac
BS
980 break;
981 }
982
a0ce341a
RH
983 *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
984 (unsigned long)label_ptr[1]);
90cbed46 985#else
a0ce341a
RH
986 addr_reg = args[addrlo_idx];
987 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
375816f8
RH
988 tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
989 addr_reg = TCG_REG_T1;
a0ce341a
RH
990 }
991 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
992 int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
90cbed46 993
c6f7e4fb
RH
994 tcg_out_ldst_rr(s, reg64, addr_reg,
995 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
996 qemu_ld_opc[sizeop]);
f5ef6aac 997
a0ce341a
RH
998 tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
999 if (reg64 != datalo) {
1000 tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
1001 }
1002 } else {
c6f7e4fb
RH
1003 tcg_out_ldst_rr(s, datalo, addr_reg,
1004 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1005 qemu_ld_opc[sizeop]);
f5ef6aac 1006 }
a0ce341a 1007#endif /* CONFIG_SOFTMMU */
f5ef6aac
BS
1008}
1009
a0ce341a 1010static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
f5ef6aac 1011{
a0ce341a 1012 int addrlo_idx = 1, datalo, datahi, addr_reg;
f5ef6aac 1013#if defined(CONFIG_SOFTMMU)
a7a49843 1014 int memi_idx, memi, n, datafull;
a0ce341a 1015 uint32_t *label_ptr;
f5ef6aac
BS
1016#endif
1017
a0ce341a
RH
1018 datahi = datalo = args[0];
1019 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
1020 datahi = args[1];
1021 addrlo_idx = 2;
1022 }
f5ef6aac 1023
f5ef6aac 1024#if defined(CONFIG_SOFTMMU)
a0ce341a
RH
1025 memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
1026 memi = args[memi_idx];
1027
1028 addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
1029 offsetof(CPUTLBEntry, addr_write));
1030
a7a49843 1031 datafull = datalo;
a0ce341a 1032 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
375816f8
RH
1033 /* Reconstruct the full 64-bit value. */
1034 tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
a0ce341a 1035 tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
375816f8 1036 tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
a7a49843 1037 datafull = TCG_REG_O2;
a0ce341a 1038 }
f5ef6aac 1039
a0ce341a
RH
1040 /* The fast path is exactly one insn. Thus we can perform the entire
1041 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
1042 /* beq,a,pt %[xi]cc, label0 */
1043 label_ptr = (uint32_t *)s->code_ptr;
1044 tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
1045 | ((TARGET_LONG_BITS == 64) << 21)
1046 | (1 << 29) | (1 << 19)));
1047 /* delay slot */
a7a49843 1048 tcg_out_ldst_rr(s, datafull, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);
a0ce341a
RH
1049
1050 /* TLB Miss. */
1051
1052 n = 0;
1053 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
1054 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1055 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
1056 args[addrlo_idx + 1]);
1057 }
1058 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
1059 args[addrlo_idx]);
1060 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
1061 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
1062 }
1063 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);
53c37487 1064
53c37487 1065 /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
a0ce341a 1066 tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
f5ef6aac
BS
1067 - (tcg_target_ulong)s->code_ptr) >> 2)
1068 & 0x3fffffff));
a0ce341a
RH
1069 /* delay slot */
1070 tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);
f5ef6aac 1071
a0ce341a
RH
1072 *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
1073 (unsigned long)label_ptr);
8384dd67 1074#else
a0ce341a
RH
1075 addr_reg = args[addrlo_idx];
1076 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
375816f8
RH
1077 tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
1078 addr_reg = TCG_REG_T1;
f5ef6aac 1079 }
a0ce341a 1080 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
375816f8 1081 tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
a0ce341a 1082 tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
375816f8
RH
1083 tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
1084 datalo = TCG_REG_O2;
a0ce341a 1085 }
c6f7e4fb
RH
1086 tcg_out_ldst_rr(s, datalo, addr_reg,
1087 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1088 qemu_st_opc[sizeop]);
a0ce341a 1089#endif /* CONFIG_SOFTMMU */
f5ef6aac
BS
1090}
1091
a9751609 1092static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
8289b279
BS
1093 const int *const_args)
1094{
1095 int c;
1096
1097 switch (opc) {
1098 case INDEX_op_exit_tb:
b3db8758
BS
1099 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
1100 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
8289b279 1101 INSN_IMM13(8));
b3db8758
BS
1102 tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
1103 INSN_RS2(TCG_REG_G0));
8289b279
BS
1104 break;
1105 case INDEX_op_goto_tb:
1106 if (s->tb_jmp_offset) {
1107 /* direct jump method */
5bbd2cae 1108 uint32_t old_insn = *(uint32_t *)s->code_ptr;
8289b279 1109 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
5bbd2cae
RH
1110 /* Make sure to preserve links during retranslation. */
1111 tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
8289b279
BS
1112 } else {
1113 /* indirect jump method */
375816f8
RH
1114 tcg_out_ld_ptr(s, TCG_REG_T1,
1115 (tcg_target_long)(s->tb_next + args[0]));
1116 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
b3db8758 1117 INSN_RS2(TCG_REG_G0));
8289b279 1118 }
53cd9273 1119 tcg_out_nop(s);
8289b279
BS
1120 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1121 break;
1122 case INDEX_op_call:
375816f8 1123 if (const_args[0]) {
bffe1431
BS
1124 tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
1125 - (tcg_target_ulong)s->code_ptr) >> 2)
1126 & 0x3fffffff));
375816f8
RH
1127 } else {
1128 tcg_out_ld_ptr(s, TCG_REG_T1,
bffe1431 1129 (tcg_target_long)(s->tb_next + args[0]));
375816f8 1130 tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
bffe1431 1131 INSN_RS2(TCG_REG_G0));
8289b279 1132 }
4c3204cb
RH
1133 /* delay slot */
1134 tcg_out_nop(s);
8289b279 1135 break;
8289b279 1136 case INDEX_op_br:
1da92db2 1137 tcg_out_branch_i32(s, COND_A, args[0]);
f5ef6aac 1138 tcg_out_nop(s);
8289b279
BS
1139 break;
1140 case INDEX_op_movi_i32:
1141 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1142 break;
1143
a212ea75 1144#if TCG_TARGET_REG_BITS == 64
8289b279 1145#define OP_32_64(x) \
ba225198
RH
1146 glue(glue(case INDEX_op_, x), _i32): \
1147 glue(glue(case INDEX_op_, x), _i64)
8289b279
BS
1148#else
1149#define OP_32_64(x) \
ba225198 1150 glue(glue(case INDEX_op_, x), _i32)
8289b279 1151#endif
ba225198 1152 OP_32_64(ld8u):
8289b279
BS
1153 tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
1154 break;
ba225198 1155 OP_32_64(ld8s):
8289b279
BS
1156 tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
1157 break;
ba225198 1158 OP_32_64(ld16u):
8289b279
BS
1159 tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
1160 break;
ba225198 1161 OP_32_64(ld16s):
8289b279
BS
1162 tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
1163 break;
1164 case INDEX_op_ld_i32:
a212ea75 1165#if TCG_TARGET_REG_BITS == 64
53cd9273 1166 case INDEX_op_ld32u_i64:
8289b279
BS
1167#endif
1168 tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
1169 break;
ba225198 1170 OP_32_64(st8):
8289b279
BS
1171 tcg_out_ldst(s, args[0], args[1], args[2], STB);
1172 break;
ba225198 1173 OP_32_64(st16):
8289b279
BS
1174 tcg_out_ldst(s, args[0], args[1], args[2], STH);
1175 break;
1176 case INDEX_op_st_i32:
a212ea75 1177#if TCG_TARGET_REG_BITS == 64
53cd9273 1178 case INDEX_op_st32_i64:
8289b279
BS
1179#endif
1180 tcg_out_ldst(s, args[0], args[1], args[2], STW);
1181 break;
ba225198 1182 OP_32_64(add):
53cd9273 1183 c = ARITH_ADD;
ba225198
RH
1184 goto gen_arith;
1185 OP_32_64(sub):
8289b279 1186 c = ARITH_SUB;
ba225198
RH
1187 goto gen_arith;
1188 OP_32_64(and):
8289b279 1189 c = ARITH_AND;
ba225198 1190 goto gen_arith;
dc69960d
RH
1191 OP_32_64(andc):
1192 c = ARITH_ANDN;
1193 goto gen_arith;
ba225198 1194 OP_32_64(or):
8289b279 1195 c = ARITH_OR;
ba225198 1196 goto gen_arith;
18c8f7a3
RH
1197 OP_32_64(orc):
1198 c = ARITH_ORN;
1199 goto gen_arith;
ba225198 1200 OP_32_64(xor):
8289b279 1201 c = ARITH_XOR;
ba225198 1202 goto gen_arith;
8289b279
BS
1203 case INDEX_op_shl_i32:
1204 c = SHIFT_SLL;
1fd95946
RH
1205 do_shift32:
1206 /* Limit immediate shift count lest we create an illegal insn. */
1207 tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
1208 break;
8289b279
BS
1209 case INDEX_op_shr_i32:
1210 c = SHIFT_SRL;
1fd95946 1211 goto do_shift32;
8289b279
BS
1212 case INDEX_op_sar_i32:
1213 c = SHIFT_SRA;
1fd95946 1214 goto do_shift32;
8289b279
BS
1215 case INDEX_op_mul_i32:
1216 c = ARITH_UMUL;
ba225198 1217 goto gen_arith;
583d1215 1218
4b5a85c1
RH
1219 OP_32_64(neg):
1220 c = ARITH_SUB;
1221 goto gen_arith1;
be6551b1
RH
1222 OP_32_64(not):
1223 c = ARITH_ORN;
1224 goto gen_arith1;
4b5a85c1 1225
583d1215
RH
1226 case INDEX_op_div_i32:
1227 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
1228 break;
1229 case INDEX_op_divu_i32:
1230 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
1231 break;
1232
1233 case INDEX_op_rem_i32:
1234 case INDEX_op_remu_i32:
375816f8 1235 tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
583d1215 1236 opc == INDEX_op_remu_i32);
375816f8 1237 tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
583d1215 1238 ARITH_UMUL);
375816f8 1239 tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
583d1215 1240 break;
8289b279
BS
1241
1242 case INDEX_op_brcond_i32:
1da92db2
BS
1243 tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
1244 args[3]);
8289b279 1245 break;
dbfe80e1
RH
1246 case INDEX_op_setcond_i32:
1247 tcg_out_setcond_i32(s, args[3], args[0], args[1],
1248 args[2], const_args[2]);
1249 break;
ded37f0d
RH
1250 case INDEX_op_movcond_i32:
1251 tcg_out_movcond_i32(s, args[5], args[0], args[1],
1252 args[2], const_args[2], args[3], const_args[3]);
1253 break;
dbfe80e1 1254
56f4927e
RH
1255#if TCG_TARGET_REG_BITS == 32
1256 case INDEX_op_brcond2_i32:
1257 tcg_out_brcond2_i32(s, args[4], args[0], args[1],
1258 args[2], const_args[2],
1259 args[3], const_args[3], args[5]);
1260 break;
dbfe80e1
RH
1261 case INDEX_op_setcond2_i32:
1262 tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
1263 args[3], const_args[3],
1264 args[4], const_args[4]);
1265 break;
7a3766f3 1266 case INDEX_op_add2_i32:
4ec28e25
RH
1267 tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
1268 args[4], const_args[4], args[5], const_args[5],
1269 ARITH_ADDCC, ARITH_ADDX);
7a3766f3
RH
1270 break;
1271 case INDEX_op_sub2_i32:
4ec28e25
RH
1272 tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
1273 args[4], const_args[4], args[5], const_args[5],
1274 ARITH_SUBCC, ARITH_SUBX);
7a3766f3
RH
1275 break;
1276 case INDEX_op_mulu2_i32:
1277 tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
1278 ARITH_UMUL);
1279 tcg_out_rdy(s, args[1]);
1280 break;
56f4927e 1281#endif
8289b279
BS
1282
1283 case INDEX_op_qemu_ld8u:
f5ef6aac 1284 tcg_out_qemu_ld(s, args, 0);
8289b279
BS
1285 break;
1286 case INDEX_op_qemu_ld8s:
f5ef6aac 1287 tcg_out_qemu_ld(s, args, 0 | 4);
8289b279
BS
1288 break;
1289 case INDEX_op_qemu_ld16u:
f5ef6aac 1290 tcg_out_qemu_ld(s, args, 1);
8289b279
BS
1291 break;
1292 case INDEX_op_qemu_ld16s:
f5ef6aac 1293 tcg_out_qemu_ld(s, args, 1 | 4);
8289b279 1294 break;
86feb1c8
RH
1295 case INDEX_op_qemu_ld32:
1296#if TCG_TARGET_REG_BITS == 64
8289b279 1297 case INDEX_op_qemu_ld32u:
86feb1c8 1298#endif
f5ef6aac 1299 tcg_out_qemu_ld(s, args, 2);
8289b279 1300 break;
30c0c76c 1301#if TCG_TARGET_REG_BITS == 64
8289b279 1302 case INDEX_op_qemu_ld32s:
f5ef6aac 1303 tcg_out_qemu_ld(s, args, 2 | 4);
8289b279 1304 break;
30c0c76c 1305#endif
a0ce341a
RH
1306 case INDEX_op_qemu_ld64:
1307 tcg_out_qemu_ld(s, args, 3);
1308 break;
8289b279 1309 case INDEX_op_qemu_st8:
f5ef6aac 1310 tcg_out_qemu_st(s, args, 0);
8289b279
BS
1311 break;
1312 case INDEX_op_qemu_st16:
f5ef6aac 1313 tcg_out_qemu_st(s, args, 1);
8289b279
BS
1314 break;
1315 case INDEX_op_qemu_st32:
f5ef6aac 1316 tcg_out_qemu_st(s, args, 2);
8289b279 1317 break;
a0ce341a
RH
1318 case INDEX_op_qemu_st64:
1319 tcg_out_qemu_st(s, args, 3);
1320 break;
8289b279 1321
a212ea75 1322#if TCG_TARGET_REG_BITS == 64
8289b279
BS
1323 case INDEX_op_movi_i64:
1324 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1325 break;
53cd9273
BS
1326 case INDEX_op_ld32s_i64:
1327 tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
1328 break;
8289b279
BS
1329 case INDEX_op_ld_i64:
1330 tcg_out_ldst(s, args[0], args[1], args[2], LDX);
1331 break;
1332 case INDEX_op_st_i64:
1333 tcg_out_ldst(s, args[0], args[1], args[2], STX);
1334 break;
1335 case INDEX_op_shl_i64:
1336 c = SHIFT_SLLX;
1fd95946
RH
1337 do_shift64:
1338 /* Limit immediate shift count lest we create an illegal insn. */
1339 tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
1340 break;
8289b279
BS
1341 case INDEX_op_shr_i64:
1342 c = SHIFT_SRLX;
1fd95946 1343 goto do_shift64;
8289b279
BS
1344 case INDEX_op_sar_i64:
1345 c = SHIFT_SRAX;
1fd95946 1346 goto do_shift64;
8289b279
BS
1347 case INDEX_op_mul_i64:
1348 c = ARITH_MULX;
ba225198 1349 goto gen_arith;
583d1215 1350 case INDEX_op_div_i64:
53cd9273 1351 c = ARITH_SDIVX;
ba225198 1352 goto gen_arith;
583d1215 1353 case INDEX_op_divu_i64:
8289b279 1354 c = ARITH_UDIVX;
ba225198 1355 goto gen_arith;
583d1215
RH
1356 case INDEX_op_rem_i64:
1357 case INDEX_op_remu_i64:
375816f8 1358 tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
583d1215 1359 opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
375816f8 1360 tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
583d1215 1361 ARITH_MULX);
375816f8 1362 tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
583d1215 1363 break;
cc6dfecf
RH
1364 case INDEX_op_ext32s_i64:
1365 if (const_args[1]) {
1366 tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
1367 } else {
1368 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
1369 }
1370 break;
1371 case INDEX_op_ext32u_i64:
1372 if (const_args[1]) {
1373 tcg_out_movi_imm32(s, args[0], args[1]);
1374 } else {
1375 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
1376 }
1377 break;
8289b279
BS
1378
1379 case INDEX_op_brcond_i64:
1da92db2
BS
1380 tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
1381 args[3]);
8289b279 1382 break;
dbfe80e1
RH
1383 case INDEX_op_setcond_i64:
1384 tcg_out_setcond_i64(s, args[3], args[0], args[1],
1385 args[2], const_args[2]);
1386 break;
ded37f0d
RH
1387 case INDEX_op_movcond_i64:
1388 tcg_out_movcond_i64(s, args[5], args[0], args[1],
1389 args[2], const_args[2], args[3], const_args[3]);
1390 break;
8289b279 1391#endif
ba225198
RH
1392 gen_arith:
1393 tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
53cd9273
BS
1394 break;
1395
4b5a85c1
RH
1396 gen_arith1:
1397 tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
1398 break;
1399
8289b279
BS
1400 default:
1401 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1402 tcg_abort();
1403 }
1404}
1405
/* Operand constraint table for the SPARC backend.
 *
 * Constraint letters (defined by this backend's constraint parser,
 * elsewhere in this file -- verify there):
 *   r  - any general register
 *   L  - register usable across the qemu_ld/st slow-path call
 *   Z  - the constant zero, satisfied by %g0 (hard zero register)
 *   J  - presumably a 13-bit signed immediate (simm13) -- TODO confirm
 *   I  - presumably a smaller signed immediate for movcc -- TODO confirm
 *   i  - any immediate
 *   0  - must match operand 0
 */
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_rem_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_remu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

#if TCG_TARGET_REG_BITS == 32
    /* Register-pair forms for 64-bit data on a 32-bit host.  */
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "rZ", "r" } },
    { INDEX_op_st16_i64, { "rZ", "r" } },
    { INDEX_op_st32_i64, { "rZ", "r" } },
    { INDEX_op_st_i64, { "rZ", "r" } },

    { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_rem_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_remu_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i64, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i64, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i64, { "r", "rJ" } },
    { INDEX_op_not_i64, { "r", "rJ" } },

    { INDEX_op_ext32s_i64, { "r", "ri" } },
    { INDEX_op_ext32u_i64, { "r", "ri" } },

    { INDEX_op_brcond_i64, { "rZ", "rJ" } },
    { INDEX_op_setcond_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i64, { "r", "rZ", "rJ", "rI", "0" } },
#endif

    /* qemu_ld/st operand counts depend on how many registers the guest
       address and the data each occupy on this host.  */
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif

    { -1 },  /* sentinel terminating the table */
};
1540
e4d58b41 1541static void tcg_target_init(TCGContext *s)
8289b279
BS
1542{
1543 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
a212ea75 1544#if TCG_TARGET_REG_BITS == 64
8289b279
BS
1545 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
1546#endif
1547 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
b3db8758
BS
1548 (1 << TCG_REG_G1) |
1549 (1 << TCG_REG_G2) |
1550 (1 << TCG_REG_G3) |
1551 (1 << TCG_REG_G4) |
1552 (1 << TCG_REG_G5) |
1553 (1 << TCG_REG_G6) |
1554 (1 << TCG_REG_G7) |
8289b279
BS
1555 (1 << TCG_REG_O0) |
1556 (1 << TCG_REG_O1) |
1557 (1 << TCG_REG_O2) |
1558 (1 << TCG_REG_O3) |
1559 (1 << TCG_REG_O4) |
1560 (1 << TCG_REG_O5) |
8289b279
BS
1561 (1 << TCG_REG_O7));
1562
1563 tcg_regset_clear(s->reserved_regs);
375816f8
RH
1564 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1565 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1566 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1567 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1568 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1569 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1570 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1571 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1572
8289b279
BS
1573 tcg_add_target_add_op_defs(sparc_op_defs);
1574}
cb1977d3
RH
1575
1576#if TCG_TARGET_REG_BITS == 64
1577# define ELF_HOST_MACHINE EM_SPARCV9
9b9c37c3 1578#else
cb1977d3
RH
1579# define ELF_HOST_MACHINE EM_SPARC32PLUS
1580# define ELF_HOST_FLAGS EF_SPARC_32PLUS
cb1977d3
RH
1581#endif
1582
/* In-memory image of a minimal DWARF .debug_frame section (one CIE plus
 * one FDE) describing the JIT-generated code to debuggers via
 * tcg_register_jit_int.  Field order and packing mirror the DWARF wire
 * format, hence the alignment/packed attributes.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));  /* CIE length, excluding this field */
    uint32_t id;              /* CIE id: -1 in .debug_frame */
    uint8_t version;
    char augmentation[1];     /* empty augmentation string */
    uint8_t code_align;
    uint8_t data_align;       /* SLEB128-encoded factor */
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));  /* FDE length, excluding this field */
    uint32_t cie_offset;      /* offset of the owning CIE */
    tcg_target_long func_start __attribute__((packed));  /* code range start */
    tcg_target_long func_len __attribute__((packed));    /* code range length */
    uint8_t def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];  /* CFA definition opcodes */
    uint8_t win_save;         /* window-save opcode byte */
    uint8_t ret_save[3];      /* return-address register mapping */
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;
1607
/* Hand-encoded unwind info for the JIT code buffer.  func_start/func_len
 * are filled in at runtime by tcg_register_jit.  The byte values in
 * def_cfa/win_save/ret_save are raw DW_CFA_* opcodes and operands.  */
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,  /* SLEB128 -wordsize */
    .cie.return_column = 15,            /* o7 */

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
#if TCG_TARGET_REG_BITS == 64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)  /* ULEB128 encoding of 2047 */
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde.win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde.ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
1628
1629void tcg_register_jit(void *buf, size_t buf_size)
1630{
1631 debug_frame.fde.func_start = (tcg_target_long) buf;
1632 debug_frame.fde.func_len = buf_size;
1633
1634 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1635}
5bbd2cae
RH
1636
1637void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1638{
1639 uint32_t *ptr = (uint32_t *)jmp_addr;
1640 tcg_target_long disp = (tcg_target_long)(addr - jmp_addr) >> 2;
1641
1642 /* We can reach the entire address space for 32-bit. For 64-bit
1643 the code_gen_buffer can't be larger than 2GB. */
1644 if (TCG_TARGET_REG_BITS == 64 && !check_fit_tl(disp, 30)) {
1645 tcg_abort();
1646 }
1647
1648 *ptr = CALL | (disp & 0x3fffffff);
1649 flush_icache_range(jmp_addr, jmp_addr + 4);
1650}
This page took 0.905656 seconds and 4 git commands to generate.