/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "tcg-pool.inc.c"
int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif

/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined.  */
#ifdef CONFIG_SOFTMMU
# define USING_SOFTMMU 1
#else
# define USING_SOFTMMU 0
#endif
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%pc",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7,
    TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11,
    TCG_REG_R13, TCG_REG_R0, TCG_REG_R1, TCG_REG_R2,
    TCG_REG_R3, TCG_REG_R12, TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};
#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)
typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,
} ARMInsn;

#define INSN_NOP   (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)
static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
static inline bool reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
    if (offset == sextract32(offset, 0, 24)) {
        *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
        return true;
    }
    return false;
}

static inline bool reloc_pc13(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = tcg_ptr_byte_diff(target, code_ptr) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *code_ptr;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *code_ptr = insn;
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);

    if (type == R_ARM_PC24) {
        return reloc_pc24(code_ptr, (tcg_insn_unit *)value);
    } else if (type == R_ARM_PC13) {
        return reloc_pc13(code_ptr, (tcg_insn_unit *)value);
    } else {
        g_assert_not_reached();
    }
}
#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800

/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_INV;
        break;
    case 'N': /* The gcc constraint letter is L, already used here.  */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
#ifdef CONFIG_SOFTMMU
        /* r0-r2,lr will be overwritten when reading the tlb entry,
           so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    /* qemu_st address & data */
    case 's':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffff;
        /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
           and r0-r1 doing the byte swapping, so don't use these.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    default:
        return NULL;
    }
    return ct_str;
}
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value right-rotated by an even amount between 0 and 30.  */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0) {
        return 0;
    }
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0) {
        return 32 - shift;
    }
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0) {
        return 2;
    }
    if ((rotl(imm, 4) & ~0xff) == 0) {
        return 4;
    }
    if ((rotl(imm, 6) & ~0xff) == 0) {
        return 6;
    }
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
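
/*
 * A worked example (illustrative only): 0x00ab0000 is 0xab rotated
 * right by 16, so encode_imm(0x00ab0000) takes the "simple even shift"
 * path and returns 16; the emitter then encodes imm8 = 0xab with the
 * 4-bit rotate field set to 16/2 = 8 via rotl(arg, rot) | (rot << 7).
 * A value such as 0x00ab00ab has no 8-bit/rotation form and yields -1.
 */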
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else {
        return 0;
    }
}
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, INSN_NOP);
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
{
    /* Unless the C portion of QEMU is compiled as thumb, we don't
       actually need true BX semantics; merely a branch to an address
       held in a register.  */
    if (use_armv5t_instructions) {
        tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
    } else {
        tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}
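
/*
 * For illustration, with the helpers below:
 * tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R1, 4) assembles to
 * 0xe5910004, i.e. "ldr r0, [r1, #4]" -- INSN_LDR_IMM with u=1 (add
 * offset), p=1 (offset addressing) and w=0 (no base writeback).
 */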
static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}
static void tcg_out_movi_pool(TCGContext *s, int cond, int rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else.  */
    rot = encode_imm(arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                        rotl(arg, rot) | (rot << 7));
        return;
    }
    rot = encode_imm(~arg);
    if (rot >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                        rotl(~arg, rot) | (rot << 7));
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block.  */
    diff = arg - ((intptr_t)s->code_ptr + 8);
    if (diff >= 0) {
        rot = encode_imm(diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC,
                            rotl(diff, rot) | (rot << 7));
            return;
        }
    } else {
        rot = encode_imm(-diff);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC,
                            rotl(-diff, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor.  */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool.  */
    tcg_out_movi_pool(s, cond, rd, arg);
}
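
/*
 * A worked example of the two-insn path (illustrative only): for
 * arg = 0x00ff00ff, ctpop32(arg) == 16 so opc stays ARITH_MOV;
 * sh1 = 0 leaves tt1 = 0x00ff0000 and sh2 = 16 leaves tt2 = 0,
 * so we emit "mov rd, #0xff" followed by "eor rd, rd, #0x00ff0000".
 */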
static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        tcg_debug_assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = ~rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = -rhs;
            rot = encode_imm(rhs);
            tcg_debug_assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
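
/*
 * For illustration: tcg_out_sdiv(s, COND_AL, 0, 1, 2) emits 0xe710f211,
 * i.e. "sdiv r0, r1, r2" -- rd in bits 16-19, rm in bits 8-11 and rn in
 * bits 0-3 of the encoding.
 */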
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

/* Swap the two low bytes assuming that the two high input bytes and the
   two high output bytes can hold any value.  */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}
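
/*
 * The pre-armv6 bswap32 path above is the classic eor/bic/eor byte swap.
 * For illustration, with rn = 0x11223344:
 *   eor tmp, rn, rn, ror #16   ; tmp = 0x22662266
 *   bic tmp, tmp, #0x00ff0000  ; tmp = 0x22002266
 *   mov rd, rn, ror #8         ; rd  = 0x44112233
 *   eor rd, rd, tmp, lsr #8    ; rd  = 0x44332211
 */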
static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}

static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
                                    TCGArg a1, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((len - 1) << 16));
}
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}
/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);
        return;
    }
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (addri & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, disp);
        } else {
            tcg_out_bl(s, COND_AL, disp);
        }
    } else if (use_armv7_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        /* ??? Know that movi_pool emits exactly 1 insn.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 0);
        tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
    }
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b(s, cond, 0);
    }
}
static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else if (use_armv6_instructions) {
        tcg_out32(s, INSN_DMB_MCR);
    }
}
static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract.  */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}
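
/*
 * For illustration, for TCG_COND_LTU the pair emitted above is
 *   cmp   ah, bh
 *   cmpeq al, bl
 * which leaves the C flag describing the full 64-bit unsigned compare,
 * so the caller can test the original condition unchanged.
 */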
#ifdef CONFIG_SOFTMMU
#include "tcg-ldst.inc.c"

/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,

    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LESL] = helper_le_ldul_mmu,

    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BESL] = helper_be_ldul_mmu,
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argreg 0..3 are real registers, 4+ on stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (use_armv6_instructions && argreg >= 4
        && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}
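
/*
 * For illustration: marshalling (env, 64-bit addr, oi) when
 * TARGET_LONG_BITS == 64 puts env in r0, bumps argreg from 1 to 2 so
 * the address lands in the even/odd pair r2:r3, and leaves oi to be
 * spilled into the first stack slot.
 */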
#define TLB_SHIFT  (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               TCGMemOp opc, int mem_index, bool is_load)
{
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R2, TCG_AREG0, table_off);

    /* Extract the tlb index from the address into TMP.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R2.
     * Load the tlb comparator into R0/R1 and the fast path addend into R2.
     */
    if (cmp_off == 0) {
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R0, TCG_REG_R2, TCG_REG_TMP);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R0, TCG_REG_R2, TCG_REG_TMP);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R2, TCG_REG_R2, TCG_REG_TMP, 0);
        if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off);
        } else {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off);
        }
    }
    if (!use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2, cmp_off + 4);
    }

    /* Load the tlb addend.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R2,
                    offsetof(CPUTLBEntry, addend));

    /* Check alignment.  We don't support inline unaligned accesses,
       but we can easily support overalignment checks.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }

    if (use_armv7_instructions) {
        tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));
        int rot = encode_imm(mask);

        if (rot >= 0) {
            tcg_out_dat_imm(s, COND_AL, ARITH_BIC, TCG_REG_TMP, addrlo,
                            rotl(mask, rot) | (rot << 7));
        } else {
            tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
            tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                            addrlo, TCG_REG_TMP, 0);
        }
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R0, TCG_REG_TMP, 0);
    } else {
        if (a_bits) {
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
                            (1 << a_bits) - 1);
        }
        tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R0, TCG_REG_TMP,
                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    }

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R1, addrhi, 0);
    }

    return TCG_REG_R2;
}
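
/*
 * A loose sketch of the fast path emitted above, assuming a 32-bit
 * guest, armv7, cmp_off == 0 and a naturally aligned access:
 *   ldr  tmp, [env, #mask_off]      ; TLB mask for this mmu_idx
 *   ldr  r2,  [env, #table_off]     ; TLB table for this mmu_idx
 *   and  tmp, tmp, addrlo, lsr #(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)
 *   ldr  r0,  [r2, tmp]!            ; comparator, r2 now -> CPUTLBEntry
 *   ldr  r2,  [r2, #addend_off]     ; tlb addend
 *   bic  tmp, addrlo, #page-and-alignment bits
 *   cmp  r0, tmp                    ; EQ iff the TLB entry hits
 */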
/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                TCGReg addrhi, tcg_insn_unit *raddr,
                                tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);
    void *func;

    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* For armv6 we can use the canonical unsigned helpers and minimize
       icache usage.  For pre-armv6, use the signed helpers since we do
       not have a single insn sign-extend.  */
    if (use_armv6_instructions) {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)];
        if (opc & MO_SIGN) {
            opc = MO_UL;
        }
    }
    tcg_out_call(s, func);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, COND_AL, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_Q:
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    TCGMemOpIdx oi = lb->oi;
    TCGMemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], s->code_ptr)) {
        return false;
    }

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
    return true;
}
#endif /* SOFTMMU */
static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SB:
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UW:
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
            } else if (dl != addend) {
                tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
                tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                                addend, addrlo, SHIFT_IMM_LSL(0));
                tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
                tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SB:
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UW:
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_SW:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
            tcg_out_bswap16s(s, COND_AL, datalo, datalo);
        } else {
            tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_UL:
    default:
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, datalo, datalo);
        }
        break;
    case MO_Q:
        {
            TCGReg dl = (bswap ? datahi : datalo);
            TCGReg dh = (bswap ? datalo : datahi);

            /* Avoid ldrd for user-only emulation, to handle unaligned.  */
            if (USING_SOFTMMU && use_armv6_instructions
                && (dl & 1) == 0 && dh == dl + 1) {
                tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
            } else if (dl == addrlo) {
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
            } else {
                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
            }
            if (bswap) {
                tcg_out_bswap32(s, COND_AL, dl, dl);
                tcg_out_bswap32(s, COND_AL, dh, dh);
            }
        }
        break;
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    TCGMemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);

    /* This is a conditional BL only to load a pointer within this opcode into
       LR for the slow path.  We will not be using the value for a tail call.  */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);

    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}
static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc,
                                         TCGReg datalo, TCGReg datahi,
                                         TCGReg addrlo, TCGReg addend)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, cond, TCG_REG_R0, datalo);
            tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
        } else {
            tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
            tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_r(s, cond, datalo, addrlo, addend);
        } else {
            tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
            tcg_out_st32_12(s, cond, datahi, addend, 4);
        }
        break;
    }
}

static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo)
{
    TCGMemOp bswap = opc & MO_BSWAP;

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_16:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_32:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
        }
        break;
    case MO_64:
        /* Avoid strd for user-only emulation, to handle unaligned.  */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
        } else if (USING_SOFTMMU && use_armv6_instructions
                   && (datalo & 1) == 0 && datahi == datalo + 1) {
            tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    TCGMemOpIdx oi;
    TCGMemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);

    tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);

    /* The conditional call must come last, as we're going to return here.  */
    label_ptr = s->code_ptr;
    tcg_out_bl(s, COND_NE, 0);

    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (guest_base) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
        tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
                              datahi, addrlo, TCG_REG_TMP);
    } else {
        tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}
static tcg_insn_unit *tb_ret_addr;

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr.  */
        a0 = args[0];
        if (a0 == 0) {
            tcg_out_goto(s, COND_AL, s->code_gen_epilogue);
        } else {
            tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
            tcg_out_goto(s, COND_AL, tb_ret_addr);
        }
        break;
    case INDEX_op_goto_tb:
        {
            /* Indirect jump method */
            intptr_t ptr, dif, dil;
            TCGReg base = TCG_REG_PC;

            tcg_debug_assert(s->tb_jmp_insn_offset == 0);
            ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
            dif = ptr - ((intptr_t)s->code_ptr + 8);
            dil = sextract32(dif, 0, 12);
            if (dif != dil) {
                /* The TB is close, but outside the 12 bits addressable by
                   the load.  We can extend this to 20 bits with a sub of a
                   shifted immediate from pc.  In the vastly unlikely event
                   the code requires more than 1MB, we'll use 2 insns and
                   a movi.  */
                base = TCG_REG_TMP;
                tcg_out_movi32(s, COND_AL, base, ptr - dil);
            }
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
            set_jmp_reset_offset(s, args[0]);
        }
        break;
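    /*
     * For illustration: sextract32(dif, 0, 12) yields dil in
     * [-0x800, 0x7ff] with dif == dil (mod 0x1000), so once base holds
     * ptr - dil the final "ldr pc, [base, #dil]" always dereferences
     * ptr exactly; the movi is only needed when dif != dil.
     */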
    case INDEX_op_goto_ptr:
        tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through.  */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through.  */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;

    case INDEX_op_ctz_i32:
        tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
        a1 = TCG_REG_TMP;
        goto do_clz;

    case INDEX_op_clz_i32:
        a1 = args[1];
    do_clz:
        a0 = args[0];
        a2 = args[2];
        c = const_args[2];
        if (c && a2 == 32) {
            tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
            break;
        }
        tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
        tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
        if (c || a0 != a2) {
            tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
                           arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_brcond2_i32:
        c = tcg_out_cmp2(s, args, const_args);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
        break;
    case INDEX_op_setcond2_i32:
        c = tcg_out_cmp2(s, args + 1, const_args + 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_extract_i32:
        tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_sextract_i32:
        tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_extract2_i32:
        /* ??? These optimizations vs zero should be generic.  */
        /* ??? But we can't substitute 2 for 1 in the opcode stream yet.  */
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
            } else {
                tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                                args[2], SHIFT_IMM_LSL(32 - args[3]));
            }
        } else if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
                            args[1], SHIFT_IMM_LSR(args[3]));
        } else {
            /* We can do extract2 in 2 insns, vs the 3 required otherwise.  */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
                            args[2], SHIFT_IMM_LSL(32 - args[3]));
            tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
                            args[1], SHIFT_IMM_LSR(args[3]));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef s_s = { .args_ct_str = { "s", "s" } };
    static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } };
    static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
    static const TCGTargetOpDef r_r_l = { .args_ct_str = { "r", "r", "l" } };
    static const TCGTargetOpDef r_l_l = { .args_ct_str = { "r", "l", "l" } };
    static const TCGTargetOpDef s_s_s = { .args_ct_str = { "s", "s", "s" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_r_rIN
        = { .args_ct_str = { "r", "r", "rIN" } };
    static const TCGTargetOpDef r_r_rIK
        = { .args_ct_str = { "r", "r", "rIK" } };
    static const TCGTargetOpDef r_r_r_r
        = { .args_ct_str = { "r", "r", "r", "r" } };
    static const TCGTargetOpDef r_r_l_l
        = { .args_ct_str = { "r", "r", "l", "l" } };
    static const TCGTargetOpDef s_s_s_s
        = { .args_ct_str = { "s", "s", "s", "s" } };
    static const TCGTargetOpDef br
        = { .args_ct_str = { "r", "rIN" } };
    static const TCGTargetOpDef ext2
        = { .args_ct_str = { "r", "rZ", "rZ" } };
    static const TCGTargetOpDef dep
        = { .args_ct_str = { "r", "0", "rZ" } };
    static const TCGTargetOpDef movc
        = { .args_ct_str = { "r", "r", "rIN", "rIK", "0" } };
    static const TCGTargetOpDef add2
        = { .args_ct_str = { "r", "r", "r", "r", "rIN", "rIK" } };
    static const TCGTargetOpDef sub2
        = { .args_ct_str = { "r", "r", "rI", "rI", "rIN", "rIK" } };
    static const TCGTargetOpDef br2
        = { .args_ct_str = { "r", "r", "rI", "rI" } };
    static const TCGTargetOpDef setc2
        = { .args_ct_str = { "r", "r", "r", "rI", "rI" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16u_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_sextract_i32:
        return &r_r;

    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return &r_r_rIN;
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
        return &r_r_rIK;
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return &r_r_r;
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return &r_r_r_r;
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
        return &r_r_rI;
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
        return &br;
    case INDEX_op_deposit_i32:
        return &dep;
    case INDEX_op_extract2_i32:
        return &ext2;
    case INDEX_op_movcond_i32:
        return &movc;
    case INDEX_op_add2_i32:
        return &add2;
    case INDEX_op_sub2_i32:
        return &sub2;
    case INDEX_op_brcond2_i32:
        return &br2;
    case INDEX_op_setcond2_i32:
        return &setc2;

    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l;
    case INDEX_op_qemu_ld_i64:
        return TARGET_LONG_BITS == 32 ? &r_r_l : &r_r_l_l;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s;
    case INDEX_op_qemu_st_i64:
        return TARGET_LONG_BITS == 32 ? &s_s_s : &s_s_s_s;

    default:
        return NULL;
    }
}
static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time.  */
#ifndef use_idiv_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
    }
#endif
    if (__ARM_ARCH < 7) {
        const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
        if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
            arm_arch = pl[1] - '0';
        }
    }

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

static inline bool tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_mov_reg(s, COND_AL, ret, arg);
    return true;
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = INSN_NOP;
    }
}
/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE  ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & -TCG_TARGET_STACK_ALIGN)
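
/*
 * For illustration: PUSH_SIZE covers the 9 words saved by the stmdb in
 * the prologue (r4-r11 plus lr), i.e. 36 bytes on a 32-bit host, and
 * FRAME_SIZE rounds that plus the call-argument and temp-buffer areas
 * up to the next multiple of TCG_TARGET_STACK_ALIGN.
 */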
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int stack_addend;

    /* Calling convention requires us to save r4-r11 and lr.  */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);

    /* Reserve callee argument and tcg temp space.  */
    stack_addend = FRAME_SIZE - PUSH_SIZE;

    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

#define ELF_HOST_MACHINE EM_ARM

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x7c,             /* sleb128 -4 */
    .h.cie.return_column = 14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 13,                         /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* The following must match the stmdb in the prologue.  */
        0x8e, 1,                        /* DW_CFA_offset, lr, -4 */
        0x8b, 2,                        /* DW_CFA_offset, r11, -8 */
        0x8a, 3,                        /* DW_CFA_offset, r10, -12 */
        0x89, 4,                        /* DW_CFA_offset, r9, -16 */
        0x88, 5,                        /* DW_CFA_offset, r8, -20 */
        0x87, 6,                        /* DW_CFA_offset, r7, -24 */
        0x86, 7,                        /* DW_CFA_offset, r6, -28 */
        0x85, 8,                        /* DW_CFA_offset, r5, -32 */
        0x84, 9,                        /* DW_CFA_offset, r4, -36 */
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}