/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14",
};

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3,
    TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7,
    TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11,
    TCG_REG_R12, TCG_REG_R13, TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};
static void patch_reloc(uint8_t *code_ptr, int type,
                tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        *(uint32_t *) code_ptr = value;
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        tcg_abort();

    case R_ARM_PC24:
        /* Patch the 24-bit word offset of a b/bl, keeping the condition
         * and opcode bits; the ARM PC reads 8 bytes ahead. */
        *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
                (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
        break;
    }
}
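
/* Illustrative example (added, not part of the original source): patching
 * an R_ARM_PC24 branch at code_ptr == 0x1000 to reach value == 0x2000
 * stores ((0x2000 - (0x1000 + 8)) >> 2) & 0xffffff == 0x3fe in the 24-bit
 * field, because the ARM PC reads two instructions (8 bytes) ahead. */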
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;

    case 'r':
#ifndef CONFIG_SOFTMMU
    case 'd':
    case 'D':
    case 'x':
    case 'X':
#endif
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

#ifdef CONFIG_SOFTMMU
    /* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
    case 'x':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    /* qemu_ld64 data_reg */
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r1 is still needed to load data_reg2, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    /* qemu_ld/st64 data_reg2 */
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0, r1 and optionally r2 will be overwritten by the address
         * and the low word of data, so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
# if TARGET_LONG_BITS == 64
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
# endif
        break;

# if TARGET_LONG_BITS == 64
    /* qemu_ld/st addr_reg2 */
    case 'X':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 will be overwritten by the low word of base, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;
# endif
#endif

    case '1':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;

    case '2':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value, right-rotated by an even amount between 0 and 30. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
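
/* Worked example (added commentary): encode_imm(0xff000000) finds
 * ctz32(0xff000000) == 24, so the "simple even shift" case returns
 * 32 - 24 == 8.  Callers then emit rotl(0xff000000, 8) | (8 << 7),
 * i.e. imm8 == 0xff with the instruction's 4-bit rotate field set to 4,
 * and 0xff ror 8 == 0xff000000.  A value such as 0x101 spans more than
 * eight significant bits under every even rotation, so encode_imm
 * returns -1 and check_fit_imm rejects it. */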
/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val,
                const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
        return 1;
    else
        return 0;
}
enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};

#define TO_CPSR(opc) \
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)
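
/* Added commentary: these macros build bits 4-11 of a data-processing
 * instruction's shifter operand.  For example, SHIFT_IMM_LSR(1) is
 * (1 << 7) | 0x20, i.e. "rm, lsr #1", while SHIFT_IMM_LSL(0) leaves rm
 * unshifted and is used throughout this file as a plain register operand. */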
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* Emit only the condition/opcode byte of the branch, leaving the
     * 24-bit offset bytes untouched so retranslation keeps any prior
     * target. */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
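
/* Added commentary: the -8 in tcg_out_b/tcg_out_bl compensates for the ARM
 * PC reading 8 bytes past the current instruction.  A branch to itself has
 * offset == 0 and encodes the word offset (0 - 8) >> 2 == -2, i.e. 0xfffffe
 * in the 24-bit field. */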
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        /* rd0 would clobber an input of the second op: compute the low
         * word into r8 first, then move it into place. */
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}
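
/* Illustrative use (added commentary) of the rotated-immediate form: to
 * emit "add r0, r1, #0xff000000" one would write
 *
 *   int rot = encode_imm(0xff000000);
 *   tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R1,
 *                   rotl(0xff000000, rot) | (rot << 7));
 *
 * which is the same pattern tcg_out_op uses below for constant args. */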
static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, int32_t arg)
{
    int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);

    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions. */

    if (arg < 0 && arg > -0x100)
        return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);

    if (offset < 0x100 && offset > -0x100)
        return offset >= 0 ?
                tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
                tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);

#ifdef __ARM_ARCH_7A__
    /* use movw/movt */
    /* movw */
    tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
              | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
    if (arg & 0xffff0000)
        /* movt */
        tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                  | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
#else
    tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
    if (arg & 0x0000ff00)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 8) & 0xff) | 0xc00);
    if (arg & 0x00ff0000)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 16) & 0xff) | 0x800);
    if (arg & 0xff000000)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 24) & 0xff) | 0x400);
#endif
}
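
/* Illustrative example (added commentary): on ARMv7,
 * tcg_out_movi32(s, COND_AL, rd, 0x12345678) emits
 * "movw rd, #0x5678; movt rd, #0x1234".  On older cores it falls back to
 * "mov rd, #0x78" plus up to three orr instructions whose rotate fields
 * (0xc00, 0x800, 0x400) place each source byte at bits 8-15, 16-23 and
 * 24-31 respectively. */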
static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, 8, SHIFT_IMM_LSL(0));
    }
}
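
/* Added commentary: MUL with rd == rm is unpredictable on pre-v6 ARM
 * cores, so tcg_out_mul32 first tries to swap the commutative operands
 * and only falls back to multiplying into r8 plus a mov when
 * rd == rs == rm. */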
static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}

static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}
static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* Register pre-increment with base writeback. */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000f0 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000d0 |
                    (rn << 16) | (rd << 12) | rm);
}
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
#endif
    }
}
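
/* Added commentary: "ldr pc, [pc, #-4]" works because the PC reads 8 bytes
 * ahead, so pc - 4 addresses the literal word emitted directly after the
 * load; execution continues at that absolute address. */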
static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
#endif

    val = addr - (tcg_target_long) s->code_ptr;
    if (val < 0x01fffffd && val > -0x01fffffd)
        tcg_out_bl(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4);
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
            /* mov lr, pc */
            tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
            tcg_out_bx(s, cond, TCG_REG_R9);
        }
#endif
    }

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
#endif
}

static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
#endif
    /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
    tcg_out_bx(s, cond, arg);
#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
#endif
}
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
static void tcg_out_div_helper(TCGContext *s, int cond, const TCGArg *args,
                void *helper_div, void *helper_rem, int shift)
{
    int div_reg = args[0];
    int rem_reg = args[1];

    /* stmdb sp!, { r0 - r3, ip, lr } */
    /* (Note that we need an even number of registers as per EABI) */
    tcg_out32(s, (cond << 28) | 0x092d500f);

    tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);

    tcg_out_call(s, cond, (uint32_t) helper_div);
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 0, SHIFT_IMM_LSL(0));

    /* ldmia sp, { r0 - r3, ip, lr } */
    tcg_out32(s, (cond << 28) | 0x089d500f);

    tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);

    tcg_out_call(s, cond, (uint32_t) helper_rem);

    tcg_out_dat_reg(s, cond, ARITH_MOV, rem_reg, 0, 0, SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, div_reg, 0, 8, SHIFT_IMM_LSL(0));

    /* ldr r0, [sp], #4 */
    if (rem_reg != 0 && div_reg != 0) {
        tcg_out32(s, (cond << 28) | 0x04bd0004);
    } else
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);

    /* ldr r1, [sp], #4 */
    if (rem_reg != 1 && div_reg != 1) {
        tcg_out32(s, (cond << 28) | 0x04bd1004);
    } else
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);

    /* ldr r2, [sp], #4 */
    if (rem_reg != 2 && div_reg != 2) {
        tcg_out32(s, (cond << 28) | 0x04bd2004);
    } else
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);

    /* ldr r3, [sp], #4 */
    if (rem_reg != 3 && div_reg != 3) {
        tcg_out32(s, (cond << 28) | 0x04bd3004);
    } else
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);

    /* ldr ip, [sp], #4 */
    if (rem_reg != 12 && div_reg != 12) {
        tcg_out32(s, (cond << 28) | 0x04bdc004);
    } else
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);

    /* ldr lr, [sp], #4 */
    if (rem_reg != 14 && div_reg != 14) {
        tcg_out32(s, (cond << 28) | 0x04bde004);
    } else
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);
}
#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
#  if CPU_TLB_BITS > 8
#   error
#  endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
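    /* Added commentary: the immediate above is the rotated-immediate
     * encoding of mem_index << TLB_SHIFT: imm8 = mem_index << (TLB_SHIFT & 1),
     * rotated right by 2 * (16 - (TLB_SHIFT >> 1)) bit positions, i.e.
     * rotated left by TLB_SHIFT & ~1, which together shift mem_index left
     * by exactly TLB_SHIFT bits. */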
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        tcg_out_ld32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
# endif

    /* TODO: move this code to where the constants pool will be */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
# else
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
# endif
    tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

    switch (opc) {
    case 0 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, 0, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, 0, SHIFT_IMM_ASR(24));
        break;
    case 1 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, 0, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, 0, SHIFT_IMM_ASR(16));
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
        break;
    case 3:
        if (data_reg != 0)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
        if (data_reg2 != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg2, 0, 1, SHIFT_IMM_LSL(0));
        break;
    }

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
# endif

    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
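    /* Added commentary: the "tcg_out_b(s, COND_EQ, 8)" above emitted a
     * branch whose 24-bit field encodes offset 0; the preceding line
     * back-patches that field with the word distance to the current
     * output point, so a TLB hit skips the slow-path helper call. */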
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
                            ((offset >> i) & 0xff) | rot);

            addr_reg = 8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
static inline void tcg_out_qemu_st(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_write)
                    + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);
    /* TODO: move this code to where the constants pool will be */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, 1, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 2:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, data_reg2, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    }
# else
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, 2, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 2:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index);
        tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != 3)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            3, 0, data_reg2, SHIFT_IMM_LSL(0));
        break;
    }
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
# endif

    tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);
# if TARGET_LONG_BITS == 64
    if (opc == 3)
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
# endif

    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
                            ((offset >> i) & 0xff) | rot);

            addr_reg = 8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        break;
    }
#endif
}
static uint8_t *tb_ret_addr;

static inline void tcg_out_op(TCGContext *s, int opc,
                const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
#ifdef SAVE_LR
        if (args[0] >> 8)
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
        else
            tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0));
        if (args[0] >> 8)
            tcg_out32(s, args[0]);
#else
        {
            uint8_t *ld_ptr = s->code_ptr;
            if (args[0] >> 8)
                tcg_out_ld32_12(s, COND_AL, 0, 15, 0);
            else
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            if (args[0] >> 8) {
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);
            }
        }
#endif
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b(s, COND_AL, 8);
#else
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, 15, 15, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
            tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, COND_AL, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_jmp:
        if (const_args[0])
            tcg_out_goto(s, COND_AL, args[0]);
        else
            tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;
    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through. */
    gen_arith:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, c,
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, c,
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_div2_i32:
        tcg_out_div_helper(s, COND_AL, args,
                        tcg_helper_div_i64, tcg_helper_rem_i64,
                        SHIFT_IMM_ASR(31));
        break;
    case INDEX_op_divu2_i32:
        tcg_out_div_helper(s, COND_AL, args,
                        tcg_helper_divu_i64, tcg_helper_remu_i64,
                        SHIFT_IMM_LSR(31));
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        /* Fall through. */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;
    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP,
                            0, args[0], rotl(args[1], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP,
                            0, args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, COND_AL, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, COND_AL, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, COND_AL, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, COND_AL, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, COND_AL, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, COND_AL, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, COND_AL, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, COND_AL, args, 3);
        break;
    case INDEX_op_ext8s_i32:
#ifdef __ARM_ARCH_7A__
        /* sxtb */
        tcg_out32(s, 0xe6af0070 | (args[0] << 12) | args[1]);
#else
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[0], SHIFT_IMM_ASR(24));
#endif
        break;
    case INDEX_op_ext16s_i32:
#ifdef __ARM_ARCH_7A__
        /* sxth */
        tcg_out32(s, 0xe6bf0070 | (args[0] << 12) | args[1]);
#else
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[0], SHIFT_IMM_ASR(16));
#endif
        break;

    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_div2_i32, { "r", "r", "r", "1", "2" } },
    { INDEX_op_divu2_i32, { "r", "r", "r", "1", "2" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

    { INDEX_op_qemu_ld8u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld8s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld32u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld64, { "d", "r", "x", "X" } },

    { INDEX_op_qemu_st8, { "x", "x", "X" } },
    { INDEX_op_qemu_st16, { "x", "x", "X" } },
    { INDEX_op_qemu_st32, { "x", "x", "X" } },
    { INDEX_op_qemu_st64, { "x", "D", "x", "X" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { -1 },
};
void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
                    ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8));
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                    ((2 << TCG_REG_R3) - 1) |
                    (1 << TCG_REG_R12) | (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
#ifdef SAVE_LR
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14);
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);

    tcg_add_target_add_op_defs(arm_op_defs);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val > 0) {
        if (val < 0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
        else
            tcg_abort();
    } else if (val < 0) {
        if (val > -0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
        else
            tcg_abort();
    }
}

static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmdb sp!, { r9 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4e00);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r9 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8e00);
}
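
/* Added commentary: the prologue saves the call-saved registers that
 * generated code may clobber (r9-r11) together with lr, then enters the
 * translation-block code via "bx r0".  tb_ret_addr records the address of
 * the matching "ldmia sp!, { r9 - r11, pc }", which INDEX_op_exit_tb
 * reaches through tcg_out_goto in order to return to the C caller. */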