2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Andrzej Zaborowski
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #if defined(__ARM_ARCH_7__) || \
26 defined(__ARM_ARCH_7A__) || \
27 defined(__ARM_ARCH_7EM__) || \
28 defined(__ARM_ARCH_7M__) || \
29 defined(__ARM_ARCH_7R__)
30 #define USE_ARMV7_INSTRUCTIONS
33 #if defined(USE_ARMV7_INSTRUCTIONS) || \
34 defined(__ARM_ARCH_6J__) || \
35 defined(__ARM_ARCH_6K__) || \
36 defined(__ARM_ARCH_6T2__) || \
37 defined(__ARM_ARCH_6Z__) || \
38 defined(__ARM_ARCH_6ZK__)
39 #define USE_ARMV6_INSTRUCTIONS
42 #if defined(USE_ARMV6_INSTRUCTIONS) || \
43 defined(__ARM_ARCH_5T__) || \
44 defined(__ARM_ARCH_5TE__) || \
45 defined(__ARM_ARCH_5TEJ__)
46 #define USE_ARMV5_INSTRUCTIONS
49 #ifdef USE_ARMV5_INSTRUCTIONS
50 static const int use_armv5_instructions = 1;
52 static const int use_armv5_instructions = 0;
54 #undef USE_ARMV5_INSTRUCTIONS
56 #ifdef USE_ARMV6_INSTRUCTIONS
57 static const int use_armv6_instructions = 1;
59 static const int use_armv6_instructions = 0;
61 #undef USE_ARMV6_INSTRUCTIONS
63 #ifdef USE_ARMV7_INSTRUCTIONS
64 static const int use_armv7_instructions = 1;
66 static const int use_armv7_instructions = 0;
68 #undef USE_ARMV7_INSTRUCTIONS
71 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
91 static const int tcg_target_reg_alloc_order[] = {
109 static const int tcg_target_call_iarg_regs[4] = {
110 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
112 static const int tcg_target_call_oarg_regs[2] = {
113 TCG_REG_R0, TCG_REG_R1
116 static void patch_reloc(uint8_t *code_ptr, int type,
117 tcg_target_long value, tcg_target_long addend)
121 *(uint32_t *) code_ptr = value;
130 *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
131 (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
136 /* maximum number of registers used for input function arguments */
137 static inline int tcg_target_get_call_iarg_regs_count(int flags)
142 /* parse target specific constraints */
143 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
150 ct->ct |= TCG_CT_CONST_ARM;
154 ct->ct |= TCG_CT_REG;
155 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
158 /* qemu_ld address */
160 ct->ct |= TCG_CT_REG;
161 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
162 #ifdef CONFIG_SOFTMMU
163 /* r0 and r1 will be overwritten when reading the tlb entry,
164 so don't use these. */
165 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
166 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
170 ct->ct |= TCG_CT_REG;
171 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
172 #ifdef CONFIG_SOFTMMU
173 /* r1 is still needed to load data_reg or data_reg2, so don't use it. */
175 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
179 /* qemu_st address & data_reg */
181 ct->ct |= TCG_CT_REG;
182 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
183 /* r0 and r1 will be overwritten when reading the tlb entry
184 (softmmu only) and doing the byte swapping, so don't use these. */
186 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
187 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
189 /* qemu_st64 data_reg2 */
191 ct->ct |= TCG_CT_REG;
192 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
193 /* r0 and r1 will be overwritten when reading the tlb entry
194 (softmmu only) and doing the byte swapping, so don't use these. */
196 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
197 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
198 #ifdef CONFIG_SOFTMMU
199 /* r2 is still needed to load data_reg, so don't use it. */
200 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
213 static inline uint32_t rotl(uint32_t val, int n)
215 return (val << n) | (val >> (32 - n));
218 /* ARM immediates for ALU instructions are made of an unsigned 8-bit value
219 right-rotated by an even amount between 0 and 30. */
220 static inline int encode_imm(uint32_t imm)
224 /* simple case, only lower bits */
225 if ((imm & ~0xff) == 0)
227 /* then try a simple even shift */
228 shift = ctz32(imm) & ~1;
229 if (((imm >> shift) & ~0xff) == 0)
231 /* now try harder with rotations */
232 if ((rotl(imm, 2) & ~0xff) == 0)
234 if ((rotl(imm, 4) & ~0xff) == 0)
236 if ((rotl(imm, 6) & ~0xff) == 0)
238 /* imm can't be encoded */
242 static inline int check_fit_imm(uint32_t imm)
244 return encode_imm(imm) >= 0;
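/* For illustration only (not part of the backend): the inverse mapping,
 * expanding a 12-bit ARM "operand2" field back into its 32-bit value.
 * This assumes the standard A32 layout relied on above: bits [7:0] hold
 * the 8-bit constant and bits [11:8] hold half of the right-rotation
 * amount. */
static inline uint32_t decode_imm(uint32_t operand2)
{
    uint32_t imm8 = operand2 & 0xff;
    uint32_t rot = ((operand2 >> 8) & 0xf) * 2;

    /* rotate imm8 right by rot bits (rot == 0 needs no rotation) */
    return rot ? ((imm8 >> rot) | (imm8 << (32 - rot))) : imm8;
}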
247 /* Test if a constant matches the constraint.
248 * TODO: define constraints for:
250 * ldr/str offset: between -0xfff and 0xfff
251 * ldrh/strh offset: between -0xff and 0xff
252 * mov operand2: values represented with x << (2 * y), x < 0x100
253 * add, sub, eor...: ditto
255 static inline int tcg_target_const_match(tcg_target_long val,
256 const TCGArgConstraint *arg_ct)
260 if (ct & TCG_CT_CONST)
262 else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
268 enum arm_data_opc_e {
286 #define TO_CPSR(opc) \
287 ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)
289 #define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
290 #define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
291 #define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
292 #define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
293 #define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
294 #define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
295 #define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
296 #define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
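/* A few worked examples of the operand2 helpers above (for reference):
 * SHIFT_IMM_LSL(3) == 0x180 -- imm5 = 3 in bits [11:7], shift type 00 (LSL),
 *                              bit 4 clear for an immediate shift amount;
 * SHIFT_REG_ROR(3) == 0x370 -- Rs = r3 in bits [11:8], shift type ROR,
 *                              bit 4 set for a register shift amount;
 * TO_CPSR(ARITH_CMP) sets bit 20 (the S bit), so CMP, CMN and TST actually
 * update the condition flags while the other ALU ops leave them alone. */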
298 enum arm_cond_code_e {
301 COND_CS = 0x2, /* Unsigned greater or equal */
302 COND_CC = 0x3, /* Unsigned less than */
303 COND_MI = 0x4, /* Negative */
304 COND_PL = 0x5, /* Zero or greater */
305 COND_VS = 0x6, /* Overflow */
306 COND_VC = 0x7, /* No overflow */
307 COND_HI = 0x8, /* Unsigned greater than */
308 COND_LS = 0x9, /* Unsigned less or equal */
316 static const uint8_t tcg_cond_to_arm_cond[10] = {
317 [TCG_COND_EQ] = COND_EQ,
318 [TCG_COND_NE] = COND_NE,
319 [TCG_COND_LT] = COND_LT,
320 [TCG_COND_GE] = COND_GE,
321 [TCG_COND_LE] = COND_LE,
322 [TCG_COND_GT] = COND_GT,
324 [TCG_COND_LTU] = COND_CC,
325 [TCG_COND_GEU] = COND_CS,
326 [TCG_COND_LEU] = COND_LS,
327 [TCG_COND_GTU] = COND_HI,
330 static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
332 tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
335 static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
337 tcg_out32(s, (cond << 28) | 0x0a000000 |
338 (((offset - 8) >> 2) & 0x00ffffff));
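/* Note that 'offset' is the byte distance from the branch instruction
 * itself; the -8 compensates for the ARM convention that a branch target
 * is relative to PC + 8.  For example, tcg_out_b(s, cond, 8) produces an
 * offset field of 0, i.e. a branch to the second instruction after the
 * branch -- which is what the softmmu paths below emit as a patchable
 * placeholder. */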
341 static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
343 #ifdef HOST_WORDS_BIGENDIAN
344 tcg_out8(s, (cond << 4) | 0x0a);
348 tcg_out8(s, (cond << 4) | 0x0a);
352 static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
354 tcg_out32(s, (cond << 28) | 0x0b000000 |
355 (((offset - 8) >> 2) & 0x00ffffff));
358 static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
360 tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
363 static inline void tcg_out_dat_reg(TCGContext *s,
364 int cond, int opc, int rd, int rn, int rm, int shift)
366 tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
367 (rn << 16) | (rd << 12) | shift | rm);
370 static inline void tcg_out_dat_reg2(TCGContext *s,
371 int cond, int opc0, int opc1, int rd0, int rd1,
372 int rn0, int rn1, int rm0, int rm1, int shift)
374 if (rd0 == rn1 || rd0 == rm1) {
375 tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
376 (rn0 << 16) | (8 << 12) | shift | rm0);
377 tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
378 (rn1 << 16) | (rd1 << 12) | shift | rm1);
379 tcg_out_dat_reg(s, cond, ARITH_MOV,
380 rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
382 tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
383 (rn0 << 16) | (rd0 << 12) | shift | rm0);
384 tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
385 (rn1 << 16) | (rd1 << 12) | shift | rm1);
389 static inline void tcg_out_dat_imm(TCGContext *s,
390 int cond, int opc, int rd, int rn, int im)
392 tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
393 (rn << 16) | (rd << 12) | im);
396 static inline void tcg_out_movi32(TCGContext *s,
397 int cond, int rd, int32_t arg)
399 int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);
401 /* TODO: This is very suboptimal; we could easily use a constant
402 * pool placed after all the instructions. */
404 if (arg < 0 && arg > -0x100)
405 return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
407 if (offset < 0x100 && offset > -0x100)
409 tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
410 tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);
412 if (use_armv7_instructions) {
415 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
416 | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
417 if (arg & 0xffff0000)
419 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
420 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
422 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
423 if (arg & 0x0000ff00)
424 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
425 ((arg >> 8) & 0xff) | 0xc00);
426 if (arg & 0x00ff0000)
427 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
428 ((arg >> 16) & 0xff) | 0x800);
429 if (arg & 0xff000000)
430 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
431 ((arg >> 24) & 0xff) | 0x400);
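/* A sketch of what the paths above emit for a full 32-bit constant such
 * as 0x12345678 (illustrative only):
 *   ARMv7:      movw rd, #0x5678 ; movt rd, #0x1234
 *   otherwise:  mov  rd, #0x78
 *               orr  rd, rd, #0x5600      @ imm8 0x56, ror 24 (0xc00)
 *               orr  rd, rd, #0x340000    @ imm8 0x34, ror 16 (0x800)
 *               orr  rd, rd, #0x12000000  @ imm8 0x12, ror  8 (0x400)
 */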
435 static inline void tcg_out_mul32(TCGContext *s,
436 int cond, int rd, int rs, int rm)
439 tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
440 (rs << 8) | 0x90 | rm);
442 tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
443 (rm << 8) | 0x90 | rs);
445 tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
446 (rs << 8) | 0x90 | rm);
447 tcg_out_dat_reg(s, cond, ARITH_MOV,
448 rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
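/* The three cases above presumably exist to respect the pre-ARMv6
 * restriction that MUL's destination must differ from its Rm operand:
 * when rd clashes with both inputs, the product goes through the scratch
 * register r8 and is moved into place afterwards. */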
452 static inline void tcg_out_umull32(TCGContext *s,
453 int cond, int rd0, int rd1, int rs, int rm)
455 if (rd0 != rm && rd1 != rm)
456 tcg_out32(s, (cond << 28) | 0x800090 |
457 (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
458 else if (rd0 != rs && rd1 != rs)
459 tcg_out32(s, (cond << 28) | 0x800090 |
460 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
462 tcg_out_dat_reg(s, cond, ARITH_MOV,
463 TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
464 tcg_out32(s, (cond << 28) | 0x800098 |
465 (rd1 << 16) | (rd0 << 12) | (rs << 8));
469 static inline void tcg_out_smull32(TCGContext *s,
470 int cond, int rd0, int rd1, int rs, int rm)
472 if (rd0 != rm && rd1 != rm)
473 tcg_out32(s, (cond << 28) | 0xc00090 |
474 (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
475 else if (rd0 != rs && rd1 != rs)
476 tcg_out32(s, (cond << 28) | 0xc00090 |
477 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
479 tcg_out_dat_reg(s, cond, ARITH_MOV,
480 TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
481 tcg_out32(s, (cond << 28) | 0xc00098 |
482 (rd1 << 16) | (rd0 << 12) | (rs << 8));
486 static inline void tcg_out_ext8s(TCGContext *s, int cond,
489 if (use_armv6_instructions) {
491 tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
493 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
494 rd, 0, rn, SHIFT_IMM_LSL(24));
495 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
496 rd, 0, rd, SHIFT_IMM_ASR(24));
500 static inline void tcg_out_ext8u(TCGContext *s, int cond,
503 tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
506 static inline void tcg_out_ext16s(TCGContext *s, int cond,
509 if (use_armv6_instructions) {
511 tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
513 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
514 rd, 0, rn, SHIFT_IMM_LSL(16));
515 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
516 rd, 0, rd, SHIFT_IMM_ASR(16));
520 static inline void tcg_out_ext16u(TCGContext *s, int cond,
523 if (use_armv6_instructions) {
525 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
527 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
528 rd, 0, rn, SHIFT_IMM_LSL(16));
529 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
530 rd, 0, rd, SHIFT_IMM_LSR(16));
534 static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
536 if (use_armv6_instructions) {
538 tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
540 tcg_out_dat_reg(s, cond, ARITH_MOV,
541 TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
542 tcg_out_dat_reg(s, cond, ARITH_MOV,
543 TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_ASR(16));
544 tcg_out_dat_reg(s, cond, ARITH_ORR,
545 rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
549 static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
551 if (use_armv6_instructions) {
553 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
555 tcg_out_dat_reg(s, cond, ARITH_MOV,
556 TCG_REG_R8, 0, rn, SHIFT_IMM_LSL(24));
557 tcg_out_dat_reg(s, cond, ARITH_MOV,
558 TCG_REG_R8, 0, TCG_REG_R8, SHIFT_IMM_LSR(16));
559 tcg_out_dat_reg(s, cond, ARITH_ORR,
560 rd, TCG_REG_R8, rn, SHIFT_IMM_LSR(8));
564 static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
566 if (use_armv6_instructions) {
568 tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
570 tcg_out_dat_reg(s, cond, ARITH_EOR,
571 TCG_REG_R8, rn, rn, SHIFT_IMM_ROR(16));
572 tcg_out_dat_imm(s, cond, ARITH_BIC,
573 TCG_REG_R8, TCG_REG_R8, 0xff | 0x800);
574 tcg_out_dat_reg(s, cond, ARITH_MOV,
575 rd, 0, rn, SHIFT_IMM_ROR(8));
576 tcg_out_dat_reg(s, cond, ARITH_EOR,
577 rd, rd, TCG_REG_R8, SHIFT_IMM_LSR(8));
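/* Host-side model of the classic four-instruction byte swap emitted in the
 * fallback path above (illustrative helper, not used by the backend): */
static inline uint32_t bswap32_model(uint32_t x)
{
    uint32_t t = x ^ ((x >> 16) | (x << 16)); /* eor r8, rn, rn, ror #16 */
    t &= ~0x00ff0000u;                        /* bic r8, r8, #0x00ff0000 */
    x = (x >> 8) | (x << 24);                 /* mov rd, rn, ror #8 */
    return x ^ (t >> 8);                      /* eor rd, rd, r8, lsr #8 */
}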
581 static inline void tcg_out_ld32_12(TCGContext *s, int cond,
582 int rd, int rn, tcg_target_long im)
585 tcg_out32(s, (cond << 28) | 0x05900000 |
586 (rn << 16) | (rd << 12) | (im & 0xfff));
588 tcg_out32(s, (cond << 28) | 0x05100000 |
589 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
592 static inline void tcg_out_st32_12(TCGContext *s, int cond,
593 int rd, int rn, tcg_target_long im)
596 tcg_out32(s, (cond << 28) | 0x05800000 |
597 (rn << 16) | (rd << 12) | (im & 0xfff));
599 tcg_out32(s, (cond << 28) | 0x05000000 |
600 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
603 static inline void tcg_out_ld32_r(TCGContext *s, int cond,
604 int rd, int rn, int rm)
606 tcg_out32(s, (cond << 28) | 0x07900000 |
607 (rn << 16) | (rd << 12) | rm);
610 static inline void tcg_out_st32_r(TCGContext *s, int cond,
611 int rd, int rn, int rm)
613 tcg_out32(s, (cond << 28) | 0x07800000 |
614 (rn << 16) | (rd << 12) | rm);
617 /* Register pre-increment with base writeback. */
618 static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
619 int rd, int rn, int rm)
621 tcg_out32(s, (cond << 28) | 0x07b00000 |
622 (rn << 16) | (rd << 12) | rm);
625 static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
626 int rd, int rn, int rm)
628 tcg_out32(s, (cond << 28) | 0x07a00000 |
629 (rn << 16) | (rd << 12) | rm);
632 static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
633 int rd, int rn, tcg_target_long im)
636 tcg_out32(s, (cond << 28) | 0x01d000b0 |
637 (rn << 16) | (rd << 12) |
638 ((im & 0xf0) << 4) | (im & 0xf));
640 tcg_out32(s, (cond << 28) | 0x015000b0 |
641 (rn << 16) | (rd << 12) |
642 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
645 static inline void tcg_out_st16_8(TCGContext *s, int cond,
646 int rd, int rn, tcg_target_long im)
649 tcg_out32(s, (cond << 28) | 0x01c000b0 |
650 (rn << 16) | (rd << 12) |
651 ((im & 0xf0) << 4) | (im & 0xf));
653 tcg_out32(s, (cond << 28) | 0x014000b0 |
654 (rn << 16) | (rd << 12) |
655 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
658 static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
659 int rd, int rn, int rm)
661 tcg_out32(s, (cond << 28) | 0x019000b0 |
662 (rn << 16) | (rd << 12) | rm);
665 static inline void tcg_out_st16_r(TCGContext *s, int cond,
666 int rd, int rn, int rm)
668 tcg_out32(s, (cond << 28) | 0x018000b0 |
669 (rn << 16) | (rd << 12) | rm);
672 static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
673 int rd, int rn, tcg_target_long im)
676 tcg_out32(s, (cond << 28) | 0x01d000f0 |
677 (rn << 16) | (rd << 12) |
678 ((im & 0xf0) << 4) | (im & 0xf));
680 tcg_out32(s, (cond << 28) | 0x015000f0 |
681 (rn << 16) | (rd << 12) |
682 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
685 static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
686 int rd, int rn, int rm)
688 tcg_out32(s, (cond << 28) | 0x019000f0 |
689 (rn << 16) | (rd << 12) | rm);
692 static inline void tcg_out_ld8_12(TCGContext *s, int cond,
693 int rd, int rn, tcg_target_long im)
696 tcg_out32(s, (cond << 28) | 0x05d00000 |
697 (rn << 16) | (rd << 12) | (im & 0xfff));
699 tcg_out32(s, (cond << 28) | 0x05500000 |
700 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
703 static inline void tcg_out_st8_12(TCGContext *s, int cond,
704 int rd, int rn, tcg_target_long im)
707 tcg_out32(s, (cond << 28) | 0x05c00000 |
708 (rn << 16) | (rd << 12) | (im & 0xfff));
710 tcg_out32(s, (cond << 28) | 0x05400000 |
711 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
714 static inline void tcg_out_ld8_r(TCGContext *s, int cond,
715 int rd, int rn, int rm)
717 tcg_out32(s, (cond << 28) | 0x07d00000 |
718 (rn << 16) | (rd << 12) | rm);
721 static inline void tcg_out_st8_r(TCGContext *s, int cond,
722 int rd, int rn, int rm)
724 tcg_out32(s, (cond << 28) | 0x07c00000 |
725 (rn << 16) | (rd << 12) | rm);
728 static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
729 int rd, int rn, tcg_target_long im)
732 tcg_out32(s, (cond << 28) | 0x01d000d0 |
733 (rn << 16) | (rd << 12) |
734 ((im & 0xf0) << 4) | (im & 0xf));
736 tcg_out32(s, (cond << 28) | 0x015000d0 |
737 (rn << 16) | (rd << 12) |
738 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
741 static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
742 int rd, int rn, int rm)
744 tcg_out32(s, (cond << 28) | 0x019000d0 |
745 (rn << 16) | (rd << 12) | rm);
748 static inline void tcg_out_ld32u(TCGContext *s, int cond,
749 int rd, int rn, int32_t offset)
751 if (offset > 0xfff || offset < -0xfff) {
752 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
753 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
755 tcg_out_ld32_12(s, cond, rd, rn, offset);
758 static inline void tcg_out_st32(TCGContext *s, int cond,
759 int rd, int rn, int32_t offset)
761 if (offset > 0xfff || offset < -0xfff) {
762 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
763 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
765 tcg_out_st32_12(s, cond, rd, rn, offset);
768 static inline void tcg_out_ld16u(TCGContext *s, int cond,
769 int rd, int rn, int32_t offset)
771 if (offset > 0xff || offset < -0xff) {
772 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
773 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
775 tcg_out_ld16u_8(s, cond, rd, rn, offset);
778 static inline void tcg_out_ld16s(TCGContext *s, int cond,
779 int rd, int rn, int32_t offset)
781 if (offset > 0xff || offset < -0xff) {
782 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
783 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
785 tcg_out_ld16s_8(s, cond, rd, rn, offset);
788 static inline void tcg_out_st16(TCGContext *s, int cond,
789 int rd, int rn, int32_t offset)
791 if (offset > 0xff || offset < -0xff) {
792 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
793 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
795 tcg_out_st16_8(s, cond, rd, rn, offset);
798 static inline void tcg_out_ld8u(TCGContext *s, int cond,
799 int rd, int rn, int32_t offset)
801 if (offset > 0xfff || offset < -0xfff) {
802 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
803 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
805 tcg_out_ld8_12(s, cond, rd, rn, offset);
808 static inline void tcg_out_ld8s(TCGContext *s, int cond,
809 int rd, int rn, int32_t offset)
811 if (offset > 0xff || offset < -0xff) {
812 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
813 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
815 tcg_out_ld8s_8(s, cond, rd, rn, offset);
818 static inline void tcg_out_st8(TCGContext *s, int cond,
819 int rd, int rn, int32_t offset)
821 if (offset > 0xfff || offset < -0xfff) {
822 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
823 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
825 tcg_out_st8_12(s, cond, rd, rn, offset);
828 static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
832 val = addr - (tcg_target_long) s->code_ptr;
833 if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
834 tcg_out_b(s, cond, val);
839 if (cond == COND_AL) {
840 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
841 tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
843 tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
844 tcg_out_dat_reg(s, cond, ARITH_ADD,
845 TCG_REG_PC, TCG_REG_PC,
846 TCG_REG_R8, SHIFT_IMM_LSL(0));
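/* In the far-jump case above: with COND_AL, ldr pc, [pc, #-4] loads the
 * absolute target from the literal word emitted right behind it (PC reads
 * as the ldr's address + 8, so -4 points at that word); for a conditional
 * far jump, a PC-relative displacement is built in r8 and added to pc. */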
852 static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
856 val = addr - (tcg_target_long) s->code_ptr;
857 if (val < 0x01fffffd && val > -0x01fffffd)
858 tcg_out_bl(s, cond, val);
863 if (cond == COND_AL) {
864 tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
865 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
866 tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
868 tcg_out_movi32(s, cond, TCG_REG_R9, addr);
869 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
870 TCG_REG_PC, SHIFT_IMM_LSL(0));
871 tcg_out_bx(s, cond, TCG_REG_R9);
877 static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
879 if (use_armv5_instructions) {
880 tcg_out_blx(s, cond, arg);
882 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
883 TCG_REG_PC, SHIFT_IMM_LSL(0));
884 tcg_out_bx(s, cond, arg);
888 static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
890 TCGLabel *l = &s->labels[label_index];
893 tcg_out_goto(s, cond, l->u.value);
894 else if (cond == COND_AL) {
895 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
896 tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
899 /* Probably this should be preferred even for COND_AL... */
900 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
901 tcg_out_b_noaddr(s, cond);
905 #ifdef CONFIG_SOFTMMU
907 #include "../../softmmu_defs.h"
909 static void *qemu_ld_helpers[4] = {
916 static void *qemu_st_helpers[4] = {
924 #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
926 static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
928 int addr_reg, data_reg, data_reg2, bswap;
929 #ifdef CONFIG_SOFTMMU
930 int mem_index, s_bits;
931 # if TARGET_LONG_BITS == 64
937 #ifdef TARGET_WORDS_BIGENDIAN
946 data_reg2 = 0; /* suppress warning */
948 #ifdef CONFIG_SOFTMMU
949 # if TARGET_LONG_BITS == 64
955 /* Should generate something like the following:
956 * shr r8, addr_reg, #TARGET_PAGE_BITS
957 * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
958 * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
960 # if CPU_TLB_BITS > 8
963 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
964 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
965 tcg_out_dat_imm(s, COND_AL, ARITH_AND,
966 TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
967 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
968 TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
970 * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
971 * below, the offset is likely to exceed 12 bits if mem_index != 0,
972 * and is unlikely to otherwise, so first emit an
973 * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
977 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
978 (mem_index << (TLB_SHIFT & 1)) |
979 ((16 - (TLB_SHIFT >> 1)) << 8));
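/* The rotated immediate built just above encodes
 * (mem_index << (TLB_SHIFT & 1)) rotated left by (TLB_SHIFT & ~1) bits,
 * i.e. mem_index << TLB_SHIFT, which is mem_index * sizeof(tlb_table[0])
 * since each per-mmu-index table holds CPU_TLB_SIZE entries of
 * (1 << CPU_TLB_ENTRY_BITS) bytes. */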
980 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
981 offsetof(CPUState, tlb_table[0][0].addr_read));
982 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
983 TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
984 /* Check alignment. */
986 tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
987 0, addr_reg, (1 << s_bits) - 1);
988 # if TARGET_LONG_BITS == 64
989 /* XXX: possibly we could use a block data load or writeback in
990 * the first access. */
991 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
992 offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
993 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
994 TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
996 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
997 offsetof(CPUState, tlb_table[0][0].addend));
1001 tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1004 tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1007 tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1009 tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
1014 tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1015 tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
1017 tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1022 tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1024 tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
1029 tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
1030 tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
1031 tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
1032 tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
1034 tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
1035 tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
1040 label_ptr = (void *) s->code_ptr;
1041 tcg_out_b(s, COND_EQ, 8);
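/* This conditional branch is emitted with a zero offset field and is
 * fixed up below, once the slow path has been generated, so that on a
 * TLB hit execution skips over the helper call. */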
1043 /* TODO: move this code to where the constant pool will be */
1044 if (addr_reg != TCG_REG_R0) {
1045 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1046 TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
1048 # if TARGET_LONG_BITS == 32
1049 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R1, 0, mem_index);
1051 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1052 TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
1053 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1055 tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_ld_helpers[s_bits] -
1056 (tcg_target_long) s->code_ptr);
1060 tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
1063 tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
1069 if (data_reg != TCG_REG_R0) {
1070 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1071 data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
1075 if (data_reg != TCG_REG_R0) {
1076 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1077 data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
1079 if (data_reg2 != TCG_REG_R1) {
1080 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1081 data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
1086 *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
1087 #else /* !CONFIG_SOFTMMU */
1089 uint32_t offset = GUEST_BASE;
1094 i = ctz32(offset) & ~1;
1095 rot = ((32 - i) << 7) & 0xf00;
1097 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
1098 ((offset >> i) & 0xff) | rot);
1099 addr_reg = TCG_REG_R8;
1100 offset &= ~(0xff << i);
1105 tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
1108 tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
1111 tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1113 tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
1118 tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1119 tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
1121 tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
1126 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1128 tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1132 /* TODO: use block load -
1133 * check whether data_reg2 > data_reg or the other way around */
1134 if (data_reg == addr_reg) {
1135 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
1136 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
1138 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
1139 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
1142 tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1143 tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
1150 static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
1152 int addr_reg, data_reg, data_reg2, bswap;
1153 #ifdef CONFIG_SOFTMMU
1154 int mem_index, s_bits;
1155 # if TARGET_LONG_BITS == 64
1158 uint32_t *label_ptr;
1161 #ifdef TARGET_WORDS_BIGENDIAN
1168 data_reg2 = *args++;
1170 data_reg2 = 0; /* suppress warning */
1172 #ifdef CONFIG_SOFTMMU
1173 # if TARGET_LONG_BITS == 64
1174 addr_reg2 = *args++;
1179 /* Should generate something like the following:
1180 * shr r8, addr_reg, #TARGET_PAGE_BITS
1181 * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
1182 * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
1184 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1185 TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
1186 tcg_out_dat_imm(s, COND_AL, ARITH_AND,
1187 TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
1188 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
1189 TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
1191 * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
1192 * below, the offset is likely to exceed 12 bits if mem_index != 0,
1193 * and is unlikely to otherwise, so first emit an
1194 * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
1198 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
1199 (mem_index << (TLB_SHIFT & 1)) |
1200 ((16 - (TLB_SHIFT >> 1)) << 8));
1201 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
1202 offsetof(CPUState, tlb_table[0][0].addr_write));
1203 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
1204 TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
1205 /* Check alignment. */
1207 tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
1208 0, addr_reg, (1 << s_bits) - 1);
1209 # if TARGET_LONG_BITS == 64
1210 /* XXX: possibly we could use a block data load or writeback in
1211 * the first access. */
1212 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
1213 offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
1214 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1215 TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
1217 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
1218 offsetof(CPUState, tlb_table[0][0].addend));
1222 tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1226 tcg_out_bswap16(s, COND_EQ, TCG_REG_R0, data_reg);
1227 tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
1229 tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1235 tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
1236 tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
1238 tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1243 tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
1244 tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
1245 tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
1246 tcg_out_st32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
1248 tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
1249 tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
1254 label_ptr = (void *) s->code_ptr;
1255 tcg_out_b(s, COND_EQ, 8);
1257 /* TODO: move this code to where the constant pool will be */
1258 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1259 TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
1260 # if TARGET_LONG_BITS == 32
1263 tcg_out_ext8u(s, COND_AL, TCG_REG_R1, data_reg);
1264 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1267 tcg_out_ext16u(s, COND_AL, TCG_REG_R1, data_reg);
1268 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1271 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1272 TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
1273 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R2, 0, mem_index);
1276 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
1277 tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
1278 if (data_reg != TCG_REG_R2) {
1279 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1280 TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1282 if (data_reg2 != TCG_REG_R3) {
1283 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1284 TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
1289 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1290 TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
1293 tcg_out_ext8u(s, COND_AL, TCG_REG_R2, data_reg);
1294 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
1297 tcg_out_ext16u(s, COND_AL, TCG_REG_R2, data_reg);
1298 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
1301 if (data_reg != TCG_REG_R2) {
1302 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1303 TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1305 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R3, 0, mem_index);
1308 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R8, 0, mem_index);
1309 tcg_out32(s, (COND_AL << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
1310 if (data_reg != TCG_REG_R2) {
1311 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1312 TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1314 if (data_reg2 != TCG_REG_R3) {
1315 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1316 TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
1322 tcg_out_bl(s, COND_AL, (tcg_target_long) qemu_st_helpers[s_bits] -
1323 (tcg_target_long) s->code_ptr);
1325 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
1327 *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
1328 #else /* !CONFIG_SOFTMMU */
1330 uint32_t offset = GUEST_BASE;
1335 i = ctz32(offset) & ~1;
1336 rot = ((32 - i) << 7) & 0xf00;
1338 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
1339 ((offset >> i) & 0xff) | rot);
1340 addr_reg = TCG_REG_R1;
1341 offset &= ~(0xff << i);
1346 tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
1350 tcg_out_bswap16(s, COND_AL, TCG_REG_R0, data_reg);
1351 tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1353 tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
1359 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1360 tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1362 tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1366 /* TODO: use block store -
1367 * check whether data_reg2 > data_reg or the other way around */
1369 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
1370 tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1371 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1372 tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
1374 tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1375 tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
1382 static uint8_t *tb_ret_addr;
1384 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1385 const TCGArg *args, const int *const_args)
1390 case INDEX_op_exit_tb:
1392 uint8_t *ld_ptr = s->code_ptr;
1394 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1396 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
1397 tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
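/* If the pc-relative ldr above was used (return value too wide for a mov
 * immediate), its offset field is back-patched here via the instruction's
 * first byte so that it addresses the literal word emitted next; the -8
 * compensates for the PC read-ahead. */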
1399 *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
1400 tcg_out32(s, args[0]);
1404 case INDEX_op_goto_tb:
1405 if (s->tb_jmp_offset) {
1406 /* Direct jump method */
1407 #if defined(USE_DIRECT_JUMP)
1408 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1409 tcg_out_b(s, COND_AL, 8);
1411 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
1412 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1416 /* Indirect jump method */
1418 c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
1419 if (c > 0xfff || c < -0xfff) {
1420 tcg_out_movi32(s, COND_AL, TCG_REG_R0,
1421 (tcg_target_long) (s->tb_next + args[0]));
1422 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
1424 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
1426 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1427 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
1428 tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
1431 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1435 tcg_out_call(s, COND_AL, args[0]);
1437 tcg_out_callr(s, COND_AL, args[0]);
1441 tcg_out_goto(s, COND_AL, args[0]);
1443 tcg_out_bx(s, COND_AL, args[0]);
1446 tcg_out_goto_label(s, COND_AL, args[0]);
1449 case INDEX_op_ld8u_i32:
1450 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1452 case INDEX_op_ld8s_i32:
1453 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1455 case INDEX_op_ld16u_i32:
1456 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1458 case INDEX_op_ld16s_i32:
1459 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1461 case INDEX_op_ld_i32:
1462 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1464 case INDEX_op_st8_i32:
1465 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
1467 case INDEX_op_st16_i32:
1468 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
1470 case INDEX_op_st_i32:
1471 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1474 case INDEX_op_mov_i32:
1475 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1476 args[0], 0, args[1], SHIFT_IMM_LSL(0));
1478 case INDEX_op_movi_i32:
1479 tcg_out_movi32(s, COND_AL, args[0], args[1]);
1481 case INDEX_op_add_i32:
1484 case INDEX_op_sub_i32:
1487 case INDEX_op_and_i32:
1490 case INDEX_op_andc_i32:
1493 case INDEX_op_or_i32:
1496 case INDEX_op_xor_i32:
1500 if (const_args[2]) {
1502 rot = encode_imm(args[2]);
1503 tcg_out_dat_imm(s, COND_AL, c,
1504 args[0], args[1], rotl(args[2], rot) | (rot << 7));
1506 tcg_out_dat_reg(s, COND_AL, c,
1507 args[0], args[1], args[2], SHIFT_IMM_LSL(0));
1509 case INDEX_op_add2_i32:
1510 tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
1511 args[0], args[1], args[2], args[3],
1512 args[4], args[5], SHIFT_IMM_LSL(0));
1514 case INDEX_op_sub2_i32:
1515 tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
1516 args[0], args[1], args[2], args[3],
1517 args[4], args[5], SHIFT_IMM_LSL(0));
1519 case INDEX_op_neg_i32:
1520 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1522 case INDEX_op_not_i32:
1523 tcg_out_dat_reg(s, COND_AL,
1524 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
1526 case INDEX_op_mul_i32:
1527 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1529 case INDEX_op_mulu2_i32:
1530 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1532 /* XXX: Perhaps args[2] & 0x1f is wrong */
1533 case INDEX_op_shl_i32:
1535 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1537 case INDEX_op_shr_i32:
1538 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1539 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1541 case INDEX_op_sar_i32:
1542 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1543 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
1545 case INDEX_op_rotr_i32:
1546 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
1547 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
1550 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1553 case INDEX_op_rotl_i32:
1554 if (const_args[2]) {
1555 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1556 ((0x20 - args[2]) & 0x1f) ?
1557 SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
1560 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_R8, args[1], 0x20);
1561 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1562 SHIFT_REG_ROR(TCG_REG_R8));
1566 case INDEX_op_brcond_i32:
1567 if (const_args[1]) {
1569 rot = encode_imm(args[1]);
1570 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
1571 args[0], rotl(args[1], rot) | (rot << 7));
1573 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1574 args[0], args[1], SHIFT_IMM_LSL(0));
1576 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
1578 case INDEX_op_brcond2_i32:
1579 /* The resulting conditions are:
1580 * TCG_COND_EQ --> a0 == a2 && a1 == a3,
1581 * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
1582 * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
1583 * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
1584 * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
1585 * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
1587 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1588 args[1], args[3], SHIFT_IMM_LSL(0));
1589 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1590 args[0], args[2], SHIFT_IMM_LSL(0));
1591 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
1593 case INDEX_op_setcond_i32:
1594 if (const_args[2]) {
1596 rot = encode_imm(args[2]);
1597 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
1598 args[1], rotl(args[2], rot) | (rot << 7));
1600 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1601 args[1], args[2], SHIFT_IMM_LSL(0));
1603 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
1604 ARITH_MOV, args[0], 0, 1);
1605 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
1606 ARITH_MOV, args[0], 0, 0);
1608 case INDEX_op_setcond2_i32:
1609 /* See brcond2_i32 comment */
1610 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1611 args[2], args[4], SHIFT_IMM_LSL(0));
1612 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1613 args[1], args[3], SHIFT_IMM_LSL(0));
1614 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
1615 ARITH_MOV, args[0], 0, 1);
1616 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
1617 ARITH_MOV, args[0], 0, 0);
1620 case INDEX_op_qemu_ld8u:
1621 tcg_out_qemu_ld(s, args, 0);
1623 case INDEX_op_qemu_ld8s:
1624 tcg_out_qemu_ld(s, args, 0 | 4);
1626 case INDEX_op_qemu_ld16u:
1627 tcg_out_qemu_ld(s, args, 1);
1629 case INDEX_op_qemu_ld16s:
1630 tcg_out_qemu_ld(s, args, 1 | 4);
1632 case INDEX_op_qemu_ld32:
1633 tcg_out_qemu_ld(s, args, 2);
1635 case INDEX_op_qemu_ld64:
1636 tcg_out_qemu_ld(s, args, 3);
1639 case INDEX_op_qemu_st8:
1640 tcg_out_qemu_st(s, args, 0);
1642 case INDEX_op_qemu_st16:
1643 tcg_out_qemu_st(s, args, 1);
1645 case INDEX_op_qemu_st32:
1646 tcg_out_qemu_st(s, args, 2);
1648 case INDEX_op_qemu_st64:
1649 tcg_out_qemu_st(s, args, 3);
1652 case INDEX_op_bswap16_i32:
1653 tcg_out_bswap16(s, COND_AL, args[0], args[1]);
1655 case INDEX_op_bswap32_i32:
1656 tcg_out_bswap32(s, COND_AL, args[0], args[1]);
1659 case INDEX_op_ext8s_i32:
1660 tcg_out_ext8s(s, COND_AL, args[0], args[1]);
1662 case INDEX_op_ext16s_i32:
1663 tcg_out_ext16s(s, COND_AL, args[0], args[1]);
1665 case INDEX_op_ext16u_i32:
1666 tcg_out_ext16u(s, COND_AL, args[0], args[1]);
1674 static const TCGTargetOpDef arm_op_defs[] = {
1675 { INDEX_op_exit_tb, { } },
1676 { INDEX_op_goto_tb, { } },
1677 { INDEX_op_call, { "ri" } },
1678 { INDEX_op_jmp, { "ri" } },
1679 { INDEX_op_br, { } },
1681 { INDEX_op_mov_i32, { "r", "r" } },
1682 { INDEX_op_movi_i32, { "r" } },
1684 { INDEX_op_ld8u_i32, { "r", "r" } },
1685 { INDEX_op_ld8s_i32, { "r", "r" } },
1686 { INDEX_op_ld16u_i32, { "r", "r" } },
1687 { INDEX_op_ld16s_i32, { "r", "r" } },
1688 { INDEX_op_ld_i32, { "r", "r" } },
1689 { INDEX_op_st8_i32, { "r", "r" } },
1690 { INDEX_op_st16_i32, { "r", "r" } },
1691 { INDEX_op_st_i32, { "r", "r" } },
1693 /* TODO: "r", "r", "ri" */
1694 { INDEX_op_add_i32, { "r", "r", "rI" } },
1695 { INDEX_op_sub_i32, { "r", "r", "rI" } },
1696 { INDEX_op_mul_i32, { "r", "r", "r" } },
1697 { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
1698 { INDEX_op_and_i32, { "r", "r", "rI" } },
1699 { INDEX_op_andc_i32, { "r", "r", "rI" } },
1700 { INDEX_op_or_i32, { "r", "r", "rI" } },
1701 { INDEX_op_xor_i32, { "r", "r", "rI" } },
1702 { INDEX_op_neg_i32, { "r", "r" } },
1703 { INDEX_op_not_i32, { "r", "r" } },
1705 { INDEX_op_shl_i32, { "r", "r", "ri" } },
1706 { INDEX_op_shr_i32, { "r", "r", "ri" } },
1707 { INDEX_op_sar_i32, { "r", "r", "ri" } },
1708 { INDEX_op_rotl_i32, { "r", "r", "ri" } },
1709 { INDEX_op_rotr_i32, { "r", "r", "ri" } },
1711 { INDEX_op_brcond_i32, { "r", "rI" } },
1712 { INDEX_op_setcond_i32, { "r", "r", "rI" } },
1714 /* TODO: "r", "r", "r", "r", "ri", "ri" */
1715 { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
1716 { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
1717 { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
1718 { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },
1720 #if TARGET_LONG_BITS == 32
1721 { INDEX_op_qemu_ld8u, { "r", "l" } },
1722 { INDEX_op_qemu_ld8s, { "r", "l" } },
1723 { INDEX_op_qemu_ld16u, { "r", "l" } },
1724 { INDEX_op_qemu_ld16s, { "r", "l" } },
1725 { INDEX_op_qemu_ld32, { "r", "l" } },
1726 { INDEX_op_qemu_ld64, { "L", "L", "l" } },
1728 { INDEX_op_qemu_st8, { "s", "s" } },
1729 { INDEX_op_qemu_st16, { "s", "s" } },
1730 { INDEX_op_qemu_st32, { "s", "s" } },
1731 { INDEX_op_qemu_st64, { "S", "S", "s" } },
1733 { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
1734 { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
1735 { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
1736 { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
1737 { INDEX_op_qemu_ld32, { "r", "l", "l" } },
1738 { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },
1740 { INDEX_op_qemu_st8, { "s", "s", "s" } },
1741 { INDEX_op_qemu_st16, { "s", "s", "s" } },
1742 { INDEX_op_qemu_st32, { "s", "s", "s" } },
1743 { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
1746 { INDEX_op_bswap16_i32, { "r", "r" } },
1747 { INDEX_op_bswap32_i32, { "r", "r" } },
1749 { INDEX_op_ext8s_i32, { "r", "r" } },
1750 { INDEX_op_ext16s_i32, { "r", "r" } },
1751 { INDEX_op_ext16u_i32, { "r", "r" } },
1756 void tcg_target_init(TCGContext *s)
1758 #if !defined(CONFIG_USER_ONLY)
1760 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
1764 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
1765 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
1770 (1 << TCG_REG_R12) |
1771 (1 << TCG_REG_R14));
1773 tcg_regset_clear(s->reserved_regs);
1774 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
1775 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
1776 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
1778 tcg_add_target_add_op_defs(arm_op_defs);
1781 static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
1782 int arg1, tcg_target_long arg2)
1784 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
1787 static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
1788 int arg1, tcg_target_long arg2)
1790 tcg_out_st32(s, COND_AL, arg, arg1, arg2);
1793 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
1797 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
1802 tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
1808 static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
1810 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
1813 static inline void tcg_out_movi(TCGContext *s, TCGType type,
1814 int ret, tcg_target_long arg)
1816 tcg_out_movi32(s, COND_AL, ret, arg);
1819 void tcg_target_qemu_prologue(TCGContext *s)
1821 /* There is no need to save r7; it is used to store the address
1822 of the env structure and is not modified by GCC. */
1824 /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
1825 tcg_out32(s, (COND_AL << 28) | 0x092d4f70);
1827 tcg_out_bx(s, COND_AL, TCG_REG_R0);
1828 tb_ret_addr = s->code_ptr;
1830 /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
1831 tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
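/* For reference: before the condition field is OR'd in, 0x092d4f70 is
 * stmdb sp! with register mask 0x4f70 = {r4-r6, r8-r11, lr}, and
 * 0x08bd8f70 is ldmia sp! with mask 0x8f70 = {r4-r6, r8-r11, pc},
 * matching the comments above. */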