]> Git Repo - qemu.git/blame - tcg/arm/tcg-target.c
Merge remote-tracking branch 'pmaydell/target-arm.next' into staging
[qemu.git] / tcg / arm / tcg-target.c
CommitLineData
811d4cf4
AZ
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Andrzej Zaborowski
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
d4a9eb1f 24
ac34fb5c
AJ
/* Detect the host ARM architecture revision from the compiler's predefined
   macros, and collapse it into three cumulative feature-level flags
   (v5 <= v6 <= v7) used to select instruction encodings at runtime. */
#if defined(__ARM_ARCH_7__) || \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

/* Any v7 variant implies v6; otherwise accept the explicit v6 variants. */
#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

/* Any v6 variant implies v5; otherwise accept the explicit v5 variants. */
#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5_INSTRUCTIONS
#endif

/* Expose the feature levels as compile-time constant ints so ordinary
   `if (use_armvN_instructions)` tests can be used (and dead branches
   eliminated) instead of #ifdef soup in the code generators below. */
#ifdef USE_ARMV5_INSTRUCTIONS
static const int use_armv5_instructions = 1;
#else
static const int use_armv5_instructions = 0;
#endif
#undef USE_ARMV5_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS
69
d4a9eb1f
BS
#ifndef NDEBUG
/* Human-readable register names, indexed by TCGReg; used only by
   debug dumps, hence compiled out under NDEBUG. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%pc",
};
#endif
811d4cf4 90
d4a9eb1f 91static const int tcg_target_reg_alloc_order[] = {
811d4cf4
AZ
92 TCG_REG_R4,
93 TCG_REG_R5,
94 TCG_REG_R6,
95 TCG_REG_R7,
96 TCG_REG_R8,
97 TCG_REG_R9,
98 TCG_REG_R10,
99 TCG_REG_R11,
811d4cf4 100 TCG_REG_R13,
914ccf51
AJ
101 TCG_REG_R0,
102 TCG_REG_R1,
103 TCG_REG_R2,
104 TCG_REG_R3,
105 TCG_REG_R12,
811d4cf4
AZ
106 TCG_REG_R14,
107};
108
d4a9eb1f 109static const int tcg_target_call_iarg_regs[4] = {
811d4cf4
AZ
110 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
111};
d4a9eb1f 112static const int tcg_target_call_oarg_regs[2] = {
811d4cf4
AZ
113 TCG_REG_R0, TCG_REG_R1
114};
115
13dd6fb9 116#define TCG_REG_TMP TCG_REG_R12
4346457a 117
c69806ab
AJ
/* Apply an absolute 32-bit relocation: store the target address
   directly into the 32-bit slot at code_ptr. */
static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
{
    *(uint32_t *) code_ptr = target;
}

/* Apply a 24-bit pc-relative branch relocation.  The offset is counted
   in words and is relative to the instruction address + 8 (ARM reads
   the pc two instructions ahead).  Only the low 24 bits of the existing
   instruction are rewritten; the condition/opcode bits are preserved. */
static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
{
    uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);

    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
                             | (offset & 0xffffff);
}
130
/* Resolve one relocation recorded against generated code.
   Only R_ARM_ABS32 and R_ARM_PC24 are ever emitted by this backend;
   anything else (including R_ARM_CALL/R_ARM_JUMP24) aborts.
   NOTE(review): 'addend' is accepted but unused here. */
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        reloc_abs32(code_ptr, value);
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        tcg_abort();

    case R_ARM_PC24:
        reloc_pc24(code_ptr, value);
        break;
    }
}
149
b6b24cb0
RH
/* Extra constant-constraint flags, beyond the generic TCG_CT_CONST:
   ARM  ('I'): encodable as an ARM rotated immediate;
   INV  ('K'): encodable after bitwise inversion (mvn/bic forms);
   NEG  ('N'): encodable after negation (add<->sub forms);
   ZERO ('Z'): the constant zero. */
#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
19b62bf4 154
/* parse target specific constraints */
/* Translate one constraint letter from a TCG op definition into
   ct->ct flags and an allowed register set.  Advances *pct_str past
   the consumed letter; returns 0 on success, -1 on an unknown letter. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_INV;
        break;
    case 'N': /* The gcc constraint letter is L, already used here. */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;

    case 'r':
        /* Any register. */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0-r2 will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#endif
        break;
    case 'L':
        /* qemu_ld data register (second of a 64-bit pair). */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r1 is still needed to load data_reg or data_reg2,
           so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;

    /* qemu_st address & data_reg */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
           and r0-r1 doing the byte swapping, so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
#endif
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
228
94953e6d
LD
229static inline uint32_t rotl(uint32_t val, int n)
230{
231 return (val << n) | (val >> (32 - n));
232}
233
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30. */
/* Return the rotation amount that encodes imm as such an immediate,
   or -1 if imm cannot be encoded.  The caller combines the returned
   rotation with rotl(imm, rot) to form the instruction field. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    /* The remaining encodable values have the 8-bit field wrapping
       around the top of the word; only rotations 2/4/6 can do that. */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}
cb4e581f
LD
257
/* True if imm is representable as an ARM rotated 8-bit immediate. */
static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
262
811d4cf4
AZ
263/* Test if a constant matches the constraint.
264 * TODO: define constraints for:
265 *
266 * ldr/str offset: between -0xfff and 0xfff
267 * ldrh/strh offset: between -0xff and 0xff
268 * mov operand2: values represented with x << (2 * y), x < 0x100
269 * add, sub, eor...: ditto
270 */
271static inline int tcg_target_const_match(tcg_target_long val,
19b62bf4 272 const TCGArgConstraint *arg_ct)
811d4cf4
AZ
273{
274 int ct;
275 ct = arg_ct->ct;
19b62bf4 276 if (ct & TCG_CT_CONST) {
811d4cf4 277 return 1;
19b62bf4 278 } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
cb4e581f 279 return 1;
19b62bf4
RH
280 } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
281 return 1;
a9a86ae9
RH
282 } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
283 return 1;
b6b24cb0
RH
284 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
285 return 1;
19b62bf4 286 } else {
811d4cf4 287 return 0;
19b62bf4 288 }
811d4cf4
AZ
289}
290
2df3f1ee
RH
/* Bit 20: the S bit -- make the data-processing insn update the CPSR. */
#define TO_CPSR (1 << 20)

/* Raw opcode fields for the instructions emitted by this backend.
   The ARITH_* values are data-processing opcodes in bits 24:21; the
   comparison opcodes always set the flags, hence the TO_CPSR or-in.
   The INSN_* values are full load/store base encodings (IMM vs REG
   addressing variants), to be or'ed with cond/registers/offset. */
typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
} ARMInsn;
811d4cf4 331
811d4cf4
AZ
/* Operand-2 shifter encodings: immediate-shift forms put the amount in
   bits 11:7, register-shift forms put the shift register in bits 11:8;
   the low bits select the shift type (LSL/LSR/ASR/ROR). */
#define SHIFT_IMM_LSL(im)	(((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)	(((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)	(((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)	(((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)	(((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)	(((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)	(((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)	(((rs) << 8) | 0x70)

/* ARM condition codes, as placed in instruction bits 31:28. */
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};
358
/* Map TCG comparison conditions to ARM condition codes. */
static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
372
/* Emit "bx rn": branch to the address in rn (may switch to Thumb
   if bit 0 of rn is set). */
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

/* Emit "b <offset>": pc-relative branch.  The hardware offset is
   relative to pc+8 and word-granular, hence the -8 and >>2. */
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
383
e936243a
AZ
/* Emit a branch whose 24-bit target field will be filled in later
   (via reloc_pc24).  Only the opcode byte is written. */
static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by skipping
       the corresponding bytes. This ensures that caches and memory are
       kept coherent during retranslation. */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}
397
811d4cf4
AZ
/* Emit "bl <offset>": pc-relative branch-and-link (offset relative
   to pc+8, in words). */
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

/* Emit "blx rn": call through a register, switching ISA per bit 0. */
static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

/* Emit the immediate form "blx <offset>" (always unconditional;
   bit 24 carries the halfword bit of the Thumb target). */
static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
414
811d4cf4
AZ
/* Emit a register-form data-processing insn: rd = rn <opc> (rm <shift>).
   Bit 25 clear selects the register operand-2 encoding. */
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}
421
df5e0ef7
RH
/* Emit a no-op, using the architected NOP on v7 and "mov r0, r0"
   on older cores. */
static inline void tcg_out_nop(TCGContext *s)
{
    if (use_armv7_instructions) {
        /* Architected nop introduced in v6k.  */
        /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
           also Just So Happened to do nothing on pre-v6k so that we
           don't need to conditionalize it?  */
        tcg_out32(s, 0xe320f000);
    } else {
        /* Prior to that the assembler uses mov r0, r0.  */
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 0, 0, 0, SHIFT_IMM_LSL(0));
    }
}
435
9716ef3b
PM
436static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
437{
438 /* Simple reg-reg move, optimising out the 'do nothing' case */
439 if (rd != rm) {
440 tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
441 }
442}
443
811d4cf4
AZ
/* Emit an immediate-form data-processing insn: rd = rn <opc> #imm.
   Bit 25 set selects the rotated 8-bit immediate operand-2 encoding;
   'im' must already contain the rotation field (see encode_imm/rotl). */
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}
450
/* Load an arbitrary 32-bit constant into rd, choosing the shortest
   available sequence: a single mov/mvn immediate, movw(+movt) on v7,
   or a mov/mvn followed by eor steps that add 8 bits at a time. */
static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, opc, rn;

    /* For armv7, make sure not to use movw+movt when mov/mvn would do.
       Speed things up by only checking when movt would be required.
       Prior to armv7, have one go at fully rotated immediates before
       doing the decomposition thing below.  */
    if (!use_armv7_instructions || (arg & 0xffff0000)) {
        rot = encode_imm(arg);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                            rotl(arg, rot) | (rot << 7));
            return;
        }
        rot = encode_imm(~arg);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                            rotl(~arg, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* TODO: This is very suboptimal, we can easily have a constant
       pool somewhere after all the instructions.  */
    opc = ARITH_MOV;
    rn = 0;
    /* If we have lots of leading 1's, we can shorten the sequence by
       beginning with mvn and then clearing higher bits with eor.  */
    if (clz32(~arg) > clz32(arg)) {
        opc = ARITH_MVN, arg = ~arg;
    }
    /* Peel off 8 bits at a time from the lowest set (even) bit position,
       first into rd, then xor-ing further chunks in until none remain. */
    do {
        int i = ctz32(arg) & ~1;
        rot = ((32 - i) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
        arg &= ~(0xff << i);

        opc = ARITH_EOR;
        rn = rd;
    } while (arg);
}
506
7fc645bf
PM
static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
    if (rhs_is_const) {
        /* "rI" guarantees the immediate is encodable, so failure here
           indicates a constraint bug upstream. */
        int rot = encode_imm(rhs);
        assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
521
19b62bf4
RH
static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            /* Not directly encodable: use the inverted constant with the
               complementary opcode (e.g. and<->bic, mov<->mvn). */
            rhs = ~rhs;
            rot = encode_imm(rhs);
            assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
542
a9a86ae9
RH
static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            /* Not directly encodable: use the negated constant with the
               negating opcode (e.g. add<->sub). */
            rhs = -rhs;
            rot = encode_imm(rhs);
            assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
563
34358a12
RH
/* Emit "mul rd, rn, rm", working around the pre-v6 restriction that
   the destination must differ from the first source. */
static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            /* Multiplication is commutative: swap the operands so
               that rn no longer aliases rd. */
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}
581
34358a12
RH
/* Emit "umull rd0, rd1, rn, rm" (64-bit unsigned product in rd1:rd0),
   working around the pre-v6 restriction that neither destination may
   alias the first source. */
static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            /* Both sources clash with the destinations; copy rn away. */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            /* Commutative: swapping the sources resolves the clash. */
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
600
34358a12
RH
/* Emit "smull rd0, rd1, rn, rm" (64-bit signed product in rd1:rd0),
   with the same pre-v6 operand-clash workaround as tcg_out_umull32. */
static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            /* Both sources clash with the destinations; copy rn away. */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            /* Commutative: swapping the sources resolves the clash. */
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
619
0637c56c
RH
/* Emit "sdiv rd, rn, rm" (signed divide; requires the IDIV extension). */
static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

/* Emit "udiv rd, rn, rm" (unsigned divide; requires the IDIV extension). */
static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
629
9517094f
AJ
/* Sign-extend the low byte of rn into rd: sxtb on v6+, else a
   left/right arithmetic shift pair. */
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

/* Zero-extend the low byte of rn into rd (and with 0xff). */
static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}
649
9517094f
AJ
/* Sign-extend the low halfword of rn into rd: sxth on v6+, else a
   left/right arithmetic shift pair. */
static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

/* Zero-extend the low halfword of rn into rd: uxth on v6+, else a
   left/right logical shift pair. */
static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}
677
67dcab73
AJ
/* Byte-swap the low halfword of rn and sign-extend into rd:
   revsh on v6+, else a three-insn shift/or sequence. */
static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

/* Byte-swap the low halfword of rn into rd (zero-extended):
   rev16 on v6+, else a three-insn shift/or sequence. */
static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}
707
7aab08aa
AJ
/* swap the two low bytes assuming that the two high input bytes and the
   two high output bit can hold any value. */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        /* tmp = (rn >> 8) & 0xff; rd = tmp | (rn << 8) -- the high
           garbage bytes are tolerated per the comment above. */
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}
723
244b1e81
AJ
/* Byte-swap all four bytes of rn into rd: rev on v6+, else the classic
   four-insn eor/bic/ror sequence using TCG_REG_TMP. */
static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}
740
b6b24cb0
RH
/* Report whether the deposit op can be used for the given field;
   we only implement it via the v7 bfi/bfc instructions. */
bool tcg_target_deposit_valid(int ofs, int len)
{
    /* ??? Without bfi, we could improve over generic code by combining
       the right-shift from a non-zero ofs with the orr.  We do run into
       problems when rd == rs, and the mask generated from ofs+len don't
       fit into an immediate.  We would have to be careful not to pessimize
       wrt the optimizations performed on the expanded code.  */
    return use_armv7_instructions;
}

/* Emit bfi (insert a1 into rd[ofs, ofs+len)), or bfc when inserting a
   constant -- the constraint only allows the constant 0 here. */
static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}
762
9feac1d7
RH
/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.  */
/* Register-offset load/store: u = add offset, p = pre-index,
   w = writeback. */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}
771
772static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
773 TCGReg rn, int imm8, bool p, bool w)
774{
775 bool u = 1;
776 if (imm8 < 0) {
777 imm8 = -imm8;
778 u = 0;
779 }
780 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
781 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
782}
783
784static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
785 TCGReg rn, int imm12, bool p, bool w)
811d4cf4 786{
9feac1d7
RH
787 bool u = 1;
788 if (imm12 < 0) {
789 imm12 = -imm12;
790 u = 0;
791 }
792 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
793 (rn << 16) | (rt << 12) | imm12);
794}
795
/* 32-bit word load/store wrappers around the generic memop emitters:
   _12 = 12-bit immediate offset, _r = register offset,
   _rwb = register pre-increment with base writeback. */
static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}
832
9feac1d7
RH
/* 16-bit halfword load/store wrappers: _8 = signed 8-bit immediate
   offset (split-imm encoding), _r = register offset; ld16s = signed. */
static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}
868
9feac1d7
RH
/* 8-bit byte load/store wrappers: unsigned forms use the 12-bit
   immediate encoding, signed loads the split 8-bit one; _r = register
   offset. */
static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}
904
811d4cf4
AZ
/* Arbitrary-offset word/halfword load/store helpers: use the immediate
   form when the offset fits its encoding (+/-0xfff for words, +/-0xff
   for halfwords), otherwise materialize the offset in TCG_REG_TMP and
   use the register form. */
static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}
954
/* Arbitrary-offset byte load/store helpers; limits follow the byte
   encodings (+/-0xfff unsigned/store forms, +/-0xff for signed load). */
static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}
984
/* The _goto case is normally between TBs within the same code buffer,
 * and with the code buffer limited to 16MB we shouldn't need the long
 * case.
 *
 * .... except to the prologue that is in its own buffer.
 */
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    if (addr & 1) {
        /* goto to a Thumb destination isn't supported */
        tcg_abort();
    }

    /* Displacement relative to the current emit point; the -8 below
       accounts for the ARM PC reading as "current insn + 8". */
    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
        if (cond == COND_AL) {
            /* Unconditional long jump: ldr pc, [pc, #-4] followed by
               the literal destination address in the instruction stream. */
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr);
        } else {
            /* Conditional long jump: materialize the (pre-adjusted)
               displacement in TMP and add it to PC under 'cond'. */
            tcg_out_movi32(s, cond, TCG_REG_TMP, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_TMP, SHIFT_IMM_LSL(0));
        }
    }
}
1015
/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static inline void tcg_out_call(TCGContext *s, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    /* +/-32MB: within BL/BLX immediate branch range (val-8 adjusts for
       the ARM PC reading as "current insn + 8"). */
    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
        if (addr & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5_instructions) {
                /* BLX only exists from ARMv5 on; no way to interwork. */
                tcg_abort();
            }
            tcg_out_blx_imm(s, val);
        } else {
            tcg_out_bl(s, COND_AL, val);
        }
    } else if (use_armv7_instructions) {
        /* Out of range: movw/movt the address into TMP and BLX it. */
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addr);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        /* Pre-v7 long call: set LR to skip the literal word, then
           load PC from the literal pool entry that follows. */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out32(s, addr);
    }
}
1042
1043static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
1044{
23401b58
AJ
1045 if (use_armv5_instructions) {
1046 tcg_out_blx(s, cond, arg);
1047 } else {
1048 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
1049 TCG_REG_PC, SHIFT_IMM_LSL(0));
1050 tcg_out_bx(s, cond, arg);
1051 }
811d4cf4
AZ
1052}
1053
/* Emit a (conditional) branch to TCG label 'label_index'.  If the label
   already has a resolved address, branch there directly; otherwise emit
   a to-be-patched branch and record an R_ARM_PC24 relocation against it
   (the 31337 addend is a placeholder, overwritten when patched). */
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
1065
811d4cf4 1066#ifdef CONFIG_SOFTMMU
79383c9c 1067
022c62cb 1068#include "exec/softmmu_defs.h"
811d4cf4 1069
e141ab52
BS
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
/* Indexed by opc & 3, i.e. log2 of the access size (byte..64-bit). */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};
1078
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
/* Indexed by opc & 3, i.e. log2 of the access size (byte..64-bit). */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
9716ef3b
PM
1087
/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argreg 0..3 is real registers, 4+ on stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
/* EXT_ARG runs only on the stack path: it re-homes 'arg' into a real
 * register (typically TCG_REG_TMP) so it can be stored to the outgoing
 * argument area on the call stack. */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);                      \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}
1111
/* Immediate argument: mov into a register, or spill via TMP to the stack. */
DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
/* 8-bit register value: zero-extended before being passed. */
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
/* 16-bit register value: zero-extended before being passed. */
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
/* Full 32-bit register value: plain move, no extension step needed. */
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
1119
1120static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
1121 TCGReg arglo, TCGReg arghi)
9716ef3b
PM
1122{
1123 /* 64 bit arguments must go in even/odd register pairs
1124 * and in 8-aligned stack slots.
1125 */
1126 if (argreg & 1) {
1127 argreg++;
1128 }
1129 argreg = tcg_out_arg_reg32(s, argreg, arglo);
1130 argreg = tcg_out_arg_reg32(s, argreg, arghi);
1131 return argreg;
1132}
811d4cf4 1133
/* Combined shift spanning the TLB index bits plus the per-entry size bits. */
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
1135
/* Load and compare a TLB entry, leaving the flags set.  Leaves R2 pointing
   to the tlb entry.  Clobbers R1 and TMP. */

static void tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                             int s_bits, int tlb_offset)
{
    TCGReg base = TCG_AREG0;

    /* Should generate something like the following:
     * pre-v7:
     * shr tmp, addr_reg, #TARGET_PAGE_BITS (1)
     * add r2, env, #off & 0xff00
     * and r0, tmp, #(CPU_TLB_SIZE - 1) (2)
     * add r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS (3)
     * ldr r0, [r2, #off & 0xff]! (4)
     * tst addr_reg, #s_mask
     * cmpeq r0, tmp, lsl #TARGET_PAGE_BITS (5)
     *
     * v7 (not implemented yet):
     * ubfx r2, addr_reg, #TARGET_PAGE_BITS, #CPU_TLB_BITS (1)
     * movw tmp, #~TARGET_PAGE_MASK & ~s_mask
     * movw r0, #off
     * add r2, env, r2, lsl #CPU_TLB_ENTRY_BITS (2)
     * bic tmp, addr_reg, tmp
     * ldr r0, [r2, r0]! (3)
     * cmp r0, tmp (4)
     */
# if CPU_TLB_BITS > 8
# error
# endif
    /* TMP = page number of the access (addrlo >> TARGET_PAGE_BITS). */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
                    0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS));

    /* We assume that the offset is contained within 16 bits. */
    assert((tlb_offset & ~0xffff) == 0);
    /* Split an offset too large for an 8-bit immediate: fold the high
       byte into R2 now, keep the low byte for the load below. */
    if (tlb_offset > 0xff) {
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
                        (24 << 7) | (tlb_offset >> 8));
        tlb_offset &= 0xff;
        base = TCG_REG_R2;
    }

    /* R0 = TLB index; R2 = address of the TLB entry. */
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));

    /* Load the tlb comparator.  Use ldrd if needed and available,
       but due to how the pointer needs setting up, ldm isn't useful.
       Base arm5 doesn't have ldrd, but armv5te does. */
    if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_memop_8(s, COND_AL, INSN_LDRD_IMM, TCG_REG_R0,
                        TCG_REG_R2, tlb_offset, 1, 1);
    } else {
        tcg_out_memop_12(s, COND_AL, INSN_LDR_IMM, TCG_REG_R0,
                         TCG_REG_R2, tlb_offset, 1, 1);
        if (TARGET_LONG_BITS == 64) {
            /* High half of the 64-bit comparator. */
            tcg_out_memop_12(s, COND_AL, INSN_LDR_IMM, TCG_REG_R1,
                             TCG_REG_R2, 4, 1, 0);
        }
    }

    /* Check alignment. */
    if (s_bits) {
        tcg_out_dat_imm(s, COND_AL, ARITH_TST,
                        0, addrlo, (1 << s_bits) - 1);
    }

    /* Compare the comparator with the page number (conditional on the
       alignment test having passed, when one was emitted). */
    tcg_out_dat_reg(s, (s_bits ? COND_EQ : COND_AL), ARITH_CMP, 0,
                    TCG_REG_R0, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));

    if (TARGET_LONG_BITS == 64) {
        /* And the high 32 bits of the address, if equal so far. */
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0));
    }
}
df5e0ef7
RH
1212
/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code. */
static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
                                int data_reg, int data_reg2, int addrlo_reg,
                                int addrhi_reg, int mem_index,
                                uint8_t *raddr, uint8_t *label_ptr)
{
    int idx;
    TCGLabelQemuLdst *label;

    /* Hard limit on outstanding slow-path records per TB. */
    if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
        tcg_abort();
    }

    idx = s->nb_qemu_ldst_labels++;
    label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = data_reg;
    label->datahi_reg = data_reg2;
    label->addrlo_reg = addrlo_reg;
    label->addrhi_reg = addrhi_reg;
    label->mem_index = mem_index;
    label->raddr = raddr;              /* return address in the fast path */
    label->label_ptr[0] = label_ptr;   /* branch to patch to the slow path */
}
1240
/* Emit the slow-path code for a qemu_ld: patch the fast path's miss
   branch to land here, call the MMU load helper, move/extend the
   result into the destination register(s), and jump back. */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, data_reg, data_reg2;
    uint8_t *start;

    /* Resolve the fast path's COND_NE branch to this point. */
    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);

    /* Marshal (env, addr[, addrhi], mem_index) for the helper. */
    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
    tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[lb->opc & 3]);

    data_reg = lb->datalo_reg;
    data_reg2 = lb->datahi_reg;

    /* Move/sign-extend the helper's return value (R0[:R1]) into place. */
    start = s->code_ptr;
    switch (lb->opc) {
    case 0 | 4:
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 0:
    case 1:
    case 2:
    default:
        tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 3:
        tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
        tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1);
        break;
    }

    /* For GETPC_LDST in exec-all.h, we architect exactly 2 insns between
       the call and the branch back to straight-line code.  Note that the
       moves above could be elided by register allocation, nor do we know
       which code alternative we chose for extension. */
    switch (s->code_ptr - start) {
    case 0:
        tcg_out_nop(s);
        /* FALLTHRU */
    case 4:
        tcg_out_nop(s);
        /* FALLTHRU */
    case 8:
        break;
    default:
        abort();
    }

    tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
}
1299
/* Emit the slow-path code for a qemu_st: patch the fast path's miss
   branch here, marshal the value, and call the MMU store helper. */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, data_reg, data_reg2;

    /* Resolve the fast path's COND_NE branch to this point. */
    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);

    /* Marshal (env, addr[, addrhi], value, mem_index) for the helper. */
    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    data_reg = lb->datalo_reg;
    data_reg2 = lb->datahi_reg;
    switch (lb->opc) {
    case 0:
        argreg = tcg_out_arg_reg8(s, argreg, data_reg);
        break;
    case 1:
        argreg = tcg_out_arg_reg16(s, argreg, data_reg);
        break;
    case 2:
        argreg = tcg_out_arg_reg32(s, argreg, data_reg);
        break;
    case 3:
        argreg = tcg_out_arg_reg64(s, argreg, data_reg, data_reg2);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
    tcg_out_call(s, (tcg_target_long) qemu_st_helpers[lb->opc & 3]);

    /* For GETPC_LDST in exec-all.h, we architect exactly 2 insns between
       the call and the branch back to straight-line code. */
    tcg_out_nop(s);
    tcg_out_nop(s);
    tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
}
cee87be8
RH
1340#endif /* SOFTMMU */
1341
/* Emit a guest memory load.  opc encodes log2(size) in bits 0..1 and
   sign-extension in bit 2.  With CONFIG_SOFTMMU this emits a TLB probe
   plus fast path, and records a slow-path stub; otherwise it emits a
   direct access offset by GUEST_BASE. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    TCGReg addr_reg, data_reg, data_reg2;
    bool bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
    TCGReg addr_reg2;
    uint8_t *label_ptr;
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif

    data_reg = *args++;
    data_reg2 = (opc == 3 ? *args++ : 0);   /* high half for 64-bit loads */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    mem_index = *args;
    s_bits = opc & 3;

    tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
                     offsetof(CPUArchState, tlb_table[mem_index][0].addr_read));

    /* On TLB miss the flags are NE: branch to the (later-patched) slow path. */
    label_ptr = s->code_ptr;
    tcg_out_b_noaddr(s, COND_NE);

    /* R1 = host addend for this TLB entry (R2 points at addr_read). */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2,
                    offsetof(CPUTLBEntry, addend)
                    - offsetof(CPUTLBEntry, addr_read));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            /* Load unsigned, then byteswap with sign extension. */
            tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld16s_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 3:
        if (bswap) {
            /* Guest-BE 64-bit: low word at +4, high word at +0. */
            tcg_out_ld32_rwb(s, COND_AL, data_reg2, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_AL, data_reg, TCG_REG_R1, 4);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_AL, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    /* Register the slow-path stub to be emitted at end of TB. */
    add_qemu_ldst_label(s, 1, opc, data_reg, data_reg2, addr_reg, addr_reg2,
                        mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Fold GUEST_BASE into addr_reg, 8 rotated bits at a time. */
        uint32_t offset = GUEST_BASE;
        int i, rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_TMP;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            /* Load the half that clobbers addr_reg last. */
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
        }
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
        }
        break;
    }
#endif
}
1479
/* Emit a guest memory store.  opc is log2 of the access size.  With
   CONFIG_SOFTMMU this emits a TLB probe plus fast path and records a
   slow-path stub; otherwise a direct access offset by GUEST_BASE. */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    TCGReg addr_reg, data_reg, data_reg2;
    bool bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
    TCGReg addr_reg2;
    uint8_t *label_ptr;
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif

    data_reg = *args++;
    data_reg2 = (opc == 3 ? *args++ : 0);   /* high half for 64-bit stores */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    mem_index = *args;
    s_bits = opc & 3;

    tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
                     offsetof(CPUArchState,
                              tlb_table[mem_index][0].addr_write));

    /* On TLB miss the flags are NE: branch to the (later-patched) slow path. */
    label_ptr = s->code_ptr;
    tcg_out_b_noaddr(s, COND_NE);

    /* R1 = host addend for this TLB entry (R2 points at addr_write). */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2,
                    offsetof(CPUTLBEntry, addend)
                    - offsetof(CPUTLBEntry, addr_write));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        if (bswap) {
            /* Byteswap into R0 so the source register is preserved. */
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_r(s, COND_AL, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st16_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_r(s, COND_AL, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st32_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 3:
        if (bswap) {
            /* Guest-BE 64-bit: swapped high half at +0, low half at +4. */
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_rwb(s, COND_AL, TCG_REG_R0, TCG_REG_R1, addr_reg);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R1, 4);
        } else {
            tcg_out_st32_rwb(s, COND_AL, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_st32_12(s, COND_AL, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    /* Register the slow-path stub to be emitted at end of TB. */
    add_qemu_ldst_label(s, 0, opc, data_reg, data_reg2, addr_reg, addr_reg2,
                        mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Fold GUEST_BASE into addr_reg, 8 rotated bits at a time. */
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R1;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
1603
/* Branch target used by INDEX_op_exit_tb below.
   NOTE(review): assigned outside this chunk, presumably when the
   prologue/epilogue is generated -- confirm against the full file. */
static uint8_t *tb_ret_addr;
1605
/* Central TCG opcode dispatcher: emit ARM host code for one TCG op.
   args/const_args describe the operands; const_args[i] non-zero means
   args[i] is an immediate rather than a register. */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        if (use_armv7_instructions || check_fit_imm(args[0])) {
            /* Return value fits in a mov; set R0 and jump to epilogue. */
            tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
        } else {
            /* Place the return value in a literal word after the goto and
               patch the ldr's offset byte to reach it. */
            uint8_t *ld_ptr = s->code_ptr;
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
            tcg_out32(s, args[0]);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b_noaddr(s, COND_AL);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                /* const - reg: reverse subtract. */
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through. */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        /* Redirect the low result to TMP if it would clobber an input
           still needed for the high half. */
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        /* Same clobber-avoidance as add2. */
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        /* neg = rsb dst, src, #0 */
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through. */
    gen_shift32:
        /* All shifts become a mov with the chosen shifter operand. */
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            /* rotl(x, n) == rotr(x, 32 - n). */
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            /* TMP = 32 - n, then rotate right by TMP. */
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[1], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                       args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ --> a0 == a2 && a1 == a3,
         * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
         * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[3], const_args[3]);
        tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[2], const_args[2]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        /* Conditionally set 1 on match, 0 on the inverted condition. */
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[2], args[4], const_args[4]);
        tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[3], const_args[3]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_rem_i32:
        /* rem = a - (a / b) * b, via the hardware divide. */
        tcg_out_sdiv(s, COND_AL, TCG_REG_TMP, args[1], args[2]);
        tcg_out_mul32(s, COND_AL, TCG_REG_TMP, TCG_REG_TMP, args[2]);
        tcg_out_dat_reg(s, COND_AL, ARITH_SUB, args[0], args[1], TCG_REG_TMP,
                        SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_remu_i32:
        tcg_out_udiv(s, COND_AL, TCG_REG_TMP, args[1], args[2]);
        tcg_out_mul32(s, COND_AL, TCG_REG_TMP, TCG_REG_TMP, args[2]);
        tcg_out_dat_reg(s, COND_AL, ARITH_SUB, args[0], args[1], TCG_REG_TMP,
                        SHIFT_IMM_LSL(0));
        break;

    default:
        tcg_abort();
    }
}
1946
df5e0ef7
RH
#ifdef CONFIG_SOFTMMU
/* Generate TB finalization at the end of block: emit the out-of-line
   slow paths for every qemu_ld/st label queued during translation. */
void tcg_out_tb_finalize(TCGContext *s)
{
    int n = s->nb_qemu_ldst_labels;
    int i;

    for (i = 0; i < n; i++) {
        TCGLabelQemuLdst *lb = &s->qemu_ldst_labels[i];

        if (!lb->is_ld) {
            tcg_out_qemu_st_slow_path(s, lb);
        } else {
            tcg_out_qemu_ld_slow_path(s, lb);
        }
    }
}
#endif /* SOFTMMU */
1962
811d4cf4
AZ
/* Operand-constraint table for every TCG opcode this ARM backend
   implements, terminated by { -1 }.  Each string describes one operand;
   the letters ("r", "I", "K", "N", "l"/"L", "s", "0", "Z", ...) are the
   backend's register/immediate constraint classes -- presumably decoded
   by target_parse_constraint elsewhere in this file; verify there. */
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    /* Guest memory loads/stores: any register for both value and base. */
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rIN" } },
    { INDEX_op_sub_i32, { "r", "rI", "rIN" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_muls2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rIK" } },
    { INDEX_op_andc_i32, { "r", "r", "rIK" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rIN" } },
    { INDEX_op_setcond_i32, { "r", "r", "rIN" } },
    { INDEX_op_movcond_i32, { "r", "r", "rIN", "rIK", "0" } },

    /* 64-bit arithmetic built from 32-bit register pairs. */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rIN", "rIK" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rI", "rIN", "rIK" } },
    { INDEX_op_brcond2_i32, { "r", "r", "rIN", "rIN" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } },

    /* qemu_ld/st: a 64-bit guest address needs one extra operand. */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "s", "s", "s" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "s", "s", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },

    /* Hardware integer division is optional on ARM. */
#if TCG_TARGET_HAS_div_i32
    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_rem_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },
    { INDEX_op_remu_i32, { "r", "r", "r" } },
#endif

    { -1 },
};
2053
e4d58b41 2054static void tcg_target_init(TCGContext *s)
811d4cf4 2055{
e4a7d5e8 2056 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
811d4cf4 2057 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
e4a7d5e8
AJ
2058 (1 << TCG_REG_R0) |
2059 (1 << TCG_REG_R1) |
2060 (1 << TCG_REG_R2) |
2061 (1 << TCG_REG_R3) |
2062 (1 << TCG_REG_R12) |
2063 (1 << TCG_REG_R14));
811d4cf4
AZ
2064
2065 tcg_regset_clear(s->reserved_regs);
811d4cf4 2066 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
4346457a 2067 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
e4a7d5e8 2068 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
811d4cf4
AZ
2069
2070 tcg_add_target_add_op_defs(arm_op_defs);
2071}
2072
2a534aff
RH
/* Load a 32-bit value from [arg1 + arg2] into register arg.
   TYPE is ignored: only TCG_TYPE_I32 is used on this 32-bit backend. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
2078
2a534aff
RH
/* Store the 32-bit register arg to [arg1 + arg2].
   TYPE is ignored: only TCG_TYPE_I32 is used on this 32-bit backend. */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
2084
2a534aff
RH
/* Register-to-register move: emit an unconditional MOV ret, arg
   (LSL #0, i.e. no shift applied to the source). */
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}
2090
/* Load the constant ARG into register ret; tcg_out_movi32 picks the
   cheapest instruction sequence for the value. */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
2096
/* Emit the prologue/epilogue that brackets all generated code.
   On entry (a normal C call): first argument register carries the CPU
   env pointer, the second the address of the TB code to jump to. */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size;

    /* Calling convention requires us to save r4-r11 and lr. */
    /* stmdb sp!, { r4 - r11, lr }  (raw encoding, condition AL) */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);

    /* Allocate the local stack frame. */
    frame_size = TCG_STATIC_CALL_ARGS_SIZE;
    frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
    /* We saved an odd number of registers above; keep an 8 aligned stack.
       The +4 compensates for the 9 registers (36 bytes) pushed above. */
    frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
                  & -TCG_TARGET_STACK_ALIGN) + 4;

    /* sp -= frame_size, then tell TCG where its temp buffer lives. */
    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, frame_size, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* Move the env pointer (first C argument) into TCG_AREG0. */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* Tail-jump into the TB whose address is the second C argument. */
    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
    /* Everything below is only reached by TBs branching to tb_ret_addr. */
    tb_ret_addr = s->code_ptr;

    /* Epilogue. We branch here via tb_ret_addr. */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, frame_size, 1);

    /* ldmia sp!, { r4 - r11, pc }  (pops saved regs and returns) */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}
This page took 0.99198 seconds and 4 git commands to generate.