/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

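/* Install the value computed for a relocation.  R_ARM_ABS32 stores the
 * absolute value; R_ARM_PC24 patches the 24-bit, word-granular branch
 * offset, which the architecture takes relative to the branch address
 * plus 8. */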
static void patch_reloc(uint8_t *code_ptr, int type,
                tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        *(uint32_t *) code_ptr = value;
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        tcg_abort();

    case R_ARM_PC24:
        *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
                (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
        break;
    }
}

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;

    case 'r':
#ifndef CONFIG_SOFTMMU
    case 'd':
    case 'D':
    case 'x':
    case 'X':
#endif
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

#ifdef CONFIG_SOFTMMU
    /* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
    case 'x':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    /* qemu_ld64 data_reg */
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r1 is still needed to load data_reg2, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    /* qemu_ld/st64 data_reg2 */
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0, r1 and optionally r2 will be overwritten by the address
         * and the low word of data, so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
# if TARGET_LONG_BITS == 64
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
# endif
        break;

# if TARGET_LONG_BITS == 64
    /* qemu_ld/st addr_reg2 */
    case 'X':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 will be overwritten by the low word of base, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;
# endif
#endif

    case '1':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        break;

    case '2':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}

static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value right-rotated by an even amount between 0 and 30. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}
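
/* For example (an informal sketch of the encoding): 0x0000ff00 is 0xff
 * rotated right by 24, so encode_imm(0x0000ff00) returns 24; callers then
 * rebuild the shifter operand as rotl(imm, rot) | (rot << 7), i.e.
 * imm8 = 0xff in bits 7:0 and rot/2 = 12 in bits 11:8.  A value such as
 * 0x00000101 has no such encoding and yields -1. */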

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val,
                const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
        return 1;
    else
        return 0;
}

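/* ARM data-processing opcode values (bits 24:21 of the instruction).  The
 * comparison ops (TST/CMP/CMN) only update the flags, so TO_CPSR sets the
 * S bit (bit 20) for them.  The SHIFT_* macros below build the shifter
 * operand field: an immediate shift amount in bits 11:7 or a shift
 * register in bits 11:8, with the shift type in bits 6:5. */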
enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};

#define TO_CPSR(opc) \
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)

enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

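/* Emit a conditional branch with no target yet: only the top byte
 * (condition and branch opcode) is written here, and the 24-bit offset
 * is filled in later through patch_reloc(). */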
static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}

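/* Double-word ALU operation (e.g. ADD/ADC for a 64-bit add).  When the
 * low destination rd0 overlaps an input of the high half, the low result
 * is staged in the scratch register r8 and moved into place afterwards. */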
static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}

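/* For instance (a sketch of the pre-ARMv7 path below): 0x12345678 is
 * materialised as
 *   mov  rd, #0x78
 *   orr  rd, rd, #0x5600
 *   orr  rd, rd, #0x340000
 *   orr  rd, rd, #0x12000000
 * while on ARMv7-A a movw/movt pair is emitted instead. */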
static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, int32_t arg)
{
    int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);

    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions.  */

    if (arg < 0 && arg > -0x100)
        return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);

    if (offset < 0x100 && offset > -0x100)
        return offset >= 0 ?
                tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
                tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);

#ifdef __ARM_ARCH_7A__
    /* use movw/movt */
    /* movw */
    tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
              | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
    if (arg & 0xffff0000)
        /* movt */
        tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                  | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
#else
    tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
    if (arg & 0x0000ff00)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 8) & 0xff) | 0xc00);
    if (arg & 0x00ff0000)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 16) & 0xff) | 0x800);
    if (arg & 0xff000000)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 24) & 0xff) | 0x400);
#endif
}

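/* Older ARM cores specify MUL with rd == rm as unpredictable, so either
 * swap the operands or go through the scratch register r8 when the
 * destination would clash with an input. */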
static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, 8, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}

static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}

static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* Register pre-increment with base writeback. */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000f0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000d0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
#endif
    }
}

static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
#endif

    val = addr - (tcg_target_long) s->code_ptr;
    if (val < 0x01fffffd && val > -0x01fffffd)
        tcg_out_bl(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4);
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
            tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15);
            tcg_out_bx(s, cond, TCG_REG_R9);
        }
#endif
    }

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
#endif
}

static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
#endif
    /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
    tcg_out_bx(s, cond, arg);
#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
#endif
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}

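/* Call the 64-bit division and remainder helpers.  The call-clobbered
 * registers are saved around the two calls, the quotient is parked in r8
 * between them, and the final pops skip whichever stack slots correspond
 * to the registers now holding the results. */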
static void tcg_out_div_helper(TCGContext *s, int cond, const TCGArg *args,
                void *helper_div, void *helper_rem, int shift)
{
    int div_reg = args[0];
    int rem_reg = args[1];

    /* stmdb sp!, { r0 - r3, ip, lr } */
    /* (Note that we need an even number of registers as per EABI) */
    tcg_out32(s, (cond << 28) | 0x092d500f);

    tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);

    tcg_out_call(s, cond, (uint32_t) helper_div);
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 0, SHIFT_IMM_LSL(0));

    /* ldmia sp, { r0 - r3, ip, lr } */
    tcg_out32(s, (cond << 28) | 0x089d500f);

    tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);

    tcg_out_call(s, cond, (uint32_t) helper_rem);

    tcg_out_dat_reg(s, cond, ARITH_MOV, rem_reg, 0, 0, SHIFT_IMM_LSL(0));
    tcg_out_dat_reg(s, cond, ARITH_MOV, div_reg, 0, 8, SHIFT_IMM_LSL(0));

    /* ldr r0, [sp], #4 */
    if (rem_reg != 0 && div_reg != 0) {
        tcg_out32(s, (cond << 28) | 0x04bd0004);
    } else {
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);
    }
    /* ldr r1, [sp], #4 */
    if (rem_reg != 1 && div_reg != 1) {
        tcg_out32(s, (cond << 28) | 0x04bd1004);
    } else {
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);
    }
    /* ldr r2, [sp], #4 */
    if (rem_reg != 2 && div_reg != 2) {
        tcg_out32(s, (cond << 28) | 0x04bd2004);
    } else {
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);
    }
    /* ldr r3, [sp], #4 */
    if (rem_reg != 3 && div_reg != 3) {
        tcg_out32(s, (cond << 28) | 0x04bd3004);
    } else {
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);
    }
    /* ldr ip, [sp], #4 */
    if (rem_reg != 12 && div_reg != 12) {
        tcg_out32(s, (cond << 28) | 0x04bdc004);
    } else {
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);
    }
    /* ldr lr, [sp], #4 */
    if (rem_reg != 14 && div_reg != 14) {
        tcg_out32(s, (cond << 28) | 0x04bde004);
    } else {
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 4);
    }
}

#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

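/* Load from guest memory.  With the softmmu the fast path performs the TLB
 * lookup and the COND_EQ-predicated loads; on a hit a COND_EQ branch
 * (recorded in label_ptr and patched once the slow path has been emitted)
 * skips the call to the __ld*_mmu helper that handles the miss case. */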
static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
# if CPU_TLB_BITS > 8
#  error
# endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * unlikely to otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
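    /* (The 12-bit immediate below is built by hand: imm8 is
     * mem_index << (TLB_SHIFT & 1), rotated right by 32 - (TLB_SHIFT & ~1),
     * which works out to mem_index << TLB_SHIFT, i.e. the byte size of one
     * tlb_table[] row.) */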
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        tcg_out_ld32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
# endif

    /* TODO: move this code to where the constants pool will be */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
# else
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
# endif
    tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

    switch (opc) {
    case 0 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, 0, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, 0, SHIFT_IMM_ASR(24));
        break;
    case 1 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, 0, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, 0, SHIFT_IMM_ASR(16));
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
        break;
    case 3:
        if (data_reg != 0)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
        if (data_reg2 != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg2, 0, 1, SHIFT_IMM_LSL(0));
        break;
    }

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
# endif

    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = 8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}

static inline void tcg_out_qemu_st(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * unlikely to otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

    /* TODO: move this code to where the constants pool will be */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, 1, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 2:
        if (data_reg != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 3:
        if (data_reg != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            1, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg2, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    }
# else
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, 2, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 2:
        if (data_reg != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index);
        tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != 3)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            3, 0, data_reg2, SHIFT_IMM_LSL(0));
        break;
    }
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
# endif

    tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);
# if TARGET_LONG_BITS == 64
    if (opc == 3)
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
# endif

    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = 8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        break;
    }
#endif
}

static uint8_t *tb_ret_addr;

650bbb36 | 1332 | static inline void tcg_out_op(TCGContext *s, int opc, |
811d4cf4 AZ |
1333 | const TCGArg *args, const int *const_args) |
1334 | { | |
1335 | int c; | |
1336 | ||
1337 | switch (opc) { | |
1338 | case INDEX_op_exit_tb: | |
1339 | #ifdef SAVE_LR | |
1340 | if (args[0] >> 8) | |
1341 | tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0); | |
1342 | else | |
1343 | tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]); | |
1344 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0)); | |
1345 | if (args[0] >> 8) | |
1346 | tcg_out32(s, args[0]); | |
1347 | #else | |
fe33867b AZ |
1348 | { |
1349 | uint8_t *ld_ptr = s->code_ptr; | |
1350 | if (args[0] >> 8) | |
1351 | tcg_out_ld32_12(s, COND_AL, 0, 15, 0); | |
1352 | else | |
1353 | tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]); | |
1354 | tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr); | |
1355 | if (args[0] >> 8) { | |
1356 | *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8; | |
1357 | tcg_out32(s, args[0]); | |
1358 | } | |
1359 | } | |
811d4cf4 AZ |
1360 | #endif |
1361 | break; | |
1362 | case INDEX_op_goto_tb: | |
1363 | if (s->tb_jmp_offset) { | |
1364 | /* Direct jump method */ | |
fe33867b | 1365 | #if defined(USE_DIRECT_JUMP) |
811d4cf4 AZ |
1366 | s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; |
1367 | tcg_out_b(s, COND_AL, 8); | |
1368 | #else | |
1369 | tcg_out_ld32_12(s, COND_AL, 15, 15, -4); | |
1370 | s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; | |
1371 | tcg_out32(s, 0); | |
1372 | #endif | |
1373 | } else { | |
1374 | /* Indirect jump method */ | |
1375 | #if 1 | |
1376 | c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8); | |
1377 | if (c > 0xfff || c < -0xfff) { | |
1378 | tcg_out_movi32(s, COND_AL, TCG_REG_R0, | |
1379 | (tcg_target_long) (s->tb_next + args[0])); | |
1380 | tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0); | |
1381 | } else | |
1382 | tcg_out_ld32_12(s, COND_AL, 15, 15, c); | |
1383 | #else | |
1384 | tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0); | |
1385 | tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0); | |
1386 | tcg_out32(s, (tcg_target_long) (s->tb_next + args[0])); | |
1387 | #endif | |
1388 | } | |
1389 | s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf; | |
1390 | break; | |
1391 | case INDEX_op_call: | |
1392 | if (const_args[0]) | |
1393 | tcg_out_call(s, COND_AL, args[0]); | |
1394 | else | |
1395 | tcg_out_callr(s, COND_AL, args[0]); | |
1396 | break; | |
1397 | case INDEX_op_jmp: | |
1398 | if (const_args[0]) | |
1399 | tcg_out_goto(s, COND_AL, args[0]); | |
1400 | else | |
1401 | tcg_out_bx(s, COND_AL, args[0]); | |
1402 | break; | |
1403 | case INDEX_op_br: | |
1404 | tcg_out_goto_label(s, COND_AL, args[0]); | |
1405 | break; | |
1406 | ||
1407 | case INDEX_op_ld8u_i32: | |
1408 | tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]); | |
1409 | break; | |
1410 | case INDEX_op_ld8s_i32: | |
1411 | tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]); | |
1412 | break; | |
1413 | case INDEX_op_ld16u_i32: | |
1414 | tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]); | |
1415 | break; | |
1416 | case INDEX_op_ld16s_i32: | |
1417 | tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]); | |
1418 | break; | |
1419 | case INDEX_op_ld_i32: | |
1420 | tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]); | |
1421 | break; | |
1422 | case INDEX_op_st8_i32: | |
1423 | tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]); | |
1424 | break; | |
1425 | case INDEX_op_st16_i32: | |
1426 | tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]); | |
1427 | break; | |
1428 | case INDEX_op_st_i32: | |
1429 | tcg_out_st32(s, COND_AL, args[0], args[1], args[2]); | |
1430 | break; | |
1431 | ||
1432 | case INDEX_op_mov_i32: | |
1433 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, | |
1434 | args[0], 0, args[1], SHIFT_IMM_LSL(0)); | |
1435 | break; | |
1436 | case INDEX_op_movi_i32: | |
1437 | tcg_out_movi32(s, COND_AL, args[0], args[1]); | |
1438 | break; | |
1439 | case INDEX_op_add_i32: | |
1440 | c = ARITH_ADD; | |
1441 | goto gen_arith; | |
1442 | case INDEX_op_sub_i32: | |
1443 | c = ARITH_SUB; | |
1444 | goto gen_arith; | |
1445 | case INDEX_op_and_i32: | |
1446 | c = ARITH_AND; | |
1447 | goto gen_arith; | |
1448 | case INDEX_op_or_i32: | |
1449 | c = ARITH_ORR; | |
1450 | goto gen_arith; | |
1451 | case INDEX_op_xor_i32: | |
1452 | c = ARITH_EOR; | |
1453 | /* Fall through. */ | |
1454 | gen_arith: | |
94953e6d LD |
1455 | if (const_args[2]) { |
1456 | int rot; | |
1457 | rot = encode_imm(args[2]); | |
cb4e581f | 1458 | tcg_out_dat_imm(s, COND_AL, c, |
94953e6d LD |
1459 | args[0], args[1], rotl(args[2], rot) | (rot << 7)); |
1460 | } else | |
cb4e581f LD |
1461 | tcg_out_dat_reg(s, COND_AL, c, |
1462 | args[0], args[1], args[2], SHIFT_IMM_LSL(0)); | |
811d4cf4 AZ |
1463 | break; |
1464 | case INDEX_op_add2_i32: | |
1465 | tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC, | |
1466 | args[0], args[1], args[2], args[3], | |
1467 | args[4], args[5], SHIFT_IMM_LSL(0)); | |
1468 | break; | |
1469 | case INDEX_op_sub2_i32: | |
1470 | tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC, | |
1471 | args[0], args[1], args[2], args[3], | |
1472 | args[4], args[5], SHIFT_IMM_LSL(0)); | |
1473 | break; | |
650bbb36 AZ |
1474 | case INDEX_op_neg_i32: |
1475 | tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0); | |
1476 | break; | |
f878d2d2 LD |
1477 | case INDEX_op_not_i32: |
1478 | tcg_out_dat_reg(s, COND_AL, | |
1479 | ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0)); | |
1480 | break; | |
811d4cf4 AZ |
1481 | case INDEX_op_mul_i32: |
1482 | tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]); | |
1483 | break; | |
1484 | case INDEX_op_mulu2_i32: | |
1485 | tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]); | |
1486 | break; | |
1487 | case INDEX_op_div2_i32: | |
1488 | tcg_out_div_helper(s, COND_AL, args, | |
1489 | tcg_helper_div_i64, tcg_helper_rem_i64, | |
1490 | SHIFT_IMM_ASR(31)); | |
1491 | break; | |
1492 | case INDEX_op_divu2_i32: | |
1493 | tcg_out_div_helper(s, COND_AL, args, | |
1494 | tcg_helper_divu_i64, tcg_helper_remu_i64, | |
1495 | SHIFT_IMM_LSR(31)); | |
1496 | break; | |
1497 | /* XXX: Perhaps args[2] & 0x1f is wrong */ | |
1498 | case INDEX_op_shl_i32: | |
1499 | c = const_args[2] ? | |
1500 | SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]); | |
1501 | goto gen_shift32; | |
1502 | case INDEX_op_shr_i32: | |
1503 | c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) : | |
1504 | SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]); | |
1505 | goto gen_shift32; | |
1506 | case INDEX_op_sar_i32: | |
1507 | c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) : | |
1508 | SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]); | |
1509 | /* Fall through. */ | |
1510 | gen_shift32: | |
1511 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c); | |
1512 | break; | |
1513 | ||
1514 | case INDEX_op_brcond_i32: | |
023e77f8 AJ |
1515 | if (const_args[1]) { |
1516 | int rot; | |
1517 | rot = encode_imm(args[1]); | |
1518 | tcg_out_dat_imm(s, COND_AL, ARITH_CMP, | |
1519 | 0, args[0], rotl(args[1], rot) | (rot << 7)); | |
1520 | } else { | |
1521 | tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, | |
1522 | args[0], args[1], SHIFT_IMM_LSL(0)); | |
1523 | } | |
811d4cf4 AZ |
1524 | tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]); |
1525 | break; | |
1526 | case INDEX_op_brcond2_i32: | |
1527 | /* The resulting conditions are: | |
1528 | * TCG_COND_EQ --> a0 == a2 && a1 == a3, | |
1529 | * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3, | |
1530 | * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3, | |
1531 | * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3), | |
1532 | * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3), | |
1533 | * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3, | |
1534 | */ | |
1535 | tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, | |
1536 | args[1], args[3], SHIFT_IMM_LSL(0)); | |
1537 | tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, | |
1538 | args[0], args[2], SHIFT_IMM_LSL(0)); | |
1539 | tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]); | |
1540 | break; | |
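/* Mechanically, the first CMP sets the flags from the high words and the
 * second CMP, predicated on COND_EQ, replaces them with the low-word
 * comparison only when the high words were equal.  The conditional branch
 * therefore tests the high-word result, or the low-word result on high-word
 * equality.  For example, for TCG_COND_LTU with a1 = 1 and a3 = 2, the first
 * CMP already yields "lower", the second CMP is skipped, and the branch is
 * taken regardless of a0 and a2.
 */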
f72a6cd7 | 1541 | case INDEX_op_setcond_i32: |
023e77f8 AJ |
1542 | if (const_args[2]) { |
1543 | int rot; | |
1544 | rot = encode_imm(args[2]); | |
1545 | tcg_out_dat_imm(s, COND_AL, ARITH_CMP, | |
1546 | 0, args[1], rotl(args[2], rot) | (rot << 7)); | |
1547 | } else { | |
1548 | tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, | |
1549 | args[1], args[2], SHIFT_IMM_LSL(0)); | |
1550 | } | |
f72a6cd7 AJ |
1551 | tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]], |
1552 | ARITH_MOV, args[0], 0, 1); | |
1553 | tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])], | |
1554 | ARITH_MOV, args[0], 0, 0); | |
1555 | break; | |
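/* The setcond sequence is the usual branch-free ARM idiom: after the CMP,
 * a MOV predicated on the condition writes 1 and a MOV predicated on the
 * inverted condition writes 0, so args[0] always ends up holding exactly
 * 0 or 1.
 */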
e0404769 AJ |
1556 | case INDEX_op_setcond2_i32: |
1557 | /* See brcond2_i32 comment */ | |
1558 | tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, | |
1559 | args[2], args[4], SHIFT_IMM_LSL(0)); | |
1560 | tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, | |
1561 | args[1], args[3], SHIFT_IMM_LSL(0)); | |
1562 | tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]], | |
1563 | ARITH_MOV, args[0], 0, 1); | |
1564 | tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])], | |
1565 | ARITH_MOV, args[0], 0, 0); | |
b525f0a9 | 1566 | break; |
811d4cf4 AZ |
1567 | |
1568 | case INDEX_op_qemu_ld8u: | |
1569 | tcg_out_qemu_ld(s, COND_AL, args, 0); | |
1570 | break; | |
1571 | case INDEX_op_qemu_ld8s: | |
1572 | tcg_out_qemu_ld(s, COND_AL, args, 0 | 4); | |
1573 | break; | |
1574 | case INDEX_op_qemu_ld16u: | |
1575 | tcg_out_qemu_ld(s, COND_AL, args, 1); | |
1576 | break; | |
1577 | case INDEX_op_qemu_ld16s: | |
1578 | tcg_out_qemu_ld(s, COND_AL, args, 1 | 4); | |
1579 | break; | |
1580 | case INDEX_op_qemu_ld32u: | |
1581 | tcg_out_qemu_ld(s, COND_AL, args, 2); | |
1582 | break; | |
1583 | case INDEX_op_qemu_ld64: | |
1584 | tcg_out_qemu_ld(s, COND_AL, args, 3); | |
1585 | break; | |
650bbb36 | 1586 | |
811d4cf4 AZ |
1587 | case INDEX_op_qemu_st8: |
1588 | tcg_out_qemu_st(s, COND_AL, args, 0); | |
1589 | break; | |
1590 | case INDEX_op_qemu_st16: | |
1591 | tcg_out_qemu_st(s, COND_AL, args, 1); | |
1592 | break; | |
1593 | case INDEX_op_qemu_st32: | |
1594 | tcg_out_qemu_st(s, COND_AL, args, 2); | |
1595 | break; | |
1596 | case INDEX_op_qemu_st64: | |
1597 | tcg_out_qemu_st(s, COND_AL, args, 3); | |
1598 | break; | |
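/* Judging from the cases above, the last argument of tcg_out_qemu_ld/st is
 * the log2 of the access size (0 = 8 bit, 1 = 16 bit, 2 = 32 bit,
 * 3 = 64 bit), with bit 2 (the "| 4") requesting sign extension on loads.
 */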
1599 | ||
1600 | case INDEX_op_ext8s_i32: | |
7990496d LD |
1601 | #ifdef __ARM_ARCH_7A__ |
1602 | /* sxtb */ | |
1603 | tcg_out32(s, 0xe6af0070 | (args[0] << 12) | args[1]); | |
1604 | #else | |
811d4cf4 AZ |
1605 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, |
1606 | args[0], 0, args[1], SHIFT_IMM_LSL(24)); | |
1607 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, | |
1608 | args[0], 0, args[0], SHIFT_IMM_ASR(24)); | |
7990496d | 1609 | #endif |
811d4cf4 AZ |
1610 | break; |
1611 | case INDEX_op_ext16s_i32: | |
7990496d LD |
1612 | #ifdef __ARM_ARCH_7A__ |
1613 | /* sxth */ | |
1614 | tcg_out32(s, 0xe6bf0070 | (args[0] << 12) | args[1]); | |
1615 | #else | |
811d4cf4 AZ |
1616 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, |
1617 | args[0], 0, args[1], SHIFT_IMM_LSL(16)); | |
1618 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, | |
1619 | args[0], 0, args[0], SHIFT_IMM_ASR(16)); | |
7990496d | 1620 | #endif |
811d4cf4 AZ |
1621 | break; |
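/* The pre-ARMv7 fallbacks above use the classic shift-pair sign extension:
 * shift left so the byte or halfword occupies the top bits, then shift right
 * arithmetically to drag the sign bit down, roughly (int32_t)(x << 24) >> 24
 * in C for a byte.  On ARMv7-A a single sxtb/sxth does the same job,
 * hand-encoded here with tcg_out32().
 */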
1622 | ||
1623 | default: | |
1624 | tcg_abort(); | |
1625 | } | |
1626 | } | |
1627 | ||
1628 | static const TCGTargetOpDef arm_op_defs[] = { | |
1629 | { INDEX_op_exit_tb, { } }, | |
1630 | { INDEX_op_goto_tb, { } }, | |
1631 | { INDEX_op_call, { "ri" } }, | |
1632 | { INDEX_op_jmp, { "ri" } }, | |
1633 | { INDEX_op_br, { } }, | |
1634 | ||
1635 | { INDEX_op_mov_i32, { "r", "r" } }, | |
1636 | { INDEX_op_movi_i32, { "r" } }, | |
1637 | ||
1638 | { INDEX_op_ld8u_i32, { "r", "r" } }, | |
1639 | { INDEX_op_ld8s_i32, { "r", "r" } }, | |
1640 | { INDEX_op_ld16u_i32, { "r", "r" } }, | |
1641 | { INDEX_op_ld16s_i32, { "r", "r" } }, | |
1642 | { INDEX_op_ld_i32, { "r", "r" } }, | |
1643 | { INDEX_op_st8_i32, { "r", "r" } }, | |
1644 | { INDEX_op_st16_i32, { "r", "r" } }, | |
1645 | { INDEX_op_st_i32, { "r", "r" } }, | |
1646 | ||
1647 | /* TODO: "r", "r", "ri" */ | |
cb4e581f LD |
1648 | { INDEX_op_add_i32, { "r", "r", "rI" } }, |
1649 | { INDEX_op_sub_i32, { "r", "r", "rI" } }, | |
811d4cf4 AZ |
1650 | { INDEX_op_mul_i32, { "r", "r", "r" } }, |
1651 | { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } }, | |
1652 | { INDEX_op_div2_i32, { "r", "r", "r", "1", "2" } }, | |
1653 | { INDEX_op_divu2_i32, { "r", "r", "r", "1", "2" } }, | |
cb4e581f LD |
1654 | { INDEX_op_and_i32, { "r", "r", "rI" } }, |
1655 | { INDEX_op_or_i32, { "r", "r", "rI" } }, | |
1656 | { INDEX_op_xor_i32, { "r", "r", "rI" } }, | |
650bbb36 | 1657 | { INDEX_op_neg_i32, { "r", "r" } }, |
f878d2d2 | 1658 | { INDEX_op_not_i32, { "r", "r" } }, |
811d4cf4 AZ |
1659 | |
1660 | { INDEX_op_shl_i32, { "r", "r", "ri" } }, | |
1661 | { INDEX_op_shr_i32, { "r", "r", "ri" } }, | |
1662 | { INDEX_op_sar_i32, { "r", "r", "ri" } }, | |
1663 | ||
023e77f8 AJ |
1664 | { INDEX_op_brcond_i32, { "r", "rI" } }, |
1665 | { INDEX_op_setcond_i32, { "r", "r", "rI" } }, | |
811d4cf4 AZ |
1666 | |
1667 | /* TODO: "r", "r", "r", "r", "ri", "ri" */ | |
1668 | { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } }, | |
1669 | { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } }, | |
1670 | { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } }, | |
e0404769 | 1671 | { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } }, |
811d4cf4 AZ |
1672 | |
1673 | { INDEX_op_qemu_ld8u, { "r", "x", "X" } }, | |
1674 | { INDEX_op_qemu_ld8s, { "r", "x", "X" } }, | |
1675 | { INDEX_op_qemu_ld16u, { "r", "x", "X" } }, | |
1676 | { INDEX_op_qemu_ld16s, { "r", "x", "X" } }, | |
1677 | { INDEX_op_qemu_ld32u, { "r", "x", "X" } }, | |
d0660ed4 | 1678 | { INDEX_op_qemu_ld64, { "d", "r", "x", "X" } }, |
811d4cf4 | 1679 | |
3979144c PB |
1680 | { INDEX_op_qemu_st8, { "x", "x", "X" } }, |
1681 | { INDEX_op_qemu_st16, { "x", "x", "X" } }, | |
1682 | { INDEX_op_qemu_st32, { "x", "x", "X" } }, | |
1683 | { INDEX_op_qemu_st64, { "x", "D", "x", "X" } }, | |
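/* "r" and "rI" are the generic register and ARM-encodable-immediate
 * constraints.  The x, X, d and D letters used by the qemu_ld/st entries are
 * target-specific register classes set up in target_parse_constraint()
 * earlier in this file; they appear to keep the operands away from the
 * registers needed for the softmmu TLB lookup and the helper call.
 */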
811d4cf4 AZ |
1684 | |
1685 | { INDEX_op_ext8s_i32, { "r", "r" } }, | |
1686 | { INDEX_op_ext16s_i32, { "r", "r" } }, | |
1687 | ||
1688 | { -1 }, | |
1689 | }; | |
1690 | ||
1691 | void tcg_target_init(TCGContext *s) | |
1692 | { | |
20cb400d | 1693 | #if !defined(CONFIG_USER_ONLY) |
811d4cf4 AZ |
1694 | /* fail safe */ |
1695 | if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) | |
1696 | tcg_abort(); | |
20cb400d | 1697 | #endif |
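/* The check above presumably guards the TLB addressing in tcg_out_qemu_ld/st,
 * which turns a TLB index into a byte offset by shifting by
 * CPU_TLB_ENTRY_BITS and so requires sizeof(CPUTLBEntry) to be exactly that
 * power of two.
 */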
811d4cf4 AZ |
1698 | |
1699 | tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, | |
1700 | ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8)); | |
1701 | tcg_regset_set32(tcg_target_call_clobber_regs, 0, | |
1702 | ((2 << TCG_REG_R3) - 1) | | |
1703 | (1 << TCG_REG_R12) | (1 << TCG_REG_R14)); | |
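/* ((2 << TCG_REG_R14) - 1) sets bits 0..14, i.e. r0-r14, with r8 masked out
 * because it is reserved below.  The call-clobbered set matches the AAPCS:
 * the argument registers r0-r3 plus the scratch register r12 (ip) and
 * r14 (lr).
 */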
1704 | ||
1705 | tcg_regset_clear(s->reserved_regs); | |
1706 | #ifdef SAVE_LR | |
1707 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14); | |
1708 | #endif | |
1709 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); | |
1710 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8); | |
1711 | ||
1712 | tcg_add_target_add_op_defs(arm_op_defs); | |
1713 | } | |
1714 | ||
1715 | static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg, | |
1716 | int arg1, tcg_target_long arg2) | |
1717 | { | |
1718 | tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); | |
1719 | } | |
1720 | ||
1721 | static inline void tcg_out_st(TCGContext *s, TCGType type, int arg, | |
1722 | int arg1, tcg_target_long arg2) | |
1723 | { | |
1724 | tcg_out_st32(s, COND_AL, arg, arg1, arg2); | |
1725 | } | |
1726 | ||
2d69f359 | 1727 | static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) |
811d4cf4 AZ |
1728 | { |
1729 | if (val > 0) {
1730 | if (val < 0x100)
1731 | tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
1732 | else
1733 | tcg_abort();
1734 | } else if (val < 0) {
1735 | if (val > -0x100)
1736 | tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
1737 | else
1738 | tcg_abort();
1739 | }
1740 | } | |
1741 | ||
1742 | static inline void tcg_out_mov(TCGContext *s, int ret, int arg) | |
1743 | { | |
1744 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0)); | |
1745 | } | |
1746 | ||
1747 | static inline void tcg_out_movi(TCGContext *s, TCGType type, | |
1748 | int ret, tcg_target_long arg) | |
1749 | { | |
1750 | tcg_out_movi32(s, COND_AL, ret, arg); | |
1751 | } | |
1752 | ||
1753 | void tcg_target_qemu_prologue(TCGContext *s) | |
1754 | { | |
4e17eae9 AJ |
1755 | /* There is no need to save r12 itself, but the EABI requires an even
1756 | number of registers to be saved so the stack stays 8-byte aligned. */
1757 | ||
1758 | /* stmdb sp!, { r4 - r12, lr } */ | |
1759 | tcg_out32(s, (COND_AL << 28) | 0x092d5ff0); | |
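/* With COND_AL this is 0xe92d5ff0: stmdb sp!, {reglist}, where the register
 * list 0x5ff0 has bits 4-12 and 14 set, i.e. r4-r12 and lr.  The epilogue
 * below uses 0x9ff0 (bits 4-12 and 15) so the saved lr is reloaded straight
 * into pc, returning to the caller.
 */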
811d4cf4 AZ |
1760 | |
1761 | tcg_out_bx(s, COND_AL, TCG_REG_R0); | |
1762 | tb_ret_addr = s->code_ptr; | |
1763 | ||
4e17eae9 AJ |
1764 | /* ldmia sp!, { r4 - r12, pc } */ |
1765 | tcg_out32(s, (COND_AL << 28) | 0x08bd9ff0); | |
811d4cf4 | 1766 | } |