/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

static void patch_reloc(uint8_t *code_ptr, int type,
                tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        *(uint32_t *) code_ptr = value;
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        tcg_abort();

    case R_ARM_PC24:
        *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
                (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
        break;
    }
}
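
/*
 * Note on the R_ARM_PC24 case above: an ARM B/BL instruction encodes its
 * target as a signed 24-bit word offset relative to the instruction address
 * plus 8 (the architectural PC at execute time), so the patch subtracts
 * code_ptr + 8, shifts right by 2 and keeps only the low 24 bits, preserving
 * the condition and opcode bits that live in the top byte.
 */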

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;

    case 'r':
#ifndef CONFIG_SOFTMMU
    case 'd':
    case 'D':
    case 'x':
    case 'X':
#endif
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

#ifdef CONFIG_SOFTMMU
    /* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
    case 'x':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    /* qemu_ld64 data_reg */
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r1 is still needed to load data_reg2, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;

    /* qemu_ld/st64 data_reg2 */
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0, r1 and optionally r2 will be overwritten by the address
         * and the low word of data, so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
# if TARGET_LONG_BITS == 64
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
# endif
        break;

# if TARGET_LONG_BITS == 64
    /* qemu_ld/st addr_reg2 */
    case 'X':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 will be overwritten by the low word of base, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        break;
# endif
#endif

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}

static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit value
   right-rotated by an even amount between 0 and 30. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}
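
/*
 * Worked example for encode_imm(): imm = 0x3f000 has ctz32(imm) = 12, so
 * shift = 12 and (imm >> 12) = 0x3f fits in 8 bits; the function returns
 * 32 - 12 = 20.  The callers then emit rotl(imm, 20) = 0x3f as the 8-bit
 * immediate and place 20 >> 1 = 10 in the 4-bit rotate field (rot << 7);
 * the CPU rotates 0x3f right by 2 * 10 = 20 bits and recovers 0x3f000.
 */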

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val,
                const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
        return 1;
    else
        return 0;
}

enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};

#define TO_CPSR(opc) \
  ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)

#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
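
/*
 * The SHIFT_* macros build bits [11:4] of a data-processing instruction's
 * register operand: bits [6:5] select the shift type (LSL/LSR/ASR/ROR),
 * bit 4 distinguishes an immediate shift amount in bits [11:7] from a
 * register-specified shift amount in bits [11:8], and the register being
 * shifted (Rm) is OR'ed into bits [3:0] separately by the callers below.
 */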

enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

static const uint8_t tcg_cond_to_arm_cond[10] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}
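
/*
 * tcg_out_b_noaddr() emits only the byte carrying the condition and the
 * branch opcode (0x0a) and skips the other three bytes, leaving the 24-bit
 * offset field unwritten; the #ifdef picks whichever byte position holds the
 * most significant byte of the instruction word for the host's endianness.
 * The offset is filled in later by patch_reloc() through an R_ARM_PC24
 * relocation (see tcg_out_goto_label() below).
 */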

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | shift | rm);
}
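
/*
 * Layout of the data-processing words built by tcg_out_dat_reg() here and
 * tcg_out_dat_imm() below: the condition sits in bits [31:28], bit 25 selects
 * an immediate (1) or register (0) operand, the ARITH_* opcode occupies bits
 * [24:21], TO_CPSR() sets the S bit (20) for the compare/test opcodes so that
 * they update the flags, Rn is in bits [19:16], Rd in bits [15:12], and the
 * low 12 bits hold either the shifter operand (SHIFT_* value plus Rm) or the
 * rotated 8-bit immediate.
 */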

static inline void tcg_out_dat_reg2(TCGContext *s,
                int cond, int opc0, int opc1, int rd0, int rd1,
                int rn0, int rn1, int rm0, int rm1, int shift)
{
    if (rd0 == rn1 || rd0 == rm1) {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (8 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
    } else {
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
                        (rn0 << 16) | (rd0 << 12) | shift | rm0);
        tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
                        (rn1 << 16) | (rd1 << 12) | shift | rm1);
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
                    (rn << 16) | (rd << 12) | im);
}

static inline void tcg_out_movi32(TCGContext *s,
                int cond, int rd, int32_t arg)
{
    int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);

    /* TODO: This is very suboptimal, we can easily have a constant
     * pool somewhere after all the instructions.  */

    if (arg < 0 && arg > -0x100)
        return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);

    if (offset < 0x100 && offset > -0x100)
        return offset >= 0 ?
                tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
                tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);

#ifdef __ARM_ARCH_7A__
    /* use movw/movt */
    /* movw */
    tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
              | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
    if (arg & 0xffff0000)
        /* movt */
        tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                  | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
#else
    tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
    if (arg & 0x0000ff00)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 8) & 0xff) | 0xc00);
    if (arg & 0x00ff0000)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 16) & 0xff) | 0x800);
    if (arg & 0xff000000)
        tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
                        ((arg >> 24) & 0xff) | 0x400);
#endif
}
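
/*
 * Example of the generic (pre-ARMv7) path in tcg_out_movi32(): loading
 * 0x12345678 becomes  mov rd, #0x78;  orr rd, rd, #0x5600;
 * orr rd, rd, #0x340000;  orr rd, rd, #0x12000000.  The 0xc00, 0x800 and
 * 0x400 constants are the rotate fields that place each 8-bit chunk at bit
 * positions 8, 16 and 24 respectively.
 */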

static inline void tcg_out_mul32(TCGContext *s,
                int cond, int rd, int rs, int rm)
{
    if (rd != rm)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
    else if (rd != rs)
        tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
                        (rm << 8) | 0x90 | rs);
    else {
        tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
                        (rs << 8) | 0x90 | rm);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, 8, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_umull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0x800090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0x800098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}

static inline void tcg_out_smull32(TCGContext *s,
                int cond, int rd0, int rd1, int rs, int rm)
{
    if (rd0 != rm && rd1 != rm)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
    else if (rd0 != rs && rd1 != rs)
        tcg_out32(s, (cond << 28) | 0xc00090 |
                        (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
    else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
        tcg_out32(s, (cond << 28) | 0xc00098 |
                        (rd1 << 16) | (rd0 << 12) | (rs << 8));
    }
}

static inline void tcg_out_ld32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05900000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05100000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st32_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05800000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05000000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07900000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07800000 |
                    (rn << 16) | (rd << 12) | rm);
}

/* Register pre-increment with base writeback. */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07b00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07a00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16u_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000b0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000b0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16u_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000b0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st16s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000f0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000f0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000f0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st16s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000f0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05d00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05500000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_st8_12(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x05c00000 |
                        (rn << 16) | (rd << 12) | (im & 0xfff));
    else
        tcg_out32(s, (cond << 28) | 0x05400000 |
                        (rn << 16) | (rd << 12) | ((-im) & 0xfff));
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07d00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x07c00000 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01d000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x015000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_st8s_8(TCGContext *s, int cond,
                int rd, int rn, tcg_target_long im)
{
    if (im >= 0)
        tcg_out32(s, (cond << 28) | 0x01c000d0 |
                        (rn << 16) | (rd << 12) |
                        ((im & 0xf0) << 4) | (im & 0xf));
    else
        tcg_out32(s, (cond << 28) | 0x014000d0 |
                        (rn << 16) | (rd << 12) |
                        (((-im) & 0xf0) << 4) | ((-im) & 0xf));
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x019000d0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_st8s_r(TCGContext *s, int cond,
                int rd, int rn, int rm)
{
    tcg_out32(s, (cond << 28) | 0x018000d0 |
                    (rn << 16) | (rd << 12) | rm);
}

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_R8, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0));
        }
#endif
    }
}

static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
#endif

    val = addr - (tcg_target_long) s->code_ptr;
    if (val < 0x01fffffd && val > -0x01fffffd)
        tcg_out_bl(s, cond, val);
    else {
#if 1
        tcg_abort();
#else
        if (cond == COND_AL) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4);
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
        } else {
            tcg_out_movi32(s, cond, TCG_REG_R9, addr);
            tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15);
            tcg_out_bx(s, cond, TCG_REG_R9);
        }
#endif
    }

#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
#endif
}

static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
#endif
    /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
    tcg_out_bx(s, cond, arg);
#ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
#endif
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
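
/*
 * For a not-yet-resolved label, tcg_out_goto_label() emits either
 * "ldr pc, [pc, #-4]" followed by a reserved 32-bit literal slot
 * (unconditional case, fixed up through R_ARM_ABS32) or a conditional branch
 * with an empty offset field (fixed up through R_ARM_PC24); the 31337 addend
 * is only a placeholder and is ignored by patch_reloc().
 */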

#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
# if CPU_TLB_BITS > 8
#  error
# endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0, and
     * not to exceed them otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_read));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        tcg_out_ld32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
# endif

    /* TODO: move this code to where the constants pool will be */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
# else
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
# endif
    tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);

    switch (opc) {
    case 0 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, 0, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, 0, SHIFT_IMM_ASR(24));
        break;
    case 1 | 4:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, 0, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        data_reg, 0, 0, SHIFT_IMM_ASR(16));
        break;
    case 0:
    case 1:
    case 2:
    default:
        if (data_reg)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
        break;
    case 3:
        if (data_reg != 0)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg, 0, 0, SHIFT_IMM_LSL(0));
        if (data_reg2 != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            data_reg2, 0, 1, SHIFT_IMM_LSL(0));
        break;
    }

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
# endif

    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = 8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
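
/*
 * Structure of the softmmu path in tcg_out_qemu_ld(): the TLB comparison
 * leaves the flags set to EQ on a hit, so the COND_EQ-predicated loads above
 * perform the access through the addend loaded into r1, and the following
 * "beq" (emitted with a dummy offset and fixed up via *label_ptr at the end)
 * jumps over the slow path.  On a miss the predicated instructions behave as
 * nops, execution falls through to the helper call with the guest address in
 * r0 (r0/r1 for 64-bit guest addresses) and mem_index in the next argument
 * register, and the helper's result is moved from r0/r1 into
 * data_reg/data_reg2, with explicit sign extension for the signed variants.
 */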

static inline void tcg_out_qemu_st(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0, and
     * not to exceed them otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

    /* TODO: move this code to where the constants pool will be */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, 1, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 2:
        if (data_reg != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 3:
        if (data_reg != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            1, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg2, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    }
# else
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, 2, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 2:
        if (data_reg != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 3:
        tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index);
        tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != 3)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            3, 0, data_reg2, SHIFT_IMM_LSL(0));
        break;
    }
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
# endif

    tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);
# if TARGET_LONG_BITS == 64
    if (opc == 3)
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
# endif

    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = 8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        break;
    }
#endif
}
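
/*
 * The store slow path above marshals the guest address and data into the
 * argument registers expected by qemu_st_helpers[]: r0 (plus r1 for 64-bit
 * guest addresses) for the address, then r1/r2 or r2/r3 for the data, with
 * 8- and 16-bit values masked or shifted down first.  In the 64-bit-address
 * qemu_st64 case mem_index no longer fits in a register, so it is pushed
 * with "str r8, [sp, #-0x10]!" (16 bytes are reserved, presumably to keep
 * the stack aligned) and the "add sp, sp, #0x10" after the call undoes the
 * push.
 */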

static uint8_t *tb_ret_addr;

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
#ifdef SAVE_LR
        if (args[0] >> 8)
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
        else
            tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0));
        if (args[0] >> 8)
            tcg_out32(s, args[0]);
#else
        {
            uint8_t *ld_ptr = s->code_ptr;
            if (args[0] >> 8)
                tcg_out_ld32_12(s, COND_AL, 0, 15, 0);
            else
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            if (args[0] >> 8) {
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);
            }
        }
#endif
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b(s, COND_AL, 8);
#else
            tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, 15, 15, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
            tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, COND_AL, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_jmp:
        if (const_args[0])
            tcg_out_goto(s, COND_AL, args[0]);
        else
            tcg_out_bx(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_andc_i32:
        c = ARITH_BIC;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through. */
    gen_arith:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, c,
                            args[0], args[1], rotl(args[2], rot) | (rot << 7));
        } else
            tcg_out_dat_reg(s, COND_AL, c,
                            args[0], args[1], args[2], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_add2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_sub2_i32:
        tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
                        args[0], args[1], args[2], args[3],
                        args[4], args[5], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        /* Fall through. */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_brcond_i32:
        if (const_args[1]) {
            int rot;
            rot = encode_imm(args[1]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP,
                            0, args[0], rotl(args[1], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[0], args[1], SHIFT_IMM_LSL(0));
        }
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[0], args[2], SHIFT_IMM_LSL(0));
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        if (const_args[2]) {
            int rot;
            rot = encode_imm(args[2]);
            tcg_out_dat_imm(s, COND_AL, ARITH_CMP,
                            0, args[1], rotl(args[2], rot) | (rot << 7));
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                            args[1], args[2], SHIFT_IMM_LSL(0));
        }
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
                        args[2], args[4], SHIFT_IMM_LSL(0));
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        args[1], args[3], SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, COND_AL, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, COND_AL, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, COND_AL, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, COND_AL, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, COND_AL, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, COND_AL, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, COND_AL, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, COND_AL, args, 3);
        break;

    case INDEX_op_ext8s_i32:
#ifdef __ARM_ARCH_7A__
        /* sxtb */
        tcg_out32(s, 0xe6af0070 | (args[0] << 12) | args[1]);
#else
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[0], SHIFT_IMM_ASR(24));
#endif
        break;
    case INDEX_op_ext16s_i32:
#ifdef __ARM_ARCH_7A__
        /* sxth */
        tcg_out32(s, 0xe6bf0070 | (args[0] << 12) | args[1]);
#else
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[0], SHIFT_IMM_ASR(16));
#endif
        break;

    default:
        tcg_abort();
    }
}

static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_andc_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rI" } },
    { INDEX_op_setcond_i32, { "r", "r", "rI" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "x" } },
    { INDEX_op_qemu_ld8s, { "r", "x" } },
    { INDEX_op_qemu_ld16u, { "r", "x" } },
    { INDEX_op_qemu_ld16s, { "r", "x" } },
    { INDEX_op_qemu_ld32u, { "r", "x" } },
    { INDEX_op_qemu_ld64, { "d", "r", "x" } },

    { INDEX_op_qemu_st8, { "x", "x" } },
    { INDEX_op_qemu_st16, { "x", "x" } },
    { INDEX_op_qemu_st32, { "x", "x" } },
    { INDEX_op_qemu_st64, { "x", "D", "x" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld8s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld32, { "r", "x", "X" } },
    { INDEX_op_qemu_ld64, { "d", "r", "x", "X" } },

    { INDEX_op_qemu_st8, { "x", "x", "X" } },
    { INDEX_op_qemu_st16, { "x", "x", "X" } },
    { INDEX_op_qemu_st32, { "x", "x", "X" } },
    { INDEX_op_qemu_st64, { "x", "D", "x", "X" } },
#endif

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { -1 },
};
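
/*
 * Constraint letters used above (see target_parse_constraint()): 'r' is any
 * of r0-r14, 'I' accepts constants that encode_imm() can represent as a
 * rotated 8-bit immediate, and with CONFIG_SOFTMMU 'x', 'X', 'd' and 'D'
 * are register classes for qemu_ld/st operands that exclude some of r0-r2,
 * because those registers are clobbered while setting up the slow-path
 * helper call.
 */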

void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
                    ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8));
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                    ((2 << TCG_REG_R3) - 1) |
                    (1 << TCG_REG_R12) | (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
#ifdef SAVE_LR
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14);
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);

    tcg_add_target_add_op_defs(arm_op_defs);
}
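
/*
 * Note: r8 is left out of the allocatable set and reserved above because the
 * backend uses it as an internal scratch register (tcg_out_dat_reg2(), the
 * large-offset load/store helpers and the TLB lookup all clobber it).
 * r0-r3, r12 and r14 form the call-clobbered set, and TCG_REG_CALL_STACK is
 * the stack pointer.
 */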

static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}

static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val > 0)
        if (val < 0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
        else
            tcg_abort();
    else if (val < 0) {
        if (val > -0x100)
            tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
        else
            tcg_abort();
    }
}

static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}

void tcg_target_qemu_prologue(TCGContext *s)
{
    /* There is no need to save r7; it is used to store the address
       of the env structure and is not modified by GCC. */

    /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4f70);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
}
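
/*
 * The prologue saves the callee-saved registers the code generator may
 * clobber (r4-r6, r8-r11) together with lr, then jumps to the translated
 * code whose address the caller passes in r0.  Generated code returns here
 * through tb_ret_addr (see INDEX_op_exit_tb), where the matching ldmia
 * restores those registers and loads pc to return to the prologue's caller.
 */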