1 | /* |
2 | * Tiny Code Generator for QEMU | |
3 | * | |
4 | * Copyright (c) 2008 Andrzej Zaborowski | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | * of this software and associated documentation files (the "Software"), to deal | |
8 | * in the Software without restriction, including without limitation the rights | |
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | * copies of the Software, and to permit persons to whom the Software is | |
11 | * furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
22 | * THE SOFTWARE. | |
23 | */ | |
24 | const char *tcg_target_reg_names[TCG_TARGET_NB_REGS] = { | |
25 | "%r0", | |
26 | "%r1", | |
27 | "%r2", | |
28 | "%r3", | |
29 | "%r4", | |
30 | "%r5", | |
31 | "%r6", | |
32 | "%r7", | |
33 | "%r8", | |
34 | "%r9", | |
35 | "%r10", | |
36 | "%r11", | |
37 | "%r12", | |
38 | "%r13", | |
39 | "%r14", | |
40 | }; | |
41 | ||
42 | int tcg_target_reg_alloc_order[] = { | |
43 | TCG_REG_R0, | |
44 | TCG_REG_R1, | |
45 | TCG_REG_R2, | |
46 | TCG_REG_R3, | |
47 | TCG_REG_R4, | |
48 | TCG_REG_R5, | |
49 | TCG_REG_R6, | |
50 | TCG_REG_R7, | |
51 | TCG_REG_R8, | |
52 | TCG_REG_R9, | |
53 | TCG_REG_R10, | |
54 | TCG_REG_R11, | |
55 | TCG_REG_R12, | |
56 | TCG_REG_R13, | |
57 | TCG_REG_R14, | |
58 | }; | |
59 | ||
60 | const int tcg_target_call_iarg_regs[4] = { | |
61 | TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3 | |
62 | }; | |
63 | const int tcg_target_call_oarg_regs[2] = { | |
64 | TCG_REG_R0, TCG_REG_R1 | |
65 | }; | |
66 | ||
67 | static void patch_reloc(uint8_t *code_ptr, int type,
68 | tcg_target_long value, tcg_target_long addend)
69 | { | |
70 | switch (type) { | |
71 | case R_ARM_ABS32: | |
72 | *(uint32_t *) code_ptr = value; | |
73 | break; | |
74 | ||
75 | case R_ARM_CALL: | |
76 | case R_ARM_JUMP24: | |
77 | default: | |
78 | tcg_abort(); | |
79 | ||
80 | case R_ARM_PC24: | |
81 | *(uint32_t *) code_ptr = (*(uint32_t *) code_ptr & 0xff000000)
82 |     | (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
83 | break; |
84 | } | |
85 | } | |
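/* Note on the R_ARM_PC24 case: the stored value is the signed word distance
 * from the branch's PC (which on ARM reads as the instruction address + 8)
 * to the target, kept in the low 24 bits, while the condition/opcode byte
 * in bits 24-31 is preserved.  R_ARM_ABS32 simply stores the absolute
 * address, as used for the load-from-constant in tcg_out_goto_label. */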
86 | ||
87 | /* maximum number of registers used for input function arguments */ | |
88 | static inline int tcg_target_get_call_iarg_regs_count(int flags) | |
89 | { | |
90 | return 4; | |
91 | } | |
92 | ||
93 | #define USE_TLB | |
94 | ||
95 | /* parse target specific constraints */ | |
96 | int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str) | |
97 | { | |
98 | const char *ct_str; | |
99 | ||
100 | ct_str = *pct_str; | |
101 | switch (ct_str[0]) { | |
102 | case 'r': | |
103 | #ifndef CONFIG_SOFTMMU | |
104 | case 'd': | |
105 | case 'D': | |
106 | case 'x': | |
107 | case 'X': | |
108 | #endif | |
109 | ct->ct |= TCG_CT_REG; | |
110 | tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
111 | break; | |
112 | ||
113 | #ifdef CONFIG_SOFTMMU | |
114 | /* qemu_ld/st inputs (unless 'd', 'D' or 'X') */ | |
115 | case 'x': | |
116 | ct->ct |= TCG_CT_REG; | |
117 | tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
118 | # ifdef USE_TLB | |
119 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
120 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | |
121 | # endif | |
122 | break; | |
123 | ||
124 | /* qemu_ld/st data_reg */ | |
125 | case 'd': | |
126 | ct->ct |= TCG_CT_REG; | |
127 | tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
128 | /* r0 and optionally r1 will be overwritten by the address | |
129 | * so don't use these. */ | |
130 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
131 | # if TARGET_LONG_BITS == 64 || defined(USE_TLB) | |
132 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | |
133 | # endif | |
134 | break; | |
135 | ||
136 | /* qemu_ld/st64 data_reg2 */ | |
137 | case 'D': | |
138 | ct->ct |= TCG_CT_REG; | |
139 | tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
140 | /* r0, r1 and optionally r2 will be overwritten by the address | |
141 | * and the low word of data, so don't use these. */ | |
142 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
143 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | |
144 | # if TARGET_LONG_BITS == 64 | |
145 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); | |
146 | # endif | |
147 | break; | |
148 | ||
149 | # if TARGET_LONG_BITS == 64 | |
150 | /* qemu_ld/st addr_reg2 */ | |
151 | case 'X': | |
152 | ct->ct |= TCG_CT_REG; | |
153 | tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
154 | /* r0 will be overwritten by the low word of base, so don't use it. */ | |
155 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
156 | # ifdef USE_TLB | |
157 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | |
158 | # endif | |
159 | break; | |
160 | # endif | |
161 | #endif | |
162 | ||
163 | case '1': | |
164 | ct->ct |= TCG_CT_REG; | |
165 | tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
166 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
167 | break; | |
168 | ||
169 | case '2': | |
170 | ct->ct |= TCG_CT_REG; | |
171 | tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1); | |
172 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); | |
173 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); | |
174 | break; | |
175 | ||
176 | default: | |
177 | return -1; | |
178 | } | |
179 | ct_str++; | |
180 | *pct_str = ct_str; | |
181 | ||
182 | return 0; | |
183 | } | |
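/* Summary of the constraint letters above: 'r' is any general register.
 * Under CONFIG_SOFTMMU, 'x'/'X' constrain the qemu_ld/st address registers
 * and 'd'/'D' the data registers, each dropping the low argument registers
 * (r0, r1, r2 as applicable) that the slow-path helper call overwrites;
 * without CONFIG_SOFTMMU they all degenerate to 'r'.  '1' excludes r0 and
 * '2' excludes r0 and r1, as needed by the div2/divu2 operands. */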
184 | ||
185 | /* Test if a constant matches the constraint. | |
186 | * TODO: define constraints for: | |
187 | * | |
188 | * ldr/str offset: between -0xfff and 0xfff | |
189 | * ldrh/strh offset: between -0xff and 0xff | |
190 | * mov operand2: values represented with x << (2 * y), x < 0x100 | |
191 | * add, sub, eor...: ditto | |
192 | */ | |
193 | static inline int tcg_target_const_match(tcg_target_long val, | |
194 | const TCGArgConstraint *arg_ct) | |
195 | { | |
196 | int ct; | |
197 | ct = arg_ct->ct; | |
198 | if (ct & TCG_CT_CONST) | |
199 | return 1; | |
200 | else | |
201 | return 0; | |
202 | } | |
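/* For reference, an ARM data-processing immediate is an 8-bit value rotated
 * right by an even amount, so e.g. 0xff, 0x3fc and 0xff000000 are
 * representable while 0x101 is not; a constraint along these lines (per the
 * TODO above) would let add/sub/mov avoid the full movi32 sequence. */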
203 | ||
204 | enum arm_data_opc_e { | |
205 | ARITH_AND = 0x0, | |
206 | ARITH_EOR = 0x1, | |
207 | ARITH_SUB = 0x2, | |
208 | ARITH_RSB = 0x3, | |
209 | ARITH_ADD = 0x4, | |
210 | ARITH_ADC = 0x5, | |
211 | ARITH_SBC = 0x6, | |
212 | ARITH_RSC = 0x7, | |
213 | ARITH_CMP = 0xa, | |
214 | ARITH_CMN = 0xb, | |
215 | ARITH_ORR = 0xc, | |
216 | ARITH_MOV = 0xd, | |
217 | ARITH_BIC = 0xe, | |
218 | ARITH_MVN = 0xf, | |
219 | }; | |
220 | ||
221 | #define TO_CPSR(opc) ((opc == ARITH_CMP || opc == ARITH_CMN) << 20) | |
222 | ||
223 | #define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00) | |
224 | #define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20) | |
225 | #define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40) | |
226 | #define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60) | |
227 | #define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10) | |
228 | #define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30) | |
229 | #define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50) | |
230 | #define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70) | |
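/* These macros build the shifter operand of a register-form data-processing
 * instruction: an immediate shift puts the 5-bit amount in bits 11:7 and the
 * shift type in bits 6:5 (LSL/LSR/ASR/ROR), e.g. SHIFT_IMM_LSR(3) is 0x1a0;
 * a register shift sets bit 4 and puts the shift register in bits 11:8. */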
231 | ||
232 | enum arm_cond_code_e { | |
233 | COND_EQ = 0x0, | |
234 | COND_NE = 0x1, | |
235 | COND_CS = 0x2, /* Unsigned greater or equal */ | |
236 | COND_CC = 0x3, /* Unsigned less than */ | |
237 | COND_MI = 0x4, /* Negative */ | |
238 | COND_PL = 0x5, /* Zero or greater */ | |
239 | COND_VS = 0x6, /* Overflow */ | |
240 | COND_VC = 0x7, /* No overflow */ | |
241 | COND_HI = 0x8, /* Unsigned greater than */ | |
242 | COND_LS = 0x9, /* Unsigned less or equal */ | |
243 | COND_GE = 0xa, | |
244 | COND_LT = 0xb, | |
245 | COND_GT = 0xc, | |
246 | COND_LE = 0xd, | |
247 | COND_AL = 0xe, | |
248 | }; | |
249 | ||
250 | static const uint8_t tcg_cond_to_arm_cond[10] = { | |
251 | [TCG_COND_EQ] = COND_EQ, | |
252 | [TCG_COND_NE] = COND_NE, | |
253 | [TCG_COND_LT] = COND_LT, | |
254 | [TCG_COND_GE] = COND_GE, | |
255 | [TCG_COND_LE] = COND_LE, | |
256 | [TCG_COND_GT] = COND_GT, | |
257 | /* unsigned */ | |
258 | [TCG_COND_LTU] = COND_CC, | |
259 | [TCG_COND_GEU] = COND_CS, | |
260 | [TCG_COND_LEU] = COND_LS, | |
261 | [TCG_COND_GTU] = COND_HI, | |
262 | }; | |
263 | ||
264 | static inline void tcg_out_bx(TCGContext *s, int cond, int rn) | |
265 | { | |
266 | tcg_out32(s, (cond << 28) | 0x012fff10 | rn); | |
267 | } | |
268 | ||
269 | static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset) | |
270 | { | |
271 | tcg_out32(s, (cond << 28) | 0x0a000000 | | |
272 | (((offset - 8) >> 2) & 0x00ffffff)); | |
273 | } | |
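/* The "- 8" in the branch encodings accounts for the ARM PC reading as the
 * current instruction address plus 8; the byte offset is then stored as a
 * signed 24-bit word count. */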
274 | ||
275 | static inline void tcg_out_b_noaddr(TCGContext *s, int cond) |
276 | { | |
277 | #ifdef WORDS_BIGENDIAN | |
278 | tcg_out8(s, (cond << 4) | 0x0a); | |
279 | s->code_ptr += 3; | |
280 | #else | |
281 | s->code_ptr += 3; | |
282 | tcg_out8(s, (cond << 4) | 0x0a); | |
283 | #endif | |
284 | } | |
285 | ||
286 | static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset) |
287 | { | |
288 | tcg_out32(s, (cond << 28) | 0x0b000000 | | |
289 | (((offset - 8) >> 2) & 0x00ffffff)); | |
290 | } | |
291 | ||
292 | static inline void tcg_out_dat_reg(TCGContext *s, | |
293 | int cond, int opc, int rd, int rn, int rm, int shift) | |
294 | { | |
295 | tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) | | |
296 | (rn << 16) | (rd << 12) | shift | rm); | |
297 | } | |
298 | ||
299 | static inline void tcg_out_dat_reg2(TCGContext *s, | |
300 | int cond, int opc0, int opc1, int rd0, int rd1, | |
301 | int rn0, int rn1, int rm0, int rm1, int shift) | |
302 | { | |
303 | tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) | | |
304 | (rn0 << 16) | (rd0 << 12) | shift | rm0); | |
305 | tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) | | |
306 | (rn1 << 16) | (rd1 << 12) | shift | rm1); | |
307 | } | |
308 | ||
309 | static inline void tcg_out_dat_imm(TCGContext *s, | |
310 | int cond, int opc, int rd, int rn, int im) | |
311 | { | |
312 | tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | | |
313 | (rn << 16) | (rd << 12) | im); | |
314 | } | |
315 | ||
316 | static inline void tcg_out_movi32(TCGContext *s, | |
317 | int cond, int rd, int32_t arg) | |
318 | { | |
319 | int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8); | |
320 | ||
321 | /* TODO: This is very suboptimal, we can easily have a constant | |
322 | * pool somewhere after all the instructions. */ | |
323 | ||
324 | if (arg < 0 && arg > -0x100) | |
325 | return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff); | |
326 | ||
327 | if (offset < 0x100 && offset > -0x100) | |
328 | return offset >= 0 ? | |
329 | tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) : | |
330 | tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset); | |
331 | ||
332 | tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff); | |
333 | if (arg & 0x0000ff00) | |
334 | tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd, | |
335 | ((arg >> 8) & 0xff) | 0xc00); | |
336 | if (arg & 0x00ff0000) | |
337 | tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd, | |
338 | ((arg >> 16) & 0xff) | 0x800); | |
339 | if (arg & 0xff000000) | |
340 | tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd, | |
341 | ((arg >> 24) & 0xff) | 0x400); | |
342 | } | |
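/* The ORR immediates above rely on the rotate field of the 12-bit immediate
 * operand: 0xc00, 0x800 and 0x400 rotate the 8-bit value right by 24, 16 and
 * 8 bits respectively, which puts each byte of arg back in its place.  The
 * PC-relative ADD/SUB shortcut likewise depends on r15 reading as the
 * instruction address + 8. */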
343 | ||
344 | static inline void tcg_out_mul32(TCGContext *s, | |
345 | int cond, int rd, int rs, int rm) | |
346 | { | |
347 | if (rd != rm) | |
348 | tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) | | |
349 | (rs << 8) | 0x90 | rm); | |
350 | else if (rd != rs) | |
351 | tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) | | |
352 | (rm << 8) | 0x90 | rs); | |
353 | else { | |
354 | tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) | | |
355 | (rs << 8) | 0x90 | rm); | |
356 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
357 | rd, 0, 8, SHIFT_IMM_LSL(0)); | |
358 | } | |
359 | } | |
360 | ||
361 | static inline void tcg_out_umull32(TCGContext *s, | |
362 | int cond, int rd0, int rd1, int rs, int rm) | |
363 | { | |
364 | if (rd0 != rm && rd1 != rm) | |
365 | tcg_out32(s, (cond << 28) | 0x800090 | | |
366 | (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm); | |
367 | else if (rd0 != rs && rd1 != rs) | |
368 | tcg_out32(s, (cond << 28) | 0x800090 | | |
369 | (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs); | |
370 | else { | |
371 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
372 | TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0)); | |
373 | tcg_out32(s, (cond << 28) | 0x800098 | | |
374 | (rd1 << 16) | (rd0 << 12) | (rs << 8)); | |
375 | } | |
376 | } | |
377 | ||
378 | static inline void tcg_out_smull32(TCGContext *s, | |
379 | int cond, int rd0, int rd1, int rs, int rm) | |
380 | { | |
381 | if (rd0 != rm && rd1 != rm) | |
382 | tcg_out32(s, (cond << 28) | 0xc00090 | | |
383 | (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm); | |
384 | else if (rd0 != rs && rd1 != rs) | |
385 | tcg_out32(s, (cond << 28) | 0xc00090 | | |
386 | (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs); | |
387 | else { | |
388 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
389 | TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0)); | |
390 | tcg_out32(s, (cond << 28) | 0xc00098 | | |
391 | (rd1 << 16) | (rd0 << 12) | (rs << 8)); | |
392 | } | |
393 | } | |
394 | ||
395 | static inline void tcg_out_ld32_12(TCGContext *s, int cond, | |
396 | int rd, int rn, tcg_target_long im) | |
397 | { | |
398 | if (im >= 0) | |
399 | tcg_out32(s, (cond << 28) | 0x05900000 | | |
400 | (rn << 16) | (rd << 12) | (im & 0xfff)); | |
401 | else | |
402 | tcg_out32(s, (cond << 28) | 0x05100000 | | |
403 | (rn << 16) | (rd << 12) | ((-im) & 0xfff)); | |
404 | } | |
405 | ||
406 | static inline void tcg_out_st32_12(TCGContext *s, int cond, | |
407 | int rd, int rn, tcg_target_long im) | |
408 | { | |
409 | if (im >= 0) | |
410 | tcg_out32(s, (cond << 28) | 0x05800000 | | |
411 | (rn << 16) | (rd << 12) | (im & 0xfff)); | |
412 | else | |
413 | tcg_out32(s, (cond << 28) | 0x05000000 | | |
414 | (rn << 16) | (rd << 12) | ((-im) & 0xfff)); | |
415 | } | |
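/* In the load/store encodings above and below, the two opcode variants
 * differ only in bit 23 (the U bit): set to add the unsigned immediate
 * offset, clear to subtract it, which is why the negative case passes -im. */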
416 | ||
417 | static inline void tcg_out_ld32_r(TCGContext *s, int cond, | |
418 | int rd, int rn, int rm) | |
419 | { | |
420 | tcg_out32(s, (cond << 28) | 0x07900000 | | |
421 | (rn << 16) | (rd << 12) | rm); | |
422 | } | |
423 | ||
424 | static inline void tcg_out_st32_r(TCGContext *s, int cond, | |
425 | int rd, int rn, int rm) | |
426 | { | |
427 | tcg_out32(s, (cond << 28) | 0x07800000 | | |
428 | (rn << 16) | (rd << 12) | rm); | |
429 | } | |
430 | ||
431 | static inline void tcg_out_ld16u_8(TCGContext *s, int cond, | |
432 | int rd, int rn, tcg_target_long im) | |
433 | { | |
434 | if (im >= 0) | |
435 | tcg_out32(s, (cond << 28) | 0x01d000b0 | | |
436 | (rn << 16) | (rd << 12) | | |
437 | ((im & 0xf0) << 4) | (im & 0xf)); | |
438 | else | |
439 | tcg_out32(s, (cond << 28) | 0x015000b0 | | |
440 | (rn << 16) | (rd << 12) | | |
441 | (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
442 | } | |
443 | ||
444 | static inline void tcg_out_st16u_8(TCGContext *s, int cond, | |
445 | int rd, int rn, tcg_target_long im) | |
446 | { | |
447 | if (im >= 0) | |
448 | tcg_out32(s, (cond << 28) | 0x01c000b0 | | |
449 | (rn << 16) | (rd << 12) | | |
450 | ((im & 0xf0) << 4) | (im & 0xf)); | |
451 | else | |
452 | tcg_out32(s, (cond << 28) | 0x014000b0 | | |
453 | (rn << 16) | (rd << 12) | | |
454 | (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
455 | } | |
456 | ||
457 | static inline void tcg_out_ld16u_r(TCGContext *s, int cond, | |
458 | int rd, int rn, int rm) | |
459 | { | |
460 | tcg_out32(s, (cond << 28) | 0x019000b0 | | |
461 | (rn << 16) | (rd << 12) | rm); | |
462 | } | |
463 | ||
464 | static inline void tcg_out_st16u_r(TCGContext *s, int cond, | |
465 | int rd, int rn, int rm) | |
466 | { | |
467 | tcg_out32(s, (cond << 28) | 0x018000b0 | | |
468 | (rn << 16) | (rd << 12) | rm); | |
469 | } | |
470 | ||
471 | static inline void tcg_out_ld16s_8(TCGContext *s, int cond, | |
472 | int rd, int rn, tcg_target_long im) | |
473 | { | |
474 | if (im >= 0) | |
475 | tcg_out32(s, (cond << 28) | 0x01d000f0 | | |
476 | (rn << 16) | (rd << 12) | | |
477 | ((im & 0xf0) << 4) | (im & 0xf)); | |
478 | else | |
479 | tcg_out32(s, (cond << 28) | 0x015000f0 | | |
480 | (rn << 16) | (rd << 12) | | |
481 | (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
482 | } | |
483 | ||
484 | static inline void tcg_out_st16s_8(TCGContext *s, int cond, | |
485 | int rd, int rn, tcg_target_long im) | |
486 | { | |
487 | if (im >= 0) | |
488 | tcg_out32(s, (cond << 28) | 0x01c000f0 | | |
489 | (rn << 16) | (rd << 12) | | |
490 | ((im & 0xf0) << 4) | (im & 0xf)); | |
491 | else | |
492 | tcg_out32(s, (cond << 28) | 0x014000f0 | | |
493 | (rn << 16) | (rd << 12) | | |
494 | (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
495 | } | |
496 | ||
497 | static inline void tcg_out_ld16s_r(TCGContext *s, int cond, | |
498 | int rd, int rn, int rm) | |
499 | { | |
500 | tcg_out32(s, (cond << 28) | 0x019000f0 | | |
501 | (rn << 16) | (rd << 12) | rm); | |
502 | } | |
503 | ||
504 | static inline void tcg_out_st16s_r(TCGContext *s, int cond, | |
505 | int rd, int rn, int rm) | |
506 | { | |
507 | tcg_out32(s, (cond << 28) | 0x018000f0 | | |
508 | (rn << 16) | (rd << 12) | rm); | |
509 | } | |
510 | ||
511 | static inline void tcg_out_ld8_12(TCGContext *s, int cond, | |
512 | int rd, int rn, tcg_target_long im) | |
513 | { | |
514 | if (im >= 0) | |
515 | tcg_out32(s, (cond << 28) | 0x05d00000 | | |
516 | (rn << 16) | (rd << 12) | (im & 0xfff)); | |
517 | else | |
518 | tcg_out32(s, (cond << 28) | 0x05500000 | | |
519 | (rn << 16) | (rd << 12) | ((-im) & 0xfff)); | |
520 | } | |
521 | ||
522 | static inline void tcg_out_st8_12(TCGContext *s, int cond, | |
523 | int rd, int rn, tcg_target_long im) | |
524 | { | |
525 | if (im >= 0) | |
526 | tcg_out32(s, (cond << 28) | 0x05c00000 | | |
527 | (rn << 16) | (rd << 12) | (im & 0xfff)); | |
528 | else | |
529 | tcg_out32(s, (cond << 28) | 0x05400000 | | |
530 | (rn << 16) | (rd << 12) | ((-im) & 0xfff)); | |
531 | } | |
532 | ||
533 | static inline void tcg_out_ld8_r(TCGContext *s, int cond, | |
534 | int rd, int rn, int rm) | |
535 | { | |
536 | tcg_out32(s, (cond << 28) | 0x07d00000 | | |
537 | (rn << 16) | (rd << 12) | rm); | |
538 | } | |
539 | ||
540 | static inline void tcg_out_st8_r(TCGContext *s, int cond, | |
541 | int rd, int rn, int rm) | |
542 | { | |
543 | tcg_out32(s, (cond << 28) | 0x07c00000 | | |
544 | (rn << 16) | (rd << 12) | rm); | |
545 | } | |
546 | ||
547 | static inline void tcg_out_ld8s_8(TCGContext *s, int cond, | |
548 | int rd, int rn, tcg_target_long im) | |
549 | { | |
550 | if (im >= 0) | |
551 | tcg_out32(s, (cond << 28) | 0x01d000d0 | | |
552 | (rn << 16) | (rd << 12) | | |
553 | ((im & 0xf0) << 4) | (im & 0xf)); | |
554 | else | |
555 | tcg_out32(s, (cond << 28) | 0x015000d0 | | |
556 | (rn << 16) | (rd << 12) | | |
557 | (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
558 | } | |
559 | ||
560 | static inline void tcg_out_st8s_8(TCGContext *s, int cond, | |
561 | int rd, int rn, tcg_target_long im) | |
562 | { | |
563 | if (im >= 0) | |
564 | tcg_out32(s, (cond << 28) | 0x01c000d0 | | |
565 | (rn << 16) | (rd << 12) | | |
566 | ((im & 0xf0) << 4) | (im & 0xf)); | |
567 | else | |
568 | tcg_out32(s, (cond << 28) | 0x014000d0 | | |
569 | (rn << 16) | (rd << 12) | | |
570 | (((-im) & 0xf0) << 4) | ((-im) & 0xf)); | |
571 | } | |
572 | ||
573 | static inline void tcg_out_ld8s_r(TCGContext *s, int cond, | |
574 | int rd, int rn, int rm) | |
575 | { | |
576 | tcg_out32(s, (cond << 28) | 0x019000d0 |
577 | (rn << 16) | (rd << 12) | rm); |
578 | } | |
579 | ||
580 | static inline void tcg_out_st8s_r(TCGContext *s, int cond, | |
581 | int rd, int rn, int rm) | |
582 | { | |
583 | tcg_out32(s, (cond << 28) | 0x018000d0 |
584 | (rn << 16) | (rd << 12) | rm); |
585 | } | |
586 | ||
587 | static inline void tcg_out_ld32u(TCGContext *s, int cond, | |
588 | int rd, int rn, int32_t offset) | |
589 | { | |
590 | if (offset > 0xfff || offset < -0xfff) { | |
591 | tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
592 | tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8); | |
593 | } else | |
594 | tcg_out_ld32_12(s, cond, rd, rn, offset); | |
595 | } | |
596 | ||
597 | static inline void tcg_out_st32(TCGContext *s, int cond, | |
598 | int rd, int rn, int32_t offset) | |
599 | { | |
600 | if (offset > 0xfff || offset < -0xfff) { | |
601 | tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
602 | tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8); | |
603 | } else | |
604 | tcg_out_st32_12(s, cond, rd, rn, offset); | |
605 | } | |
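/* Offsets that do not fit the 12-bit (or 8-bit, for the halfword and
 * signed-byte forms) immediate fields are materialized in r8 and the
 * register-offset forms are used instead; r8 is kept out of the allocator
 * in tcg_target_init precisely so it is always free as a scratch here. */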
606 | ||
607 | static inline void tcg_out_ld16u(TCGContext *s, int cond, | |
608 | int rd, int rn, int32_t offset) | |
609 | { | |
610 | if (offset > 0xff || offset < -0xff) { | |
611 | tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
612 | tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8); | |
613 | } else | |
614 | tcg_out_ld16u_8(s, cond, rd, rn, offset); | |
615 | } | |
616 | ||
617 | static inline void tcg_out_ld16s(TCGContext *s, int cond, | |
618 | int rd, int rn, int32_t offset) | |
619 | { | |
620 | if (offset > 0xff || offset < -0xff) { | |
621 | tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
622 | tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8); | |
623 | } else | |
624 | tcg_out_ld16s_8(s, cond, rd, rn, offset); | |
625 | } | |
626 | ||
627 | static inline void tcg_out_st16u(TCGContext *s, int cond, | |
628 | int rd, int rn, int32_t offset) | |
629 | { | |
630 | if (offset > 0xff || offset < -0xff) { | |
631 | tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
632 | tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8); | |
633 | } else | |
634 | tcg_out_st16u_8(s, cond, rd, rn, offset); | |
635 | } | |
636 | ||
637 | static inline void tcg_out_ld8u(TCGContext *s, int cond, | |
638 | int rd, int rn, int32_t offset) | |
639 | { | |
640 | if (offset > 0xfff || offset < -0xfff) { | |
641 | tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
642 | tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8); | |
643 | } else | |
644 | tcg_out_ld8_12(s, cond, rd, rn, offset); | |
645 | } | |
646 | ||
647 | static inline void tcg_out_ld8s(TCGContext *s, int cond, | |
648 | int rd, int rn, int32_t offset) | |
649 | { | |
650 | if (offset > 0xff || offset < -0xff) { | |
651 | tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
652 | tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8); | |
653 | } else | |
654 | tcg_out_ld8s_8(s, cond, rd, rn, offset); | |
655 | } | |
656 | ||
657 | static inline void tcg_out_st8u(TCGContext *s, int cond, | |
658 | int rd, int rn, int32_t offset) | |
659 | { | |
660 | if (offset > 0xfff || offset < -0xfff) { | |
661 | tcg_out_movi32(s, cond, TCG_REG_R8, offset); | |
662 | tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8); | |
663 | } else | |
664 | tcg_out_st8_12(s, cond, rd, rn, offset); | |
665 | } | |
666 | ||
667 | static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr) | |
668 | { | |
669 | int32_t val; | |
670 | ||
671 | val = addr - (tcg_target_long) s->code_ptr; | |
672 | if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd) | |
673 | tcg_out_b(s, cond, val); | |
674 | else { | |
675 | #if 1 | |
676 | tcg_abort(); | |
677 | #else | |
678 | if (cond == COND_AL) { | |
679 | tcg_out_ld32_12(s, COND_AL, 15, 15, -4); | |
680 | tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */ | |
681 | } else { | |
682 | tcg_out_movi32(s, cond, TCG_REG_R8, val - 8); | |
683 | tcg_out_dat_reg(s, cond, ARITH_ADD, | |
684 | 15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0)); | |
685 | } | |
686 | #endif | |
687 | } | |
688 | } | |
689 | ||
690 | static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr) | |
691 | { | |
692 | int32_t val; | |
693 | ||
694 | #ifdef SAVE_LR | |
695 | tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0)); | |
696 | #endif | |
697 | ||
698 | val = addr - (tcg_target_long) s->code_ptr; | |
699 | if (val < 0x01fffffd && val > -0x01fffffd) | |
700 | tcg_out_bl(s, cond, val); | |
701 | else { | |
702 | #if 1 | |
703 | tcg_abort(); | |
704 | #else | |
705 | if (cond == COND_AL) { | |
706 | tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4); | |
707 | tcg_out_ld32_12(s, COND_AL, 15, 15, -4); | |
708 | tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */ | |
709 | } else { | |
710 | tcg_out_movi32(s, cond, TCG_REG_R9, addr); | |
711 | tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15); | |
712 | tcg_out_bx(s, cond, TCG_REG_R9); | |
713 | } | |
714 | #endif | |
715 | } | |
716 | ||
717 | #ifdef SAVE_LR | |
718 | tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0)); | |
719 | #endif | |
720 | } | |
721 | ||
722 | static inline void tcg_out_callr(TCGContext *s, int cond, int arg) | |
723 | { | |
724 | #ifdef SAVE_LR | |
725 | tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0)); | |
726 | #endif | |
727 | /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */ | |
728 | tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0)); | |
729 | tcg_out_bx(s, cond, arg); | |
730 | #ifdef SAVE_LR | |
731 | tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0)); | |
732 | #endif | |
733 | } | |
734 | ||
735 | static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index) | |
736 | { | |
737 | TCGLabel *l = &s->labels[label_index]; | |
738 | ||
739 | if (l->has_value) | |
740 | tcg_out_goto(s, cond, l->u.value); | |
741 | else if (cond == COND_AL) { | |
742 | tcg_out_ld32_12(s, COND_AL, 15, 15, -4); | |
743 | tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337); | |
744 | s->code_ptr += 4; | |
745 | } else { | |
746 | /* Probably this should be preferred even for COND_AL... */ | |
747 | tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337); | |
748 | tcg_out_b_noaddr(s, cond);
749 | } |
750 | } | |
751 | ||
752 | static void tcg_out_div_helper(TCGContext *s, int cond, const TCGArg *args, | |
753 | void *helper_div, void *helper_rem, int shift) | |
754 | { | |
755 | int div_reg = args[0]; | |
756 | int rem_reg = args[1]; | |
757 | ||
758 | /* stmdb sp!, { r0 - r3, ip, lr } */ | |
759 | /* (Note that we need an even number of registers as per EABI) */ | |
760 | tcg_out32(s, (cond << 28) | 0x092d500f); | |
761 | ||
762 | tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0)); | |
763 | tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0)); | |
764 | tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0)); | |
765 | tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift); | |
766 | ||
767 | tcg_out_call(s, cond, (uint32_t) helper_div); | |
768 | tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 0, SHIFT_IMM_LSL(0)); | |
769 | ||
770 | /* ldmia sp, { r0 - r3, fp, lr } */ | |
771 | tcg_out32(s, (cond << 28) | 0x089d500f); | |
772 | ||
773 | tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0)); | |
774 | tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0)); | |
775 | tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0)); | |
776 | tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift); | |
777 | ||
778 | tcg_out_call(s, cond, (uint32_t) helper_rem); | |
779 | ||
780 | tcg_out_dat_reg(s, cond, ARITH_MOV, rem_reg, 0, 0, SHIFT_IMM_LSL(0)); | |
781 | tcg_out_dat_reg(s, cond, ARITH_MOV, div_reg, 0, 8, SHIFT_IMM_LSL(0)); | |
782 | ||
783 | /* ldr r0, [sp], #4 */ | |
784 | if (rem_reg != 0 && div_reg != 0) | |
785 | tcg_out32(s, (cond << 28) | 0x04bd0004); | |
786 | /* ldr r1, [sp], #4 */ | |
787 | if (rem_reg != 1 && div_reg != 1) | |
788 | tcg_out32(s, (cond << 28) | 0x04bd1004); | |
789 | /* ldr r2, [sp], #4 */ | |
790 | if (rem_reg != 2 && div_reg != 2) | |
791 | tcg_out32(s, (cond << 28) | 0x04bd2004); | |
792 | /* ldr r3, [sp], #4 */ | |
793 | if (rem_reg != 3 && div_reg != 3) | |
794 | tcg_out32(s, (cond << 28) | 0x04bd3004); | |
795 | /* ldr ip, [sp], #4 */ | |
796 | if (rem_reg != 12 && div_reg != 12) | |
797 | tcg_out32(s, (cond << 28) | 0x04bdc004); | |
798 | /* ldr lr, [sp], #4 */ | |
799 | if (rem_reg != 14 && div_reg != 14) | |
800 | tcg_out32(s, (cond << 28) | 0x04bde004); | |
801 | } | |
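/* There is no ARM division instruction to use here, so div2/divu2 go through
 * the 64-bit helpers: the call-clobbered registers are saved, the dividend
 * (low/high) is moved into r0:r1 and the divisor into r2 with r3 derived
 * from it by the given shift (ASR #31 sign-extends it for the signed case),
 * the quotient and remainder helpers are called in turn, and every saved
 * register that is not a result register is restored afterwards. */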
802 | ||
803 | #ifdef CONFIG_SOFTMMU | |
804 | extern void __ldb_mmu(void); | |
805 | extern void __ldw_mmu(void); | |
806 | extern void __ldl_mmu(void); | |
807 | extern void __ldq_mmu(void); | |
808 | ||
809 | extern void __stb_mmu(void); | |
810 | extern void __stw_mmu(void); | |
811 | extern void __stl_mmu(void); | |
812 | extern void __stq_mmu(void); | |
813 | ||
814 | static void *qemu_ld_helpers[4] = { | |
815 | __ldb_mmu, | |
816 | __ldw_mmu, | |
817 | __ldl_mmu, | |
818 | __ldq_mmu, | |
819 | }; | |
820 | ||
821 | static void *qemu_st_helpers[4] = { | |
822 | __stb_mmu, | |
823 | __stw_mmu, | |
824 | __stl_mmu, | |
825 | __stq_mmu, | |
826 | }; | |
827 | #endif | |
828 | ||
829 | static inline void tcg_out_qemu_ld(TCGContext *s, int cond, | |
830 | const TCGArg *args, int opc) | |
831 | { | |
832 | int addr_reg, data_reg, data_reg2; | |
833 | #ifdef CONFIG_SOFTMMU | |
834 | int mem_index, s_bits; | |
835 | # if TARGET_LONG_BITS == 64 | |
836 | int addr_reg2; | |
837 | # endif | |
838 | # ifdef USE_TLB | |
839 | uint32_t *label_ptr; | |
840 | # endif | |
841 | #endif | |
842 | ||
843 | data_reg = *args++; | |
844 | if (opc == 3) | |
845 | data_reg2 = *args++; | |
846 | else | |
847 | data_reg2 = 0; /* suppress warning */ | |
848 | addr_reg = *args++; | |
849 | #if TARGET_LONG_BITS == 64 | |
850 | addr_reg2 = *args++; | |
851 | #endif | |
852 | #ifdef CONFIG_SOFTMMU | |
853 | mem_index = *args; | |
854 | s_bits = opc & 3; | |
855 | ||
856 | # ifdef USE_TLB | |
857 | /* Should generate something like the following: |
858 | * ror r8, addr_reg, #TARGET_PAGE_BITS | |
859 | * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8 | |
860 | * add r0, T0, r0 lsl #CPU_TLB_ENTRY_BITS | |
861 | */ | |
862 | # if CPU_TLB_BITS > 8 | |
863 | # error | |
864 | # endif | |
865 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, |
866 | 8, 0, addr_reg, SHIFT_IMM_ROR(TARGET_PAGE_BITS)); | |
867 | tcg_out_dat_imm(s, COND_AL, ARITH_AND, | |
868 | 0, 8, CPU_TLB_SIZE - 1); | |
869 | tcg_out_dat_reg(s, COND_AL, ARITH_ADD, | |
870 | 0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS)); | |
871 | /* In the |
872 | * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))] | |
873 | * below, the offset is likely to exceed 12 bits if mem_index != 0 and | |
874 | * not exceed otherwise, so use an | |
875 | * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table) | |
876 | * before. | |
877 | */ | |
878 | # define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS) |
879 | if (mem_index) | |
880 | tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0, | |
881 | (mem_index << (TLB_SHIFT & 1)) | | |
882 | ((16 - (TLB_SHIFT >> 1)) << 8)); | |
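/* The odd-looking immediate above is an ARM rotated immediate: the 8-bit
 * value is mem_index << (TLB_SHIFT & 1) and the rotate field supplies the
 * remaining left shift, so the operand works out to mem_index << TLB_SHIFT,
 * i.e. mem_index * sizeof(tlb_table[0]) given the CPUTLBEntry size check
 * in tcg_target_init. */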
883 | tcg_out_ld32_12(s, COND_AL, 1, 0,
884 | offsetof(CPUState, tlb_table[0][0].addr_read));
885 | tcg_out_dat_reg(s, COND_AL, ARITH_CMP, |
886 | 0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS)); | |
887 | /* TODO: alignment check? | |
888 | * if (s_bits) | |
889 | * tcg_out_data_reg(s, COND_EQ, ARITH_EOR, | |
890 | * 0, 1, 8, SHIFT_IMM_LSR(32 - s_bits)); | |
891 | */ | |
892 | # if TARGET_LONG_BITS == 64 | |
893 | /* XXX: possibly we could use a block data load or writeback in | |
894 | * the first access. */ | |
895 | tcg_out_ld32_12(s, COND_EQ, 1, 0, | |
896 | offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
897 | tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, |
898 | 0, 1, addr_reg2, SHIFT_IMM_LSL(0)); | |
899 | # endif | |
900 | tcg_out_ld32_12(s, COND_EQ, 1, 0, | |
901 | offsetof(CPUState, tlb_table[0][0].addend));
902 | |
903 | switch (opc) { | |
904 | case 0: | |
905 | tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1); | |
906 | break; | |
907 | case 0 | 4: | |
908 | tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1); | |
909 | break; | |
910 | case 1: | |
911 | tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1); | |
912 | break; | |
913 | case 1 | 4: | |
914 | tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1); | |
915 | break; | |
916 | case 2: | |
917 | default: | |
918 | tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1); | |
919 | break; | |
920 | case 3: | |
921 | /* TODO: must write back */ | |
922 | tcg_out_ld32_r(s, COND_EQ, data_reg, 1, addr_reg); | |
923 | tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4); | |
924 | break; | |
925 | } | |
926 | ||
927 | label_ptr = (void *) s->code_ptr; | |
928 | tcg_out_b(s, COND_EQ, 8); | |
929 | # endif | |
930 | ||
931 | # ifdef SAVE_LR | |
932 | tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0)); | |
933 | # endif | |
934 | ||
935 | /* TODO: move this code to where the constants pool will be */ | |
936 | if (addr_reg) | |
937 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
938 | 0, 0, addr_reg, SHIFT_IMM_LSL(0)); | |
939 | # if TARGET_LONG_BITS == 32 | |
940 | tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index); | |
941 | # else | |
942 | if (addr_reg2 != 1) | |
943 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
944 | 1, 0, addr_reg2, SHIFT_IMM_LSL(0)); | |
945 | tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index); | |
946 | # endif | |
947 | tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
948 | (tcg_target_long) s->code_ptr); |
949 | ||
950 | switch (opc) { | |
951 | case 0 | 4: | |
952 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
953 | 0, 0, 0, SHIFT_IMM_LSL(24)); | |
954 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
955 | data_reg, 0, 0, SHIFT_IMM_ASR(24)); | |
956 | break; | |
957 | case 1 | 4: | |
958 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
959 | 0, 0, 0, SHIFT_IMM_LSL(16)); | |
960 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
961 | data_reg, 0, 0, SHIFT_IMM_ASR(16)); | |
962 | break; | |
963 | case 0: | |
964 | case 1: | |
965 | case 2: | |
966 | default: | |
967 | if (data_reg) | |
968 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
969 | data_reg, 0, 0, SHIFT_IMM_LSL(0)); | |
970 | break; | |
971 | case 3: | |
972 | if (data_reg2 != 1) | |
973 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
974 | data_reg2, 0, 1, SHIFT_IMM_LSL(0)); | |
975 | if (data_reg != 0) | |
976 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
977 | data_reg, 0, 0, SHIFT_IMM_LSL(0)); | |
978 | break; | |
979 | } | |
980 | ||
981 | # ifdef SAVE_LR | |
982 | tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0)); | |
983 | # endif | |
984 | ||
985 | # ifdef USE_TLB | |
986 | *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2; | |
987 | # endif | |
988 | #else | |
989 | switch (opc) { | |
990 | case 0: | |
991 | tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0); | |
992 | break; | |
993 | case 0 | 4: | |
994 | tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0); | |
995 | break; | |
996 | case 1: | |
997 | tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0); | |
998 | break; | |
999 | case 1 | 4: | |
1000 | tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0); | |
1001 | break; | |
1002 | case 2: | |
1003 | default: | |
1004 | tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0); | |
1005 | break; | |
1006 | case 3: | |
1007 | /* TODO: use block load */ | |
1008 | tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0); | |
1009 | tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4); | |
1010 | break; | |
1011 | } | |
1012 | #endif | |
1013 | } | |
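/* Layout of the generated code in the softmmu TLB case: the hit path ends
 * in a COND_EQ forward branch emitted with a zero displacement (recorded in
 * label_ptr) over the helper-call slow path; once the slow path has been
 * emitted, the branch's 24-bit offset is patched in place so that a TLB hit
 * skips straight past it. */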
1014 | ||
1015 | static inline void tcg_out_qemu_st(TCGContext *s, int cond, | |
1016 | const TCGArg *args, int opc) | |
1017 | { | |
1018 | int addr_reg, data_reg, data_reg2; | |
1019 | #ifdef CONFIG_SOFTMMU | |
1020 | int mem_index, s_bits; | |
1021 | # if TARGET_LONG_BITS == 64 | |
1022 | int addr_reg2; | |
1023 | # endif | |
1024 | # ifdef USE_TLB | |
1025 | uint32_t *label_ptr; | |
1026 | # endif | |
1027 | #endif | |
1028 | ||
1029 | data_reg = *args++; | |
1030 | if (opc == 3) | |
1031 | data_reg2 = *args++; | |
1032 | else | |
1033 | data_reg2 = 0; /* suppress warning */ | |
1034 | addr_reg = *args++; | |
1035 | #if TARGET_LONG_BITS == 64 | |
1036 | addr_reg2 = *args++; | |
1037 | #endif | |
1038 | #ifdef CONFIG_SOFTMMU | |
1039 | mem_index = *args; | |
1040 | s_bits = opc & 3; | |
1041 | ||
1042 | # ifdef USE_TLB | |
1043 | /* Should generate something like the following: |
1044 | * ror r8, addr_reg, #TARGET_PAGE_BITS | |
1045 | * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8 | |
1046 | * add r0, T0, r0 lsl #CPU_TLB_ENTRY_BITS | |
1047 | */ | |
1048 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, |
1049 | 8, 0, addr_reg, SHIFT_IMM_ROR(TARGET_PAGE_BITS)); | |
1050 | tcg_out_dat_imm(s, COND_AL, ARITH_AND, | |
1051 | 0, 8, CPU_TLB_SIZE - 1); | |
1052 | tcg_out_dat_reg(s, COND_AL, ARITH_ADD, | |
1053 | 0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS)); | |
1054 | /* In the |
1055 | * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))] | |
1056 | * below, the offset is likely to exceed 12 bits if mem_index != 0 and | |
1057 | * not exceed otherwise, so use an | |
1058 | * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table) | |
1059 | * before. | |
1060 | */ | |
1061 | if (mem_index) |
1062 | tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0, | |
1063 | (mem_index << (TLB_SHIFT & 1)) | | |
1064 | ((16 - (TLB_SHIFT >> 1)) << 8)); | |
1065 | tcg_out_ld32_12(s, COND_AL, 1, 0,
1066 | offsetof(CPUState, tlb_table[0][0].addr_write));
1067 | tcg_out_dat_reg(s, COND_AL, ARITH_CMP, |
1068 | 0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS)); | |
1069 | /* TODO: alignment check? | |
1070 | * if (s_bits) | |
1071 | * tcg_out_data_reg(s, COND_EQ, ARITH_EOR, | |
1072 | * 0, 1, 8, SHIFT_IMM_LSR(32 - s_bits)); | |
1073 | */ | |
1074 | # if TARGET_LONG_BITS == 64 | |
1075 | /* XXX: possibly we could use a block data load or writeback in | |
1076 | * the first access. */ | |
1077 | tcg_out_ld32_12(s, COND_EQ, 1, 0, | |
1078 | offsetof(CPUState, tlb_table[0][0].addr_write)
1079 | + 4); |
1080 | tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, | |
1081 | 0, 1, addr_reg2, SHIFT_IMM_LSL(0)); | |
1082 | # endif | |
1083 | tcg_out_ld32_12(s, COND_EQ, 1, 0, | |
1084 | offsetof(CPUState, tlb_table[0][0].addend));
1085 | |
1086 | switch (opc) { | |
1087 | case 0: | |
1088 | tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1); | |
1089 | break; | |
1090 | case 0 | 4: | |
1091 | tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1); | |
1092 | break; | |
1093 | case 1: | |
1094 | tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1); | |
1095 | break; | |
1096 | case 1 | 4: | |
1097 | tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1); | |
1098 | break; | |
1099 | case 2: | |
1100 | default: | |
1101 | tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1); | |
1102 | break; | |
1103 | case 3: | |
1104 | /* TODO: must write back */ | |
1105 | tcg_out_st32_r(s, COND_EQ, data_reg, 1, addr_reg); | |
1106 | tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4); | |
1107 | break; | |
1108 | } | |
1109 | ||
1110 | label_ptr = (void *) s->code_ptr; | |
1111 | tcg_out_b(s, COND_EQ, 8); | |
1112 | # endif | |
1113 | ||
1114 | /* TODO: move this code to where the constants pool will be */ |
1115 | if (addr_reg) | |
1116 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1117 | 0, 0, addr_reg, SHIFT_IMM_LSL(0)); | |
1118 | # if TARGET_LONG_BITS == 32 | |
1119 | switch (opc) { | |
1120 | case 0: | |
1121 | tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff); | |
1122 | tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index); | |
1123 | break; | |
1124 | case 1: | |
1125 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1126 | 1, 0, data_reg, SHIFT_IMM_LSL(16)); | |
1127 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1128 | 1, 0, 1, SHIFT_IMM_LSR(16)); | |
1129 | tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index); | |
1130 | break; | |
1131 | case 2: | |
1132 | if (data_reg != 1) | |
1133 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1134 | 1, 0, data_reg, SHIFT_IMM_LSL(0)); | |
1135 | tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index); | |
1136 | break; | |
1137 | case 3: | |
1138 | if (data_reg != 1) | |
1139 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1140 | 1, 0, data_reg, SHIFT_IMM_LSL(0)); | |
1141 | if (data_reg2 != 2) | |
1142 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1143 | 2, 0, data_reg2, SHIFT_IMM_LSL(0)); | |
1144 | tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index); | |
1145 | break; | |
1146 | } | |
1147 | # else | |
1148 | if (addr_reg2 != 1) | |
1149 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1150 | 1, 0, addr_reg2, SHIFT_IMM_LSL(0)); | |
1151 | switch (opc) { | |
1152 | case 0: | |
1153 | tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff); | |
1154 | tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index); | |
1155 | break; | |
1156 | case 1: | |
1157 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1158 | 2, 0, data_reg, SHIFT_IMM_LSL(16)); | |
1159 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1160 | 2, 0, 2, SHIFT_IMM_LSR(16)); | |
1161 | tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index); | |
1162 | break; | |
1163 | case 2: | |
1164 | if (data_reg != 2) | |
1165 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1166 | 2, 0, data_reg, SHIFT_IMM_LSL(0)); | |
1167 | tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index); | |
1168 | break; | |
1169 | case 3: | |
1170 | tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index); |
1171 | tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */ | |
1172 | if (data_reg != 2) |
1173 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1174 | 2, 0, data_reg, SHIFT_IMM_LSL(0)); | |
1175 | if (data_reg2 != 3) | |
1176 | tcg_out_dat_reg(s, cond, ARITH_MOV, | |
1177 | 3, 0, data_reg2, SHIFT_IMM_LSL(0)); | |
1178 | break; | |
1179 | } | |
1180 | # endif | |
1181 | ||
1182 | # ifdef SAVE_LR |
1183 | tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0)); | |
1184 | # endif | |
1185 | ||
1186 | tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
1187 | (tcg_target_long) s->code_ptr); |
1188 | ||
1189 | # if TARGET_LONG_BITS == 64 | |
1190 | if (opc == 3) | |
1191 | tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10); | |
1192 | # endif | |
1193 | ||
1194 | # ifdef SAVE_LR | |
1195 | tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0)); | |
1196 | # endif | |
1197 | ||
1198 | # ifdef USE_TLB | |
1199 | *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2; | |
1200 | # endif | |
1201 | #else | |
1202 | switch (opc) { | |
1203 | case 0: | |
1204 | tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0); | |
1205 | break; | |
1206 | case 0 | 4: | |
1207 | tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0);
1208 | break; |
1209 | case 1: | |
1210 | tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0); | |
1211 | break; | |
1212 | case 1 | 4: | |
1213 | tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0); | |
1214 | break; | |
1215 | case 2: | |
1216 | default: | |
1217 | tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0); | |
1218 | break; | |
1219 | case 3: | |
1220 | /* TODO: use block store */ | |
1221 | tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0); | |
1222 | tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4); | |
1223 | break; | |
1224 | } | |
1225 | #endif | |
1226 | } | |
1227 | ||
1228 | static uint8_t *tb_ret_addr; |
1229 | ||
1230 | static inline void tcg_out_op(TCGContext *s, int opc,
1231 | const TCGArg *args, const int *const_args) |
1232 | { | |
1233 | int c; | |
1234 | ||
1235 | switch (opc) { | |
1236 | case INDEX_op_exit_tb: | |
1237 | #ifdef SAVE_LR | |
1238 | if (args[0] >> 8) | |
1239 | tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0); | |
1240 | else | |
1241 | tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]); | |
1242 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0)); | |
1243 | if (args[0] >> 8) | |
1244 | tcg_out32(s, args[0]); | |
1245 | #else | |
1246 | if (args[0] >> 8) | |
1247 | tcg_out_ld32_12(s, COND_AL, 0, 15, 0); | |
1248 | else | |
1249 | tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]); | |
1250 | tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr); | |
1251 | if (args[0] >> 8) | |
1252 | tcg_out32(s, args[0]); | |
1253 | #endif | |
1254 | break; | |
1255 | case INDEX_op_goto_tb: | |
1256 | if (s->tb_jmp_offset) { | |
1257 | /* Direct jump method */ | |
1258 | #if 1 | |
1259 | s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; | |
1260 | tcg_out_b(s, COND_AL, 8); | |
1261 | #else | |
1262 | tcg_out_ld32_12(s, COND_AL, 15, 15, -4); | |
1263 | s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; | |
1264 | tcg_out32(s, 0); | |
1265 | #endif | |
1266 | } else { | |
1267 | /* Indirect jump method */ | |
1268 | #if 1 | |
1269 | c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8); | |
1270 | if (c > 0xfff || c < -0xfff) { | |
1271 | tcg_out_movi32(s, COND_AL, TCG_REG_R0, | |
1272 | (tcg_target_long) (s->tb_next + args[0])); | |
1273 | tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0); | |
1274 | } else | |
1275 | tcg_out_ld32_12(s, COND_AL, 15, 15, c); | |
1276 | #else | |
1277 | tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0); | |
1278 | tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0); | |
1279 | tcg_out32(s, (tcg_target_long) (s->tb_next + args[0])); | |
1280 | #endif | |
1281 | } | |
1282 | s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf; | |
1283 | break; | |
1284 | case INDEX_op_call: | |
1285 | if (const_args[0]) | |
1286 | tcg_out_call(s, COND_AL, args[0]); | |
1287 | else | |
1288 | tcg_out_callr(s, COND_AL, args[0]); | |
1289 | break; | |
1290 | case INDEX_op_jmp: | |
1291 | if (const_args[0]) | |
1292 | tcg_out_goto(s, COND_AL, args[0]); | |
1293 | else | |
1294 | tcg_out_bx(s, COND_AL, args[0]); | |
1295 | break; | |
1296 | case INDEX_op_br: | |
1297 | tcg_out_goto_label(s, COND_AL, args[0]); | |
1298 | break; | |
1299 | ||
1300 | case INDEX_op_ld8u_i32: | |
1301 | tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]); | |
1302 | break; | |
1303 | case INDEX_op_ld8s_i32: | |
1304 | tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]); | |
1305 | break; | |
1306 | case INDEX_op_ld16u_i32: | |
1307 | tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]); | |
1308 | break; | |
1309 | case INDEX_op_ld16s_i32: | |
1310 | tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]); | |
1311 | break; | |
1312 | case INDEX_op_ld_i32: | |
1313 | tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]); | |
1314 | break; | |
1315 | case INDEX_op_st8_i32: | |
1316 | tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]); | |
1317 | break; | |
1318 | case INDEX_op_st16_i32: | |
1319 | tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]); | |
1320 | break; | |
1321 | case INDEX_op_st_i32: | |
1322 | tcg_out_st32(s, COND_AL, args[0], args[1], args[2]); | |
1323 | break; | |
1324 | ||
1325 | case INDEX_op_mov_i32: | |
1326 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, | |
1327 | args[0], 0, args[1], SHIFT_IMM_LSL(0)); | |
1328 | break; | |
1329 | case INDEX_op_movi_i32: | |
1330 | tcg_out_movi32(s, COND_AL, args[0], args[1]); | |
1331 | break; | |
1332 | case INDEX_op_add_i32: | |
1333 | c = ARITH_ADD; | |
1334 | goto gen_arith; | |
1335 | case INDEX_op_sub_i32: | |
1336 | c = ARITH_SUB; | |
1337 | goto gen_arith; | |
1338 | case INDEX_op_and_i32: | |
1339 | c = ARITH_AND; | |
1340 | goto gen_arith; | |
1341 | case INDEX_op_or_i32: | |
1342 | c = ARITH_ORR; | |
1343 | goto gen_arith; | |
1344 | case INDEX_op_xor_i32: | |
1345 | c = ARITH_EOR; | |
1346 | /* Fall through. */ | |
1347 | gen_arith: | |
1348 | tcg_out_dat_reg(s, COND_AL, c, | |
1349 | args[0], args[1], args[2], SHIFT_IMM_LSL(0)); | |
1350 | break; | |
1351 | case INDEX_op_add2_i32: | |
1352 | tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC, | |
1353 | args[0], args[1], args[2], args[3], | |
1354 | args[4], args[5], SHIFT_IMM_LSL(0)); | |
1355 | break; | |
1356 | case INDEX_op_sub2_i32: | |
1357 | tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC, | |
1358 | args[0], args[1], args[2], args[3], | |
1359 | args[4], args[5], SHIFT_IMM_LSL(0)); | |
1360 | break; | |
1361 | case INDEX_op_neg_i32: |
1362 | tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0); | |
1363 | break; | |
1364 | case INDEX_op_mul_i32: |
1365 | tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]); | |
1366 | break; | |
1367 | case INDEX_op_mulu2_i32: | |
1368 | tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]); | |
1369 | break; | |
1370 | case INDEX_op_div2_i32: | |
1371 | tcg_out_div_helper(s, COND_AL, args, | |
1372 | tcg_helper_div_i64, tcg_helper_rem_i64, | |
1373 | SHIFT_IMM_ASR(31)); | |
1374 | break; | |
1375 | case INDEX_op_divu2_i32: | |
1376 | tcg_out_div_helper(s, COND_AL, args, | |
1377 | tcg_helper_divu_i64, tcg_helper_remu_i64, | |
1378 | SHIFT_IMM_LSR(31)); | |
1379 | break; | |
1380 | /* XXX: Perhaps args[2] & 0x1f is wrong */ | |
1381 | case INDEX_op_shl_i32: | |
1382 | c = const_args[2] ? | |
1383 | SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]); | |
1384 | goto gen_shift32; | |
1385 | case INDEX_op_shr_i32: | |
1386 | c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) : | |
1387 | SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]); | |
1388 | goto gen_shift32; | |
1389 | case INDEX_op_sar_i32: | |
1390 | c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) : | |
1391 | SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]); | |
1392 | /* Fall through. */ | |
1393 | gen_shift32: | |
1394 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c); | |
1395 | break; | |
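/* A constant shift count of zero above must be emitted as LSL #0: in the
 * ARM shifter encoding, LSR #0 and ASR #0 denote a shift by 32, not by
 * zero, hence the extra (args[2] & 0x1f) test. */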
1396 | ||
1397 | case INDEX_op_brcond_i32: | |
1398 | tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, | |
1399 | args[0], args[1], SHIFT_IMM_LSL(0)); | |
1400 | tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]); | |
1401 | break; | |
1402 | case INDEX_op_brcond2_i32: | |
1403 | /* The resulting conditions are: | |
1404 | * TCG_COND_EQ --> a0 == a2 && a1 == a3, | |
1405 | * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3, | |
1406 | * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3, | |
1407 | * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3), | |
1408 | * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3), | |
1409 | * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3, | |
1410 | */ | |
1411 | tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, | |
1412 | args[1], args[3], SHIFT_IMM_LSL(0)); | |
1413 | tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, | |
1414 | args[0], args[2], SHIFT_IMM_LSL(0)); | |
1415 | tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]); | |
1416 | break; | |
1417 | ||
1418 | case INDEX_op_qemu_ld8u: | |
1419 | tcg_out_qemu_ld(s, COND_AL, args, 0); | |
1420 | break; | |
1421 | case INDEX_op_qemu_ld8s: | |
1422 | tcg_out_qemu_ld(s, COND_AL, args, 0 | 4); | |
1423 | break; | |
1424 | case INDEX_op_qemu_ld16u: | |
1425 | tcg_out_qemu_ld(s, COND_AL, args, 1); | |
1426 | break; | |
1427 | case INDEX_op_qemu_ld16s: | |
1428 | tcg_out_qemu_ld(s, COND_AL, args, 1 | 4); | |
1429 | break; | |
1430 | case INDEX_op_qemu_ld32u: | |
1431 | tcg_out_qemu_ld(s, COND_AL, args, 2); | |
1432 | break; | |
1433 | case INDEX_op_qemu_ld64: | |
1434 | tcg_out_qemu_ld(s, COND_AL, args, 3); | |
1435 | break; | |
1436 ||
1437 | case INDEX_op_qemu_st8: |
1438 | tcg_out_qemu_st(s, COND_AL, args, 0); | |
1439 | break; | |
1440 | case INDEX_op_qemu_st16: | |
1441 | tcg_out_qemu_st(s, COND_AL, args, 1); | |
1442 | break; | |
1443 | case INDEX_op_qemu_st32: | |
1444 | tcg_out_qemu_st(s, COND_AL, args, 2); | |
1445 | break; | |
1446 | case INDEX_op_qemu_st64: | |
1447 | tcg_out_qemu_st(s, COND_AL, args, 3); | |
1448 | break; | |
1449 | ||
1450 | case INDEX_op_ext8s_i32: | |
1451 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, | |
1452 | args[0], 0, args[1], SHIFT_IMM_LSL(24)); | |
1453 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, | |
1454 | args[0], 0, args[0], SHIFT_IMM_ASR(24)); | |
1455 | break; | |
1456 | case INDEX_op_ext16s_i32: | |
1457 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, | |
1458 | args[0], 0, args[1], SHIFT_IMM_LSL(16)); | |
1459 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, | |
1460 | args[0], 0, args[0], SHIFT_IMM_ASR(16)); | |
1461 | break; | |
1462 | ||
1463 | default: | |
1464 | tcg_abort(); | |
1465 | } | |
1466 | } | |
1467 | ||
1468 | static const TCGTargetOpDef arm_op_defs[] = { | |
1469 | { INDEX_op_exit_tb, { } }, | |
1470 | { INDEX_op_goto_tb, { } }, | |
1471 | { INDEX_op_call, { "ri" } }, | |
1472 | { INDEX_op_jmp, { "ri" } }, | |
1473 | { INDEX_op_br, { } }, | |
1474 | ||
1475 | { INDEX_op_mov_i32, { "r", "r" } }, | |
1476 | { INDEX_op_movi_i32, { "r" } }, | |
1477 | ||
1478 | { INDEX_op_ld8u_i32, { "r", "r" } }, | |
1479 | { INDEX_op_ld8s_i32, { "r", "r" } }, | |
1480 | { INDEX_op_ld16u_i32, { "r", "r" } }, | |
1481 | { INDEX_op_ld16s_i32, { "r", "r" } }, | |
1482 | { INDEX_op_ld_i32, { "r", "r" } }, | |
1483 | { INDEX_op_st8_i32, { "r", "r" } }, | |
1484 | { INDEX_op_st16_i32, { "r", "r" } }, | |
1485 | { INDEX_op_st_i32, { "r", "r" } }, | |
1486 | ||
1487 | /* TODO: "r", "r", "ri" */ | |
1488 | { INDEX_op_add_i32, { "r", "r", "r" } }, | |
1489 | { INDEX_op_sub_i32, { "r", "r", "r" } }, | |
1490 | { INDEX_op_mul_i32, { "r", "r", "r" } }, | |
1491 | { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } }, | |
1492 | { INDEX_op_div2_i32, { "r", "r", "r", "1", "2" } }, | |
1493 | { INDEX_op_divu2_i32, { "r", "r", "r", "1", "2" } }, | |
1494 | { INDEX_op_and_i32, { "r", "r", "r" } }, | |
1495 | { INDEX_op_or_i32, { "r", "r", "r" } }, | |
1496 | { INDEX_op_xor_i32, { "r", "r", "r" } }, | |
1497 | { INDEX_op_neg_i32, { "r", "r" } },
1498 | |
1499 | { INDEX_op_shl_i32, { "r", "r", "ri" } }, | |
1500 | { INDEX_op_shr_i32, { "r", "r", "ri" } }, | |
1501 | { INDEX_op_sar_i32, { "r", "r", "ri" } }, | |
1502 | ||
1503 | { INDEX_op_brcond_i32, { "r", "r" } }, | |
1504 | ||
1505 | /* TODO: "r", "r", "r", "r", "ri", "ri" */ | |
1506 | { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } }, | |
1507 | { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } }, | |
1508 | { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } }, | |
1509 | ||
1510 | { INDEX_op_qemu_ld8u, { "r", "x", "X" } }, | |
1511 | { INDEX_op_qemu_ld8s, { "r", "x", "X" } }, | |
1512 | { INDEX_op_qemu_ld16u, { "r", "x", "X" } }, | |
1513 | { INDEX_op_qemu_ld16s, { "r", "x", "X" } }, | |
1514 | { INDEX_op_qemu_ld32u, { "r", "x", "X" } }, | |
1515 | { INDEX_op_qemu_ld64, { "r", "d", "x", "X" } }, | |
1516 | ||
1517 | { INDEX_op_qemu_st8, { "d", "x", "X" } }, | |
1518 | { INDEX_op_qemu_st16, { "d", "x", "X" } }, | |
1519 | { INDEX_op_qemu_st32, { "d", "x", "X" } }, | |
1520 | { INDEX_op_qemu_st64, { "d", "D", "x", "X" } }, | |
1521 | ||
1522 | { INDEX_op_ext8s_i32, { "r", "r" } }, | |
1523 | { INDEX_op_ext16s_i32, { "r", "r" } }, | |
1524 | ||
1525 | { -1 }, | |
1526 | }; | |
1527 | ||
1528 | void tcg_target_init(TCGContext *s) | |
1529 | { | |
1530 | /* fail safe */ | |
1531 | if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) | |
1532 | tcg_abort(); | |
1533 | ||
1534 | tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, | |
1535 | ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8)); | |
1536 | tcg_regset_set32(tcg_target_call_clobber_regs, 0, | |
1537 | ((2 << TCG_REG_R3) - 1) | | |
1538 | (1 << TCG_REG_R12) | (1 << TCG_REG_R14)); | |
1539 | ||
1540 | tcg_regset_clear(s->reserved_regs); | |
1541 | #ifdef SAVE_LR | |
1542 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14); | |
1543 | #endif | |
1544 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); | |
1545 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8); | |
1546 | ||
1547 | tcg_add_target_add_op_defs(arm_op_defs); | |
1548 | } | |
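/* Register conventions set up here: r0-r3 carry call arguments and are
 * marked call-clobbered together with r12 and r14; r8 is excluded from the
 * available set and reserved as the scratch register used throughout this
 * file; the call-stack register (and r14 when SAVE_LR is defined) is
 * reserved as well. */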
1549 | ||
1550 | static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg, | |
1551 | int arg1, tcg_target_long arg2) | |
1552 | { | |
1553 | tcg_out_ld32u(s, COND_AL, arg, arg1, arg2); | |
1554 | } | |
1555 | ||
1556 | static inline void tcg_out_st(TCGContext *s, TCGType type, int arg, | |
1557 | int arg1, tcg_target_long arg2) | |
1558 | { | |
1559 | tcg_out_st32(s, COND_AL, arg, arg1, arg2); | |
1560 | } | |
1561 | ||
1562 | void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) | |
1563 | { | |
1564 | if (val > 0) | |
1565 | if (val < 0x100) | |
1566 | tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val); | |
1567 | else | |
1568 | tcg_abort(); | |
1569 | else if (val < 0) { | |
1570 | if (val > -0x100) | |
1571 | tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val); | |
1572 | else | |
1573 | tcg_abort(); | |
1574 | } | |
1575 | } | |
1576 | ||
1577 | static inline void tcg_out_mov(TCGContext *s, int ret, int arg) | |
1578 | { | |
1579 | tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0)); | |
1580 | } | |
1581 | ||
1582 | static inline void tcg_out_movi(TCGContext *s, TCGType type, | |
1583 | int ret, tcg_target_long arg) | |
1584 | { | |
1585 | tcg_out_movi32(s, COND_AL, ret, arg); | |
1586 | } | |
1587 | ||
1588 | void tcg_target_qemu_prologue(TCGContext *s) | |
1589 | { | |
1590 | /* stmdb sp!, { r9 - r11, lr } */ | |
1591 | tcg_out32(s, (COND_AL << 28) | 0x092d4e00); | |
1592 | ||
1593 | tcg_out_bx(s, COND_AL, TCG_REG_R0); | |
1594 | tb_ret_addr = s->code_ptr; | |
1595 | ||
1596 | /* ldmia sp!, { r9 - r11, pc } */ | |
1597 | tcg_out32(s, (COND_AL << 28) | 0x08bd8e00); | |
1598 | } |