/*
 * Extracted from a git-blame view of qemu.git: tcg/x86_64/tcg-target.c
 * (commit "Use a TCG global for regwptr"); blame annotations stripped.
 */
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Printable register names, indexed by the x86-64 register encoding
   (0 = %rax ... 15 = %r15); used for debug/disassembly output.  */
const char *tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%rax",
    "%rcx",
    "%rdx",
    "%rbx",
    "%rsp",
    "%rbp",
    "%rsi",
    "%rdi",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%r15",
};
42
/* Register allocation preference order.  The call-clobbered registers
   (see the clobber set built in tcg_target_init) come first; the
   callee-saved registers RBP/RBX/R12-R15 are tried last.  */
int tcg_target_reg_alloc_order[TCG_TARGET_NB_REGS] = {
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_RAX,
    TCG_REG_R10,
    TCG_REG_R11,

    TCG_REG_RBP,
    TCG_REG_RBX,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
};
61
/* Registers used to pass integer function arguments, in argument order.  */
const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_RDI,
    TCG_REG_RSI,
    TCG_REG_RDX,
    TCG_REG_RCX,
    TCG_REG_R8,
    TCG_REG_R9,
};
70
/* Registers used to return integer function results.  */
const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RAX,
    TCG_REG_RDX
};
75
/* Patch one pending relocation.  code_ptr points at the 32-bit field to
   fill in; type selects the encoding.  Aborts if the value does not fit
   the 32-bit field with the required extension.  */
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value)
{
    switch(type) {
    case R_X86_64_32:
        /* 32-bit absolute; must be representable zero-extended.  */
        if (value != (uint32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    case R_X86_64_32S:
        /* 32-bit absolute; must be representable sign-extended.  */
        if (value != (int32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC32:
        /* 32-bit PC-relative displacement from the patch location.  */
        value -= (long)code_ptr;
        if (value != (int32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    default:
        tcg_abort();
    }
}
100
/* maximum number of register used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    /* Always six (see tcg_target_call_iarg_regs); flags is unused.  */
    return 6;
}
106
/* parse target specific constraints */
/* Consume one constraint letter from *pct_str, fill in ct accordingly,
   and advance the string.  Returns 0 on success, -1 for an unknown
   letter.  Single-register letters pin the operand to a fixed register;
   'q' allows the four low byte-addressable registers, 'r' any of the 16,
   'L' excludes RSI/RDI (clobbered by the qemu_ld/st helpers), and
   'e'/'Z' accept sign-/zero-extendable 32-bit constants.  */
int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RCX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RSI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_RDI);
        break;
    case 'q':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI);
        break;
    case 'e':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
165
166/* test if a constant matches the constraint */
167static inline int tcg_target_const_match(tcg_target_long val,
168 const TCGArgConstraint *arg_ct)
169{
170 int ct;
171 ct = arg_ct->ct;
172 if (ct & TCG_CT_CONST)
173 return 1;
174 else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val)
175 return 1;
176 else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val)
177 return 1;
178 else
179 return 0;
180}
181
/* Group-1 ALU operations: each value is the 3-bit opcode extension used
   in the modrm reg field with opcodes 0x81/0x83, or shifted into the
   opcode as "0x01 | (op << 3)" for the register-register forms.  */
#define ARITH_ADD 0
#define ARITH_OR 1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Shift-group opcode extensions, used with opcodes 0xc1/0xd1/0xd3.  */
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* x86 condition-code nibbles, combined with 0x70 (short jcc) or
   0x0f 0x80 (near jcc).  JCC_JMP requests an unconditional jump in
   tcg_out_jxx.  */
#define JCC_JMP (-1)
#define JCC_JO 0x0
#define JCC_JNO 0x1
#define JCC_JB 0x2
#define JCC_JAE 0x3
#define JCC_JE 0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA 0x7
#define JCC_JS 0x8
#define JCC_JNS 0x9
#define JCC_JP 0xa
#define JCC_JNP 0xb
#define JCC_JL 0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG 0xf

/* Flag bits or'ed into the opcode argument of tcg_out_opc.  */
#define P_EXT 0x100 /* 0x0f opcode prefix */
#define P_REXW 0x200 /* set rex.w = 1 */
#define P_REX 0x400 /* force rex usage */
216
/* Map a TCG comparison condition to the matching x86 condition-code
   nibble (signed: l/ge/le/g, unsigned: b/ae/be/a).  */
static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
229
/* Emit an opcode byte, preceded by a REX prefix when needed.
   'opc' may carry the P_EXT/P_REXW/P_REX flags; r, rm and x are the
   registers destined for the modrm reg field, modrm rm field and SIB
   index field, whose high bits become REX.R, REX.B and REX.X.  */
static inline void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
{
    int rex;
    /* REX.W from P_REXW (0x200 >> 6 == 0x8), REX.R from bit 3 of r,
       REX.X from bit 3 of x, REX.B from bit 3 of rm.  */
    rex = ((opc >> 6) & 0x8) | ((r >> 1) & 0x4) |
        ((x >> 2) & 2) | ((rm >> 3) & 1);
    if (rex || (opc & P_REX)) {
        tcg_out8(s, rex | 0x40);
    }
    if (opc & P_EXT)
        tcg_out8(s, 0x0f);
    tcg_out8(s, opc);
}
242
/* Emit opcode + modrm byte for a register-direct operand
   (mod = 11, reg = r, rm = rm).  */
static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc, r, rm, 0);
    tcg_out8(s, 0xc0 | ((r & 7) << 3) | (rm & 7));
}
248
/* rm < 0 means no register index plus (-rm - 1 immediate bytes) */
/* Emit opcode + modrm (+ SIB + displacement) for a memory operand
   base register 'rm' with displacement 'offset'.  For rm < 0 a
   RIP-relative encoding is tried first (displacement measured from the
   end of the 5-byte opcode+modrm+disp32 sequence plus any trailing
   immediate bytes), falling back to a 32-bit absolute address via SIB.  */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
                                        tcg_target_long offset)
{
    if (rm < 0) {
        tcg_target_long val;
        tcg_out_opc(s, opc, r, 0, 0);
        val = offset - ((tcg_target_long)s->code_ptr + 5 + (-rm - 1));
        if (val == (int32_t)val) {
            /* eip relative */
            tcg_out8(s, 0x05 | ((r & 7) << 3));
            tcg_out32(s, val);
        } else if (offset == (int32_t)offset) {
            /* absolute 32-bit address: mod=00 rm=100 with SIB 0x25.  */
            tcg_out8(s, 0x04 | ((r & 7) << 3));
            tcg_out8(s, 0x25); /* sib */
            tcg_out32(s, offset);
        } else {
            tcg_abort();
        }
    } else if (offset == 0 && (rm & 7) != TCG_REG_RBP) {
        /* no displacement; RBP (rm=101 with mod=00) is excluded because
           that encoding means disp32.  */
        tcg_out_opc(s, opc, r, rm, 0);
        if ((rm & 7) == TCG_REG_RSP) {
            /* rm=100 requires a SIB byte.  */
            tcg_out8(s, 0x04 | ((r & 7) << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x00 | ((r & 7) << 3) | (rm & 7));
        }
    } else if ((int8_t)offset == offset) {
        /* 8-bit displacement (mod=01).  */
        tcg_out_opc(s, opc, r, rm, 0);
        if ((rm & 7) == TCG_REG_RSP) {
            tcg_out8(s, 0x44 | ((r & 7) << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x40 | ((r & 7) << 3) | (rm & 7));
        }
        tcg_out8(s, offset);
    } else {
        /* 32-bit displacement (mod=10).  */
        tcg_out_opc(s, opc, r, rm, 0);
        if ((rm & 7) == TCG_REG_RSP) {
            tcg_out8(s, 0x84 | ((r & 7) << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x80 | ((r & 7) << 3) | (rm & 7));
        }
        tcg_out32(s, offset);
    }
}
296
#if defined(CONFIG_SOFTMMU)
/* XXX: incomplete. index must be different from ESP */
/* Emit opcode + modrm + SIB + displacement for a base+scaled-index
   memory operand: offset(rm, index*2^shift).  index == -1 means no
   index register.  A base register (rm != -1) is required; aborts on
   displacements that do not fit 32 bits.  */
static void tcg_out_modrm_offset2(TCGContext *s, int opc, int r, int rm,
                                  int index, int shift,
                                  tcg_target_long offset)
{
    int mod;
    if (rm == -1)
        tcg_abort();
    /* Select the modrm mod field from the displacement size; mod=00
       with base RBP would mean disp32, so RBP always gets mod=01.  */
    if (offset == 0 && (rm & 7) != TCG_REG_RBP) {
        mod = 0;
    } else if (offset == (int8_t)offset) {
        mod = 0x40;
    } else if (offset == (int32_t)offset) {
        mod = 0x80;
    } else {
        tcg_abort();
    }
    if (index == -1) {
        tcg_out_opc(s, opc, r, rm, 0);
        if ((rm & 7) == TCG_REG_RSP) {
            /* base RSP requires a SIB byte.  */
            tcg_out8(s, mod | ((r & 7) << 3) | 0x04);
            tcg_out8(s, 0x04 | (rm & 7));
        } else {
            tcg_out8(s, mod | ((r & 7) << 3) | (rm & 7));
        }
    } else {
        tcg_out_opc(s, opc, r, rm, index);
        tcg_out8(s, mod | ((r & 7) << 3) | 0x04);
        tcg_out8(s, (shift << 6) | ((index & 7) << 3) | (rm & 7));
    }
    if (mod == 0x40) {
        tcg_out8(s, offset);
    } else if (mod == 0x80) {
        tcg_out32(s, offset);
    }
}
#endif
335
/* Emit a 64-bit register-to-register move (movq arg, ret).  */
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_modrm(s, 0x8b | P_REXW, ret, arg);
}
340
/* Load constant 'arg' into register 'ret', choosing the shortest
   encoding: xor for zero, 32-bit mov when the value zero-extends (or
   the type is 32-bit), sign-extended movq imm32, else a full movabs
   with a 64-bit immediate.  */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, tcg_target_long arg)
{
    if (arg == 0) {
        tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret); /* xor r0,r0 */
    } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
        /* 32-bit mov; the upper half of the register is cleared.  */
        tcg_out_opc(s, 0xb8 + (ret & 7), 0, ret, 0);
        tcg_out32(s, arg);
    } else if (arg == (int32_t)arg) {
        /* movq with sign-extended 32-bit immediate.  */
        tcg_out_modrm(s, 0xc7 | P_REXW, 0, ret);
        tcg_out32(s, arg);
    } else {
        /* movabs with full 64-bit immediate, emitted as two halves.  */
        tcg_out_opc(s, (0xb8 + (ret & 7)) | P_REXW, 0, ret, 0);
        tcg_out32(s, arg);
        tcg_out32(s, arg >> 32);
    }
}
358
/* Load a 64-bit value from memory: movq arg2(arg1), ret.  */
static inline void tcg_out_ld(TCGContext *s, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, 0x8b | P_REXW, ret, arg1, arg2); /* movq */
}
364
/* Store a 64-bit value to memory: movq arg, arg2(arg1).  */
static inline void tcg_out_st(TCGContext *s, int arg,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_modrm_offset(s, 0x89 | P_REXW, arg, arg1, arg2); /* movq */
}
370
/* Emit a 32-bit group-1 ALU operation 'c' (ARITH_*) with immediate
   'val' on register r0, preferring the sign-extended imm8 form.  */
static inline void tgen_arithi32(TCGContext *s, int c, int r0, int32_t val)
{
    if (val == (int8_t)val) {
        tcg_out_modrm(s, 0x83, c, r0);
        tcg_out8(s, val);
    } else {
        tcg_out_modrm(s, 0x81, c, r0);
        tcg_out32(s, val);
    }
}
381
/* Emit a 64-bit group-1 ALU operation 'c' (ARITH_*) with immediate
   'val' on register r0.  Only sign-extendable 32-bit immediates can be
   encoded; as a special case an AND with a zero-extendable 32-bit mask
   is emitted as the 32-bit form, which clears the upper half and is
   therefore equivalent.  Aborts on other 64-bit immediates.  */
static inline void tgen_arithi64(TCGContext *s, int c, int r0, int64_t val)
{
    if (val == (int8_t)val) {
        tcg_out_modrm(s, 0x83 | P_REXW, c, r0);
        tcg_out8(s, val);
    } else if (val == (int32_t)val) {
        tcg_out_modrm(s, 0x81 | P_REXW, c, r0);
        tcg_out32(s, val);
    } else if (c == ARITH_AND && val == (uint32_t)val) {
        tcg_out_modrm(s, 0x81, c, r0);
        tcg_out32(s, val);
    } else {
        tcg_abort();
    }
}
397
398void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
399{
400 if (val != 0)
401 tgen_arithi64(s, ARITH_ADD, reg, val);
402}
403
/* Emit a jump to a TCG label.  opc is a JCC_* condition nibble, or
   JCC_JMP (-1) for an unconditional jump.  Backward branches to an
   already-resolved label use the short 2-byte form when the
   displacement fits in 8 bits, otherwise the near form.  Forward
   branches always use the near form and queue an R_386_PC32
   relocation for patch_reloc.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;          /* displacement after a 2-byte short jump */
        if ((int8_t)val1 == val1) {
            if (opc == -1)
                tcg_out8(s, 0xeb);          /* jmp rel8 */
            else
                tcg_out8(s, 0x70 + opc);    /* jcc rel8 */
            tcg_out8(s, val1);
        } else {
            if (opc == -1) {
                tcg_out8(s, 0xe9);          /* jmp rel32, 5 bytes total */
                tcg_out32(s, val - 5);
            } else {
                tcg_out8(s, 0x0f);          /* jcc rel32, 6 bytes total */
                tcg_out8(s, 0x80 + opc);
                tcg_out32(s, val - 6);
            }
        }
    } else {
        /* Label not yet resolved: emit the near form with a hole and
           record a PC-relative relocation (addend -4 accounts for the
           displacement being relative to the end of the field).  */
        if (opc == -1) {
            tcg_out8(s, 0xe9);
        } else {
            tcg_out8(s, 0x0f);
            tcg_out8(s, 0x80 + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
439
/* Emit a compare-and-branch.  rexw is 0 for a 32-bit comparison or
   P_REXW for 64-bit.  Comparisons against constant zero use "test r,r"
   where the resulting flags suffice (eq/ne via ZF, signed lt/ge via
   SF); other constants use an immediate cmp, and register operands a
   register-register cmp.  */
static void tcg_out_brcond(TCGContext *s, int cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, int rexw)
{
    int c;
    if (const_arg2) {
        if (arg2 == 0) {
            /* use test */
            switch(cond) {
            case TCG_COND_EQ:
                c = JCC_JE;
                break;
            case TCG_COND_NE:
                c = JCC_JNE;
                break;
            case TCG_COND_LT:
                /* signed < 0 is just the sign flag */
                c = JCC_JS;
                break;
            case TCG_COND_GE:
                c = JCC_JNS;
                break;
            default:
                goto do_cmpi;
            }
            /* test r, r */
            tcg_out_modrm(s, 0x85 | rexw, arg1, arg1);
            tcg_out_jxx(s, c, label_index);
        } else {
        do_cmpi:
            if (rexw)
                tgen_arithi64(s, ARITH_CMP, arg1, arg2);
            else
                tgen_arithi32(s, ARITH_CMP, arg1, arg2);
            tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
        }
    } else {
        /* cmp arg2, arg1 */
        tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3) | rexw, arg2, arg1);
        tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
    }
}
480
#if defined(CONFIG_SOFTMMU)
/* Softmmu slow-path load/store helpers, indexed by access size
   (log2 of the byte count: 0=b, 1=w, 2=l, 3=q).  They are called
   directly from generated code, hence the opaque prototypes.  */
extern void __ldb_mmu(void);
extern void __ldw_mmu(void);
extern void __ldl_mmu(void);
extern void __ldq_mmu(void);

extern void __stb_mmu(void);
extern void __stw_mmu(void);
extern void __stl_mmu(void);
extern void __stq_mmu(void);


static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif
507
/* Emit a guest memory load.  args = { data_reg, addr_reg, mem_index };
   opc bits 0-1 give log2 of the access size, bit 2 requests sign
   extension.  With softmmu, a TLB lookup is emitted inline: on a hit
   the host address is formed by adding the TLB addend to the guest
   address; on a miss the corresponding __ld*_mmu helper is called and
   its RAX result is extended into data_reg.  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap, rexw;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif

    data_reg = *args++;
    addr_reg = *args++;
    mem_index = *args;
    s_bits = opc & 3;

    /* Scratch registers; also the first two helper-call arguments.  */
    r0 = TCG_REG_RDI;
    r1 = TCG_REG_RSI;

#if TARGET_LONG_BITS == 32
    rexw = 0;
#else
    rexw = P_REXW;
#endif
#if defined(CONFIG_SOFTMMU)
    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r1, addr_reg);

    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);

    /* r1 = TLB index bits of the address.  */
    tcg_out_modrm(s, 0xc1 | rexw, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* r0 = page-aligned address; the low bits also catch accesses that
       would be misaligned for this size.  */
    tcg_out_modrm(s, 0x81 | rexw, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* lea offset(r1, env), r1 */
    tcg_out_modrm_offset2(s, 0x8d | P_REXW, r1, r1, TCG_AREG0, 0,
                          offsetof(CPUState, tlb_table[mem_index][0].addr_read));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b | rexw, r0, r1, 0);

    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* TLB miss: call the load helper (addr in RDI, index in RSI).  */
    /* XXX: move that code at the end of the TB */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RSI, mem_index);
    tcg_out8(s, 0xe8);
    tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
              (tcg_target_long)s->code_ptr - 4);

    /* Extend the helper result from RAX into data_reg.  */
    switch(opc) {
    case 0 | 4:
        /* movsbq */
        tcg_out_modrm(s, 0xbe | P_EXT | P_REXW, data_reg, TCG_REG_RAX);
        break;
    case 1 | 4:
        /* movswq */
        tcg_out_modrm(s, 0xbf | P_EXT | P_REXW, data_reg, TCG_REG_RAX);
        break;
    case 2 | 4:
        /* movslq */
        tcg_out_modrm(s, 0x63 | P_REXW, data_reg, TCG_REG_RAX);
        break;
    case 0:
    case 1:
    case 2:
    default:
        /* movl */
        tcg_out_modrm(s, 0x8b, data_reg, TCG_REG_RAX);
        break;
    case 3:
        tcg_out_mov(s, data_reg, TCG_REG_RAX);
        break;
    }

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* TLB hit: add the host-address addend.  */
    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, 0x03 | P_REXW, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* Perform the actual load from the host address in r0, byte
       swapping and sign/zero extending as required.  */
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);
        break;
    case 0 | 4:
        /* movsbX */
        tcg_out_modrm_offset(s, 0xbe | P_EXT | rexw, data_reg, r0, 0);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
        if (bswap) {
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);
        }
        break;
    case 1 | 4:
        if (bswap) {
            /* movzwl */
            tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);

            /* movswX data_reg, data_reg */
            tcg_out_modrm(s, 0xbf | P_EXT | rexw, data_reg, data_reg);
        } else {
            /* movswX */
            tcg_out_modrm_offset(s, 0xbf | P_EXT | rexw, data_reg, r0, 0);
        }
        break;
    case 2:
        /* movl (r0), data_reg */
        tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
        if (bswap) {
            /* bswap */
            tcg_out_opc(s, (0xc8 + (data_reg & 7)) | P_EXT, 0, data_reg, 0);
        }
        break;
    case 2 | 4:
        if (bswap) {
            /* movl (r0), data_reg */
            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
            /* bswap */
            tcg_out_opc(s, (0xc8 + (data_reg & 7)) | P_EXT, 0, data_reg, 0);
            /* movslq */
            tcg_out_modrm(s, 0x63 | P_REXW, data_reg, data_reg);
        } else {
            /* movslq */
            tcg_out_modrm_offset(s, 0x63 | P_REXW, data_reg, r0, 0);
        }
        break;
    case 3:
        /* movq (r0), data_reg */
        tcg_out_modrm_offset(s, 0x8b | P_REXW, data_reg, r0, 0);
        if (bswap) {
            /* bswap */
            tcg_out_opc(s, (0xc8 + (data_reg & 7)) | P_EXT | P_REXW, 0, data_reg, 0);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
684
/* Emit a guest memory store.  args = { data_reg, addr_reg, mem_index };
   opc is log2 of the access size.  Mirrors tcg_out_qemu_ld: with
   softmmu an inline TLB lookup (against addr_write) either falls
   through to a direct host store or calls the __st*_mmu helper with
   (addr, value, index) in RDI/RSI/RDX.  */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, r0, r1, mem_index, s_bits, bswap, rexw;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif

    data_reg = *args++;
    addr_reg = *args++;
    mem_index = *args;

    s_bits = opc;

    /* Scratch registers; also the first two helper-call arguments.  */
    r0 = TCG_REG_RDI;
    r1 = TCG_REG_RSI;

#if TARGET_LONG_BITS == 32
    rexw = 0;
#else
    rexw = P_REXW;
#endif
#if defined(CONFIG_SOFTMMU)
    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r1, addr_reg);

    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);

    /* r1 = TLB index bits of the address.  */
    tcg_out_modrm(s, 0xc1 | rexw, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* r0 = page-aligned address plus misalignment check bits.  */
    tcg_out_modrm(s, 0x81 | rexw, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* lea offset(r1, env), r1 */
    tcg_out_modrm_offset2(s, 0x8d | P_REXW, r1, r1, TCG_AREG0, 0,
                          offsetof(CPUState, tlb_table[mem_index][0].addr_write));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b | rexw, r0, r1, 0);

    /* mov */
    tcg_out_modrm(s, 0x8b | rexw, r0, addr_reg);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* TLB miss: narrow the value into RSI and call the store helper.  */
    /* XXX: move that code at the end of the TB */
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_RSI, data_reg);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_RSI, data_reg);
        break;
    case 2:
        /* movl */
        tcg_out_modrm(s, 0x8b, TCG_REG_RSI, data_reg);
        break;
    default:
    case 3:
        tcg_out_mov(s, TCG_REG_RSI, data_reg);
        break;
    }
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index);
    tcg_out8(s, 0xe8);
    tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
              (tcg_target_long)s->code_ptr - 4);

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* TLB hit: add the host-address addend.  */
    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, 0x03 | P_REXW, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* Perform the actual store to the host address in r0, byte swapping
       into scratch r1 first when needed.  */
    switch(opc) {
    case 0:
        /* movb */
        tcg_out_modrm_offset(s, 0x88 | P_REX, data_reg, r0, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_modrm(s, 0x8b, r1, data_reg); /* movl */
            tcg_out8(s, 0x66); /* rolw $8, %ecx */
            tcg_out_modrm(s, 0xc1, 0, r1);
            tcg_out8(s, 8);
            data_reg = r1;
        }
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 2:
        if (bswap) {
            tcg_out_modrm(s, 0x8b, r1, data_reg); /* movl */
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT, 0, r1, 0);
            data_reg = r1;
        }
        /* movl */
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 3:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT | P_REXW, 0, r1, 0);
            data_reg = r1;
        }
        /* movq */
        tcg_out_modrm_offset(s, 0x89 | P_REXW, data_reg, r0, 0);
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
828
/* Main code generator dispatch: emit host code for one TCG opcode with
   operands 'args' (const_args flags which operands are constants).
   Two-operand TCG ops rely on the constraint tables forcing the
   destination to alias the first source ("0" constraints).  */
static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch(opc) {
    case INDEX_op_exit_tb:
        /* Return args[0] to the TB epilogue in RAX.  */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, args[0]);
        tcg_out8(s, 0xc3); /* ret */
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, 0xe9); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            /* jmp Ev */
            tcg_out_modrm_offset(s, 0xff, 4, -1,
                                 (tcg_target_long)(s->tb_next +
                                                   args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            /* call rel32 */
            tcg_out8(s, 0xe8);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            /* call *reg */
            tcg_out_modrm(s, 0xff, 2, args[0]);
        }
        break;
    case INDEX_op_jmp:
        if (const_args[0]) {
            /* jmp rel32 */
            tcg_out8(s, 0xe9);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            /* jmp *reg */
            tcg_out_modrm(s, 0xff, 4, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0]);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        /* movsbl */
        tcg_out_modrm_offset(s, 0xbe | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i64:
        /* movsbq */
        tcg_out_modrm_offset(s, 0xbe | P_EXT | P_REXW, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        /* movswl */
        tcg_out_modrm_offset(s, 0xbf | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i64:
        /* movswq */
        tcg_out_modrm_offset(s, 0xbf | P_EXT | P_REXW, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        /* movl */
        tcg_out_modrm_offset(s, 0x8b, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        /* movslq */
        tcg_out_modrm_offset(s, 0x63 | P_REXW, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        /* movq */
        tcg_out_modrm_offset(s, 0x8b | P_REXW, args[0], args[1], args[2]);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        /* movb */
        tcg_out_modrm_offset(s, 0x88 | P_REX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        /* movl */
        tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        /* movq */
        tcg_out_modrm_offset(s, 0x89 | P_REXW, args[0], args[1], args[2]);
        break;

    /* 32-bit ALU ops: args[0] aliases the first source.  */
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith32;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith32;
    case INDEX_op_or_i32:
        c = ARITH_OR;
        goto gen_arith32;
    case INDEX_op_xor_i32:
        c = ARITH_XOR;
        goto gen_arith32;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
    gen_arith32:
        if (const_args[2]) {
            tgen_arithi32(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]);
        }
        break;

    /* 64-bit ALU ops, same pattern with REX.W.  */
    case INDEX_op_sub_i64:
        c = ARITH_SUB;
        goto gen_arith64;
    case INDEX_op_and_i64:
        c = ARITH_AND;
        goto gen_arith64;
    case INDEX_op_or_i64:
        c = ARITH_OR;
        goto gen_arith64;
    case INDEX_op_xor_i64:
        c = ARITH_XOR;
        goto gen_arith64;
    case INDEX_op_add_i64:
        c = ARITH_ADD;
    gen_arith64:
        if (const_args[2]) {
            tgen_arithi64(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, 0x01 | (c << 3) | P_REXW, args[2], args[0]);
        }
        break;

    case INDEX_op_mul_i32:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                /* imul imm8 */
                tcg_out_modrm(s, 0x6b, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                /* imul imm32 */
                tcg_out_modrm(s, 0x69, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            /* imul r, r/m */
            tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]);
        }
        break;
    case INDEX_op_mul_i64:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                tcg_out_modrm(s, 0x6b | P_REXW, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                tcg_out_modrm(s, 0x69 | P_REXW, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            tcg_out_modrm(s, 0xaf | P_EXT | P_REXW, args[0], args[2]);
        }
        break;
    /* div/divu: the constraints fix the implicit RDX:RAX operands;
       args[4] is the divisor register.  */
    case INDEX_op_div2_i32:
        tcg_out_modrm(s, 0xf7, 7, args[4]); /* idiv */
        break;
    case INDEX_op_divu2_i32:
        tcg_out_modrm(s, 0xf7, 6, args[4]); /* div */
        break;
    case INDEX_op_div2_i64:
        tcg_out_modrm(s, 0xf7 | P_REXW, 7, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_modrm(s, 0xf7 | P_REXW, 6, args[4]);
        break;

    /* Shifts: constant counts use 0xd1 (by 1) or 0xc1 (imm8); variable
       counts use 0xd3 with the count in CL (enforced by constraints).  */
    case INDEX_op_shl_i32:
        c = SHIFT_SHL;
    gen_shift32:
        if (const_args[2]) {
            if (args[2] == 1) {
                tcg_out_modrm(s, 0xd1, c, args[0]);
            } else {
                tcg_out_modrm(s, 0xc1, c, args[0]);
                tcg_out8(s, args[2]);
            }
        } else {
            tcg_out_modrm(s, 0xd3, c, args[0]);
        }
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SHR;
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SAR;
        goto gen_shift32;

    case INDEX_op_shl_i64:
        c = SHIFT_SHL;
    gen_shift64:
        if (const_args[2]) {
            if (args[2] == 1) {
                tcg_out_modrm(s, 0xd1 | P_REXW, c, args[0]);
            } else {
                tcg_out_modrm(s, 0xc1 | P_REXW, c, args[0]);
                tcg_out8(s, args[2]);
            }
        } else {
            tcg_out_modrm(s, 0xd3 | P_REXW, c, args[0]);
        }
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SHR;
        goto gen_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SAR;
        goto gen_shift64;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], 0);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], P_REXW);
        break;

    case INDEX_op_bswap_i32:
        tcg_out_opc(s, (0xc8 + (args[0] & 7)) | P_EXT, 0, args[0], 0);
        break;
    case INDEX_op_bswap_i64:
        tcg_out_opc(s, (0xc8 + (args[0] & 7)) | P_EXT | P_REXW, 0, args[0], 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        tcg_abort();
    }
}
1124
/* Operand constraints per TCG opcode.  "r" = any register, "0"/"1" =
   alias of that output, "a"/"b"/"c"/"d"/"S"/"D" = fixed register,
   "e"/"Z" = sign-/zero-extendable 32-bit constant, "i" = any constant,
   "L" = register usable in qemu_ld/st (excludes RSI/RDI).
   Terminated by the { -1 } sentinel.  */
static const TCGTargetOpDef x86_64_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } }, /* XXX: might need a specific constant constraint */
    { INDEX_op_jmp, { "ri" } }, /* XXX: might need a specific constant constraint */
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "0", "re" } },
    { INDEX_op_mul_i64, { "r", "0", "re" } },
    { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "0", "re" } },
    { INDEX_op_and_i64, { "r", "0", "reZ" } },
    { INDEX_op_or_i64, { "r", "0", "re" } },
    { INDEX_op_xor_i64, { "r", "0", "re" } },

    { INDEX_op_shl_i64, { "r", "0", "ci" } },
    { INDEX_op_shr_i64, { "r", "0", "ci" } },
    { INDEX_op_sar_i64, { "r", "0", "ci" } },

    { INDEX_op_brcond_i64, { "r", "re" } },

    { INDEX_op_bswap_i32, { "r", "0" } },
    { INDEX_op_bswap_i64, { "r", "0" } },

    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },

    { -1 },
};
1205
/* One-time backend initialization: available registers, the set
   clobbered by function calls, reserved registers, and the operand
   constraint table.  */
void tcg_target_init(TCGContext *s)
{
    /* All 16 registers can hold both 32- and 64-bit values.  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
    /* Registers not preserved across a call.  */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_RDI) |
                     (1 << TCG_REG_RSI) |
                     (1 << TCG_REG_RDX) |
                     (1 << TCG_REG_RCX) |
                     (1 << TCG_REG_R8) |
                     (1 << TCG_REG_R9) |
                     (1 << TCG_REG_RAX) |
                     (1 << TCG_REG_R10) |
                     (1 << TCG_REG_R11));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RSP);
    /* XXX: will be suppresed when proper global TB entry code will be
       generated */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RBX);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RBP);

    tcg_add_target_add_op_defs(x86_64_op_defs);
}