/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "tcg/tcg.h"           /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include "qemu/compiler.h"
#include <ffi.h>

/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif
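
/*
 * The TB "return address" visible to helper functions: the interpreter
 * publishes the current bytecode position here before calling out (see
 * the INDEX_op_call handling below).
 */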
__thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
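
/* For example, tci_uint64(0x11223344, 0x55667788) == 0x1122334455667788. */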

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */
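
/*
 * Every TCI instruction is a single 32-bit word: the opcode occupies bits
 * [0, 8) and the operand fields are packed above it, register numbers
 * being 4 bits wide, as the extract32() offsets and lengths below show.
 */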

static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;

    switch (condition) {
    case TCG_COND_EQ:  result = (u0 == u1); break;
    case TCG_COND_NE:  result = (u0 != u1); break;
    case TCG_COND_LT:  result = (i0 < i1);  break;
    case TCG_COND_GE:  result = (i0 >= i1); break;
    case TCG_COND_LE:  result = (i0 <= i1); break;
    case TCG_COND_GT:  result = (i0 > i1);  break;
    case TCG_COND_LTU: result = (u0 < u1);  break;
    case TCG_COND_GEU: result = (u0 >= u1); break;
    case TCG_COND_LEU: result = (u0 <= u1); break;
    case TCG_COND_GTU: result = (u0 > u1);  break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;

    switch (condition) {
    case TCG_COND_EQ:  result = (u0 == u1); break;
    case TCG_COND_NE:  result = (u0 != u1); break;
    case TCG_COND_LT:  result = (i0 < i1);  break;
    case TCG_COND_GE:  result = (i0 >= i1); break;
    case TCG_COND_LE:  result = (i0 <= i1); break;
    case TCG_COND_GT:  result = (i0 > i1);  break;
    case TCG_COND_LTU: result = (u0 < u1);  break;
    case TCG_COND_GEU: result = (u0 >= u1); break;
    case TCG_COND_LEU: result = (u0 <= u1); break;
    case TCG_COND_GTU: result = (u0 > u1);  break;
    default:
        g_assert_not_reached();
    }
    return result;
}
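
/*
 * These two helpers evaluate a TCGCond predicate on 32-bit and 64-bit
 * operands (signed predicates via the int32_t/int64_t reinterpretation);
 * the setcond, movcond and setcond2 cases below build on them.
 */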

static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;
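
    /*
     * Two paths: with SOFTMMU the access is routed through the TLB load
     * helpers; in user mode the guest address maps directly to host
     * memory via g2h(), with an explicit alignment check in place of
     * the MMU.
     */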
#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:   return helper_ret_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:   return helper_ret_ldsb_mmu(env, taddr, oi, ra);
    case MO_LEUW: return helper_le_lduw_mmu(env, taddr, oi, ra);
    case MO_LESW: return helper_le_ldsw_mmu(env, taddr, oi, ra);
    case MO_LEUL: return helper_le_ldul_mmu(env, taddr, oi, ra);
    case MO_LESL: return helper_le_ldsl_mmu(env, taddr, oi, ra);
    case MO_LEUQ: return helper_le_ldq_mmu(env, taddr, oi, ra);
    case MO_BEUW: return helper_be_lduw_mmu(env, taddr, oi, ra);
    case MO_BESW: return helper_be_ldsw_mmu(env, taddr, oi, ra);
    case MO_BEUL: return helper_be_ldul_mmu(env, taddr, oi, ra);
    case MO_BESL: return helper_be_ldsl_mmu(env, taddr, oi, ra);
    case MO_BEUQ: return helper_be_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
    uint64_t ret;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_ld(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:   ret = ldub_p(haddr);             break;
    case MO_SB:   ret = ldsb_p(haddr);             break;
    case MO_LEUW: ret = lduw_le_p(haddr);          break;
    case MO_LESW: ret = ldsw_le_p(haddr);          break;
    case MO_LEUL: ret = (uint32_t)ldl_le_p(haddr); break;
    case MO_LESL: ret = (int32_t)ldl_le_p(haddr);  break;
    case MO_LEUQ: ret = ldq_le_p(haddr);           break;
    case MO_BEUW: ret = lduw_be_p(haddr);          break;
    case MO_BESW: ret = ldsw_be_p(haddr);          break;
    case MO_BEUL: ret = (uint32_t)ldl_be_p(haddr); break;
    case MO_BESL: ret = (int32_t)ldl_be_p(haddr);  break;
    case MO_BEUQ: ret = ldq_be_p(haddr);           break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
    return ret;
#endif
}

static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;
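
    /*
     * Mirrors tci_qemu_ld above: TLB store helpers under SOFTMMU,
     * direct host access plus an alignment check in user mode.
     */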
#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:   helper_ret_stb_mmu(env, taddr, val, oi, ra); break;
    case MO_LEUW: helper_le_stw_mmu(env, taddr, val, oi, ra);  break;
    case MO_LEUL: helper_le_stl_mmu(env, taddr, val, oi, ra);  break;
    case MO_LEUQ: helper_le_stq_mmu(env, taddr, val, oi, ra);  break;
    case MO_BEUW: helper_be_stw_mmu(env, taddr, val, oi, ra);  break;
    case MO_BEUL: helper_be_stl_mmu(env, taddr, val, oi, ra);  break;
    case MO_BEUQ: helper_be_stq_mmu(env, taddr, val, oi, ra);  break;
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_st(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:   stb_p(haddr, val);    break;
    case MO_LEUW: stw_le_p(haddr, val); break;
    case MO_LEUL: stl_le_p(haddr, val); break;
    case MO_LEUQ: stq_le_p(haddr, val); break;
    case MO_BEUW: stw_be_p(haddr, val); break;
    case MO_BEUL: stl_be_p(haddr, val); break;
    case MO_BEUQ: stq_be_p(haddr, val); break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
#endif
}
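
/*
 * CASE_32_64(x) expands to case labels for both the _i32 and _i64
 * variant of an opcode on 64-bit hosts, and to just the _i32 label on
 * 32-bit hosts; CASE_64(x) likewise disappears on 32-bit hosts.
 */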
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i64):    \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)                             \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x)                          \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];
    void *call_slots[TCG_STATIC_CALL_ARGS_SIZE / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    /* Other call_slots entries initialized at first use (see below). */
    call_slots[0] = NULL;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        target_ulong taddr;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            /*
             * Set up the ffi_avalue array once, delayed until now
             * because many TB's do not make any calls.  In tcg_gen_callN,
             * we arranged for every real argument to be "left-aligned"
             * in each 64-bit slot.
             */
            if (unlikely(call_slots[0] == NULL)) {
                for (int i = 0; i < ARRAY_SIZE(call_slots); ++i) {
                    call_slots[i] = &stack[i];
                }
            }

            tci_args_nl(insn, tb_ptr, &len, &ptr);

            /* Helper functions may need to access the "return address" */
            tci_tb_ptr = (uintptr_t)tb_ptr;

            {
                void **pptr = ptr;
                ffi_call(pptr[1], pptr[0], stack, call_slots);
            }

            /* Any result winds up "left-aligned" in the stack[0] slot. */
            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 4) {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                    break;
                }
                /* fall through */
            case 2: /* uint64_t */
                if (TCG_TARGET_REG_BITS == 32) {
                    tci_write_reg64(regs, TCG_REG_R1, TCG_REG_R0, stack[0]);
                } else {
                    regs[TCG_REG_R0] = stack[0];
                }
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            break;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

            /* Shift/rotate operations (32 bit). */

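        /*
         * Shift and rotate counts below are masked to the operand width
         * (& 31 here, & 63 for the 64-bit ops), so oversized counts wrap
         * instead of hitting C's undefined shift behavior.
         */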
        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif

#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
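        /*
         * For the double-word add2/sub2 below, the low-half carry or
         * borrow is recovered by an unsigned comparison: T1 < regs[r2]
         * exactly when the low-half addition wrapped.
         */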
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            regs[r0] = tmp32;
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = regs[r2];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = regs[r0];
            tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
                tmp64 = regs[r0];
            } else {
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    taddr = regs[r2];
                } else {
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                    oi = regs[r4];
                }
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len = 1;
    do_qemu_ldst:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}