/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "tcg-target.h"
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
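
/* Worked example (informative): with MAX_OPC_PARAM_ARGS = 6, a 64-bit
 * host (MAX_OPC_PARAM_PER_ARG = 1) gets MAX_OPC_PARAM = 4 + 1 * 6 = 10
 * and OPPARAM_BUF_SIZE = 640 * 10 = 6400; a 32-bit host gets
 * MAX_OPC_PARAM = 4 + 2 * 6 = 16 and OPPARAM_BUF_SIZE = 10240.  Both
 * fit the 1 << 14 bound checked by QEMU_BUILD_BUG_ON further below.
 */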
#define CPU_TEMP_BUF_NLONGS 128
/* Default target word size to pointer size.  */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif
#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif
#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif
#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros.  */
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_deposit_i64      0
#define TCG_TARGET_HAS_movcond_i64      0
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
#define TCG_TARGET_HAS_muls2_i64        0
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
#endif
#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
#define TCG_TARGET_HAS_rem_i32          0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
#define TCG_TARGET_HAS_rem_i64          0
#endif
/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif
#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;
#define tcg_regset_clear(d) (d) = 0
#define tcg_regset_set(d, s) (d) = (s)
#define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg)
#define tcg_regset_set_reg(d, r) (d) |= 1L << (r)
#define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r))
#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1)
#define tcg_regset_or(d, a, b) (d) = (a) | (b)
#define tcg_regset_and(d, a, b) (d) = (a) & (b)
#define tcg_regset_andnot(d, a, b) (d) = (a) & ~(b)
#define tcg_regset_not(d, a) (d) = ~(a)
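
/* Example (illustrative): a backend can reserve its stack pointer so the
 * register allocator never hands it out:
 *
 *     TCGRegSet reserved;
 *     tcg_regset_clear(reserved);
 *     tcg_regset_set_reg(reserved, TCG_REG_CALL_STACK);
 *
 * TCG_REG_CALL_STACK is supplied by each tcg-target.h; backends do this
 * with tcg_ctx.reserved_regs in their target init code.
 */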
#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif
#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#elif QEMU_GNUC_PREREQ(4, 5)
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#else
# define tcg_debug_assert(X) do { (void)(X); } while (0)
#endif
typedef struct TCGRelocation {
    struct TCGRelocation *next;
    int type;
    tcg_insn_unit *ptr;
    intptr_t addend;
} TCGRelocation;
typedef struct TCGLabel {
    unsigned has_value : 1;
    unsigned id : 31;
    union {
        uintptr_t value;
        tcg_insn_unit *value_ptr;
        TCGRelocation *first_reloc;
    } u;
} TCGLabel;
typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[0] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768
#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128
typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,
    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;
/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
typedef enum TCGMemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,   /* Mask for the above.  */

    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */

    MO_BSWAP = 8,   /* Host reverse endian.  */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif

    /* MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) need an address that is aligned
     * to a size larger than the access itself.  QEMU's existing low-cost
     * alignment check is sufficient for such a check; what must be added
     * is a way to specify the alignment size.
     *
     * MO_ALIGN assumes a natural alignment (i.e. the alignment size is
     * the size of the memory access).  Note that the alignment size must
     * be equal to or greater than the access size.
     *
     * There are three options:
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be equal to or
     *   greater than the access size (MO_ALIGN_x where 'x' is a size
     *   in bytes);
     * - unaligned access permitted (MO_UNALN).
     */
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
#ifdef ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,
    /* Combinations of the above, for ease of use.  */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_Q     = MO_64,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LEQ   = MO_LE | MO_Q,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BEQ   = MO_BE | MO_Q,

    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TEQ   = MO_TE | MO_Q,

    MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;
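
/* Reading a combination (informative): MO_TEUL is a zero-extended 32-bit
 * access in the target's endianness.  MO_TE resolves to MO_LE or MO_BE at
 * compile time, so on a big-endian host emulating a little-endian target
 * MO_TEUL carries MO_BSWAP and the backend emits a byte-swapped load.
 */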
/**
 * get_alignment_bits
 * @memop: TCGMemOp value
 *
 * Extract the alignment size from the memop.
 *
 * Returns: 0 in case of byte access (which is always aligned);
 *          positive value - number of alignment bits;
 *          negative value if unaligned access enabled
 *          and this is not a byte access.
 */
static inline int get_alignment_bits(TCGMemOp memop)
{
    int a = memop & MO_AMASK;
    int s = memop & MO_SIZE;
    int r;

    if (a == MO_UNALN) {
        /* Negative value if unaligned access enabled,
         * or zero value in case of byte access.
         */
        r = -s;
    } else if (a == MO_ALIGN) {
        /* A natural alignment: return a number of access size bits.  */
        r = s;
    } else {
        /* Specific alignment size.  It must be equal to or greater
         * than the access size.
         */
        r = a >> MO_ASHIFT;
        tcg_debug_assert(r >= s);
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << r) - 1)) == 0);
#endif
    return r;
}
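
/* Example (illustrative, assuming ALIGNED_ONLY is not defined):
 *   get_alignment_bits(MO_ALIGN | MO_32)   == 2   natural 4-byte alignment
 *   get_alignment_bits(MO_ALIGN_8 | MO_16) == 3   8-byte alignment, 2-byte access
 *   get_alignment_bits(MO_UNALN | MO_64)   == -3  unaligned 8-byte access
 */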
typedef tcg_target_ulong TCGArg;
/* Define a type and accessor macros for variables.  Using pointer types
   is nice because it gives some level of type safety.  Converting to and
   from intptr_t rather than int reduces the number of sign-extension
   instructions that get implied on 64-bit hosts.  Users of tcg_gen_* don't
   need to know about any of this, and should treat TCGv as an opaque type.
   In addition we do typechecking for different types of variables.  TCGv_i32
   and TCGv_i64 are 32/64-bit variables respectively.  TCGv and TCGv_ptr
   are aliases for target_ulong and host pointer sized values respectively.  */
typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif
static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i)
{
    return (TCGv_i32)i;
}

static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i)
{
    return (TCGv_i64)i;
}

static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i)
{
    return (TCGv_ptr)i;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t)
{
    return (intptr_t)t;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t)
{
    return (intptr_t)t;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
{
    return (intptr_t)t;
}
#if TCG_TARGET_REG_BITS == 32
#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t))
#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1)
#endif
#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b))

/* Dummy definition to avoid compiler warnings.  */
#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1)
#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1)
#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1)

#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1)
#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1)
#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1)
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS    0x0010
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0020
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0040

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/* used to align parameters */
#define TCG_CALL_DUMMY_TCGV     MAKE_TCGV_I32(-1)
#define TCG_CALL_DUMMY_ARG      ((TCGArg)(-1))
/* Conditions.  Note that these are laid out for easy manipulation by
   the functions below:
     bit 0 is used for inverting;
     bit 1 is signed,
     bit 2 is unsigned,
     bit 3 is used with bit 0 for swapping signed/unsigned.  */
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;
/* Invert the sense of the comparison.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison.  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}

/* Must a comparison be considered unsigned?  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}

/* Create a "high" version of a double-word comparison.
   This removes equality from a LTE or GTE comparison.  */
static inline TCGCond tcg_high_cond(TCGCond c)
{
    switch (c) {
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
        return (TCGCond)(c ^ 8);
    default:
        return c;
    }
}
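
/* Worked example (informative): TCG_COND_LT = 0|0|2|0 = 2.  Flipping
 * bit 0 gives 3 = TCG_COND_GE (tcg_invert_cond); xor with 9 gives
 * 11 = TCG_COND_GT, i.e. "a < b" becomes "b > a" (tcg_swap_cond);
 * xor with 6 trades the signed bit for the unsigned bit, giving
 * 4 = TCG_COND_LTU (tcg_unsigned_cond).
 */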
typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;
typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    unsigned int fixed_reg:1;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    unsigned int temp_local:1; /* If true, the temp is saved across
                                  basic blocks. Otherwise, it is not
                                  preserved across basic blocks. */
    unsigned int temp_allocated:1; /* never used for code gen */

    tcg_target_long val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;
} TCGTemp;
typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;
/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
typedef uint16_t TCGLifeData;
/* The layout here is designed to avoid crossing of a 32-bit boundary.
   If we do so, gcc adds padding, expanding the size to 12.  */
typedef struct TCGOp {
    TCGOpcode opc   : 8;        /*  8 */

    /* Index of the prev/next op, or 0 for the end of the list.  */
    unsigned prev   : 10;       /* 18 */
    unsigned next   : 10;       /* 28 */

    /* The number of out and in parameters for a call.  */
    unsigned calli  : 4;        /* 32 */
    unsigned callo  : 2;        /* 34 */

    /* Index of the arguments for this op, or 0 for zero-operand ops.  */
    unsigned args   : 14;       /* 48 */

    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 64 */
} TCGOp;
/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 10));
QEMU_BUILD_BUG_ON(OPPARAM_BUF_SIZE > (1 << 14));

/* Make sure that we don't overflow 64 bits without noticing.  */
QEMU_BUILD_BUG_ON(sizeof(TCGOp) > 8);
struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */

    TCGRegSet reserved_regs;
    intptr_t current_frame_offset;
    intptr_t frame_start;

    tcg_insn_unit *code_ptr;

#ifdef CONFIG_PROFILER
    /* profiling info */
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int64_t del_op_count;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t restore_count;
    int64_t restore_time;
#endif

#ifdef CONFIG_DEBUG_TCG
    int goto_tb_issue_mask;
#endif

    int gen_next_op_idx;
    int gen_next_parm_idx;

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    int code_gen_max_blocks;
    void *code_gen_prologue;
    void *code_gen_buffer;
    size_t code_gen_buffer_size;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */
    TCGv_env tcg_env;                   /* *_exec  */

    /* The TCGBackendData structure is private to tcg-target.inc.c.  */
    struct TCGBackendData *be;

    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    TCGOp gen_op_buf[OPC_BUF_SIZE];
    TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};
extern TCGContext tcg_ctx;

static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
{
    int op_argi = tcg_ctx.gen_op_buf[op_idx].args;
    tcg_ctx.gen_opparam_buf[op_argi + arg] = v;
}
/* The number of opcodes emitted so far.  */
static inline int tcg_op_buf_count(void)
{
    return tcg_ctx.gen_next_op_idx;
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    return tcg_op_buf_count() >= OPC_MAX_SIZE;
}
/* pool based memory allocation */

void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);
void tcg_pool_delete(TCGContext *s);

void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);
static inline void *tcg_malloc(int size)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *ptr, *ptr_end;
    size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(&tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}
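
/* Note (informative): tcg_malloc is a simple bump allocator.  Rounding
 * the size up to a multiple of sizeof(long) keeps allocations aligned,
 * and tcg_malloc_internal() only runs on the slow path, chaining in a
 * new TCGPool chunk when the current one is exhausted.  The whole pool
 * is released in one go via tcg_pool_reset().
 */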
void tcg_context_init(TCGContext *s);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

int tcg_global_mem_new_internal(TCGType, TCGv_ptr, intptr_t, const char *);

TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name);
TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name);

TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);

void tcg_temp_free_i32(TCGv_i32 arg);
void tcg_temp_free_i64(TCGv_i64 arg);
static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return MAKE_TCGV_I32(idx);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    return tcg_temp_new_internal_i32(0);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    return tcg_temp_new_internal_i32(1);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return MAKE_TCGV_I64(idx);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    return tcg_temp_new_internal_i64(0);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    return tcg_temp_new_internal_i64(1);
}
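
/* Example (illustrative): a front end typically wires up one global per
 * guest register at init time, e.g.
 *
 *     TCGv_i64 cpu_pc = tcg_global_mem_new_i64(cpu_env,
 *                           offsetof(CPUArchState, pc), "pc");
 *
 * (the "pc" field and cpu_env variable are placeholders here), while
 * short-lived values inside a TB come from tcg_temp_new_i64() and are
 * returned with tcg_temp_free_i64().
 */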
#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);

#define TCG_CT_ALIAS  0x80
#define TCG_CT_IALIAS 0x40
#define TCG_CT_REG    0x01
#define TCG_CT_CONST  0x02 /* any constant of register size */
typedef struct TCGArgConstraint {
    uint16_t ct;
    uint8_t alias_index;
    union {
        TCGRegSet regs;
    } u;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16
/* Bits for TCGOpDef->flags, 8 bits available.  */
enum {
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x01,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x02,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x04,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x08,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x10,
};
typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
    int *sorted_args;
#if defined(CONFIG_DEBUG_TCG)
    int used;
#endif
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;
typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;
#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
#if UINTPTR_MAX == UINT32_MAX
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n))

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
    TCGV_NAT_TO_PTR(tcg_global_reg_new_i32((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
#else /* UINTPTR_MAX == UINT64_MAX */
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n))

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
    TCGV_NAT_TO_PTR(tcg_global_reg_new_i64((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
#endif /* UINTPTR_MAX == UINT32_MAX */
void tcg_gen_callN(TCGContext *s, void *func,
                   TCGArg ret, int nargs, TCGArg *args);

void tcg_op_remove(TCGContext *s, TCGOp *op);
void tcg_optimize(TCGContext *s);

/* only used for debugging purposes */
void tcg_dump_ops(TCGContext *s);

void dump_ops(const uint16_t *opc_buf, const TCGArg *opparam_buf);
TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);
TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */
static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */
static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}
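
/* Example (illustrative): a branch op stores the label in its argument
 * stream and recovers it when the backend resolves the jump:
 *
 *     TCGLabel *l = gen_new_label();
 *     TCGArg a = label_arg(l);
 *     ... later, arg_label(a) == l; the round trip is exact.
 */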
/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */
static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return a - b;
}
/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */
static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
{
    return tcg_ptr_byte_diff(target, s->code_ptr);
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */
static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}
/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;

/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
{
    return (TCGMemOp)(oi >> 4);
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}
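
/* Example (illustrative): the encoding is a plain shift-and-or, so the
 * accessors invert it exactly:
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_TEUL, 1);
 *     ... get_memop(oi) == MO_TEUL and get_mmuidx(oi) == 1.
 */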
/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK 3
#define TB_EXIT_IDX0 0
#define TB_EXIT_IDX1 1
#define TB_EXIT_ICOUNT_EXPIRED 2
#define TB_EXIT_REQUESTED 3
#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr)
#endif
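
/* Example (illustrative): the main loop splits the returned value into
 * the TB pointer and the exit reason:
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int exit_reason = ret & TB_EXIT_MASK;
 *
 * exit_reason is then compared against the TB_EXIT_* values above.
 */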
void tcg_register_jit(void *buf, size_t buf_size);
/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);

/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
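
/* Example (illustrative): a backend's slow path for a little-endian
 * 32-bit qemu_ld amounts to a call of the form
 *
 *     helper_le_ldul_mmu(env, addr, make_memop_idx(MO_LEUL, mmu_idx), ra);
 *
 * where ra is the return address into the generated code, used to
 * restore CPU state if the access faults.
 */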
uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
#endif

#endif /* CONFIG_SOFTMMU */