/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 /* define it to use liveness analysis (better code) */
26 #define USE_LIVENESS_ANALYSIS
27 #define USE_TCG_OPTIMIZATIONS
/* Define to dump the ELF file used to communicate with GDB. */
34 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
35 /* define it to suppress various consistency checks (faster) */
39 #include "qemu-common.h"
40 #include "qemu/host-utils.h"
41 #include "qemu/timer.h"
/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions. */
46 #define NO_CPU_IO_DEFS
51 #if UINTPTR_MAX == UINT32_MAX
52 # define ELF_CLASS ELFCLASS32
54 # define ELF_CLASS ELFCLASS64
56 #ifdef HOST_WORDS_BIGENDIAN
57 # define ELF_DATA ELFDATA2MSB
59 # define ELF_DATA ELFDATA2LSB
64 /* Forward declarations for functions declared in tcg-target.c and used here. */
65 static void tcg_target_init(TCGContext *s);
66 static void tcg_target_qemu_prologue(TCGContext *s);
67 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
68 intptr_t value, intptr_t addend);
70 /* The CIE and FDE header definitions will be common to all hosts. */
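/* In DWARF terms, the single CIE provides the unwind rules shared by all
   frame descriptions, and the FDE that follows it covers one contiguous
   range of generated code (here, the whole code_gen_buffer). */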
72 uint32_t len __attribute__((aligned((sizeof(void *)))));
78 uint8_t return_column;
81 typedef struct QEMU_PACKED {
82 uint32_t len __attribute__((aligned((sizeof(void *)))));
86 } DebugFrameFDEHeader;
88 typedef struct QEMU_PACKED {
90 DebugFrameFDEHeader fde;
93 static void tcg_register_jit_int(void *buf, size_t size,
94 const void *debug_frame,
95 size_t debug_frame_size)
96 __attribute__((unused));
98 /* Forward declarations for functions declared and used in tcg-target.c. */
99 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
100 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
102 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
103 static void tcg_out_movi(TCGContext *s, TCGType type,
104 TCGReg ret, tcg_target_long arg);
105 static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
106 const int *const_args);
107 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
109 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
110 static int tcg_target_const_match(tcg_target_long val, TCGType type,
111 const TCGArgConstraint *arg_ct);
112 static void tcg_out_tb_init(TCGContext *s);
113 static void tcg_out_tb_finalize(TCGContext *s);
117 static TCGRegSet tcg_target_available_regs[2];
118 static TCGRegSet tcg_target_call_clobber_regs;
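/* The tcg_out{8,16,32,64} helpers below append data to the generated code
   stream at s->code_ptr in multiples of tcg_insn_unit; the matching
   tcg_patch{8,16,32,64} helpers rewrite previously emitted words, which is
   how relocations get resolved. */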
120 #if TCG_TARGET_INSN_UNIT_SIZE == 1
121 static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
126 static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
133 #if TCG_TARGET_INSN_UNIT_SIZE <= 2
134 static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
136 if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
139 tcg_insn_unit *p = s->code_ptr;
140 memcpy(p, &v, sizeof(v));
141 s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
145 static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
148 if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
151 memcpy(p, &v, sizeof(v));
156 #if TCG_TARGET_INSN_UNIT_SIZE <= 4
157 static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
159 if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
162 tcg_insn_unit *p = s->code_ptr;
163 memcpy(p, &v, sizeof(v));
164 s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
168 static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
171 if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
174 memcpy(p, &v, sizeof(v));
179 #if TCG_TARGET_INSN_UNIT_SIZE <= 8
180 static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
182 if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
185 tcg_insn_unit *p = s->code_ptr;
186 memcpy(p, &v, sizeof(v));
187 s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
191 static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
194 if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
197 memcpy(p, &v, sizeof(v));
202 /* label relocation processing */
204 static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
205 TCGLabel *l, intptr_t addend)
210 /* FIXME: This may break relocations on RISC targets that
211 modify instruction fields in place. The caller may not have
212 written the initial value. */
213 patch_reloc(code_ptr, type, l->u.value, addend);
215 /* add a new relocation entry */
216 r = tcg_malloc(sizeof(TCGRelocation));
220 r->next = l->u.first_reloc;
221 l->u.first_reloc = r;
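/* Relocation is two-phase: if the label already has a known value when
   tcg_out_reloc is called, the instruction is patched immediately;
   otherwise a TCGRelocation record is queued on the label and applied by
   tcg_out_label once the label's address is emitted. */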
225 static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
227 intptr_t value = (intptr_t)ptr;
230 assert(!l->has_value);
232 for (r = l->u.first_reloc; r != NULL; r = r->next) {
233 patch_reloc(r->ptr, r->type, value, r->addend);
237 l->u.value_ptr = ptr;
240 TCGLabel *gen_new_label(void)
242 TCGContext *s = &tcg_ctx;
243 TCGLabel *l = tcg_malloc(sizeof(TCGLabel));
252 #include "tcg-target.c"
254 /* pool based memory allocation */
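/* This is a simple per-translation arena: small requests are carved
   sequentially out of TCG_POOL_CHUNK_SIZE chunks, oversized requests get a
   dedicated pool chained on pool_first_large, and everything is released at
   once by tcg_pool_reset() - there is no individual free. */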
255 void *tcg_malloc_internal(TCGContext *s, int size)
260 if (size > TCG_POOL_CHUNK_SIZE) {
261 /* big malloc: insert a new pool (XXX: could optimize) */
262 p = g_malloc(sizeof(TCGPool) + size);
264 p->next = s->pool_first_large;
265 s->pool_first_large = p;
276 pool_size = TCG_POOL_CHUNK_SIZE;
277 p = g_malloc(sizeof(TCGPool) + pool_size);
281 s->pool_current->next = p;
290 s->pool_cur = p->data + size;
291 s->pool_end = p->data + p->size;
295 void tcg_pool_reset(TCGContext *s)
298 for (p = s->pool_first_large; p; p = t) {
302 s->pool_first_large = NULL;
303 s->pool_cur = s->pool_end = NULL;
304 s->pool_current = NULL;
307 typedef struct TCGHelperInfo {
314 #include "exec/helper-proto.h"
316 static const TCGHelperInfo all_helpers[] = {
317 #include "exec/helper-tcg.h"
320 void tcg_context_init(TCGContext *s)
322 int op, total_args, n, i;
324 TCGArgConstraint *args_ct;
326 GHashTable *helper_table;
328 memset(s, 0, sizeof(*s));
/* Count total number of arguments and allocate the corresponding
   space. */
334 for(op = 0; op < NB_OPS; op++) {
335 def = &tcg_op_defs[op];
336 n = def->nb_iargs + def->nb_oargs;
340 args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
341 sorted_args = g_malloc(sizeof(int) * total_args);
343 for(op = 0; op < NB_OPS; op++) {
344 def = &tcg_op_defs[op];
345 def->args_ct = args_ct;
346 def->sorted_args = sorted_args;
347 n = def->nb_iargs + def->nb_oargs;
352 /* Register helpers. */
353 /* Use g_direct_hash/equal for direct pointer comparisons on func. */
354 s->helpers = helper_table = g_hash_table_new(NULL, NULL);
356 for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
357 g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
358 (gpointer)&all_helpers[i]);
364 void tcg_prologue_init(TCGContext *s)
366 size_t prologue_size, total_size;
369 /* Put the prologue at the beginning of code_gen_buffer. */
370 buf0 = s->code_gen_buffer;
373 s->code_gen_prologue = buf0;
375 /* Generate the prologue. */
376 tcg_target_qemu_prologue(s);
378 flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);
380 /* Deduct the prologue from the buffer. */
381 prologue_size = tcg_current_code_size(s);
382 s->code_gen_ptr = buf1;
383 s->code_gen_buffer = buf1;
385 total_size = s->code_gen_buffer_size - prologue_size;
386 s->code_gen_buffer_size = total_size;
388 /* Compute a high-water mark, at which we voluntarily flush the
389 buffer and start over. */
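/* The margin is sized for the worst-case expansion of a single TB
   (TCG_MAX_OP_SIZE bytes of host code per op, for up to OPC_BUF_SIZE ops),
   so generating the current TB can always complete before the flush. */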
390 s->code_gen_buffer_max_size = total_size - TCG_MAX_OP_SIZE * OPC_BUF_SIZE;
392 tcg_register_jit(s->code_gen_buffer, total_size);
395 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
396 qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
397 log_disas(buf0, prologue_size);
404 void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size)
406 s->frame_start = start;
407 s->frame_end = start + size;
411 void tcg_func_start(TCGContext *s)
414 s->nb_temps = s->nb_globals;
416 /* No temps have been previously allocated for size or locality. */
417 memset(s->free_temps, 0, sizeof(s->free_temps));
420 s->current_frame_offset = s->frame_start;
422 #ifdef CONFIG_DEBUG_TCG
423 s->goto_tb_issue_mask = 0;
426 s->gen_first_op_idx = 0;
427 s->gen_last_op_idx = -1;
428 s->gen_next_op_idx = 0;
429 s->gen_next_parm_idx = 0;
431 s->be = tcg_malloc(sizeof(TCGBackendData));
434 static inline void tcg_temp_alloc(TCGContext *s, int n)
436 if (n > TCG_MAX_TEMPS)
440 static inline int tcg_global_reg_new_internal(TCGType type, int reg,
443 TCGContext *s = &tcg_ctx;
447 #if TCG_TARGET_REG_BITS == 32
448 if (type != TCG_TYPE_I32)
451 if (tcg_regset_test_reg(s->reserved_regs, reg))
454 tcg_temp_alloc(s, s->nb_globals + 1);
455 ts = &s->temps[s->nb_globals];
456 ts->base_type = type;
462 tcg_regset_set_reg(s->reserved_regs, reg);
466 TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name)
470 idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name);
471 return MAKE_TCGV_I32(idx);
474 TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name)
478 idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name);
479 return MAKE_TCGV_I64(idx);
482 static inline int tcg_global_mem_new_internal(TCGType type, int reg,
486 TCGContext *s = &tcg_ctx;
491 #if TCG_TARGET_REG_BITS == 32
492 if (type == TCG_TYPE_I64) {
494 tcg_temp_alloc(s, s->nb_globals + 2);
495 ts = &s->temps[s->nb_globals];
496 ts->base_type = type;
497 ts->type = TCG_TYPE_I32;
499 ts->mem_allocated = 1;
501 #ifdef HOST_WORDS_BIGENDIAN
502 ts->mem_offset = offset + 4;
504 ts->mem_offset = offset;
506 pstrcpy(buf, sizeof(buf), name);
507 pstrcat(buf, sizeof(buf), "_0");
508 ts->name = strdup(buf);
511 ts->base_type = type;
512 ts->type = TCG_TYPE_I32;
514 ts->mem_allocated = 1;
516 #ifdef HOST_WORDS_BIGENDIAN
517 ts->mem_offset = offset;
519 ts->mem_offset = offset + 4;
521 pstrcpy(buf, sizeof(buf), name);
522 pstrcat(buf, sizeof(buf), "_1");
523 ts->name = strdup(buf);
529 tcg_temp_alloc(s, s->nb_globals + 1);
530 ts = &s->temps[s->nb_globals];
531 ts->base_type = type;
534 ts->mem_allocated = 1;
536 ts->mem_offset = offset;
543 TCGv_i32 tcg_global_mem_new_i32(int reg, intptr_t offset, const char *name)
545 int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
546 return MAKE_TCGV_I32(idx);
549 TCGv_i64 tcg_global_mem_new_i64(int reg, intptr_t offset, const char *name)
551 int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
552 return MAKE_TCGV_I64(idx);
555 static inline int tcg_temp_new_internal(TCGType type, int temp_local)
557 TCGContext *s = &tcg_ctx;
561 k = type + (temp_local ? TCG_TYPE_COUNT : 0);
562 idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
563 if (idx < TCG_MAX_TEMPS) {
564 /* There is already an available temp with the right type. */
565 clear_bit(idx, s->free_temps[k].l);
568 ts->temp_allocated = 1;
569 assert(ts->base_type == type);
570 assert(ts->temp_local == temp_local);
573 #if TCG_TARGET_REG_BITS == 32
574 if (type == TCG_TYPE_I64) {
575 tcg_temp_alloc(s, s->nb_temps + 2);
576 ts = &s->temps[s->nb_temps];
577 ts->base_type = type;
578 ts->type = TCG_TYPE_I32;
579 ts->temp_allocated = 1;
580 ts->temp_local = temp_local;
583 ts->base_type = type;
584 ts->type = TCG_TYPE_I32;
585 ts->temp_allocated = 1;
586 ts->temp_local = temp_local;
592 tcg_temp_alloc(s, s->nb_temps + 1);
593 ts = &s->temps[s->nb_temps];
594 ts->base_type = type;
596 ts->temp_allocated = 1;
597 ts->temp_local = temp_local;
603 #if defined(CONFIG_DEBUG_TCG)
609 TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
613 idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
614 return MAKE_TCGV_I32(idx);
617 TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
621 idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
622 return MAKE_TCGV_I64(idx);
625 static void tcg_temp_free_internal(int idx)
627 TCGContext *s = &tcg_ctx;
631 #if defined(CONFIG_DEBUG_TCG)
633 if (s->temps_in_use < 0) {
634 fprintf(stderr, "More temporaries freed than allocated!\n");
638 assert(idx >= s->nb_globals && idx < s->nb_temps);
640 assert(ts->temp_allocated != 0);
641 ts->temp_allocated = 0;
643 k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
644 set_bit(idx, s->free_temps[k].l);
647 void tcg_temp_free_i32(TCGv_i32 arg)
649 tcg_temp_free_internal(GET_TCGV_I32(arg));
652 void tcg_temp_free_i64(TCGv_i64 arg)
654 tcg_temp_free_internal(GET_TCGV_I64(arg));
657 TCGv_i32 tcg_const_i32(int32_t val)
660 t0 = tcg_temp_new_i32();
661 tcg_gen_movi_i32(t0, val);
665 TCGv_i64 tcg_const_i64(int64_t val)
668 t0 = tcg_temp_new_i64();
669 tcg_gen_movi_i64(t0, val);
673 TCGv_i32 tcg_const_local_i32(int32_t val)
676 t0 = tcg_temp_local_new_i32();
677 tcg_gen_movi_i32(t0, val);
681 TCGv_i64 tcg_const_local_i64(int64_t val)
684 t0 = tcg_temp_local_new_i64();
685 tcg_gen_movi_i64(t0, val);
689 #if defined(CONFIG_DEBUG_TCG)
690 void tcg_clear_temp_count(void)
692 TCGContext *s = &tcg_ctx;
696 int tcg_check_temp_count(void)
698 TCGContext *s = &tcg_ctx;
699 if (s->temps_in_use) {
700 /* Clear the count so that we don't give another
701 * warning immediately next time around.
710 /* Note: we convert the 64 bit args to 32 bit and do some alignment
711 and endian swap. Maybe it would be better to do the alignment
712 and endian swap in tcg_reg_alloc_call(). */
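/* sizemask encodes the helper signature: bit 0 is set for a 64-bit return
   value; for argument i, bit 2*(i+1) is set if the argument is 64 bits wide
   and bit 2*(i+1)+1 if it is signed. */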
713 void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
714 int nargs, TCGArg *args)
716 int i, real_args, nb_rets, pi, pi_first;
717 unsigned sizemask, flags;
720 info = g_hash_table_lookup(s->helpers, (gpointer)func);
722 sizemask = info->sizemask;
724 #if defined(__sparc__) && !defined(__arch64__) \
725 && !defined(CONFIG_TCG_INTERPRETER)
726 /* We have 64-bit values in one register, but need to pass as two
727 separate parameters. Split them. */
728 int orig_sizemask = sizemask;
729 int orig_nargs = nargs;
732 TCGV_UNUSED_I64(retl);
733 TCGV_UNUSED_I64(reth);
735 TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2);
736 for (i = real_args = 0; i < nargs; ++i) {
737 int is_64bit = sizemask & (1 << (i+1)*2);
739 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
740 TCGv_i32 h = tcg_temp_new_i32();
741 TCGv_i32 l = tcg_temp_new_i32();
742 tcg_gen_extr_i64_i32(l, h, orig);
743 split_args[real_args++] = GET_TCGV_I32(h);
744 split_args[real_args++] = GET_TCGV_I32(l);
746 split_args[real_args++] = args[i];
753 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
754 for (i = 0; i < nargs; ++i) {
755 int is_64bit = sizemask & (1 << (i+1)*2);
756 int is_signed = sizemask & (2 << (i+1)*2);
758 TCGv_i64 temp = tcg_temp_new_i64();
759 TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
761 tcg_gen_ext32s_i64(temp, orig);
763 tcg_gen_ext32u_i64(temp, orig);
765 args[i] = GET_TCGV_I64(temp);
768 #endif /* TCG_TARGET_EXTEND_ARGS */
770 pi_first = pi = s->gen_next_parm_idx;
771 if (ret != TCG_CALL_DUMMY_ARG) {
772 #if defined(__sparc__) && !defined(__arch64__) \
773 && !defined(CONFIG_TCG_INTERPRETER)
774 if (orig_sizemask & 1) {
775 /* The 32-bit ABI is going to return the 64-bit value in
776 the %o0/%o1 register pair. Prepare for this by using
777 two return temporaries, and reassemble below. */
778 retl = tcg_temp_new_i64();
779 reth = tcg_temp_new_i64();
780 s->gen_opparam_buf[pi++] = GET_TCGV_I64(reth);
781 s->gen_opparam_buf[pi++] = GET_TCGV_I64(retl);
784 s->gen_opparam_buf[pi++] = ret;
788 if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
789 #ifdef HOST_WORDS_BIGENDIAN
790 s->gen_opparam_buf[pi++] = ret + 1;
791 s->gen_opparam_buf[pi++] = ret;
793 s->gen_opparam_buf[pi++] = ret;
794 s->gen_opparam_buf[pi++] = ret + 1;
798 s->gen_opparam_buf[pi++] = ret;
806 for (i = 0; i < nargs; i++) {
807 int is_64bit = sizemask & (1 << (i+1)*2);
808 if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
809 #ifdef TCG_TARGET_CALL_ALIGN_ARGS
810 /* some targets want aligned 64 bit args */
812 s->gen_opparam_buf[pi++] = TCG_CALL_DUMMY_ARG;
816 /* If stack grows up, then we will be placing successive
817 arguments at lower addresses, which means we need to
818 reverse the order compared to how we would normally
819 treat either big or little-endian. For those arguments
820 that will wind up in registers, this still works for
821 HPPA (the only current STACK_GROWSUP target) since the
822 argument registers are *also* allocated in decreasing
823 order. If another such target is added, this logic may
824 have to get more complicated to differentiate between
825 stack arguments and register arguments. */
826 #if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
827 s->gen_opparam_buf[pi++] = args[i] + 1;
828 s->gen_opparam_buf[pi++] = args[i];
830 s->gen_opparam_buf[pi++] = args[i];
831 s->gen_opparam_buf[pi++] = args[i] + 1;
837 s->gen_opparam_buf[pi++] = args[i];
840 s->gen_opparam_buf[pi++] = (uintptr_t)func;
841 s->gen_opparam_buf[pi++] = flags;
843 i = s->gen_next_op_idx;
844 tcg_debug_assert(i < OPC_BUF_SIZE);
845 tcg_debug_assert(pi <= OPPARAM_BUF_SIZE);
847 /* Set links for sequential allocation during translation. */
848 s->gen_op_buf[i] = (TCGOp){
849 .opc = INDEX_op_call,
857 /* Make sure the calli field didn't overflow. */
858 tcg_debug_assert(s->gen_op_buf[i].calli == real_args);
860 s->gen_last_op_idx = i;
861 s->gen_next_op_idx = i + 1;
862 s->gen_next_parm_idx = pi;
864 #if defined(__sparc__) && !defined(__arch64__) \
865 && !defined(CONFIG_TCG_INTERPRETER)
866 /* Free all of the parts we allocated above. */
867 for (i = real_args = 0; i < orig_nargs; ++i) {
868 int is_64bit = orig_sizemask & (1 << (i+1)*2);
870 TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
871 TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
872 tcg_temp_free_i32(h);
873 tcg_temp_free_i32(l);
878 if (orig_sizemask & 1) {
879 /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
880 Note that describing these as TCGv_i64 eliminates an unnecessary
881 zero-extension that tcg_gen_concat_i32_i64 would create. */
882 tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
883 tcg_temp_free_i64(retl);
884 tcg_temp_free_i64(reth);
886 #elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
887 for (i = 0; i < nargs; ++i) {
888 int is_64bit = sizemask & (1 << (i+1)*2);
890 TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
891 tcg_temp_free_i64(temp);
894 #endif /* TCG_TARGET_EXTEND_ARGS */
897 static void tcg_reg_alloc_start(TCGContext *s)
901 for(i = 0; i < s->nb_globals; i++) {
904 ts->val_type = TEMP_VAL_REG;
906 ts->val_type = TEMP_VAL_MEM;
909 for(i = s->nb_globals; i < s->nb_temps; i++) {
911 if (ts->temp_local) {
912 ts->val_type = TEMP_VAL_MEM;
914 ts->val_type = TEMP_VAL_DEAD;
916 ts->mem_allocated = 0;
919 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
920 s->reg_to_temp[i] = -1;
924 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
929 assert(idx >= 0 && idx < s->nb_temps);
931 if (idx < s->nb_globals) {
932 pstrcpy(buf, buf_size, ts->name);
935 snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
937 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
942 char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg)
944 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg));
947 char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
949 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
952 /* Find helper name. */
953 static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
955 const char *ret = NULL;
957 TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val);
965 static const char * const cond_name[] =
967 [TCG_COND_NEVER] = "never",
968 [TCG_COND_ALWAYS] = "always",
969 [TCG_COND_EQ] = "eq",
970 [TCG_COND_NE] = "ne",
971 [TCG_COND_LT] = "lt",
972 [TCG_COND_GE] = "ge",
973 [TCG_COND_LE] = "le",
974 [TCG_COND_GT] = "gt",
975 [TCG_COND_LTU] = "ltu",
976 [TCG_COND_GEU] = "geu",
977 [TCG_COND_LEU] = "leu",
978 [TCG_COND_GTU] = "gtu"
981 static const char * const ldst_name[] =
997 void tcg_dump_ops(TCGContext *s)
1003 for (oi = s->gen_first_op_idx; oi >= 0; oi = op->next) {
1004 int i, k, nb_oargs, nb_iargs, nb_cargs;
1005 const TCGOpDef *def;
1009 op = &s->gen_op_buf[oi];
1011 def = &tcg_op_defs[c];
1012 args = &s->gen_opparam_buf[op->args];
1014 if (c == INDEX_op_insn_start) {
1015 qemu_log("%s ----", oi != s->gen_first_op_idx ? "\n" : "");
1017 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
1019 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
1020 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
1024 qemu_log(" " TARGET_FMT_lx, a);
1026 } else if (c == INDEX_op_call) {
1027 /* variable number of arguments */
1028 nb_oargs = op->callo;
1029 nb_iargs = op->calli;
1030 nb_cargs = def->nb_cargs;
1032 /* function name, flags, out args */
1033 qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
1034 tcg_find_helper(s, args[nb_oargs + nb_iargs]),
1035 args[nb_oargs + nb_iargs + 1], nb_oargs);
1036 for (i = 0; i < nb_oargs; i++) {
1037 qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1040 for (i = 0; i < nb_iargs; i++) {
1041 TCGArg arg = args[nb_oargs + i];
1042 const char *t = "<dummy>";
1043 if (arg != TCG_CALL_DUMMY_ARG) {
1044 t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
1049 qemu_log(" %s ", def->name);
1051 nb_oargs = def->nb_oargs;
1052 nb_iargs = def->nb_iargs;
1053 nb_cargs = def->nb_cargs;
1056 for (i = 0; i < nb_oargs; i++) {
1060 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1063 for (i = 0; i < nb_iargs; i++) {
1067 qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
1071 case INDEX_op_brcond_i32:
1072 case INDEX_op_setcond_i32:
1073 case INDEX_op_movcond_i32:
1074 case INDEX_op_brcond2_i32:
1075 case INDEX_op_setcond2_i32:
1076 case INDEX_op_brcond_i64:
1077 case INDEX_op_setcond_i64:
1078 case INDEX_op_movcond_i64:
1079 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
1080 qemu_log(",%s", cond_name[args[k++]]);
1082 qemu_log(",$0x%" TCG_PRIlx, args[k++]);
1086 case INDEX_op_qemu_ld_i32:
1087 case INDEX_op_qemu_st_i32:
1088 case INDEX_op_qemu_ld_i64:
1089 case INDEX_op_qemu_st_i64:
1091 TCGMemOpIdx oi = args[k++];
1092 TCGMemOp op = get_memop(oi);
1093 unsigned ix = get_mmuidx(oi);
1095 if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
1096 qemu_log(",$0x%x,%u", op, ix);
1098 const char *s_al = "", *s_op;
1099 if (op & MO_AMASK) {
1100 if ((op & MO_AMASK) == MO_ALIGN) {
1106 s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
1107 qemu_log(",%s%s,%u", s_al, s_op, ix);
1117 case INDEX_op_set_label:
1119 case INDEX_op_brcond_i32:
1120 case INDEX_op_brcond_i64:
1121 case INDEX_op_brcond2_i32:
1122 qemu_log("%s$L%d", k ? "," : "", arg_label(args[k])->id);
1128 for (; i < nb_cargs; i++, k++) {
1129 qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", args[k]);
/* we give more priority to constraints with fewer registers */
1137 static int get_constraint_priority(const TCGOpDef *def, int k)
1139 const TCGArgConstraint *arg_ct;
1142 arg_ct = &def->args_ct[k];
1143 if (arg_ct->ct & TCG_CT_ALIAS) {
1144 /* an alias is equivalent to a single register */
1147 if (!(arg_ct->ct & TCG_CT_REG))
1150 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1151 if (tcg_regset_test_reg(arg_ct->u.regs, i))
1155 return TCG_TARGET_NB_REGS - n + 1;
1158 /* sort from highest priority to lowest */
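/* A simple O(n^2) exchange sort is sufficient here: n is bounded by
   TCG_MAX_OP_ARGS. */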
1159 static void sort_constraints(TCGOpDef *def, int start, int n)
1161 int i, j, p1, p2, tmp;
1163 for(i = 0; i < n; i++)
1164 def->sorted_args[start + i] = start + i;
1167 for(i = 0; i < n - 1; i++) {
1168 for(j = i + 1; j < n; j++) {
1169 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
1170 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
1172 tmp = def->sorted_args[start + i];
1173 def->sorted_args[start + i] = def->sorted_args[start + j];
1174 def->sorted_args[start + j] = tmp;
1180 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
1188 if (tdefs->op == (TCGOpcode)-1)
1191 assert((unsigned)op < NB_OPS);
1192 def = &tcg_op_defs[op];
1193 #if defined(CONFIG_DEBUG_TCG)
1194 /* Duplicate entry in op definitions? */
1198 nb_args = def->nb_iargs + def->nb_oargs;
1199 for(i = 0; i < nb_args; i++) {
1200 ct_str = tdefs->args_ct_str[i];
1201 /* Incomplete TCGTargetOpDef entry? */
1202 assert(ct_str != NULL);
1203 tcg_regset_clear(def->args_ct[i].u.regs);
1204 def->args_ct[i].ct = 0;
1205 if (ct_str[0] >= '0' && ct_str[0] <= '9') {
1207 oarg = ct_str[0] - '0';
1208 assert(oarg < def->nb_oargs);
1209 assert(def->args_ct[oarg].ct & TCG_CT_REG);
1210 /* TCG_CT_ALIAS is for the output arguments. The input
1211 argument is tagged with TCG_CT_IALIAS. */
1212 def->args_ct[i] = def->args_ct[oarg];
1213 def->args_ct[oarg].ct = TCG_CT_ALIAS;
1214 def->args_ct[oarg].alias_index = i;
1215 def->args_ct[i].ct |= TCG_CT_IALIAS;
1216 def->args_ct[i].alias_index = oarg;
1219 if (*ct_str == '\0')
1223 def->args_ct[i].ct |= TCG_CT_CONST;
1227 if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
1228 fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
1229 ct_str, i, def->name);
1237 /* TCGTargetOpDef entry with too much information? */
1238 assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
/* sort the constraints (XXX: this is just a heuristic) */
1241 sort_constraints(def, 0, def->nb_oargs);
1242 sort_constraints(def, def->nb_oargs, def->nb_iargs);
1248 printf("%s: sorted=", def->name);
1249 for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
1250 printf(" %d", def->sorted_args[i]);
1257 #if defined(CONFIG_DEBUG_TCG)
1259 for (op = 0; op < tcg_op_defs_max; op++) {
1260 const TCGOpDef *def = &tcg_op_defs[op];
1261 if (def->flags & TCG_OPF_NOT_PRESENT) {
1262 /* Wrong entry in op definitions? */
1264 fprintf(stderr, "Invalid op definition for %s\n", def->name);
1268 /* Missing entry in op definitions? */
1270 fprintf(stderr, "Missing op definition for %s\n", def->name);
1281 void tcg_op_remove(TCGContext *s, TCGOp *op)
1283 int next = op->next;
1284 int prev = op->prev;
1287 s->gen_op_buf[next].prev = prev;
1289 s->gen_last_op_idx = prev;
1292 s->gen_op_buf[prev].next = next;
1294 s->gen_first_op_idx = next;
1297 memset(op, -1, sizeof(*op));
1299 #ifdef CONFIG_PROFILER
1304 #ifdef USE_LIVENESS_ANALYSIS
1305 /* liveness analysis: end of function: all temps are dead, and globals
1306 should be in memory. */
1307 static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps,
1310 memset(dead_temps, 1, s->nb_temps);
1311 memset(mem_temps, 1, s->nb_globals);
1312 memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals);
1315 /* liveness analysis: end of basic block: all temps are dead, globals
1316 and local temps should be in memory. */
1317 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
1322 memset(dead_temps, 1, s->nb_temps);
1323 memset(mem_temps, 1, s->nb_globals);
1324 for(i = s->nb_globals; i < s->nb_temps; i++) {
1325 mem_temps[i] = s->temps[i].temp_local;
/* Liveness analysis: update the op_dead_args array to tell whether a
   given input argument is dead. Instructions updating dead
   temporaries are removed. */
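/* The pass walks the op list backwards. dead_temps[i] records whether temp
   i is dead at the current point and mem_temps[i] whether it still has to
   be synced to memory. For each op, bit n of op_dead_args is set when
   argument n (outputs first, then inputs) is dead after that op, and bit n
   of op_sync_args when output n needs a sync to memory. */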
1332 static void tcg_liveness_analysis(TCGContext *s)
1334 uint8_t *dead_temps, *mem_temps;
1335 int oi, oi_prev, nb_ops;
1337 nb_ops = s->gen_next_op_idx;
1338 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1339 s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1341 dead_temps = tcg_malloc(s->nb_temps);
1342 mem_temps = tcg_malloc(s->nb_temps);
1343 tcg_la_func_end(s, dead_temps, mem_temps);
1345 for (oi = s->gen_last_op_idx; oi >= 0; oi = oi_prev) {
1346 int i, nb_iargs, nb_oargs;
1347 TCGOpcode opc_new, opc_new2;
1353 TCGOp * const op = &s->gen_op_buf[oi];
1354 TCGArg * const args = &s->gen_opparam_buf[op->args];
1355 TCGOpcode opc = op->opc;
1356 const TCGOpDef *def = &tcg_op_defs[opc];
1365 nb_oargs = op->callo;
1366 nb_iargs = op->calli;
1367 call_flags = args[nb_oargs + nb_iargs + 1];
1369 /* pure functions can be removed if their result is unused */
1370 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
1371 for (i = 0; i < nb_oargs; i++) {
1373 if (!dead_temps[arg] || mem_temps[arg]) {
1374 goto do_not_remove_call;
1381 /* output args are dead */
1384 for (i = 0; i < nb_oargs; i++) {
1386 if (dead_temps[arg]) {
1387 dead_args |= (1 << i);
1389 if (mem_temps[arg]) {
1390 sync_args |= (1 << i);
1392 dead_temps[arg] = 1;
1396 if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
1397 /* globals should be synced to memory */
1398 memset(mem_temps, 1, s->nb_globals);
1400 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
1401 TCG_CALL_NO_READ_GLOBALS))) {
1402 /* globals should go back to memory */
1403 memset(dead_temps, 1, s->nb_globals);
1406 /* record arguments that die in this helper */
1407 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1409 if (arg != TCG_CALL_DUMMY_ARG) {
1410 if (dead_temps[arg]) {
1411 dead_args |= (1 << i);
1415 /* input arguments are live for preceding opcodes */
1416 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1418 dead_temps[arg] = 0;
1420 s->op_dead_args[oi] = dead_args;
1421 s->op_sync_args[oi] = sync_args;
1425 case INDEX_op_insn_start:
1427 case INDEX_op_discard:
1428 /* mark the temporary as dead */
1429 dead_temps[args[0]] = 1;
1430 mem_temps[args[0]] = 0;
1433 case INDEX_op_add2_i32:
1434 opc_new = INDEX_op_add_i32;
1436 case INDEX_op_sub2_i32:
1437 opc_new = INDEX_op_sub_i32;
1439 case INDEX_op_add2_i64:
1440 opc_new = INDEX_op_add_i64;
1442 case INDEX_op_sub2_i64:
1443 opc_new = INDEX_op_sub_i64;
/* Test if the high part of the operation is dead, but not
   the low part. The result can be optimized to a simple
   add or sub. This happens often for x86_64 guests when the
   CPU mode is set to 32 bit. */
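/* For example, an add2_i32 whose high result is unused degrades into a
   plain add_i32 of the two low-part inputs. */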
1451 if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1452 if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1455 /* Replace the opcode and adjust the args in place,
1456 leaving 3 unused args at the end. */
1457 op->opc = opc = opc_new;
1460 /* Fall through and mark the single-word operation live. */
1466 case INDEX_op_mulu2_i32:
1467 opc_new = INDEX_op_mul_i32;
1468 opc_new2 = INDEX_op_muluh_i32;
1469 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
1471 case INDEX_op_muls2_i32:
1472 opc_new = INDEX_op_mul_i32;
1473 opc_new2 = INDEX_op_mulsh_i32;
1474 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
1476 case INDEX_op_mulu2_i64:
1477 opc_new = INDEX_op_mul_i64;
1478 opc_new2 = INDEX_op_muluh_i64;
1479 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
1481 case INDEX_op_muls2_i64:
1482 opc_new = INDEX_op_mul_i64;
1483 opc_new2 = INDEX_op_mulsh_i64;
1484 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
1489 if (dead_temps[args[1]] && !mem_temps[args[1]]) {
1490 if (dead_temps[args[0]] && !mem_temps[args[0]]) {
1491 /* Both parts of the operation are dead. */
1494 /* The high part of the operation is dead; generate the low. */
1495 op->opc = opc = opc_new;
1498 } else if (have_opc_new2 && dead_temps[args[0]]
1499 && !mem_temps[args[0]]) {
1500 /* The low part of the operation is dead; generate the high. */
1501 op->opc = opc = opc_new2;
1508 /* Mark the single-word operation live. */
1513 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
1514 nb_iargs = def->nb_iargs;
1515 nb_oargs = def->nb_oargs;
1517 /* Test if the operation can be removed because all
1518 its outputs are dead. We assume that nb_oargs == 0
1519 implies side effects */
1520 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1521 for (i = 0; i < nb_oargs; i++) {
1523 if (!dead_temps[arg] || mem_temps[arg]) {
1528 tcg_op_remove(s, op);
1531 /* output args are dead */
1534 for (i = 0; i < nb_oargs; i++) {
1536 if (dead_temps[arg]) {
1537 dead_args |= (1 << i);
1539 if (mem_temps[arg]) {
1540 sync_args |= (1 << i);
1542 dead_temps[arg] = 1;
1546 /* if end of basic block, update */
1547 if (def->flags & TCG_OPF_BB_END) {
1548 tcg_la_bb_end(s, dead_temps, mem_temps);
1549 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1550 /* globals should be synced to memory */
1551 memset(mem_temps, 1, s->nb_globals);
1554 /* record arguments that die in this opcode */
1555 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1557 if (dead_temps[arg]) {
1558 dead_args |= (1 << i);
1561 /* input arguments are live for preceding opcodes */
1562 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
1564 dead_temps[arg] = 0;
1566 s->op_dead_args[oi] = dead_args;
1567 s->op_sync_args[oi] = sync_args;
1574 /* dummy liveness analysis */
1575 static void tcg_liveness_analysis(TCGContext *s)
nb_ops = s->gen_next_op_idx;
1580 s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
1581 memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
1582 s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
1583 memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t));
1588 static void dump_regs(TCGContext *s)
1594 for(i = 0; i < s->nb_temps; i++) {
1596 printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1597 switch(ts->val_type) {
1599 printf("%s", tcg_target_reg_names[ts->reg]);
1602 printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
1604 case TEMP_VAL_CONST:
1605 printf("$0x%" TCG_PRIlx, ts->val);
1617 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1618 if (s->reg_to_temp[i] >= 0) {
1620 tcg_target_reg_names[i],
1621 tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
1626 static void check_regs(TCGContext *s)
1632 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1633 k = s->reg_to_temp[reg];
1636 if (ts->val_type != TEMP_VAL_REG ||
1638 printf("Inconsistency for register %s:\n",
1639 tcg_target_reg_names[reg]);
1644 for(k = 0; k < s->nb_temps; k++) {
1646 if (ts->val_type == TEMP_VAL_REG &&
1648 s->reg_to_temp[ts->reg] != k) {
1649 printf("Inconsistency for temp %s:\n",
1650 tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
1652 printf("reg state:\n");
1660 static void temp_allocate_frame(TCGContext *s, int temp)
1663 ts = &s->temps[temp];
1664 #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
1665 /* Sparc64 stack is accessed with offset of 2047 */
1666 s->current_frame_offset = (s->current_frame_offset +
1667 (tcg_target_long)sizeof(tcg_target_long) - 1) &
1668 ~(sizeof(tcg_target_long) - 1);
1670 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
1674 ts->mem_offset = s->current_frame_offset;
1675 ts->mem_reg = s->frame_reg;
1676 ts->mem_allocated = 1;
1677 s->current_frame_offset += sizeof(tcg_target_long);
1680 /* sync register 'reg' by saving it to the corresponding temporary */
1681 static inline void tcg_reg_sync(TCGContext *s, int reg)
1686 temp = s->reg_to_temp[reg];
1687 ts = &s->temps[temp];
1688 assert(ts->val_type == TEMP_VAL_REG);
1689 if (!ts->mem_coherent && !ts->fixed_reg) {
1690 if (!ts->mem_allocated) {
1691 temp_allocate_frame(s, temp);
1693 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1695 ts->mem_coherent = 1;
1698 /* free register 'reg' by spilling the corresponding temporary if necessary */
1699 static void tcg_reg_free(TCGContext *s, int reg)
1703 temp = s->reg_to_temp[reg];
1705 tcg_reg_sync(s, reg);
1706 s->temps[temp].val_type = TEMP_VAL_MEM;
1707 s->reg_to_temp[reg] = -1;
1711 /* Allocate a register belonging to reg1 & ~reg2 */
1712 static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
1717 tcg_regset_andnot(reg_ct, reg1, reg2);
1719 /* first try free registers */
1720 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1721 reg = tcg_target_reg_alloc_order[i];
1722 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
1726 /* XXX: do better spill choice */
1727 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1728 reg = tcg_target_reg_alloc_order[i];
1729 if (tcg_regset_test_reg(reg_ct, reg)) {
1730 tcg_reg_free(s, reg);
1738 /* mark a temporary as dead. */
1739 static inline void temp_dead(TCGContext *s, int temp)
1743 ts = &s->temps[temp];
1744 if (!ts->fixed_reg) {
1745 if (ts->val_type == TEMP_VAL_REG) {
1746 s->reg_to_temp[ts->reg] = -1;
1748 if (temp < s->nb_globals || ts->temp_local) {
1749 ts->val_type = TEMP_VAL_MEM;
1751 ts->val_type = TEMP_VAL_DEAD;
/* sync a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
1758 static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs)
1762 ts = &s->temps[temp];
1763 if (!ts->fixed_reg) {
1764 switch(ts->val_type) {
1765 case TEMP_VAL_CONST:
1766 ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1768 ts->val_type = TEMP_VAL_REG;
1769 s->reg_to_temp[ts->reg] = temp;
1770 ts->mem_coherent = 0;
1771 tcg_out_movi(s, ts->type, ts->reg, ts->val);
1774 tcg_reg_sync(s, ts->reg);
/* save a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
1787 static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
1789 #ifdef USE_LIVENESS_ANALYSIS
1790 /* The liveness analysis already ensures that globals are back
1791 in memory. Keep an assert for safety. */
1792 assert(s->temps[temp].val_type == TEMP_VAL_MEM || s->temps[temp].fixed_reg);
1794 temp_sync(s, temp, allocated_regs);
/* save globals to their canonical location and assume they can be
   modified by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
1802 static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
1806 for(i = 0; i < s->nb_globals; i++) {
1807 temp_save(s, i, allocated_regs);
/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
1814 static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
1818 for (i = 0; i < s->nb_globals; i++) {
1819 #ifdef USE_LIVENESS_ANALYSIS
1820 assert(s->temps[i].val_type != TEMP_VAL_REG || s->temps[i].fixed_reg ||
1821 s->temps[i].mem_coherent);
1823 temp_sync(s, i, allocated_regs);
1828 /* at the end of a basic block, we assume all temporaries are dead and
1829 all globals are stored at their canonical location. */
1830 static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
1835 for(i = s->nb_globals; i < s->nb_temps; i++) {
1837 if (ts->temp_local) {
1838 temp_save(s, i, allocated_regs);
1840 #ifdef USE_LIVENESS_ANALYSIS
1841 /* The liveness analysis already ensures that temps are dead.
1842 Keep an assert for safety. */
1843 assert(ts->val_type == TEMP_VAL_DEAD);
1850 save_globals(s, allocated_regs);
1853 #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1)
1854 #define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1)
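/* Bit n of dead_args/sync_args refers to argument n of the current op
   (outputs first, then inputs), as computed by the liveness pass above. */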
1856 static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
1857 uint16_t dead_args, uint8_t sync_args)
1860 tcg_target_ulong val;
1862 ots = &s->temps[args[0]];
1865 if (ots->fixed_reg) {
/* for fixed registers, we do not do any constant propagation */
1868 tcg_out_movi(s, ots->type, ots->reg, val);
1870 /* The movi is not explicitly generated here */
1871 if (ots->val_type == TEMP_VAL_REG)
1872 s->reg_to_temp[ots->reg] = -1;
1873 ots->val_type = TEMP_VAL_CONST;
1876 if (NEED_SYNC_ARG(0)) {
1877 temp_sync(s, args[0], s->reserved_regs);
1879 if (IS_DEAD_ARG(0)) {
1880 temp_dead(s, args[0]);
1884 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
1885 const TCGArg *args, uint16_t dead_args,
1888 TCGRegSet allocated_regs;
1890 TCGType otype, itype;
1892 tcg_regset_set(allocated_regs, s->reserved_regs);
1893 ots = &s->temps[args[0]];
1894 ts = &s->temps[args[1]];
1896 /* Note that otype != itype for no-op truncation. */
1900 /* If the source value is not in a register, and we're going to be
1901 forced to have it in a register in order to perform the copy,
1902 then copy the SOURCE value into its own register first. That way
1903 we don't have to reload SOURCE the next time it is used. */
1904 if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
1905 || ts->val_type == TEMP_VAL_MEM) {
1906 ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[itype],
1908 if (ts->val_type == TEMP_VAL_MEM) {
1909 tcg_out_ld(s, itype, ts->reg, ts->mem_reg, ts->mem_offset);
1910 ts->mem_coherent = 1;
1911 } else if (ts->val_type == TEMP_VAL_CONST) {
1912 tcg_out_movi(s, itype, ts->reg, ts->val);
1913 ts->mem_coherent = 0;
1915 s->reg_to_temp[ts->reg] = args[1];
1916 ts->val_type = TEMP_VAL_REG;
1919 if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
1920 /* mov to a non-saved dead register makes no sense (even with
1921 liveness analysis disabled). */
1922 assert(NEED_SYNC_ARG(0));
1923 /* The code above should have moved the temp to a register. */
1924 assert(ts->val_type == TEMP_VAL_REG);
1925 if (!ots->mem_allocated) {
1926 temp_allocate_frame(s, args[0]);
1928 tcg_out_st(s, otype, ts->reg, ots->mem_reg, ots->mem_offset);
1929 if (IS_DEAD_ARG(1)) {
1930 temp_dead(s, args[1]);
1932 temp_dead(s, args[0]);
1933 } else if (ts->val_type == TEMP_VAL_CONST) {
1934 /* propagate constant */
1935 if (ots->val_type == TEMP_VAL_REG) {
1936 s->reg_to_temp[ots->reg] = -1;
1938 ots->val_type = TEMP_VAL_CONST;
1940 if (IS_DEAD_ARG(1)) {
1941 temp_dead(s, args[1]);
1944 /* The code in the first if block should have moved the
1945 temp to a register. */
1946 assert(ts->val_type == TEMP_VAL_REG);
1947 if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
1948 /* the mov can be suppressed */
1949 if (ots->val_type == TEMP_VAL_REG) {
1950 s->reg_to_temp[ots->reg] = -1;
1953 temp_dead(s, args[1]);
1955 if (ots->val_type != TEMP_VAL_REG) {
/* When allocating a new register, make sure to not spill the
   input one. */
1958 tcg_regset_set_reg(allocated_regs, ts->reg);
1959 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
1962 tcg_out_mov(s, otype, ots->reg, ts->reg);
1964 ots->val_type = TEMP_VAL_REG;
1965 ots->mem_coherent = 0;
1966 s->reg_to_temp[ots->reg] = args[0];
1967 if (NEED_SYNC_ARG(0)) {
1968 tcg_reg_sync(s, ots->reg);
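/* Generic register allocation for one op: copy the constant args, bring
   each input into a location satisfying its constraint (register, accepted
   constant, or a fresh copy for aliased inputs), free dead inputs, handle
   basic-block ends, call-clobbered registers and side effects, allocate the
   outputs, emit the op via tcg_out_op, then move/sync/free the outputs. */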
1973 static void tcg_reg_alloc_op(TCGContext *s,
1974 const TCGOpDef *def, TCGOpcode opc,
1975 const TCGArg *args, uint16_t dead_args,
1978 TCGRegSet allocated_regs;
1979 int i, k, nb_iargs, nb_oargs, reg;
1981 const TCGArgConstraint *arg_ct;
1983 TCGArg new_args[TCG_MAX_OP_ARGS];
1984 int const_args[TCG_MAX_OP_ARGS];
1986 nb_oargs = def->nb_oargs;
1987 nb_iargs = def->nb_iargs;
1989 /* copy constants */
1990 memcpy(new_args + nb_oargs + nb_iargs,
1991 args + nb_oargs + nb_iargs,
1992 sizeof(TCGArg) * def->nb_cargs);
1994 /* satisfy input constraints */
1995 tcg_regset_set(allocated_regs, s->reserved_regs);
1996 for(k = 0; k < nb_iargs; k++) {
1997 i = def->sorted_args[nb_oargs + k];
1999 arg_ct = &def->args_ct[i];
2000 ts = &s->temps[arg];
2001 if (ts->val_type == TEMP_VAL_MEM) {
2002 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2003 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2004 ts->val_type = TEMP_VAL_REG;
2006 ts->mem_coherent = 1;
2007 s->reg_to_temp[reg] = arg;
2008 } else if (ts->val_type == TEMP_VAL_CONST) {
2009 if (tcg_target_const_match(ts->val, ts->type, arg_ct)) {
2010 /* constant is OK for instruction */
2012 new_args[i] = ts->val;
2015 /* need to move to a register */
2016 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2017 tcg_out_movi(s, ts->type, reg, ts->val);
2018 ts->val_type = TEMP_VAL_REG;
2020 ts->mem_coherent = 0;
2021 s->reg_to_temp[reg] = arg;
2024 assert(ts->val_type == TEMP_VAL_REG);
2025 if (arg_ct->ct & TCG_CT_IALIAS) {
2026 if (ts->fixed_reg) {
2027 /* if fixed register, we must allocate a new register
2028 if the alias is not the same register */
2029 if (arg != args[arg_ct->alias_index])
2030 goto allocate_in_reg;
2032 /* if the input is aliased to an output and if it is
2033 not dead after the instruction, we must allocate
2034 a new register and move it */
2035 if (!IS_DEAD_ARG(i)) {
2036 goto allocate_in_reg;
2038 /* check if the current register has already been allocated
2039 for another input aliased to an output */
2041 for (k2 = 0 ; k2 < k ; k2++) {
2042 i2 = def->sorted_args[nb_oargs + k2];
2043 if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
2044 (new_args[i2] == ts->reg)) {
2045 goto allocate_in_reg;
2051 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2052 /* nothing to do : the constraint is satisfied */
2055 /* allocate a new register matching the constraint
2056 and move the temporary register into it */
2057 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2058 tcg_out_mov(s, ts->type, reg, ts->reg);
2062 tcg_regset_set_reg(allocated_regs, reg);
2066 /* mark dead temporaries and free the associated registers */
2067 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2068 if (IS_DEAD_ARG(i)) {
2069 temp_dead(s, args[i]);
2073 if (def->flags & TCG_OPF_BB_END) {
2074 tcg_reg_alloc_bb_end(s, allocated_regs);
2076 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2077 /* XXX: permit generic clobber register list ? */
2078 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2079 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2080 tcg_reg_free(s, reg);
2084 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
/* sync globals if the op has side effects and might trigger
   an exception. */
2087 sync_globals(s, allocated_regs);
2090 /* satisfy the output constraints */
2091 tcg_regset_set(allocated_regs, s->reserved_regs);
2092 for(k = 0; k < nb_oargs; k++) {
2093 i = def->sorted_args[k];
2095 arg_ct = &def->args_ct[i];
2096 ts = &s->temps[arg];
2097 if (arg_ct->ct & TCG_CT_ALIAS) {
2098 reg = new_args[arg_ct->alias_index];
2100 /* if fixed register, we try to use it */
2102 if (ts->fixed_reg &&
2103 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2106 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
2108 tcg_regset_set_reg(allocated_regs, reg);
2109 /* if a fixed register is used, then a move will be done afterwards */
2110 if (!ts->fixed_reg) {
2111 if (ts->val_type == TEMP_VAL_REG) {
2112 s->reg_to_temp[ts->reg] = -1;
2114 ts->val_type = TEMP_VAL_REG;
2116 /* temp value is modified, so the value kept in memory is
2117 potentially not the same */
2118 ts->mem_coherent = 0;
2119 s->reg_to_temp[reg] = arg;
2126 /* emit instruction */
2127 tcg_out_op(s, opc, new_args, const_args);
2129 /* move the outputs in the correct register if needed */
2130 for(i = 0; i < nb_oargs; i++) {
2131 ts = &s->temps[args[i]];
2133 if (ts->fixed_reg && ts->reg != reg) {
2134 tcg_out_mov(s, ts->type, ts->reg, reg);
2136 if (NEED_SYNC_ARG(i)) {
2137 tcg_reg_sync(s, reg);
2139 if (IS_DEAD_ARG(i)) {
2140 temp_dead(s, args[i]);
2145 #ifdef TCG_TARGET_STACK_GROWSUP
2146 #define STACK_DIR(x) (-(x))
2148 #define STACK_DIR(x) (x)
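/* Register allocation for calls: marshal the arguments according to the
   host ABI - excess arguments go to stack slots (offset direction handled
   by STACK_DIR), the first nb_regs arguments into
   tcg_target_call_iarg_regs - free dead temporaries and call-clobbered
   registers, save or sync globals according to the call flags, emit the
   call, and bind the results to tcg_target_call_oarg_regs. */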
2151 static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
2152 const TCGArg * const args, uint16_t dead_args,
2155 int flags, nb_regs, i, reg;
2158 intptr_t stack_offset;
2159 size_t call_stack_size;
2160 tcg_insn_unit *func_addr;
2162 TCGRegSet allocated_regs;
2164 func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
2165 flags = args[nb_oargs + nb_iargs + 1];
2167 nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
2168 if (nb_regs > nb_iargs) {
2172 /* assign stack slots first */
2173 call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
2174 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
2175 ~(TCG_TARGET_STACK_ALIGN - 1);
2176 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
2177 if (allocate_args) {
2178 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
2179 preallocate call stack */
2183 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
2184 for(i = nb_regs; i < nb_iargs; i++) {
2185 arg = args[nb_oargs + i];
2186 #ifdef TCG_TARGET_STACK_GROWSUP
2187 stack_offset -= sizeof(tcg_target_long);
2189 if (arg != TCG_CALL_DUMMY_ARG) {
2190 ts = &s->temps[arg];
2191 if (ts->val_type == TEMP_VAL_REG) {
2192 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
2193 } else if (ts->val_type == TEMP_VAL_MEM) {
2194 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2196 /* XXX: not correct if reading values from the stack */
2197 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2198 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2199 } else if (ts->val_type == TEMP_VAL_CONST) {
2200 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
2202 /* XXX: sign extend may be needed on some targets */
2203 tcg_out_movi(s, ts->type, reg, ts->val);
2204 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
2209 #ifndef TCG_TARGET_STACK_GROWSUP
2210 stack_offset += sizeof(tcg_target_long);
2214 /* assign input registers */
2215 tcg_regset_set(allocated_regs, s->reserved_regs);
2216 for(i = 0; i < nb_regs; i++) {
2217 arg = args[nb_oargs + i];
2218 if (arg != TCG_CALL_DUMMY_ARG) {
2219 ts = &s->temps[arg];
2220 reg = tcg_target_call_iarg_regs[i];
2221 tcg_reg_free(s, reg);
2222 if (ts->val_type == TEMP_VAL_REG) {
2223 if (ts->reg != reg) {
2224 tcg_out_mov(s, ts->type, reg, ts->reg);
2226 } else if (ts->val_type == TEMP_VAL_MEM) {
2227 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
2228 } else if (ts->val_type == TEMP_VAL_CONST) {
2229 /* XXX: sign extend ? */
2230 tcg_out_movi(s, ts->type, reg, ts->val);
2234 tcg_regset_set_reg(allocated_regs, reg);
2238 /* mark dead temporaries and free the associated registers */
2239 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
2240 if (IS_DEAD_ARG(i)) {
2241 temp_dead(s, args[i]);
2245 /* clobber call registers */
2246 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2247 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
2248 tcg_reg_free(s, reg);
2252 /* Save globals if they might be written by the helper, sync them if
2253 they might be read. */
2254 if (flags & TCG_CALL_NO_READ_GLOBALS) {
2256 } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
2257 sync_globals(s, allocated_regs);
2259 save_globals(s, allocated_regs);
2262 tcg_out_call(s, func_addr);
2264 /* assign output registers and emit moves if needed */
2265 for(i = 0; i < nb_oargs; i++) {
2267 ts = &s->temps[arg];
2268 reg = tcg_target_call_oarg_regs[i];
2269 assert(s->reg_to_temp[reg] == -1);
2271 if (ts->fixed_reg) {
2272 if (ts->reg != reg) {
2273 tcg_out_mov(s, ts->type, ts->reg, reg);
2276 if (ts->val_type == TEMP_VAL_REG) {
2277 s->reg_to_temp[ts->reg] = -1;
2279 ts->val_type = TEMP_VAL_REG;
2281 ts->mem_coherent = 0;
2282 s->reg_to_temp[reg] = arg;
2283 if (NEED_SYNC_ARG(i)) {
2284 tcg_reg_sync(s, reg);
2286 if (IS_DEAD_ARG(i)) {
2287 temp_dead(s, args[i]);
2293 #ifdef CONFIG_PROFILER
2295 static int64_t tcg_table_op_count[NB_OPS];
2297 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2301 for (i = 0; i < NB_OPS; i++) {
2302 cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
2303 tcg_table_op_count[i]);
2307 void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2309 cpu_fprintf(f, "[TCG profiler not compiled]\n");
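/* tcg_gen_code: turn the accumulated op stream into host code. The
   pipeline is: optional optimization pass, liveness analysis, then one
   forward walk over the ops doing register allocation and emitting host
   instructions into gen_code_buf. */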
2314 int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
2316 int i, oi, oi_next, num_insns;
2318 #ifdef CONFIG_PROFILER
2322 n = s->gen_last_op_idx + 1;
2324 if (n > s->op_count_max) {
2325 s->op_count_max = n;
2330 if (n > s->temp_count_max) {
2331 s->temp_count_max = n;
2337 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
2344 #ifdef CONFIG_PROFILER
2345 s->opt_time -= profile_getclock();
2348 #ifdef USE_TCG_OPTIMIZATIONS
2352 #ifdef CONFIG_PROFILER
2353 s->opt_time += profile_getclock();
2354 s->la_time -= profile_getclock();
2357 tcg_liveness_analysis(s);
2359 #ifdef CONFIG_PROFILER
2360 s->la_time += profile_getclock();
2364 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
2365 qemu_log("OP after optimization and liveness analysis:\n");
2371 tcg_reg_alloc_start(s);
2373 s->code_buf = gen_code_buf;
2374 s->code_ptr = gen_code_buf;
2379 for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
2380 TCGOp * const op = &s->gen_op_buf[oi];
2381 TCGArg * const args = &s->gen_opparam_buf[op->args];
2382 TCGOpcode opc = op->opc;
2383 const TCGOpDef *def = &tcg_op_defs[opc];
2384 uint16_t dead_args = s->op_dead_args[oi];
2385 uint8_t sync_args = s->op_sync_args[oi];
2388 #ifdef CONFIG_PROFILER
2389 tcg_table_op_count[opc]++;
2393 case INDEX_op_mov_i32:
2394 case INDEX_op_mov_i64:
2395 tcg_reg_alloc_mov(s, def, args, dead_args, sync_args);
2397 case INDEX_op_movi_i32:
2398 case INDEX_op_movi_i64:
2399 tcg_reg_alloc_movi(s, args, dead_args, sync_args);
2401 case INDEX_op_insn_start:
2402 if (num_insns >= 0) {
2403 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2406 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
2408 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
2409 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
2413 s->gen_insn_data[num_insns][i] = a;
2416 case INDEX_op_discard:
2417 temp_dead(s, args[0]);
2419 case INDEX_op_set_label:
2420 tcg_reg_alloc_bb_end(s, s->reserved_regs);
2421 tcg_out_label(s, arg_label(args[0]), s->code_ptr);
2424 tcg_reg_alloc_call(s, op->callo, op->calli, args,
2425 dead_args, sync_args);
2428 /* Sanity check that we've not introduced any unhandled opcodes. */
2429 if (def->flags & TCG_OPF_NOT_PRESENT) {
2432 /* Note: in order to speed up the code, it would be much
2433 faster to have specialized register allocator functions for
2434 some common argument patterns */
2435 tcg_reg_alloc_op(s, def, opc, args, dead_args, sync_args);
2442 tcg_debug_assert(num_insns >= 0);
2443 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2445 /* Generate TB finalization at the end of block */
2446 tcg_out_tb_finalize(s);
2448 /* flush instruction cache */
2449 flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
2451 return tcg_current_code_size(s);
2454 #ifdef CONFIG_PROFILER
2455 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2457 TCGContext *s = &tcg_ctx;
2458 int64_t tb_count = s->tb_count;
2459 int64_t tb_div_count = tb_count ? tb_count : 1;
2460 int64_t tot = s->interm_time + s->code_time;
2462 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2464 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2465 tb_count, s->tb_count1 - tb_count,
2466 (double)(s->tb_count1 - s->tb_count)
2467 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
2468 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2469 (double)s->op_count / tb_div_count, s->op_count_max);
2470 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2471 (double)s->del_op_count / tb_div_count);
2472 cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
2473 (double)s->temp_count / tb_div_count, s->temp_count_max);
2474 cpu_fprintf(f, "avg host code/TB %0.1f\n",
2475 (double)s->code_out_len / tb_div_count);
2476 cpu_fprintf(f, "avg search data/TB %0.1f\n",
2477 (double)s->search_out_len / tb_div_count);
2479 cpu_fprintf(f, "cycles/op %0.1f\n",
2480 s->op_count ? (double)tot / s->op_count : 0);
2481 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2482 s->code_in_len ? (double)tot / s->code_in_len : 0);
2483 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2484 s->code_out_len ? (double)tot / s->code_out_len : 0);
2485 cpu_fprintf(f, "cycles/search byte %0.1f\n",
2486 s->search_out_len ? (double)tot / s->search_out_len : 0);
2490 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2491 (double)s->interm_time / tot * 100.0);
2492 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2493 (double)s->code_time / tot * 100.0);
2494 cpu_fprintf(f, "optim./code time %0.1f%%\n",
2495 (double)s->opt_time / (s->code_time ? s->code_time : 1)
2497 cpu_fprintf(f, "liveness/code time %0.1f%%\n",
2498 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2499 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2501 cpu_fprintf(f, " avg cycles %0.1f\n",
2502 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2505 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2507 cpu_fprintf(f, "[TCG profiler not compiled]\n");
2511 #ifdef ELF_HOST_MACHINE
2512 /* In order to use this feature, the backend needs to do three things:
2514 (1) Define ELF_HOST_MACHINE to indicate both what value to
2515 put into the ELF image and to indicate support for the feature.
2517 (2) Define tcg_register_jit. This should create a buffer containing
2518 the contents of a .debug_frame section that describes the post-
2519 prologue unwind info for the tcg machine.
(3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/
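/* For illustration only (not part of this file): a backend's
   tcg_register_jit() typically wraps the common CIE/FDE headers above in a
   target-specific DebugFrame structure and hands it to
   tcg_register_jit_int(), roughly:

       void tcg_register_jit(void *buf, size_t buf_size)
       {
           static const DebugFrame debug_frame = {
               ... CIE alignment factors, return column and CFA rules ...
           };
           tcg_register_jit_int(buf, buf_size,
                                &debug_frame, sizeof(debug_frame));
       }

   DebugFrame here is a hypothetical per-backend layout built on
   DebugFrameCIE/DebugFrameFDEHeader; each tcg-target.c defines its own. */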
2524 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
2531 struct jit_code_entry {
2532 struct jit_code_entry *next_entry;
2533 struct jit_code_entry *prev_entry;
2534 const void *symfile_addr;
2535 uint64_t symfile_size;
2538 struct jit_descriptor {
2540 uint32_t action_flag;
2541 struct jit_code_entry *relevant_entry;
2542 struct jit_code_entry *first_entry;
2545 void __jit_debug_register_code(void) __attribute__((noinline));
2546 void __jit_debug_register_code(void)
2551 /* Must statically initialize the version, because GDB may check
2552 the version before we can set it. */
2553 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
2555 /* End GDB interface. */
2557 static int find_string(const char *strtab, const char *str)
2559 const char *p = strtab + 1;
2562 if (strcmp(p, str) == 0) {
2569 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2570 const void *debug_frame,
2571 size_t debug_frame_size)
2573 struct __attribute__((packed)) DebugInfo {
2580 uintptr_t cu_low_pc;
2581 uintptr_t cu_high_pc;
2584 uintptr_t fn_low_pc;
2585 uintptr_t fn_high_pc;
2594 struct DebugInfo di;
2599 struct ElfImage *img;
2601 static const struct ElfImage img_template = {
2603 .e_ident[EI_MAG0] = ELFMAG0,
2604 .e_ident[EI_MAG1] = ELFMAG1,
2605 .e_ident[EI_MAG2] = ELFMAG2,
2606 .e_ident[EI_MAG3] = ELFMAG3,
2607 .e_ident[EI_CLASS] = ELF_CLASS,
2608 .e_ident[EI_DATA] = ELF_DATA,
2609 .e_ident[EI_VERSION] = EV_CURRENT,
2611 .e_machine = ELF_HOST_MACHINE,
2612 .e_version = EV_CURRENT,
2613 .e_phoff = offsetof(struct ElfImage, phdr),
2614 .e_shoff = offsetof(struct ElfImage, shdr),
2615 .e_ehsize = sizeof(ElfW(Shdr)),
2616 .e_phentsize = sizeof(ElfW(Phdr)),
2618 .e_shentsize = sizeof(ElfW(Shdr)),
2619 .e_shnum = ARRAY_SIZE(img->shdr),
2620 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
2621 #ifdef ELF_HOST_FLAGS
2622 .e_flags = ELF_HOST_FLAGS,
2625 .e_ident[EI_OSABI] = ELF_OSABI,
2633 [0] = { .sh_type = SHT_NULL },
2634 /* Trick: The contents of code_gen_buffer are not present in
2635 this fake ELF file; that got allocated elsewhere. Therefore
2636 we mark .text as SHT_NOBITS (similar to .bss) so that readers
2637 will not look for contents. We can record any address. */
2639 .sh_type = SHT_NOBITS,
2640 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
2642 [2] = { /* .debug_info */
2643 .sh_type = SHT_PROGBITS,
2644 .sh_offset = offsetof(struct ElfImage, di),
2645 .sh_size = sizeof(struct DebugInfo),
2647 [3] = { /* .debug_abbrev */
2648 .sh_type = SHT_PROGBITS,
2649 .sh_offset = offsetof(struct ElfImage, da),
2650 .sh_size = sizeof(img->da),
2652 [4] = { /* .debug_frame */
2653 .sh_type = SHT_PROGBITS,
2654 .sh_offset = sizeof(struct ElfImage),
2656 [5] = { /* .symtab */
2657 .sh_type = SHT_SYMTAB,
2658 .sh_offset = offsetof(struct ElfImage, sym),
2659 .sh_size = sizeof(img->sym),
2661 .sh_link = ARRAY_SIZE(img->shdr) - 1,
2662 .sh_entsize = sizeof(ElfW(Sym)),
2664 [6] = { /* .strtab */
2665 .sh_type = SHT_STRTAB,
2666 .sh_offset = offsetof(struct ElfImage, str),
2667 .sh_size = sizeof(img->str),
2671 [1] = { /* code_gen_buffer */
2672 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
2677 .len = sizeof(struct DebugInfo) - 4,
2679 .ptr_size = sizeof(void *),
2681 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
2683 .fn_name = "code_gen_buffer"
2686 1, /* abbrev number (the cu) */
2687 0x11, 1, /* DW_TAG_compile_unit, has children */
2688 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
2689 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2690 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2691 0, 0, /* end of abbrev */
2692 2, /* abbrev number (the fn) */
2693 0x2e, 0, /* DW_TAG_subprogram, no children */
2694 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
2695 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2696 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2697 0, 0, /* end of abbrev */
2698 0 /* no more abbrev */
2700 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2701 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2704 /* We only need a single jit entry; statically allocate it. */
2705 static struct jit_code_entry one_entry;
2707 uintptr_t buf = (uintptr_t)buf_ptr;
2708 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2709 DebugFrameHeader *dfh;
2711 img = g_malloc(img_size);
2712 *img = img_template;
2714 img->phdr.p_vaddr = buf;
2715 img->phdr.p_paddr = buf;
2716 img->phdr.p_memsz = buf_size;
2718 img->shdr[1].sh_name = find_string(img->str, ".text");
2719 img->shdr[1].sh_addr = buf;
2720 img->shdr[1].sh_size = buf_size;
2722 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
2723 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
2725 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
2726 img->shdr[4].sh_size = debug_frame_size;
2728 img->shdr[5].sh_name = find_string(img->str, ".symtab");
2729 img->shdr[6].sh_name = find_string(img->str, ".strtab");
2731 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
2732 img->sym[1].st_value = buf;
2733 img->sym[1].st_size = buf_size;
2735 img->di.cu_low_pc = buf;
2736 img->di.cu_high_pc = buf + buf_size;
2737 img->di.fn_low_pc = buf;
2738 img->di.fn_high_pc = buf + buf_size;
2740 dfh = (DebugFrameHeader *)(img + 1);
2741 memcpy(dfh, debug_frame, debug_frame_size);
2742 dfh->fde.func_start = buf;
2743 dfh->fde.func_len = buf_size;
2746 /* Enable this block to be able to debug the ELF image file creation.
2747 One can use readelf, objdump, or other inspection utilities. */
2749 FILE *f = fopen("/tmp/qemu.jit", "w+b");
if (fwrite(img, img_size, 1, f) != 1) {
2752 /* Avoid stupid unused return value warning for fwrite. */
2759 one_entry.symfile_addr = img;
2760 one_entry.symfile_size = img_size;
2762 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
2763 __jit_debug_descriptor.relevant_entry = &one_entry;
2764 __jit_debug_descriptor.first_entry = &one_entry;
2765 __jit_debug_register_code();
2768 /* No support for the feature. Provide the entry point expected by exec.c,
2769 and implement the internal function we declared earlier. */
2771 static void tcg_register_jit_int(void *buf, size_t size,
2772 const void *debug_frame,
2773 size_t debug_frame_size)
2777 void tcg_register_jit(void *buf, size_t buf_size)
2780 #endif /* ELF_HOST_MACHINE */