/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* define it to use liveness analysis (better code) */
#define USE_LIVENESS_ANALYSIS
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions. */
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "exec/cpu-common.h"
#include "exec/exec-all.h"

#include "tcg-op.h"
#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);
/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
static void tcg_out_tb_init(TCGContext *s);
static bool tcg_out_tb_finalize(TCGContext *s);
static TCGRegSet tcg_target_available_regs[2];
static TCGRegSet tcg_target_call_clobber_regs;
#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
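/* For illustration: with a 4-byte insn unit (e.g. a fixed-width RISC host),
   a backend emits and patches code roughly like so:

       tcg_out32(s, insn);             -- append one instruction unit
       tcg_patch32(label_ptr, insn);   -- rewrite a unit emitted earlier

   A store wider than the unit size (say tcg_out64 with 4-byte units) takes
   the memcpy path above and is split across consecutive units instead. */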
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place. The caller may not have
           written the initial value. */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}
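/* Two situations are handled above: if the label has already been emitted
   (l->has_value), the reference can be patched immediately; otherwise the
   (code_ptr, type, addend) triple is queued on the label and resolved later
   by tcg_out_label() once the label's address is known. */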
static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}
TCGLabel *gen_new_label(void)
{
    TCGContext *s = &tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    *l = (TCGLabel){
        .id = s->nb_labels++
    };

    return l;
}

#include "tcg-target.inc.c"
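/* Note that tcg-target.inc.c is included here rather than compiled
   separately: each host backend (i386, arm, ppc, ...) supplies this file,
   and it provides the bodies of the forward-declared tcg_out_* and
   tcg_target_* functions above, specialized at compile time for the host. */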
/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;

                /* insert in list */
                if (s->pool_current) {
                    s->pool_current->next = p;
                } else {
                    s->pool_first = p;
                }
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
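/* A rough picture of the pool allocator above: allocations larger than
   TCG_POOL_CHUNK_SIZE get a dedicated pool on the pool_first_large list,
   which is freed wholesale by tcg_pool_reset(); small allocations are
   bump-pointer carved out of reusable chunks (pool_cur/pool_end), so
   per-TB data never needs individual free calls. */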
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    GHashTable *helper_table;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers. */
    /* Use g_direct_hash/equal for direct pointer comparisons on func. */
    s->helpers = helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order. */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }
}
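/* Roughly, indirect_reg_alloc_order is consulted instead of the normal
   allocation order when a value's base pointer itself lives in memory (an
   "indirect base"): preferring the call-saved registers in reverse order
   makes it less likely that loading the base pointer evicts the registers
   the regular order would hand out first. */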
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer. */
    buf0 = s->code_gen_buffer;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->code_gen_prologue = buf0;

    /* Generate the prologue. */
    tcg_target_qemu_prologue(s);
    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer. */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size = s->code_gen_buffer_size - prologue_size;
    s->code_gen_buffer_size = total_size;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over. The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require. */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        log_disas(buf0, prologue_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
}
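/* The prologue generated here is the entry/exit trampoline shared by all
   translated blocks: tcg_target_qemu_prologue() emits it at the start of
   code_gen_buffer, and everything after it is handed over to translated
   code.  The high-water mark set above lets tcg_gen_code() detect an
   imminent buffer overflow with a single comparison per op. */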
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality. */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    s->gen_op_buf[0].next = 1;
    s->gen_op_buf[0].prev = 0;
    s->gen_next_op_idx = 1;
    s->gen_next_parm_idx = 0;

    s->be = tcg_malloc(sizeof(TCGBackendData));
}
static inline int temp_idx(TCGContext *s, TCGTemp *ts)
{
    ptrdiff_t n = ts - s->temps;
    tcg_debug_assert(n >= 0 && n < s->nb_temps);
    return n;
}

static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    return tcg_temp_alloc(s);
}
static int tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                       TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return temp_idx(s, ts);
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    int idx;
    s->frame_start = start;
    s->frame_end = start + size;
    idx = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
    s->frame_temp = &s->temps[idx];
}
TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name)
{
    TCGContext *s = &tcg_ctx;
    int idx;

    if (tcg_regset_test_reg(s->reserved_regs, reg)) {
        tcg_abort();
    }
    idx = tcg_global_reg_new_internal(s, TCG_TYPE_I32, reg, name);
    return MAKE_TCGV_I32(idx);
}

TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name)
{
    TCGContext *s = &tcg_ctx;
    int idx;

    if (tcg_regset_test_reg(s->reserved_regs, reg)) {
        tcg_abort();
    }
    idx = tcg_global_reg_new_internal(s, TCG_TYPE_I64, reg, name);
    return MAKE_TCGV_I64(idx);
}
int tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                intptr_t offset, const char *name)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *base_ts = &s->temps[GET_TCGV_PTR(base)];
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        indirect_reg = 1;
        base_ts->indirect_base = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return temp_idx(s, ts);
}
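/* On a 32-bit host a 64-bit global is thus represented as two consecutive
   I32 temps named "<name>_0" and "<name>_1", with <name>_0 always holding
   the low half: the bigendian correction above shifts the mem_offset of
   each half so that it matches the host's layout of the 64-bit field. */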
static int tcg_temp_new_internal(TCGType type, int temp_local)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type. */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
        idx = temp_idx(s, ts);
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return idx;
}
TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
{
    int idx;

    idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
    return MAKE_TCGV_I32(idx);
}

TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
{
    int idx;

    idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
    return MAKE_TCGV_I64(idx);
}
static void tcg_temp_free_internal(int idx)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int k;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(idx >= s->nb_globals && idx < s->nb_temps);
    ts = &s->temps[idx];
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}

void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(GET_TCGV_I32(arg));
}

void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(GET_TCGV_I64(arg));
}
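/* Freeing does not shrink s->temps: the index is simply recorded in the
   free_temps bitmap, keyed by base type plus locality, so that the next
   tcg_temp_new_internal() call with a matching (type, temp_local) pair can
   recycle the slot via find_first_bit() above. */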
TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif

/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
                   int nargs, TCGArg *args)
{
    int i, real_args, nb_rets, pi, pi_first;
    unsigned sizemask, flags;
    TCGHelperInfo *info;

    info = g_hash_table_lookup(s->helpers, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters. Split them. */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;

    TCGV_UNUSED_I64(retl);
    TCGV_UNUSED_I64(reth);
    if (sizemask != 0) {
        TCGArg *split_args = __builtin_alloca(sizeof(TCGArg) * nargs * 2);
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = GET_TCGV_I32(h);
                split_args[real_args++] = GET_TCGV_I32(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = GET_TCGV_I64(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
    pi_first = pi = s->gen_next_parm_idx;
    if (ret != TCG_CALL_DUMMY_ARG) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair. Prepare for this by using
               two return temporaries, and reassemble below. */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            s->gen_opparam_buf[pi++] = GET_TCGV_I64(reth);
            s->gen_opparam_buf[pi++] = GET_TCGV_I64(retl);
            nb_rets = 2;
        } else {
            s->gen_opparam_buf[pi++] = ret;
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            s->gen_opparam_buf[pi++] = ret + 1;
            s->gen_opparam_buf[pi++] = ret;
#else
            s->gen_opparam_buf[pi++] = ret;
            s->gen_opparam_buf[pi++] = ret + 1;
#endif
            nb_rets = 2;
        } else {
            s->gen_opparam_buf[pi++] = ret;
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                s->gen_opparam_buf[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian. For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order. If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments. */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            s->gen_opparam_buf[pi++] = args[i] + 1;
            s->gen_opparam_buf[pi++] = args[i];
#else
            s->gen_opparam_buf[pi++] = args[i];
            s->gen_opparam_buf[pi++] = args[i] + 1;
#endif
            real_args += 2;
            continue;
        }

        s->gen_opparam_buf[pi++] = args[i];
        real_args++;
    }
    s->gen_opparam_buf[pi++] = (uintptr_t)func;
    s->gen_opparam_buf[pi++] = flags;
    i = s->gen_next_op_idx;
    tcg_debug_assert(i < OPC_BUF_SIZE);
    tcg_debug_assert(pi <= OPPARAM_BUF_SIZE);

    /* Set links for sequential allocation during translation. */
    s->gen_op_buf[i] = (TCGOp){
        .opc = INDEX_op_call,
        .callo = nb_rets,
        .calli = real_args,
        .args = pi_first,
        .prev = i - 1,
        .next = i + 1
    };

    /* Make sure the calli field didn't overflow. */
    tcg_debug_assert(s->gen_op_buf[i].calli == real_args);

    s->gen_op_buf[0].prev = i;
    s->gen_next_op_idx = i + 1;
    s->gen_next_parm_idx = pi;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above. */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
            TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
            tcg_temp_free_i32(h);
            tcg_temp_free_i32(l);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces. Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create. */
        tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
            tcg_temp_free_i64(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
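/* For reference, sizemask as consumed above packs two bits per value:
   bit 0 says the return value is 64-bit and bit 1 that it is signed, and
   for argument i the same two flags sit at bits (i+1)*2 and (i+1)*2 + 1.
   The helper declaration macros generate these masks, so a test such as
   "sizemask & (1 << (i+1)*2)" simply asks "is argument i 64-bit?". */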
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i;
    TCGTemp *ts;

    for(i = 0; i < s->nb_globals; i++) {
        ts = &s->temps[i];
        if (ts->fixed_reg) {
            ts->val_type = TEMP_VAL_REG;
        } else {
            ts->val_type = TEMP_VAL_MEM;
        }
    }
    for(i = s->nb_globals; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        if (ts->temp_local) {
            ts->val_type = TEMP_VAL_MEM;
        } else {
            ts->val_type = TEMP_VAL_DEAD;
        }
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}
static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(s, ts);

    if (idx < s->nb_globals) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str_idx(TCGContext *s, char *buf,
                                 int buf_size, int idx)
{
    tcg_debug_assert(idx >= 0 && idx < s->nb_temps);
    return tcg_get_arg_str_ptr(s, buf, buf_size, &s->temps[idx]);
}
/* Find helper name. */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (s->helpers) {
        TCGHelperInfo *info = g_hash_table_lookup(s->helpers, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};
static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};
void tcg_dump_ops(TCGContext *s)
{
    char buf[128];
    TCGOp *op;
    int oi;

    for (oi = s->gen_op_buf[0].next; oi != 0; oi = op->next) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        const TCGArg *args;
        TCGOpcode c;

        op = &s->gen_op_buf[oi];
        c = op->opc;
        def = &tcg_op_defs[c];
        args = &s->gen_opparam_buf[op->args];

        if (c == INDEX_op_insn_start) {
            qemu_log("%s ----", oi != s->gen_op_buf[0].next ? "\n" : "");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
#else
                a = args[i];
#endif
                qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = op->callo;
            nb_iargs = op->calli;
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                     tcg_find_helper(s, args[nb_oargs + nb_iargs]),
                     args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                    args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
                }
                qemu_log(",%s", t);
            }
        } else {
            qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    qemu_log(",");
                }
                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                   args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    qemu_log(",");
                }
                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                   args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
                if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
                    qemu_log(",%s", cond_name[args[k++]]);
                } else {
                    qemu_log(",$0x%" TCG_PRIlx, args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                qemu_log("%s$L%d", k ? "," : "", arg_label(args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", args[k]);
            }
        }
        qemu_log("\n");
    }
}
/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;
    int i, n;

    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG))
            return 0;
        n = 0;
        for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i))
                n++;
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}

/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for(i = 0; i < n; i++)
        def->sorted_args[start + i] = start + i;
    if (n <= 1)
        return;
    for(i = 0; i < n - 1; i++) {
        for(j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}
void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
{
    TCGOpcode op;
    TCGOpDef *def;
    const char *ct_str;
    int i, nb_args;

    for(;;) {
        if (tdefs->op == (TCGOpcode)-1)
            break;
        op = tdefs->op;
        tcg_debug_assert((unsigned)op < NB_OPS);
        def = &tcg_op_defs[op];
#if defined(CONFIG_DEBUG_TCG)
        /* Duplicate entry in op definitions? */
        tcg_debug_assert(!def->used);
        def->used = 1;
#endif
        nb_args = def->nb_iargs + def->nb_oargs;
        for(i = 0; i < nb_args; i++) {
            ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry? */
            tcg_debug_assert(ct_str != NULL);
            tcg_regset_clear(def->args_ct[i].u.regs);
            def->args_ct[i].ct = 0;
            if (ct_str[0] >= '0' && ct_str[0] <= '9') {
                int oarg;
                oarg = ct_str[0] - '0';
                tcg_debug_assert(oarg < def->nb_oargs);
                tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                /* TCG_CT_ALIAS is for the output arguments. The input
                   argument is tagged with TCG_CT_IALIAS. */
                def->args_ct[i] = def->args_ct[oarg];
                def->args_ct[oarg].ct = TCG_CT_ALIAS;
                def->args_ct[oarg].alias_index = i;
                def->args_ct[i].ct |= TCG_CT_IALIAS;
                def->args_ct[i].alias_index = oarg;
            } else {
                for(;;) {
                    if (*ct_str == '\0')
                        break;
                    switch(*ct_str) {
                    case 'i':
                        def->args_ct[i].ct |= TCG_CT_CONST;
                        ct_str++;
                        break;
                    default:
                        if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
                            fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
                                    ct_str, i, def->name);
                            exit(1);
                        }
                    }
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);

#if 0
        {
            int i;

            printf("%s: sorted=", def->name);
            for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
                printf(" %d", def->sorted_args[i]);
            printf("\n");
        }
#endif
        tdefs++;
    }

#if defined(CONFIG_DEBUG_TCG)
    i = 0;
    for (op = 0; op < tcg_op_defs_max; op++) {
        const TCGOpDef *def = &tcg_op_defs[op];
        if (def->flags & TCG_OPF_NOT_PRESENT) {
            /* Wrong entry in op definitions? */
            if (def->used) {
                fprintf(stderr, "Invalid op definition for %s\n", def->name);
                i = 1;
            }
        } else {
            /* Missing entry in op definitions? */
            if (!def->used) {
                fprintf(stderr, "Missing op definition for %s\n", def->name);
                i = 1;
            }
        }
    }
    if (i == 1) {
        tcg_abort();
    }
#endif
}
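/* Each backend feeds this function a table terminated by an op of -1; an
   illustrative entry (constraint letters as used by typical backends, not
   a verbatim copy of any one target) might look like:

       static const TCGTargetOpDef my_op_defs[] = {
           { INDEX_op_add_i32, { "r", "0", "ri" } },
           { -1 },
       };

   where "r" means any register in the op's constraint set, "ri" means
   register-or-immediate, and a digit aliases an input to the output with
   that index, as handled in the parsing loop above. */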
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    int next = op->next;
    int prev = op->prev;

    /* We should never attempt to remove the list terminator. */
    tcg_debug_assert(op != &s->gen_op_buf[0]);

    s->gen_op_buf[next].prev = prev;
    s->gen_op_buf[prev].next = next;

    memset(op, 0, sizeof(*op));

#ifdef CONFIG_PROFILER
    s->del_op_count++;
#endif
}

#ifdef USE_LIVENESS_ANALYSIS

#define TS_DEAD  1
#define TS_MEM   2
/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static inline void tcg_la_func_end(TCGContext *s, uint8_t *temp_state)
{
    memset(temp_state, TS_DEAD | TS_MEM, s->nb_globals);
    memset(temp_state + s->nb_globals, TS_DEAD, s->nb_temps - s->nb_globals);
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static inline void tcg_la_bb_end(TCGContext *s, uint8_t *temp_state)
{
    int i, n;

    tcg_la_func_end(s, temp_state);
    for (i = s->nb_globals, n = s->nb_temps; i < n; i++) {
        if (s->temps[i].temp_local) {
            temp_state[i] |= TS_MEM;
        }
    }
}
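/* Each byte of temp_state holds a small per-temp state: TS_DEAD means the
   value need not be preserved, TS_MEM that the in-memory copy must be valid
   at this point.  The analysis walks the op list backwards, so these two
   helpers seed the state at the points where control leaves the function
   or the basic block. */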
/* Liveness analysis : update the opc_arg_life array to tell if a
   given input argument is dead. Instructions updating dead
   temporaries are removed. */
static void tcg_liveness_analysis(TCGContext *s)
{
    uint8_t *temp_state;
    int oi, oi_prev;
    int nb_globals = s->nb_globals;

    temp_state = tcg_malloc(s->nb_temps);
    tcg_la_func_end(s, temp_state);

    for (oi = s->gen_op_buf[0].prev; oi != 0; oi = oi_prev) {
        int i, nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGArg arg;

        TCGOp * const op = &s->gen_op_buf[oi];
        TCGArg * const args = &s->gen_opparam_buf[op->args];
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        oi_prev = op->prev;

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;

                nb_oargs = op->callo;
                nb_iargs = op->calli;
                call_flags = args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        arg = args[i];
                        if (temp_state[arg] != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                } else {
                do_not_remove_call:

                    /* output args are dead */
                    for (i = 0; i < nb_oargs; i++) {
                        arg = args[i];
                        if (temp_state[arg] & TS_DEAD) {
                            arg_life |= DEAD_ARG << i;
                        }
                        if (temp_state[arg] & TS_MEM) {
                            arg_life |= SYNC_ARG << i;
                        }
                        temp_state[arg] = TS_DEAD;
                    }

                    if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                        TCG_CALL_NO_READ_GLOBALS))) {
                        /* globals should go back to memory */
                        memset(temp_state, TS_DEAD | TS_MEM, nb_globals);
                    } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                        /* globals should be synced to memory */
                        for (i = 0; i < nb_globals; i++) {
                            temp_state[i] |= TS_MEM;
                        }
                    }

                    /* record arguments that die in this helper */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg = args[i];
                        if (arg != TCG_CALL_DUMMY_ARG) {
                            if (temp_state[arg] & TS_DEAD) {
                                arg_life |= DEAD_ARG << i;
                            }
                        }
                    }
                    /* input arguments are live for preceding opcodes */
                    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg = args[i];
                        if (arg != TCG_CALL_DUMMY_ARG) {
                            temp_state[arg] &= ~TS_DEAD;
                        }
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            temp_state[args[0]] = TS_DEAD;
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part. The result can be optimized to a simple
               add or sub. This happens often for x86_64 guest when the
               cpu mode is set to 32 bit. */
            if (temp_state[args[1]] == TS_DEAD) {
                if (temp_state[args[0]] == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end. */
                op->opc = opc = opc_new;
                args[1] = args[2];
                args[2] = args[4];
                /* Fall through and mark the single-word operation live. */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (temp_state[args[1]] == TS_DEAD) {
                if (temp_state[args[0]] == TS_DEAD) {
                    /* Both parts of the operation are dead. */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                args[1] = args[2];
                args[2] = args[3];
            } else if (temp_state[args[0]] == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                args[0] = args[1];
                args[1] = args[2];
                args[2] = args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live. */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (temp_state[args[i]] != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
            do_remove:
                tcg_op_remove(s, op);
            } else {
            do_not_remove:
                /* output args are dead */
                for (i = 0; i < nb_oargs; i++) {
                    arg = args[i];
                    if (temp_state[arg] & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (temp_state[arg] & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    temp_state[arg] = TS_DEAD;
                }

                /* if end of basic block, update */
                if (def->flags & TCG_OPF_BB_END) {
                    tcg_la_bb_end(s, temp_state);
                } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                    /* globals should be synced to memory */
                    for (i = 0; i < nb_globals; i++) {
                        temp_state[i] |= TS_MEM;
                    }
                }

                /* record arguments that die in this opcode */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg = args[i];
                    if (temp_state[arg] & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }
                /* input arguments are live for preceding opcodes */
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    temp_state[args[i]] &= ~TS_DEAD;
                }
            }
            break;
        }
        op->life = arg_life;
    }
}
#else
/* dummy liveness analysis */
static void tcg_liveness_analysis(TCGContext *s)
{
    int nb_ops = s->gen_next_op_idx;

    s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
    memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));
    s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));
    memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t));
}
#endif
#ifdef CONFIG_DEBUG_TCG
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for(i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
        switch(ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}

static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif
static void temp_allocate_frame(TCGContext *s, int temp)
{
    TCGTemp *ts;
    ts = &s->temps[temp];
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);

/* Mark a temporary as free or dead. If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead. */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (ts->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ts->reg] = NULL;
    }
    ts->val_type = (free_or_dead < 0
                    || ts->temp_local
                    || temp_idx(s, ts) < s->nb_globals
                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
}

/* Mark a temporary as dead. */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}
/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant. If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free. */
static void temp_sync(TCGContext *s, TCGTemp *ts,
                      TCGRegSet allocated_regs, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, temp_idx(s, ts));
        }
        if (ts->indirect_reg) {
            if (ts->val_type == TEMP_VAL_REG) {
                tcg_regset_set_reg(allocated_regs, ts->reg);
            }
            temp_load(s, ts->mem_base,
                      tcg_target_available_regs[TCG_TYPE_PTR],
                      allocated_regs);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly. */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}
/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, -1);
    }
}

/* Allocate a register belonging to reg1 & ~reg2 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
                            TCGRegSet allocated_regs, bool rev)
{
    int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    const int *order;
    TCGReg reg;
    TCGRegSet reg_ct;

    tcg_regset_andnot(reg_ct, desired_regs, allocated_regs);
    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* first try free registers */
    for(i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
            return reg;
    }

    /* XXX: do better spill choice */
    for(i = 0; i < n; i++) {
        reg = order[i];
        if (tcg_regset_test_reg(reg_ct, reg)) {
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        }
    }

    tcg_abort();
}
/* Make sure the temporary is in a register. If needed, allocate the register
   from DESIRED while avoiding ALLOCATED. */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        tcg_out_movi(s, ts->type, reg, ts->val);
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
        if (ts->indirect_reg) {
            tcg_regset_set_reg(allocated_regs, reg);
            temp_load(s, ts->mem_base,
                      tcg_target_available_regs[TCG_TYPE_PTR],
                      allocated_regs);
        }
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}
/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
#ifdef USE_LIVENESS_ANALYSIS
    /* ??? Liveness does not yet incorporate indirect bases. */
    if (!ts->indirect_base) {
        /* The liveness analysis already ensures that globals are back
           in memory. Keep a tcg_debug_assert for safety. */
        tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
        return;
    }
#endif
    temp_sync(s, ts, allocated_regs, 1);
}
/* save globals to their canonical location and assume they can be
   modified by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = 0; i < s->nb_globals; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}

/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = 0; i < s->nb_globals; i++) {
        TCGTemp *ts = &s->temps[i];
#ifdef USE_LIVENESS_ANALYSIS
        /* ??? Liveness does not yet incorporate indirect bases. */
        if (!ts->indirect_base) {
            tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                             || ts->fixed_reg
                             || ts->mem_coherent);
            continue;
        }
#endif
        temp_sync(s, ts, allocated_regs, 0);
    }
}
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->temp_local) {
            temp_save(s, ts, allocated_regs);
        } else {
#ifdef USE_LIVENESS_ANALYSIS
            /* ??? Liveness does not yet incorporate indirect bases. */
            if (!ts->indirect_base) {
                /* The liveness analysis already ensures that temps are dead.
                   Keep a tcg_debug_assert for safety. */
                tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
                continue;
            }
#endif
            temp_dead(s, ts);
        }
    }

    save_globals(s, allocated_regs);
}
#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
                               TCGLifeData arg_life)
{
    TCGTemp *ots;
    tcg_target_ulong val;

    ots = &s->temps[args[0]];
    val = args[1];

    if (ots->fixed_reg) {
        /* For fixed registers, we do not do any constant propagation. */
        tcg_out_movi(s, ots->type, ots->reg, val);
        return;
    }

    /* The movi is not explicitly generated here. */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
                              const TCGArg *args, TCGLifeData arg_life)
{
    TCGRegSet allocated_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    tcg_regset_set(allocated_regs, s->reserved_regs);
    ots = &s->temps[args[0]];
    ts = &s->temps[args[1]];

    /* Note that otype != itype for no-op truncation. */
    otype = ots->type;
    itype = ts->type;

    /* If the source value is not in a register, and we're going to be
       forced to have it in a register in order to perform the copy,
       then copy the SOURCE value into its own register first. That way
       we don't have to reload SOURCE the next time it is used. */
    if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
        || ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
    }

    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        /* The code above should have moved the temp to a register. */
        tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, args[0]);
        }
        if (ots->indirect_reg) {
            tcg_regset_set_reg(allocated_regs, ts->reg);
            temp_load(s, ots->mem_base,
                      tcg_target_available_regs[TCG_TYPE_PTR],
                      allocated_regs);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant */
        if (ots->val_type == TEMP_VAL_REG) {
            s->reg_to_temp[ots->reg] = NULL;
        }
        ots->val_type = TEMP_VAL_CONST;
        ots->val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
    } else {
        /* The code in the first if block should have moved the
           temp to a register. */
        tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, ots->indirect_base);
            }
            tcg_out_mov(s, otype, ots->reg, ts->reg);
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0);
        }
    }
}
static void tcg_reg_alloc_op(TCGContext *s,
                             const TCGOpDef *def, TCGOpcode opc,
                             const TCGArg *args, TCGLifeData arg_life)
{
    TCGRegSet allocated_regs;
    int i, k, nb_iargs, nb_oargs;
    TCGReg reg;
    TCGArg arg;
    const TCGArgConstraint *arg_ct;
    TCGTemp *ts;
    TCGArg new_args[TCG_MAX_OP_ARGS];
    int const_args[TCG_MAX_OP_ARGS];

    nb_oargs = def->nb_oargs;
    nb_iargs = def->nb_iargs;

    /* copy constants */
    memcpy(new_args + nb_oargs + nb_iargs,
           args + nb_oargs + nb_iargs,
           sizeof(TCGArg) * def->nb_cargs);

    /* satisfy input constraints */
    tcg_regset_set(allocated_regs, s->reserved_regs);
    for(k = 0; k < nb_iargs; k++) {
        i = def->sorted_args[nb_oargs + k];
        arg = args[i];
        arg_ct = &def->args_ct[i];
        ts = &s->temps[arg];

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            goto iarg_end;
        }

        temp_load(s, ts, arg_ct->u.regs, allocated_regs);

        if (arg_ct->ct & TCG_CT_IALIAS) {
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != args[arg_ct->alias_index])
                    goto allocate_in_reg;
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }
                /* check if the current register has already been allocated
                   for another input aliased to an output */
                int k2, i2;
                for (k2 = 0 ; k2 < k ; k2++) {
                    i2 = def->sorted_args[nb_oargs + k2];
                    if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
                        (new_args[i2] == ts->reg)) {
                        goto allocate_in_reg;
                    }
                }
            }
        }
        reg = ts->reg;
        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
                                ts->indirect_base);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(allocated_regs, reg);
    iarg_end: ;
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, &s->temps[args[i]]);
        }
    }

    if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, allocated_regs);
        }

        /* satisfy the output constraints */
        tcg_regset_set(allocated_regs, s->reserved_regs);
        for(k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = args[i];
            arg_ct = &def->args_ct[i];
            ts = &s->temps[arg];
            if (arg_ct->ct & TCG_CT_ALIAS) {
                reg = new_args[arg_ct->alias_index];
            } else {
                /* if fixed register, we try to use it */
                reg = ts->reg;
                if (ts->fixed_reg &&
                    tcg_regset_test_reg(arg_ct->u.regs, reg)) {
                    goto oarg_end;
                }
                reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
                                    ts->indirect_base);
            }
            tcg_regset_set_reg(allocated_regs, reg);
            /* if a fixed register is used, then a move will be done afterwards */
            if (!ts->fixed_reg) {
                if (ts->val_type == TEMP_VAL_REG) {
                    s->reg_to_temp[ts->reg] = NULL;
                }
                ts->val_type = TEMP_VAL_REG;
                ts->reg = reg;
                /* temp value is modified, so the value kept in memory is
                   potentially not the same */
                ts->mem_coherent = 0;
                s->reg_to_temp[reg] = ts;
            }
        oarg_end:
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    tcg_out_op(s, opc, new_args, const_args);

    /* move the outputs in the correct register if needed */
    for(i = 0; i < nb_oargs; i++) {
        ts = &s->temps[args[i]];
        reg = new_args[i];
        if (ts->fixed_reg && ts->reg != reg) {
            tcg_out_mov(s, ts->type, ts->reg, reg);
        }
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif
static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
                               const TCGArg * const args, TCGLifeData arg_life)
{
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
    flags = args[nb_oargs + nb_iargs + 1];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for(i = nb_regs; i < nb_iargs; i++) {
        arg = args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    tcg_regset_set(allocated_regs, s->reserved_regs);
    for(i = 0; i < nb_regs; i++) {
        arg = args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            reg = tcg_target_call_iarg_regs[i];
            tcg_reg_free(s, reg, allocated_regs);

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_out_mov(s, ts->type, reg, ts->reg);
                }
            } else {
                TCGRegSet arg_set;

                tcg_regset_clear(arg_set);
                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, &s->temps[args[i]]);
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for(i = 0; i < nb_oargs; i++) {
        arg = args[i];
        ts = &s->temps[arg];
        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);

        if (ts->fixed_reg) {
            if (ts->reg != reg) {
                tcg_out_mov(s, ts->type, ts->reg, reg);
            }
        } else {
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            if (NEED_SYNC_ARG(i)) {
                temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
            } else if (IS_DEAD_ARG(i)) {
                temp_dead(s, ts);
            }
        }
    }
}
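/* The convention implemented above mirrors the host C ABI: the first
   ARRAY_SIZE(tcg_target_call_iarg_regs) inputs travel in registers, the
   remainder in the TCG_STATIC_CALL_ARGS_SIZE area on the stack, and results
   come back in tcg_target_call_oarg_regs.  Dummy args are skipped; they
   exist only to keep 64-bit pairs aligned on targets that require it. */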
#ifdef CONFIG_PROFILER

static int64_t tcg_table_op_count[NB_OPS];

void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    int i;

    for (i = 0; i < NB_OPS; i++) {
        cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
                    tcg_table_op_count[i]);
    }
}
#else
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
#endif
2339 int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
2341 int i, oi, oi_next, num_insns;
2343 #ifdef CONFIG_PROFILER
2347 n = s->gen_op_buf[0].prev + 1;
2349 if (n > s->op_count_max) {
2350 s->op_count_max = n;
2355 if (n > s->temp_count_max) {
2356 s->temp_count_max = n;
2362 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
2363 && qemu_log_in_addr_range(tb->pc))) {
2370 #ifdef CONFIG_PROFILER
2371 s->opt_time -= profile_getclock();
2374 #ifdef USE_TCG_OPTIMIZATIONS
2378 #ifdef CONFIG_PROFILER
2379 s->opt_time += profile_getclock();
2380 s->la_time -= profile_getclock();
2383 tcg_liveness_analysis(s);
2385 #ifdef CONFIG_PROFILER
2386 s->la_time += profile_getclock();
2390 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
2391 && qemu_log_in_addr_range(tb->pc))) {
2392 qemu_log("OP after optimization and liveness analysis:\n");
2398 tcg_reg_alloc_start(s);
2400 s->code_buf = tb->tc_ptr;
2401 s->code_ptr = tb->tc_ptr;
2406 for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
2407 TCGOp * const op = &s->gen_op_buf[oi];
2408 TCGArg * const args = &s->gen_opparam_buf[op->args];
2409 TCGOpcode opc = op->opc;
2410 const TCGOpDef *def = &tcg_op_defs[opc];
2411 TCGLifeData arg_life = op->life;
2414 #ifdef CONFIG_PROFILER
2415 tcg_table_op_count[opc]++;
2419 case INDEX_op_mov_i32:
2420 case INDEX_op_mov_i64:
2421 tcg_reg_alloc_mov(s, def, args, arg_life);
2423 case INDEX_op_movi_i32:
2424 case INDEX_op_movi_i64:
2425 tcg_reg_alloc_movi(s, args, arg_life);
2427 case INDEX_op_insn_start:
2428 if (num_insns >= 0) {
2429 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2432 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
2434 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
2435 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
2439 s->gen_insn_data[num_insns][i] = a;
2442 case INDEX_op_discard:
2443 temp_dead(s, &s->temps[args[0]]);
2445 case INDEX_op_set_label:
2446 tcg_reg_alloc_bb_end(s, s->reserved_regs);
2447 tcg_out_label(s, arg_label(args[0]), s->code_ptr);
2450 tcg_reg_alloc_call(s, op->callo, op->calli, args, arg_life);
2453 /* Sanity check that we've not introduced any unhandled opcodes. */
2454 if (def->flags & TCG_OPF_NOT_PRESENT) {
2457 /* Note: in order to speed up the code, it would be much
2458 faster to have specialized register allocator functions for
2459 some common argument patterns */
2460 tcg_reg_alloc_op(s, def, opc, args, arg_life);
2463 #ifdef CONFIG_DEBUG_TCG
2466 /* Test for (pending) buffer overflow. The assumption is that any
2467 one operation beginning below the high water mark cannot overrun
2468 the buffer completely. Thus we can test for overflow after
2469 generating code without having to check during generation. */
2470 if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
2474 tcg_debug_assert(num_insns >= 0);
2475 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2477 /* Generate TB finalization at the end of block */
2478 if (!tcg_out_tb_finalize(s)) {
2479 return -1;
2480 }
2482 /* flush instruction cache */
2483 flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
2485 return tcg_current_code_size(s);
2486 }
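/* Note (added): a negative return value tells the caller that an op
   crossed the high-water mark; tb_gen_code() in translate-all.c reacts
   by flushing the code buffer and retranslating, so a partially emitted
   TB is never executed. */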
2488 #ifdef CONFIG_PROFILER
2489 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2490 {
2491 TCGContext *s = &tcg_ctx;
2492 int64_t tb_count = s->tb_count;
2493 int64_t tb_div_count = tb_count ? tb_count : 1;
2494 int64_t tot = s->interm_time + s->code_time;
2496 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2497 tot, tot / 2.4e9);
2498 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
2499 tb_count, s->tb_count1 - tb_count,
2500 (double)(s->tb_count1 - s->tb_count)
2501 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
2502 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
2503 (double)s->op_count / tb_div_count, s->op_count_max);
2504 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
2505 (double)s->del_op_count / tb_div_count);
2506 cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
2507 (double)s->temp_count / tb_div_count, s->temp_count_max);
2508 cpu_fprintf(f, "avg host code/TB %0.1f\n",
2509 (double)s->code_out_len / tb_div_count);
2510 cpu_fprintf(f, "avg search data/TB %0.1f\n",
2511 (double)s->search_out_len / tb_div_count);
2513 cpu_fprintf(f, "cycles/op %0.1f\n",
2514 s->op_count ? (double)tot / s->op_count : 0);
2515 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2516 s->code_in_len ? (double)tot / s->code_in_len : 0);
2517 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2518 s->code_out_len ? (double)tot / s->code_out_len : 0);
2519 cpu_fprintf(f, "cycles/search byte %0.1f\n",
2520 s->search_out_len ? (double)tot / s->search_out_len : 0);
2521 if (tot == 0) {
2522 tot = 1;
2523 }
2524 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2525 (double)s->interm_time / tot * 100.0);
2526 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2527 (double)s->code_time / tot * 100.0);
2528 cpu_fprintf(f, "optim./code time %0.1f%%\n",
2529 (double)s->opt_time / (s->code_time ? s->code_time : 1)
2531 cpu_fprintf(f, "liveness/code time %0.1f%%\n",
2532 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2533 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2535 cpu_fprintf(f, " avg cycles %0.1f\n",
2536 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
2537 }
2538 #else
2539 void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
2540 {
2541 cpu_fprintf(f, "[TCG profiler not compiled]\n");
2542 }
2543 #endif
2545 #ifdef ELF_HOST_MACHINE
2546 /* In order to use this feature, the backend needs to do three things:
2548 (1) Define ELF_HOST_MACHINE, which both supplies the value placed in
2549 the ELF image's e_machine field and signals support for the feature.
2551 (2) Define tcg_register_jit. This should create a buffer containing
2552 the contents of a .debug_frame section that describes the post-
2553 prologue unwind info for the tcg machine.
2555 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2556 */
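/* Illustration (added): a minimal sketch of steps (2) and (3) for a
   hypothetical backend. The shape mirrors what real tcg-target.inc.c
   files do, but every unwind value below (data_align, return_column,
   the DW_CFA opcode bytes) is a placeholder, not a real host ABI. */
#if 0
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_ops[4];                     /* placeholder DW_CFA_* opcodes */
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after the .len member */
    .h.cie.id = -1,                         /* marks .debug_frame, not .eh_frame */
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 0x78,               /* sleb128 -8; assumes a 64-bit host */
    .h.cie.return_column = 16,              /* placeholder register number */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
    .fde_ops = { 0 },                       /* a real backend encodes its prologue here */
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    /* Step (3): func_start/func_len are patched by tcg_register_jit_int. */
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif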
2558 /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
2560 typedef enum {
2561 JIT_NOACTION = 0,
2562 JIT_REGISTER_FN,
2563 JIT_UNREGISTER_FN
2564 } jit_actions_t;
2565 struct jit_code_entry {
2566 struct jit_code_entry *next_entry;
2567 struct jit_code_entry *prev_entry;
2568 const void *symfile_addr;
2569 uint64_t symfile_size;
2570 };
2572 struct jit_descriptor {
2573 uint32_t version;
2574 uint32_t action_flag;
2575 struct jit_code_entry *relevant_entry;
2576 struct jit_code_entry *first_entry;
2577 };
2579 void __jit_debug_register_code(void) __attribute__((noinline));
2580 void __jit_debug_register_code(void)
2581 {
2582 asm(""); /* keeps the function from being elided; GDB breakpoints it */
2583 }
2585 /* Must statically initialize the version, because GDB may check
2586 the version before we can set it. */
2587 struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
2589 /* End GDB interface. */
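/* Illustration (added): the same protocol also supports unloading. QEMU
   keeps its single entry forever, but a hypothetical unregister would
   unlink the entry and signal GDB like this (sketch only): */
#if 0
static void jit_unregister_example(struct jit_code_entry *e)
{
    if (e->prev_entry) {
        e->prev_entry->next_entry = e->next_entry;
    } else {
        __jit_debug_descriptor.first_entry = e->next_entry;
    }
    if (e->next_entry) {
        e->next_entry->prev_entry = e->prev_entry;
    }
    __jit_debug_descriptor.relevant_entry = e;
    __jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
    __jit_debug_register_code();    /* GDB's breakpoint lands here */
}
#endif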
2591 static int find_string(const char *strtab, const char *str)
2592 {
2593 const char *p = strtab + 1;
2595 while (1) {
2596 if (strcmp(p, str) == 0) {
2597 return p - strtab;
2598 }
2599 p += strlen(p) + 1;
2600 }
2601 }
2603 static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2604 const void *debug_frame,
2605 size_t debug_frame_size)
2606 {
2607 struct __attribute__((packed)) DebugInfo {
2608 uint32_t len;
2609 uint16_t version;
2610 uint32_t abbrev;
2611 uint8_t ptr_size;
2612 uint8_t cu_die;
2613 uint16_t cu_lang;
2614 uintptr_t cu_low_pc;
2615 uintptr_t cu_high_pc;
2616 uint8_t fn_die;
2617 char fn_name[16];
2618 uintptr_t fn_low_pc;
2619 uintptr_t fn_high_pc;
2620 uint8_t cu_eoc;
2621 };
2623 struct ElfImage {
2624 ElfW(Ehdr) ehdr;
2625 ElfW(Phdr) phdr;
2626 ElfW(Shdr) shdr[7];
2627 ElfW(Sym) sym[2];
2628 struct DebugInfo di;
2629 uint8_t da[24];
2630 char str[80];
2631 };
2633 struct ElfImage *img;
2635 static const struct ElfImage img_template = {
2636 .ehdr = {
2637 .e_ident[EI_MAG0] = ELFMAG0,
2638 .e_ident[EI_MAG1] = ELFMAG1,
2639 .e_ident[EI_MAG2] = ELFMAG2,
2640 .e_ident[EI_MAG3] = ELFMAG3,
2641 .e_ident[EI_CLASS] = ELF_CLASS,
2642 .e_ident[EI_DATA] = ELF_DATA,
2643 .e_ident[EI_VERSION] = EV_CURRENT,
2644 .e_type = ET_EXEC,
2645 .e_machine = ELF_HOST_MACHINE,
2646 .e_version = EV_CURRENT,
2647 .e_phoff = offsetof(struct ElfImage, phdr),
2648 .e_shoff = offsetof(struct ElfImage, shdr),
2649 .e_ehsize = sizeof(ElfW(Ehdr)),
2650 .e_phentsize = sizeof(ElfW(Phdr)),
2651 .e_phnum = 1,
2652 .e_shentsize = sizeof(ElfW(Shdr)),
2653 .e_shnum = ARRAY_SIZE(img->shdr),
2654 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
2655 #ifdef ELF_HOST_FLAGS
2656 .e_flags = ELF_HOST_FLAGS,
2657 #endif
2658 #ifdef ELF_OSABI
2659 .e_ident[EI_OSABI] = ELF_OSABI,
2660 #endif
2661 },
2662 .phdr = {
2663 .p_type = PT_LOAD,
2664 .p_flags = PF_X,
2665 },
2666 .shdr = {
2667 [0] = { .sh_type = SHT_NULL },
2668 /* Trick: The contents of code_gen_buffer are not present in
2669 this fake ELF file; that got allocated elsewhere. Therefore
2670 we mark .text as SHT_NOBITS (similar to .bss) so that readers
2671 will not look for contents. We can record any address. */
2672 [1] = { /* .text */
2673 .sh_type = SHT_NOBITS,
2674 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
2675 },
2676 [2] = { /* .debug_info */
2677 .sh_type = SHT_PROGBITS,
2678 .sh_offset = offsetof(struct ElfImage, di),
2679 .sh_size = sizeof(struct DebugInfo),
2680 },
2681 [3] = { /* .debug_abbrev */
2682 .sh_type = SHT_PROGBITS,
2683 .sh_offset = offsetof(struct ElfImage, da),
2684 .sh_size = sizeof(img->da),
2685 },
2686 [4] = { /* .debug_frame */
2687 .sh_type = SHT_PROGBITS,
2688 .sh_offset = sizeof(struct ElfImage),
2689 },
2690 [5] = { /* .symtab */
2691 .sh_type = SHT_SYMTAB,
2692 .sh_offset = offsetof(struct ElfImage, sym),
2693 .sh_size = sizeof(img->sym),
2694 .sh_info = 1,
2695 .sh_link = ARRAY_SIZE(img->shdr) - 1,
2696 .sh_entsize = sizeof(ElfW(Sym)),
2697 },
2698 [6] = { /* .strtab */
2699 .sh_type = SHT_STRTAB,
2700 .sh_offset = offsetof(struct ElfImage, str),
2701 .sh_size = sizeof(img->str),
2702 }
2703 },
2704 .sym = {
2705 [1] = { /* code_gen_buffer */
2706 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
2707 .st_shndx = 1,
2708 }
2709 },
2710 .di = {
2711 .len = sizeof(struct DebugInfo) - 4,
2712 .version = 2,
2713 .ptr_size = sizeof(void *),
2714 .cu_die = 1,
2715 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
2716 .fn_die = 2,
2717 .fn_name = "code_gen_buffer"
2718 },
2719 .da = {
2720 1, /* abbrev number (the cu) */
2721 0x11, 1, /* DW_TAG_compile_unit, has children */
2722 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
2723 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2724 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2725 0, 0, /* end of abbrev */
2726 2, /* abbrev number (the fn) */
2727 0x2e, 0, /* DW_TAG_subprogram, no children */
2728 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
2729 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
2730 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
2731 0, 0, /* end of abbrev */
2732 0 /* no more abbrev */
2733 },
2734 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
2735 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
2736 };
2738 /* We only need a single jit entry; statically allocate it. */
2739 static struct jit_code_entry one_entry;
2741 uintptr_t buf = (uintptr_t)buf_ptr;
2742 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2743 DebugFrameHeader *dfh;
2745 img = g_malloc(img_size);
2746 *img = img_template;
2748 img->phdr.p_vaddr = buf;
2749 img->phdr.p_paddr = buf;
2750 img->phdr.p_memsz = buf_size;
2752 img->shdr[1].sh_name = find_string(img->str, ".text");
2753 img->shdr[1].sh_addr = buf;
2754 img->shdr[1].sh_size = buf_size;
2756 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
2757 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
2759 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
2760 img->shdr[4].sh_size = debug_frame_size;
2762 img->shdr[5].sh_name = find_string(img->str, ".symtab");
2763 img->shdr[6].sh_name = find_string(img->str, ".strtab");
2765 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
2766 img->sym[1].st_value = buf;
2767 img->sym[1].st_size = buf_size;
2769 img->di.cu_low_pc = buf;
2770 img->di.cu_high_pc = buf + buf_size;
2771 img->di.fn_low_pc = buf;
2772 img->di.fn_high_pc = buf + buf_size;
2774 dfh = (DebugFrameHeader *)(img + 1);
2775 memcpy(dfh, debug_frame, debug_frame_size);
2776 dfh->fde.func_start = buf;
2777 dfh->fde.func_len = buf_size;
2779 #ifdef DEBUG_JIT
2780 /* Enable this block to be able to debug the ELF image file creation.
2781 One can use readelf, objdump, or other inspection utilities. */
2782 {
2783 FILE *f = fopen("/tmp/qemu.jit", "w+b");
2784 if (f) {
2785 if (fwrite(img, img_size, 1, f) != 1) {
2786 /* Consume fwrite's return value (1 item on success) to silence -Wunused-result. */
2787 }
2788 fclose(f);
2789 }
2790 }
2791 #endif
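/* Usage note (added): with the block above enabled, the image written to
   /tmp/qemu.jit can be inspected offline with standard binutils, e.g.:

       readelf --all /tmp/qemu.jit
       objdump --dwarf=frames /tmp/qemu.jit

   the second command dumps the copied .debug_frame contents. */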
2793 one_entry.symfile_addr = img;
2794 one_entry.symfile_size = img_size;
2796 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
2797 __jit_debug_descriptor.relevant_entry = &one_entry;
2798 __jit_debug_descriptor.first_entry = &one_entry;
2799 __jit_debug_register_code();
2800 }
2801 #else
2802 /* No support for the feature. Provide the entry point expected by exec.c,
2803 and implement the internal function we declared earlier. */
2805 static void tcg_register_jit_int(void *buf, size_t size,
2806 const void *debug_frame,
2807 size_t debug_frame_size)
2808 {
2809 }
2811 void tcg_register_jit(void *buf, size_t buf_size)
2812 {
2813 }
2814 #endif /* ELF_HOST_MACHINE */