2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 /* define it to suppress various consistency checks (faster) */
28 /* define it to use liveness analysis (better code) */
29 #define USE_LIVENESS_ANALYSIS
42 #include "qemu-common.h"
44 /* Note: the long-term plan is to reduce the dependencies on the QEMU
45 CPU definitions. Currently they are used for qemu_ld/st
47 #define NO_CPU_IO_DEFS
55 static void patch_reloc(uint8_t *code_ptr, int type,
56 tcg_target_long value, tcg_target_long addend);
58 TCGOpDef tcg_op_defs[] = {
59 #define DEF(s, n, copy_size) { #s, 0, 0, n, n, 0, copy_size },
60 #define DEF2(s, iargs, oargs, cargs, flags) { #s, iargs, oargs, cargs, iargs + oargs + cargs, flags, 0 },
66 TCGRegSet tcg_target_available_regs[2];
67 TCGRegSet tcg_target_call_clobber_regs;
69 /* XXX: move that inside the context */
70 uint16_t *gen_opc_ptr;
71 TCGArg *gen_opparam_ptr;
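/* These two pointers walk the gen_opc_buf / gen_opparam_buf intermediate
   code buffers: the front end appends a uint16_t opcode (an index into
   tcg_op_defs) to one stream and the matching TCGArg operands to the
   other, and the code generator later consumes both streams in parallel. */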
73 static inline void tcg_out8(TCGContext *s, uint8_t v)
78 static inline void tcg_out16(TCGContext *s, uint16_t v)
80 *(uint16_t *)s->code_ptr = v;
84 static inline void tcg_out32(TCGContext *s, uint32_t v)
86 *(uint32_t *)s->code_ptr = v;
90 /* label relocation processing */
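/* Relocation protocol: a backend emitting a branch to a label whose
   address is not known yet calls tcg_out_reloc() at the emission point.
   If the label already has a value the field is patched on the spot,
   otherwise a TCGRelocation is queued on the label and patched by
   tcg_out_label() once the label address is known.  The relocation
   'type' and the actual patching are target specific (patch_reloc() is
   provided by the included tcg-target.c). */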
92 void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
93 int label_index, long addend)
98 l = &s->labels[label_index];
100 /* FIXME: This may break relocations on RISC targets that
101 modify instruction fields in place. The caller may not have
102 written the initial value. */
103 patch_reloc(code_ptr, type, l->u.value, addend);
105 /* add a new relocation entry */
106 r = tcg_malloc(sizeof(TCGRelocation));
110 r->next = l->u.first_reloc;
111 l->u.first_reloc = r;
115 static void tcg_out_label(TCGContext *s, int label_index,
116 tcg_target_long value)
121 l = &s->labels[label_index];
124 r = l->u.first_reloc;
126 patch_reloc(r->ptr, r->type, value, r->addend);
133 int gen_new_label(void)
135 TCGContext *s = &tcg_ctx;
139 if (s->nb_labels >= TCG_MAX_LABELS)
141 idx = s->nb_labels++;
144 l->u.first_reloc = NULL;
148 #include "tcg-target.c"
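/* Typical front-end use of the label machinery above (a minimal sketch;
   the tcg_gen_* helpers come from tcg-op.h and 'a', 'b' stand for some
   previously created TCGv values, so treat this purely as an illustration):

       int label = gen_new_label();
       tcg_gen_brcond_i32(TCG_COND_EQ, a, b, label);
       ... code for the fall-through path ...
       tcg_gen_set_label(label);

   gen_new_label() only reserves an index; the address is bound later by
   tcg_out_label() when the set_label op is reached during code generation. */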
150 /* pool based memory allocation */
151 void *tcg_malloc_internal(TCGContext *s, int size)
156 if (size > TCG_POOL_CHUNK_SIZE) {
157 /* big malloc: insert a new pool (XXX: could optimize) */
158 p = qemu_malloc(sizeof(TCGPool) + size);
161 s->pool_current->next = p;
164 p->next = s->pool_current;
174 pool_size = TCG_POOL_CHUNK_SIZE;
175 p = qemu_malloc(sizeof(TCGPool) + pool_size);
179 s->pool_current->next = p;
188 s->pool_cur = p->data + size;
189 s->pool_end = p->data + p->size;
193 void tcg_pool_reset(TCGContext *s)
195 s->pool_cur = s->pool_end = NULL;
196 s->pool_current = NULL;
199 /* free all the pools */
200 void tcg_pool_free(TCGContext *s)
204 for(p = s->pool_first; p != NULL; p = p1) {
208 s->pool_first = NULL;
209 s->pool_cur = s->pool_end = NULL;
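/* Allocation discipline: tcg_malloc() hands out small objects from
   TCG_POOL_CHUNK_SIZE pools and calls tcg_malloc_internal() above when the
   current pool is exhausted.  Nothing is freed individually; per-TB data
   (labels, relocations, liveness arrays, ...) is reclaimed wholesale by
   tcg_pool_reset(), typically once per translated block, so a caller such
   as

       r = tcg_malloc(sizeof(TCGRelocation));

   never pairs the allocation with a free. */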
212 void tcg_context_init(TCGContext *s)
214 int op, total_args, n;
216 TCGArgConstraint *args_ct;
219 memset(s, 0, sizeof(*s));
220 s->temps = s->static_temps;
223 /* Count total number of arguments and allocate the corresponding
226 for(op = 0; op < NB_OPS; op++) {
227 def = &tcg_op_defs[op];
228 n = def->nb_iargs + def->nb_oargs;
232 args_ct = qemu_malloc(sizeof(TCGArgConstraint) * total_args);
233 sorted_args = qemu_malloc(sizeof(int) * total_args);
235 for(op = 0; op < NB_OPS; op++) {
236 def = &tcg_op_defs[op];
237 def->args_ct = args_ct;
238 def->sorted_args = sorted_args;
239 n = def->nb_iargs + def->nb_oargs;
247 void tcg_set_frame(TCGContext *s, int reg,
248 tcg_target_long start, tcg_target_long size)
250 s->frame_start = start;
251 s->frame_end = start + size;
255 void tcg_set_macro_func(TCGContext *s, TCGMacroFunc *func)
257 s->macro_func = func;
260 void tcg_func_start(TCGContext *s)
263 s->nb_temps = s->nb_globals;
264 s->labels = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
266 s->current_frame_offset = s->frame_start;
268 gen_opc_ptr = gen_opc_buf;
269 gen_opparam_ptr = gen_opparam_buf;
272 static inline void tcg_temp_alloc(TCGContext *s, int n)
274 if (n > TCG_MAX_TEMPS)
278 TCGv tcg_global_reg_new(TCGType type, int reg, const char *name)
280 TCGContext *s = &tcg_ctx;
284 #if TCG_TARGET_REG_BITS == 32
285 if (type != TCG_TYPE_I32)
288 if (tcg_regset_test_reg(s->reserved_regs, reg))
291 tcg_temp_alloc(s, s->nb_globals + 1);
292 ts = &s->temps[s->nb_globals];
293 ts->base_type = type;
297 ts->val_type = TEMP_VAL_REG;
300 tcg_regset_set_reg(s->reserved_regs, reg);
301 return MAKE_TCGV(idx);
304 TCGv tcg_global_mem_new(TCGType type, int reg, tcg_target_long offset,
307 TCGContext *s = &tcg_ctx;
312 #if TCG_TARGET_REG_BITS == 32
313 if (type == TCG_TYPE_I64) {
315 tcg_temp_alloc(s, s->nb_globals + 1);
316 ts = &s->temps[s->nb_globals];
317 ts->base_type = type;
318 ts->type = TCG_TYPE_I32;
320 ts->mem_allocated = 1;
322 #ifdef TCG_TARGET_WORDS_BIGENDIAN
323 ts->mem_offset = offset + 4;
325 ts->mem_offset = offset;
327 ts->val_type = TEMP_VAL_MEM;
328 pstrcpy(buf, sizeof(buf), name);
329 pstrcat(buf, sizeof(buf), "_0");
330 ts->name = strdup(buf);
333 ts->base_type = type;
334 ts->type = TCG_TYPE_I32;
336 ts->mem_allocated = 1;
338 #ifdef TCG_TARGET_WORDS_BIGENDIAN
339 ts->mem_offset = offset;
341 ts->mem_offset = offset + 4;
343 ts->val_type = TEMP_VAL_MEM;
344 pstrcpy(buf, sizeof(buf), name);
345 pstrcat(buf, sizeof(buf), "_1");
346 ts->name = strdup(buf);
352 tcg_temp_alloc(s, s->nb_globals + 1);
353 ts = &s->temps[s->nb_globals];
354 ts->base_type = type;
357 ts->mem_allocated = 1;
359 ts->mem_offset = offset;
360 ts->val_type = TEMP_VAL_MEM;
364 return MAKE_TCGV(idx);
367 TCGv tcg_temp_new(TCGType type)
369 TCGContext *s = &tcg_ctx;
374 #if TCG_TARGET_REG_BITS == 32
375 if (type == TCG_TYPE_I64) {
376 tcg_temp_alloc(s, s->nb_temps + 1);
377 ts = &s->temps[s->nb_temps];
378 ts->base_type = type;
379 ts->type = TCG_TYPE_I32;
381 ts->val_type = TEMP_VAL_DEAD;
382 ts->mem_allocated = 0;
385 ts->base_type = TCG_TYPE_I32;
386 ts->type = TCG_TYPE_I32;
387 ts->val_type = TEMP_VAL_DEAD;
389 ts->mem_allocated = 0;
395 tcg_temp_alloc(s, s->nb_temps + 1);
396 ts = &s->temps[s->nb_temps];
397 ts->base_type = type;
400 ts->val_type = TEMP_VAL_DEAD;
401 ts->mem_allocated = 0;
405 return MAKE_TCGV(idx);
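/* Typical front-end setup (a sketch with a hypothetical CPUState field
   name, shown only as an illustration):

       cpu_T0 = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
                                   offsetof(CPUState, t0), "T0");
       TCGv tmp = tcg_temp_new(TCG_TYPE_I32);

   Globals keep a canonical home (a fixed register or a fixed memory
   offset) and stay valid across basic blocks; temps from tcg_temp_new()
   are assumed dead at the end of the current block by the liveness pass
   below. */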
408 TCGv tcg_const_i32(int32_t val)
410 TCGContext *s = &tcg_ctx;
415 tcg_temp_alloc(s, idx + 1);
417 ts->base_type = ts->type = TCG_TYPE_I32;
418 ts->val_type = TEMP_VAL_CONST;
422 return MAKE_TCGV(idx);
425 TCGv tcg_const_i64(int64_t val)
427 TCGContext *s = &tcg_ctx;
432 #if TCG_TARGET_REG_BITS == 32
433 tcg_temp_alloc(s, idx + 2);
435 ts->base_type = TCG_TYPE_I64;
436 ts->type = TCG_TYPE_I32;
437 ts->val_type = TEMP_VAL_CONST;
441 ts->base_type = TCG_TYPE_I32;
442 ts->type = TCG_TYPE_I32;
443 ts->val_type = TEMP_VAL_CONST;
448 tcg_temp_alloc(s, idx + 1);
450 ts->base_type = ts->type = TCG_TYPE_I64;
451 ts->val_type = TEMP_VAL_CONST;
456 return MAKE_TCGV(idx);
459 void tcg_register_helper(void *func, const char *name)
461 TCGContext *s = &tcg_ctx;
463 if ((s->nb_helpers + 1) > s->allocated_helpers) {
464 n = s->allocated_helpers;
470 s->helpers = realloc(s->helpers, n * sizeof(TCGHelperInfo));
471 s->allocated_helpers = n;
473 s->helpers[s->nb_helpers].func = func;
474 s->helpers[s->nb_helpers].name = name;
478 const char *tcg_helper_get_name(TCGContext *s, void *func)
482 for(i = 0; i < s->nb_helpers; i++) {
483 if (s->helpers[i].func == func)
484 return s->helpers[i].name;
489 static inline TCGType tcg_get_base_type(TCGContext *s, TCGv arg)
491 return s->temps[GET_TCGV(arg)].base_type;
494 static void tcg_gen_call_internal(TCGContext *s, TCGv func,
496 unsigned int nb_rets, const TCGv *rets,
497 unsigned int nb_params, const TCGv *params)
500 *gen_opc_ptr++ = INDEX_op_call;
501 *gen_opparam_ptr++ = (nb_rets << 16) | (nb_params + 1);
502 for(i = 0; i < nb_rets; i++) {
503 *gen_opparam_ptr++ = GET_TCGV(rets[i]);
505 for(i = 0; i < nb_params; i++) {
506 *gen_opparam_ptr++ = GET_TCGV(params[i]);
508 *gen_opparam_ptr++ = GET_TCGV(func);
510 *gen_opparam_ptr++ = flags;
511 /* total parameters, needed to go backward in the instruction stream */
512 *gen_opparam_ptr++ = 1 + nb_rets + nb_params + 3;
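/* The operand stream produced above for INDEX_op_call is therefore:

       word 0      (nb_rets << 16) | (nb_params + 1)
       then        nb_rets return temps
       then        nb_params parameter temps
       then        the function pointer temp
       then        the call flags
       last word   total operand count (nb_rets + nb_params + 4), used to
                   walk the stream backwards during liveness analysis

   tcg_liveness_analysis() and tcg_reg_alloc_call() below decode exactly
   this layout. */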
516 #if TCG_TARGET_REG_BITS < 64
517 /* Note: we convert the 64 bit args to 32 bit */
518 void tcg_gen_call(TCGContext *s, TCGv func, unsigned int flags,
519 unsigned int nb_rets, const TCGv *rets,
520 unsigned int nb_params, const TCGv *args1)
522 TCGv ret, *args2, rets_2[2], arg;
527 if (tcg_get_base_type(s, ret) == TCG_TYPE_I64) {
530 rets_2[1] = TCGV_HIGH(ret);
534 args2 = alloca((nb_params * 2) * sizeof(TCGv));
536 call_type = (flags & TCG_CALL_TYPE_MASK);
537 for(i = 0; i < nb_params; i++) {
539 if (tcg_get_base_type(s, arg) == TCG_TYPE_I64) {
540 #ifdef TCG_TARGET_I386
541 /* REGPARM case: if the third parameter is 64 bit, it is
542 allocated on the stack */
543 if (j == 2 && call_type == TCG_CALL_TYPE_REGPARM) {
544 call_type = TCG_CALL_TYPE_REGPARM_2;
545 flags = (flags & ~TCG_CALL_TYPE_MASK) | call_type;
548 args2[j++] = TCGV_HIGH(arg);
550 #ifdef TCG_TARGET_WORDS_BIGENDIAN
551 args2[j++] = TCGV_HIGH(arg);
555 args2[j++] = TCGV_HIGH(arg);
562 tcg_gen_call_internal(s, func, flags,
563 nb_rets, rets, j, args2);
566 void tcg_gen_call(TCGContext *s, TCGv func, unsigned int flags,
567 unsigned int nb_rets, const TCGv *rets,
568 unsigned int nb_params, const TCGv *args1)
570 tcg_gen_call_internal(s, func, flags,
571 nb_rets, rets, nb_params, args1);
575 #if TCG_TARGET_REG_BITS == 32
576 void tcg_gen_shifti_i64(TCGv ret, TCGv arg1,
577 int c, int right, int arith)
585 tcg_gen_sari_i32(ret, TCGV_HIGH(arg1), c);
586 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
588 tcg_gen_shri_i32(ret, TCGV_HIGH(arg1), c);
589 tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
592 tcg_gen_shli_i32(TCGV_HIGH(ret), arg1, c);
593 tcg_gen_movi_i32(ret, 0);
598 t0 = tcg_temp_new(TCG_TYPE_I32);
599 t1 = tcg_temp_new(TCG_TYPE_I32);
601 tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
603 tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
605 tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
606 tcg_gen_shri_i32(ret, arg1, c);
607 tcg_gen_or_i32(ret, ret, t0);
608 tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
610 tcg_gen_shri_i32(t0, arg1, 32 - c);
611 /* Note: ret can be the same as arg1, so we use t1 */
612 tcg_gen_shli_i32(t1, arg1, c);
613 tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
614 tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
615 tcg_gen_mov_i32(ret, t1);
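/* For 0 < c < 32 the code above implements the usual decomposition,
   writing lo/hi for the two 32-bit halves:

       right shift:  lo' = (lo >> c) | (hi << (32 - c))
                     hi' = hi >> c        (arithmetic or logical)
       left shift:   hi' = (hi << c) | (lo >> (32 - c))
                     lo' = lo << c

   For c >= 32 one half is just a shifted copy of the other and the
   remaining half is zero (or a replica of the sign bit for arithmetic
   right shifts). */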
621 void tcg_reg_alloc_start(TCGContext *s)
625 for(i = 0; i < s->nb_globals; i++) {
628 ts->val_type = TEMP_VAL_REG;
630 ts->val_type = TEMP_VAL_MEM;
633 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
634 s->reg_to_temp[i] = -1;
638 static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
644 if (idx < s->nb_globals) {
645 pstrcpy(buf, buf_size, ts->name);
647 if (ts->val_type == TEMP_VAL_CONST) {
648 snprintf(buf, buf_size, "$0x%" TCG_PRIlx , ts->val);
650 snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
656 char *tcg_get_arg_str(TCGContext *s, char *buf, int buf_size, TCGv arg)
658 return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV(arg));
661 void tcg_dump_ops(TCGContext *s, FILE *outfile)
663 const uint16_t *opc_ptr;
666 int c, i, k, nb_oargs, nb_iargs, nb_cargs;
670 opc_ptr = gen_opc_buf;
671 args = gen_opparam_buf;
672 while (opc_ptr < gen_opc_ptr) {
674 def = &tcg_op_defs[c];
675 fprintf(outfile, " %s ", def->name);
676 if (c == INDEX_op_call) {
678 /* variable number of arguments */
680 nb_oargs = arg >> 16;
681 nb_iargs = arg & 0xffff;
682 nb_cargs = def->nb_cargs;
683 } else if (c == INDEX_op_nopn) {
684 /* variable number of arguments */
689 nb_oargs = def->nb_oargs;
690 nb_iargs = def->nb_iargs;
691 nb_cargs = def->nb_cargs;
695 for(i = 0; i < nb_oargs; i++) {
697 fprintf(outfile, ",");
698 fprintf(outfile, "%s",
699 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++]));
701 for(i = 0; i < nb_iargs; i++) {
703 fprintf(outfile, ",");
704 /* XXX: dump helper name for call */
705 fprintf(outfile, "%s",
706 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++]));
708 for(i = 0; i < nb_cargs; i++) {
710 fprintf(outfile, ",");
712 fprintf(outfile, "$0x%" TCG_PRIlx, arg);
714 fprintf(outfile, "\n");
715 args += nb_iargs + nb_oargs + nb_cargs;
719 /* we give more priority to constraints that accept fewer registers */
720 static int get_constraint_priority(const TCGOpDef *def, int k)
722 const TCGArgConstraint *arg_ct;
725 arg_ct = &def->args_ct[k];
726 if (arg_ct->ct & TCG_CT_ALIAS) {
727 /* an alias is equivalent to a single register */
730 if (!(arg_ct->ct & TCG_CT_REG))
733 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
734 if (tcg_regset_test_reg(arg_ct->u.regs, i))
738 return TCG_TARGET_NB_REGS - n + 1;
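/* The priority is TCG_TARGET_NB_REGS - n + 1, where n is the number of
   registers the constraint accepts: on a hypothetical 8-register target,
   a single-register constraint gets priority 8 while an unrestricted
   "any register" constraint gets priority 1, and an aliased output counts
   as a single register.  Arguments are then processed most constrained
   first by the register allocator. */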
741 /* sort from highest priority to lowest */
742 static void sort_constraints(TCGOpDef *def, int start, int n)
744 int i, j, p1, p2, tmp;
746 for(i = 0; i < n; i++)
747 def->sorted_args[start + i] = start + i;
750 for(i = 0; i < n - 1; i++) {
751 for(j = i + 1; j < n; j++) {
752 p1 = get_constraint_priority(def, def->sorted_args[start + i]);
753 p2 = get_constraint_priority(def, def->sorted_args[start + j]);
755 tmp = def->sorted_args[start + i];
756 def->sorted_args[start + i] = def->sorted_args[start + j];
757 def->sorted_args[start + j] = tmp;
763 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
774 assert(op >= 0 && op < NB_OPS);
775 def = &tcg_op_defs[op];
776 nb_args = def->nb_iargs + def->nb_oargs;
777 for(i = 0; i < nb_args; i++) {
778 ct_str = tdefs->args_ct_str[i];
779 tcg_regset_clear(def->args_ct[i].u.regs);
780 def->args_ct[i].ct = 0;
781 if (ct_str[0] >= '0' && ct_str[0] <= '9') {
783 oarg = ct_str[0] - '0';
784 assert(oarg < def->nb_oargs);
785 assert(def->args_ct[oarg].ct & TCG_CT_REG);
786 /* TCG_CT_ALIAS is for the output arguments. The input
787 argument is tagged with TCG_CT_IALIAS. */
788 def->args_ct[i] = def->args_ct[oarg];
789 def->args_ct[oarg].ct = TCG_CT_ALIAS;
790 def->args_ct[oarg].alias_index = i;
791 def->args_ct[i].ct |= TCG_CT_IALIAS;
792 def->args_ct[i].alias_index = oarg;
799 def->args_ct[i].ct |= TCG_CT_CONST;
803 if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
804 fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
805 ct_str, i, def->name);
813 /* sort the constraints (XXX: this is just a heuristic) */
814 sort_constraints(def, 0, def->nb_oargs);
815 sort_constraints(def, def->nb_oargs, def->nb_iargs);
821 printf("%s: sorted=", def->name);
822 for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
823 printf(" %d", def->sorted_args[i]);
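/* A backend constraint table typically contains entries of the form
   (shown only as an example):

       { INDEX_op_add_i32, { "r", "0", "ri" } }

   meaning: the output must live in a register, the first input must be
   allocated to the same register as output 0 (a digit names the aliased
   output and tags the input TCG_CT_IALIAS), and the second input may be a
   register or a constant ('i' sets TCG_CT_CONST; letters other than 'i'
   and digits are handled by target_parse_constraint()). */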
832 #ifdef USE_LIVENESS_ANALYSIS
834 /* replace an operation that uses 'nb_args' arguments with a nop */
835 static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr,
836 TCGArg *args, int nb_args)
839 *opc_ptr = INDEX_op_nop;
841 *opc_ptr = INDEX_op_nopn;
843 args[nb_args - 1] = nb_args;
847 /* liveness analysis: end of basic block: globals are live, temps are dead */
848 static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps)
850 memset(dead_temps, 0, s->nb_globals);
851 memset(dead_temps + s->nb_globals, 1, s->nb_temps - s->nb_globals);
854 /* Liveness analysis: update the op_dead_iargs array to tell whether a
855 given input argument is dead. Instructions that only update dead
856 temporaries are removed. */
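/* The pass walks the opcode stream backwards.  dead_temps[i] == 1 means
   temp i is not used by any later op; for each op the outputs become dead,
   the inputs become live, and bit n of op_dead_iargs[op_index] records
   that input n was dead after the op.  The register allocator later tests
   this with IS_DEAD_IARG(); for instance a

       mov_i32 t1, t0

   whose source t0 is dead afterwards is turned into a simple register
   rename instead of an actual move (see tcg_reg_alloc_mov below). */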
857 void tcg_liveness_analysis(TCGContext *s)
859 int i, op_index, op, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
863 unsigned int dead_iargs;
865 gen_opc_ptr++; /* skip end */
867 nb_ops = gen_opc_ptr - gen_opc_buf;
869 /* XXX: make it really dynamic */
870 s->op_dead_iargs = tcg_malloc(OPC_BUF_SIZE * sizeof(uint16_t));
872 dead_temps = tcg_malloc(s->nb_temps);
873 memset(dead_temps, 1, s->nb_temps);
875 args = gen_opparam_ptr;
876 op_index = nb_ops - 1;
877 while (op_index >= 0) {
878 op = gen_opc_buf[op_index];
879 def = &tcg_op_defs[op];
884 nb_iargs = args[0] & 0xffff;
885 nb_oargs = args[0] >> 16;
888 /* output args are dead */
889 for(i = 0; i < nb_oargs; i++) {
894 /* globals are live (they may be used by the call) */
895 memset(dead_temps, 0, s->nb_globals);
897 /* input args are live */
899 for(i = 0; i < nb_iargs; i++) {
900 arg = args[i + nb_oargs];
901 if (dead_temps[arg]) {
902 dead_iargs |= (1 << i);
906 s->op_dead_iargs[op_index] = dead_iargs;
909 case INDEX_op_set_label:
911 /* mark end of basic block */
912 tcg_la_bb_end(s, dead_temps);
918 case INDEX_op_discard:
920 /* mark the temporary as dead */
921 dead_temps[args[0]] = 1;
923 case INDEX_op_macro_2:
925 int dead_args[2], macro_id;
926 int saved_op_index, saved_arg_index;
927 int macro_op_index, macro_arg_index;
928 int macro_end_op_index, macro_end_arg_index;
933 dead_args[0] = dead_temps[args[0]];
934 dead_args[1] = dead_temps[args[1]];
937 /* call the macro function, which generates code
938 depending on the live outputs */
939 saved_op_index = op_index;
940 saved_arg_index = args - gen_opparam_buf;
942 /* add a macro start instruction */
943 *gen_opc_ptr++ = INDEX_op_macro_start;
944 *gen_opparam_ptr++ = saved_op_index;
945 *gen_opparam_ptr++ = saved_arg_index;
947 macro_op_index = gen_opc_ptr - gen_opc_buf;
948 macro_arg_index = gen_opparam_ptr - gen_opparam_buf;
950 last_nb_temps = s->nb_temps;
952 s->macro_func(s, macro_id, dead_args);
954 /* realloc temp info (XXX: make it faster) */
955 if (s->nb_temps > last_nb_temps) {
956 uint8_t *new_dead_temps;
958 new_dead_temps = tcg_malloc(s->nb_temps);
959 memcpy(new_dead_temps, dead_temps, last_nb_temps);
960 memset(new_dead_temps + last_nb_temps, 1,
961 s->nb_temps - last_nb_temps);
962 dead_temps = new_dead_temps;
965 macro_end_op_index = gen_opc_ptr - gen_opc_buf;
966 macro_end_arg_index = gen_opparam_ptr - gen_opparam_buf;
968 /* end of macro: add a goto to the next instruction */
969 *gen_opc_ptr++ = INDEX_op_macro_end;
970 *gen_opparam_ptr++ = op_index + 1;
971 *gen_opparam_ptr++ = saved_arg_index + nb_args;
973 /* modify the macro operation to be a macro_goto */
974 gen_opc_buf[op_index] = INDEX_op_macro_goto;
975 args[0] = macro_op_index;
976 args[1] = macro_arg_index;
977 args[2] = 0; /* dummy third arg to match the
980 /* set the next instruction to the end of the macro */
981 op_index = macro_end_op_index;
982 args = macro_end_arg_index + gen_opparam_buf;
985 case INDEX_op_macro_start:
988 args = gen_opparam_buf + args[1];
990 case INDEX_op_macro_goto:
991 case INDEX_op_macro_end:
992 tcg_abort(); /* should never happen in liveness analysis */
995 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
997 if (op > INDEX_op_end) {
998 args -= def->nb_args;
999 nb_iargs = def->nb_iargs;
1000 nb_oargs = def->nb_oargs;
1002 /* Test if the operation can be removed because all
1003 its outputs are dead. We assume that nb_oargs == 0
1004 implies side effects */
1005 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
1006 for(i = 0; i < nb_oargs; i++) {
1008 if (!dead_temps[arg])
1011 tcg_set_nop(s, gen_opc_buf + op_index, args, def->nb_args);
1012 #ifdef CONFIG_PROFILER
1014 extern int64_t dyngen_tcg_del_op_count;
1015 dyngen_tcg_del_op_count++;
1021 /* output args are dead */
1022 for(i = 0; i < nb_oargs; i++) {
1024 dead_temps[arg] = 1;
1027 /* if end of basic block, update */
1028 if (def->flags & TCG_OPF_BB_END) {
1029 tcg_la_bb_end(s, dead_temps);
1032 /* input args are live */
1034 for(i = 0; i < nb_iargs; i++) {
1035 arg = args[i + nb_oargs];
1036 if (dead_temps[arg]) {
1037 dead_iargs |= (1 << i);
1039 dead_temps[arg] = 0;
1041 s->op_dead_iargs[op_index] = dead_iargs;
1044 /* legacy dyngen operations */
1045 args -= def->nb_args;
1046 /* mark end of basic block */
1047 tcg_la_bb_end(s, dead_temps);
1054 if (args != gen_opparam_buf)
1058 /* dummy liveness analysis */
1059 void tcg_liveness_analysis(TCGContext *s)
1062 nb_ops = gen_opc_ptr - gen_opc_buf;
1064 s->op_dead_iargs = tcg_malloc(nb_ops * sizeof(uint16_t));
1065 memset(s->op_dead_iargs, 0, nb_ops * sizeof(uint16_t));
1070 static void dump_regs(TCGContext *s)
1076 for(i = 0; i < s->nb_temps; i++) {
1078 printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
1079 switch(ts->val_type) {
1081 printf("%s", tcg_target_reg_names[ts->reg]);
1084 printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
1086 case TEMP_VAL_CONST:
1087 printf("$0x%" TCG_PRIlx, ts->val);
1099 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
1100 if (s->reg_to_temp[i] >= 0) {
1102 tcg_target_reg_names[i],
1103 tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
1108 static void check_regs(TCGContext *s)
1114 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1115 k = s->reg_to_temp[reg];
1118 if (ts->val_type != TEMP_VAL_REG ||
1120 printf("Inconsistency for register %s:\n",
1121 tcg_target_reg_names[reg]);
1122 printf("reg state:\n");
1128 for(k = 0; k < s->nb_temps; k++) {
1130 if (ts->val_type == TEMP_VAL_REG &&
1132 s->reg_to_temp[ts->reg] != k) {
1133 printf("Inconsistency for temp %s:\n",
1134 tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
1135 printf("reg state:\n");
1143 static void temp_allocate_frame(TCGContext *s, int temp)
1146 ts = &s->temps[temp];
1147 s->current_frame_offset = (s->current_frame_offset + sizeof(tcg_target_long) - 1) & ~(sizeof(tcg_target_long) - 1);
1148 if (s->current_frame_offset + sizeof(tcg_target_long) > s->frame_end)
1150 ts->mem_offset = s->current_frame_offset;
1151 ts->mem_reg = s->frame_reg;
1152 ts->mem_allocated = 1;
1153 s->current_frame_offset += sizeof(tcg_target_long);
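/* The rounding above is the standard (x + a - 1) & ~(a - 1) alignment
   idiom with a == sizeof(tcg_target_long): on a 64-bit host, for example,
   a frame offset of 12 is bumped to 16 before the slot is assigned, so
   every spill slot is naturally aligned for a tcg_target_long store. */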
1156 /* free register 'reg' by spilling the corresponding temporary if necessary */
1157 static void tcg_reg_free(TCGContext *s, int reg)
1162 temp = s->reg_to_temp[reg];
1164 ts = &s->temps[temp];
1165 assert(ts->val_type == TEMP_VAL_REG);
1166 if (!ts->mem_coherent) {
1167 if (!ts->mem_allocated)
1168 temp_allocate_frame(s, temp);
1169 tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1171 ts->val_type = TEMP_VAL_MEM;
1172 s->reg_to_temp[reg] = -1;
1176 /* Allocate a register belonging to reg1 & ~reg2 */
1177 static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
1182 tcg_regset_andnot(reg_ct, reg1, reg2);
1184 /* first try free registers */
1185 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1186 reg = tcg_target_reg_alloc_order[i];
1187 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
1191 /* XXX: do better spill choice */
1192 for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
1193 reg = tcg_target_reg_alloc_order[i];
1194 if (tcg_regset_test_reg(reg_ct, reg)) {
1195 tcg_reg_free(s, reg);
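/* Allocation strategy: scan tcg_target_reg_alloc_order for a register that
   is both allowed by 'reg1 & ~reg2' and currently free; failing that, spill
   the first allowed register in that order with tcg_reg_free(), which only
   stores the evicted temp back to its stack slot if the value is not
   already memory coherent. */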
1203 /* at the end of a basic block, we assume all temporaries are dead and
1204 all globals are stored at their canonical location */
1205 /* XXX: optimize by handling constants in another array ? */
1206 void tcg_reg_alloc_bb_end(TCGContext *s)
1211 for(i = 0; i < s->nb_globals; i++) {
1213 if (!ts->fixed_reg) {
1214 if (ts->val_type == TEMP_VAL_REG) {
1215 tcg_reg_free(s, ts->reg);
1220 for(i = s->nb_globals; i < s->nb_temps; i++) {
1222 if (ts->val_type != TEMP_VAL_CONST) {
1223 if (ts->val_type == TEMP_VAL_REG) {
1224 s->reg_to_temp[ts->reg] = -1;
1226 ts->val_type = TEMP_VAL_DEAD;
1231 #define IS_DEAD_IARG(n) ((dead_iargs >> (n)) & 1)
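/* 'dead_iargs' is the per-op bitmask computed by tcg_liveness_analysis():
   bit n is set when input argument n is not used after the current op. */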
1233 static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
1235 unsigned int dead_iargs)
1239 const TCGArgConstraint *arg_ct;
1241 ots = &s->temps[args[0]];
1242 ts = &s->temps[args[1]];
1243 arg_ct = &def->args_ct[0];
1245 if (ts->val_type == TEMP_VAL_REG) {
1246 if (IS_DEAD_IARG(0) && !ts->fixed_reg && !ots->fixed_reg) {
1247 /* the mov can be suppressed */
1248 if (ots->val_type == TEMP_VAL_REG)
1249 s->reg_to_temp[ots->reg] = -1;
1251 s->reg_to_temp[reg] = -1;
1252 ts->val_type = TEMP_VAL_DEAD;
1254 if (ots->val_type == TEMP_VAL_REG) {
1257 reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
1259 if (ts->reg != reg) {
1260 tcg_out_mov(s, reg, ts->reg);
1263 } else if (ts->val_type == TEMP_VAL_MEM) {
1264 if (ots->val_type == TEMP_VAL_REG) {
1267 reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
1269 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1270 } else if (ts->val_type == TEMP_VAL_CONST) {
1271 if (ots->val_type == TEMP_VAL_REG) {
1274 reg = tcg_reg_alloc(s, arg_ct->u.regs, s->reserved_regs);
1276 tcg_out_movi(s, ots->type, reg, ts->val);
1280 s->reg_to_temp[reg] = args[0];
1282 ots->val_type = TEMP_VAL_REG;
1283 ots->mem_coherent = 0;
1286 static void tcg_reg_alloc_op(TCGContext *s,
1287 const TCGOpDef *def, int opc,
1289 unsigned int dead_iargs)
1291 TCGRegSet allocated_regs;
1292 int i, k, nb_iargs, nb_oargs, reg;
1294 const TCGArgConstraint *arg_ct;
1296 TCGArg new_args[TCG_MAX_OP_ARGS];
1297 int const_args[TCG_MAX_OP_ARGS];
1299 nb_oargs = def->nb_oargs;
1300 nb_iargs = def->nb_iargs;
1302 /* copy constants */
1303 memcpy(new_args + nb_oargs + nb_iargs,
1304 args + nb_oargs + nb_iargs,
1305 sizeof(TCGArg) * def->nb_cargs);
1307 /* satisfy input constraints */
1308 tcg_regset_set(allocated_regs, s->reserved_regs);
1309 for(k = 0; k < nb_iargs; k++) {
1310 i = def->sorted_args[nb_oargs + k];
1312 arg_ct = &def->args_ct[i];
1313 ts = &s->temps[arg];
1314 if (ts->val_type == TEMP_VAL_MEM) {
1315 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1316 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1317 ts->val_type = TEMP_VAL_REG;
1319 ts->mem_coherent = 1;
1320 s->reg_to_temp[reg] = arg;
1321 } else if (ts->val_type == TEMP_VAL_CONST) {
1322 if (tcg_target_const_match(ts->val, arg_ct)) {
1323 /* constant is OK for instruction */
1325 new_args[i] = ts->val;
1328 /* need to move to a register */
1329 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1330 tcg_out_movi(s, ts->type, reg, ts->val);
1334 assert(ts->val_type == TEMP_VAL_REG);
1335 if (arg_ct->ct & TCG_CT_IALIAS) {
1336 if (ts->fixed_reg) {
1337 /* if fixed register, we must allocate a new register
1338 if the alias is not the same register */
1339 if (arg != args[arg_ct->alias_index])
1340 goto allocate_in_reg;
1342 /* if the input is aliased to an output and if it is
1343 not dead after the instruction, we must allocate
1344 a new register and move it */
1345 if (!IS_DEAD_IARG(i - nb_oargs))
1346 goto allocate_in_reg;
1350 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1351 /* nothing to do : the constraint is satisfied */
1354 /* allocate a new register matching the constraint
1355 and move the temporary register into it */
1356 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1357 tcg_out_mov(s, reg, ts->reg);
1362 tcg_regset_set_reg(allocated_regs, reg);
1366 /* mark dead temporaries and free the associated registers */
1367 for(i = 0; i < nb_iargs; i++) {
1368 arg = args[nb_oargs + i];
1369 if (IS_DEAD_IARG(i)) {
1370 ts = &s->temps[arg];
1371 if (ts->val_type != TEMP_VAL_CONST && !ts->fixed_reg) {
1372 if (ts->val_type == TEMP_VAL_REG)
1373 s->reg_to_temp[ts->reg] = -1;
1374 ts->val_type = TEMP_VAL_DEAD;
1379 /* XXX: permit generic clobber register list ? */
1380 if (def->flags & TCG_OPF_CALL_CLOBBER) {
1381 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1382 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
1383 tcg_reg_free(s, reg);
1388 /* satisfy the output constraints */
1389 tcg_regset_set(allocated_regs, s->reserved_regs);
1390 for(k = 0; k < nb_oargs; k++) {
1391 i = def->sorted_args[k];
1393 arg_ct = &def->args_ct[i];
1394 ts = &s->temps[arg];
1395 if (arg_ct->ct & TCG_CT_ALIAS) {
1396 reg = new_args[arg_ct->alias_index];
1398 /* if fixed register, we try to use it */
1400 if (ts->fixed_reg &&
1401 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1404 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1406 tcg_regset_set_reg(allocated_regs, reg);
1407 /* if a fixed register is used, then a move will be done afterwards */
1408 if (!ts->fixed_reg) {
1409 if (ts->val_type == TEMP_VAL_REG)
1410 s->reg_to_temp[ts->reg] = -1;
1411 ts->val_type = TEMP_VAL_REG;
1413 /* temp value is modified, so the value kept in memory is
1414 potentially not the same */
1415 ts->mem_coherent = 0;
1416 s->reg_to_temp[reg] = arg;
1422 if (def->flags & TCG_OPF_BB_END)
1423 tcg_reg_alloc_bb_end(s);
1425 /* emit instruction */
1426 tcg_out_op(s, opc, new_args, const_args);
1428 /* move the outputs to the correct registers if needed */
1429 for(i = 0; i < nb_oargs; i++) {
1430 ts = &s->temps[args[i]];
1432 if (ts->fixed_reg && ts->reg != reg) {
1433 tcg_out_mov(s, ts->reg, reg);
1438 static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
1439 int opc, const TCGArg *args,
1440 unsigned int dead_iargs)
1442 int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
1443 TCGArg arg, func_arg;
1445 tcg_target_long stack_offset, call_stack_size, func_addr;
1447 TCGRegSet allocated_regs;
1448 const TCGArgConstraint *arg_ct;
1452 nb_oargs = arg >> 16;
1453 nb_iargs = arg & 0xffff;
1454 nb_params = nb_iargs - 1;
1456 flags = args[nb_oargs + nb_iargs];
1458 nb_regs = tcg_target_get_call_iarg_regs_count(flags);
1459 if (nb_regs > nb_params)
1460 nb_regs = nb_params;
1462 /* assign stack slots first */
1463 /* XXX: preallocate call stack */
1464 call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
1465 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
1466 ~(TCG_TARGET_STACK_ALIGN - 1);
1467 #ifdef TCG_TARGET_STACK_GROWSUP
1468 tcg_out_addi(s, TCG_REG_CALL_STACK, call_stack_size);
1470 tcg_out_addi(s, TCG_REG_CALL_STACK, -call_stack_size);
1474 for(i = nb_regs; i < nb_params; i++) {
1475 arg = args[nb_oargs + i];
1476 ts = &s->temps[arg];
1477 if (ts->val_type == TEMP_VAL_REG) {
1478 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
1479 } else if (ts->val_type == TEMP_VAL_MEM) {
1480 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1482 /* XXX: not correct if reading values from the stack */
1483 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1484 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
1485 } else if (ts->val_type == TEMP_VAL_CONST) {
1486 reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
1488 /* XXX: sign extend may be needed on some targets */
1489 tcg_out_movi(s, ts->type, reg, ts->val);
1490 tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
1494 #ifdef TCG_TARGET_STACK_GROWSUP
1495 stack_offset -= sizeof(tcg_target_long);
1497 stack_offset += sizeof(tcg_target_long);
1501 /* assign input registers */
1502 tcg_regset_set(allocated_regs, s->reserved_regs);
1503 for(i = 0; i < nb_regs; i++) {
1504 arg = args[nb_oargs + i];
1505 ts = &s->temps[arg];
1506 reg = tcg_target_call_iarg_regs[i];
1507 tcg_reg_free(s, reg);
1508 if (ts->val_type == TEMP_VAL_REG) {
1509 if (ts->reg != reg) {
1510 tcg_out_mov(s, reg, ts->reg);
1512 } else if (ts->val_type == TEMP_VAL_MEM) {
1513 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1514 } else if (ts->val_type == TEMP_VAL_CONST) {
1515 /* XXX: sign extend ? */
1516 tcg_out_movi(s, ts->type, reg, ts->val);
1520 tcg_regset_set_reg(allocated_regs, reg);
1523 /* assign function address */
1524 func_arg = args[nb_oargs + nb_iargs - 1];
1525 arg_ct = &def->args_ct[0];
1526 ts = &s->temps[func_arg];
1527 func_addr = ts->val;
1529 func_addr = (tcg_target_long)__canonicalize_funcptr_for_compare((void *)func_addr);
1532 if (ts->val_type == TEMP_VAL_MEM) {
1533 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1534 tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
1536 } else if (ts->val_type == TEMP_VAL_REG) {
1538 if (!tcg_regset_test_reg(arg_ct->u.regs, reg)) {
1539 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1540 tcg_out_mov(s, reg, ts->reg);
1543 } else if (ts->val_type == TEMP_VAL_CONST) {
1544 if (tcg_target_const_match(func_addr, arg_ct)) {
1546 func_arg = func_addr;
1548 reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
1549 tcg_out_movi(s, ts->type, reg, func_addr);
1556 /* mark dead temporaries and free the associated registers */
1557 for(i = 0; i < nb_params; i++) {
1558 arg = args[nb_oargs + i];
1559 if (IS_DEAD_IARG(i)) {
1560 ts = &s->temps[arg];
1561 if (ts->val_type != TEMP_VAL_CONST && !ts->fixed_reg) {
1562 if (ts->val_type == TEMP_VAL_REG)
1563 s->reg_to_temp[ts->reg] = -1;
1564 ts->val_type = TEMP_VAL_DEAD;
1569 /* clobber call registers */
1570 for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
1571 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
1572 tcg_reg_free(s, reg);
1576 /* store globals and free associated registers (we assume the call
1577 can modify any global). */
1578 for(i = 0; i < s->nb_globals; i++) {
1580 if (!ts->fixed_reg) {
1581 if (ts->val_type == TEMP_VAL_REG) {
1582 tcg_reg_free(s, ts->reg);
1587 tcg_out_op(s, opc, &func_arg, &const_func_arg);
1589 #ifdef TCG_TARGET_STACK_GROWSUP
1590 tcg_out_addi(s, TCG_REG_CALL_STACK, -call_stack_size);
1592 tcg_out_addi(s, TCG_REG_CALL_STACK, call_stack_size);
1595 /* assign output registers and emit moves if needed */
1596 for(i = 0; i < nb_oargs; i++) {
1598 ts = &s->temps[arg];
1599 reg = tcg_target_call_oarg_regs[i];
1600 tcg_reg_free(s, reg);
1601 if (ts->fixed_reg) {
1602 if (ts->reg != reg) {
1603 tcg_out_mov(s, ts->reg, reg);
1606 if (ts->val_type == TEMP_VAL_REG)
1607 s->reg_to_temp[ts->reg] = -1;
1608 ts->val_type = TEMP_VAL_REG;
1610 ts->mem_coherent = 0;
1611 s->reg_to_temp[reg] = arg;
1615 return nb_iargs + nb_oargs + def->nb_cargs + 1;
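/* The return value is the total number of operand words this variable
   length op occupies, so that the main loop in tcg_gen_code_common() can
   advance 'args' past it. */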
1618 #ifdef CONFIG_PROFILER
1620 static int64_t dyngen_table_op_count[NB_OPS];
1622 void dump_op_count(void)
1626 f = fopen("/tmp/op1.log", "w");
1627 for(i = 0; i < INDEX_op_end; i++) {
1628 fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, dyngen_table_op_count[i]);
1631 f = fopen("/tmp/op2.log", "w");
1632 for(i = INDEX_op_end; i < NB_OPS; i++) {
1633 fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, dyngen_table_op_count[i]);
1640 static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
1643 int opc, op_index, macro_op_index;
1644 const TCGOpDef *def;
1645 unsigned int dead_iargs;
1649 if (unlikely(loglevel & CPU_LOG_TB_OP)) {
1650 fprintf(logfile, "OP:\n");
1651 tcg_dump_ops(s, logfile);
1652 fprintf(logfile, "\n");
1656 tcg_liveness_analysis(s);
1659 if (unlikely(loglevel & CPU_LOG_TB_OP_OPT)) {
1660 fprintf(logfile, "OP after la:\n");
1661 tcg_dump_ops(s, logfile);
1662 fprintf(logfile, "\n");
1666 tcg_reg_alloc_start(s);
1668 s->code_buf = gen_code_buf;
1669 s->code_ptr = gen_code_buf;
1671 macro_op_index = -1;
1672 args = gen_opparam_buf;
1675 #ifdef TCG_TARGET_NEEDS_PROLOGUE
1676 tcg_target_prologue(s);
1680 opc = gen_opc_buf[op_index];
1681 #ifdef CONFIG_PROFILER
1682 dyngen_table_op_count[opc]++;
1684 def = &tcg_op_defs[opc];
1686 printf("%s: %d %d %d\n", def->name,
1687 def->nb_oargs, def->nb_iargs, def->nb_cargs);
1691 case INDEX_op_mov_i32:
1692 #if TCG_TARGET_REG_BITS == 64
1693 case INDEX_op_mov_i64:
1695 dead_iargs = s->op_dead_iargs[op_index];
1696 tcg_reg_alloc_mov(s, def, args, dead_iargs);
1706 case INDEX_op_discard:
1709 ts = &s->temps[args[0]];
1710 /* mark the temporary as dead */
1711 if (ts->val_type != TEMP_VAL_CONST && !ts->fixed_reg) {
1712 if (ts->val_type == TEMP_VAL_REG)
1713 s->reg_to_temp[ts->reg] = -1;
1714 ts->val_type = TEMP_VAL_DEAD;
1718 case INDEX_op_macro_goto:
1719 macro_op_index = op_index; /* only used for exceptions */
1720 op_index = args[0] - 1;
1721 args = gen_opparam_buf + args[1];
1723 case INDEX_op_macro_end:
1724 macro_op_index = -1; /* only used for exceptions */
1725 op_index = args[0] - 1;
1726 args = gen_opparam_buf + args[1];
1728 case INDEX_op_macro_start:
1729 /* must never happen here */
1731 case INDEX_op_set_label:
1732 tcg_reg_alloc_bb_end(s);
1733 tcg_out_label(s, args[0], (long)s->code_ptr);
1736 dead_iargs = s->op_dead_iargs[op_index];
1737 args += tcg_reg_alloc_call(s, def, opc, args, dead_iargs);
1742 #ifndef CONFIG_NO_DYNGEN_OP
1743 case 0 ... INDEX_op_end - 1:
1744 /* legacy dyngen ops */
1745 #ifdef CONFIG_PROFILER
1747 extern int64_t dyngen_old_op_count;
1748 dyngen_old_op_count++;
1751 tcg_reg_alloc_bb_end(s);
1752 if (search_pc >= 0) {
1753 s->code_ptr += def->copy_size;
1754 args += def->nb_args;
1756 args = dyngen_op(s, opc, args);
1761 /* Note: in order to speed up the code, it would be much
1762 faster to have specialized register allocator functions for
1763 some common argument patterns */
1764 dead_iargs = s->op_dead_iargs[op_index];
1765 tcg_reg_alloc_op(s, def, opc, args, dead_iargs);
1768 args += def->nb_args;
1770 if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
1771 if (macro_op_index >= 0)
1772 return macro_op_index;
1785 int dyngen_code(TCGContext *s, uint8_t *gen_code_buf)
1787 #ifdef CONFIG_PROFILER
1789 extern int64_t dyngen_op_count;
1790 extern int dyngen_op_count_max;
1792 n = (gen_opc_ptr - gen_opc_buf);
1793 dyngen_op_count += n;
1794 if (n > dyngen_op_count_max)
1795 dyngen_op_count_max = n;
1799 tcg_gen_code_common(s, gen_code_buf, -1);
1801 /* flush instruction cache */
1802 flush_icache_range((unsigned long)gen_code_buf,
1803 (unsigned long)s->code_ptr);
1804 return s->code_ptr - gen_code_buf;
1807 /* Return the index of the micro operation whose generated host code
1808 contains the byte at 'offset' from the start of the TB. The contents of
1809 gen_code_buf must not be changed, though writing the same values is OK.
1810 Return -1 if not found. */
1811 int dyngen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset)
1813 return tcg_gen_code_common(s, gen_code_buf, offset);