/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions.  Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS
#include "cpu.h"

#include "exec/cpu-common.h"
#include "exec/exec-all.h"

#include "tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS ELFCLASS32
#else
# define ELF_CLASS ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA ELFDATA2MSB
#else
# define ELF_DATA ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"

/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static bool tcg_out_ldst_finalize(TCGContext *s);
#endif

#define TCG_HIGHWATER 1024

static TCGRegSet tcg_target_available_regs[2];
static TCGRegSet tcg_target_call_clobber_regs;

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

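/*
 * Example (illustrative only): a tcg-target.inc.c backend builds host
 * instructions out of the emit/patch helpers above.  A backend whose
 * insn unit is 4 bytes would typically append a fixed-width instruction
 * and rewrite it later when a relocation is resolved:
 *
 *     tcg_out32(s, insn);               // append one insn unit
 *     ...
 *     tcg_patch32(code_ptr, new_insn);  // rewrite it at reloc time
 */
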
/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place.  The caller may not have
           written the initial value.  */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = &tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    *l = (TCGLabel){
        .id = s->nb_labels++
    };

    return l;
}

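/*
 * Example (illustrative only): a front end pairs the label machinery
 * above with brcond/set_label.  A forward branch records a relocation
 * via tcg_out_reloc() when its code is emitted and is patched once
 * tcg_out_label() runs for the label's final address:
 *
 *     TCGLabel *skip = gen_new_label();
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, val, 0, skip);
 *     ...                              // code skipped when val == 0
 *     gen_set_label(skip);
 */
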
#include "tcg-target.inc.c"

/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

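/*
 * Example (illustrative only): per-translation data such as TCGLabel or
 * TCGRelocation records is carved out of the pool and is released
 * wholesale by tcg_pool_reset(), so there is no matching free call:
 *
 *     TCGLabel *l = tcg_malloc(sizeof(TCGLabel));  // pool allocation
 *     ...
 *     tcg_pool_reset(s);                           // releases every chunk
 */
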
typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);

void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }
}

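/*
 * Note (summary of the code above): tcg_context_init() carves one flat
 * args_ct/sorted_args allocation into per-opcode slices, registers every
 * helper in a pointer-keyed hash table, and then asks the backend
 * (tcg_target_init + process_op_defs) for its register and constraint
 * information before computing indirect_reg_alloc_order.
 */
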
/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        return NULL;
    }
    s->code_gen_ptr = next;
    s->data_gen_ptr = NULL;
    return tb;
}

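/*
 * Example (illustrative only): a caller is expected to treat a NULL
 * return as "code_gen_buffer is full" and flush before retrying:
 *
 *     TranslationBlock *tb = tcg_tb_alloc(s);
 *     if (tb == NULL) {
 *         // flush the code buffer, then allocate again
 *     }
 */
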
void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->code_gen_prologue = buf0;

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);
    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size = s->code_gen_buffer_size - prologue_size;
    s->code_gen_buffer_size = total_size;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        log_disas(buf0, prologue_size);
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}

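/*
 * Note (summary of the code above): after tcg_prologue_init() the
 * prologue occupies the start of code_gen_buffer, the buffer fields are
 * rebased past it, and code_gen_highwater is left TCG_HIGHWATER bytes
 * short of the end so that generating a single TB can safely overrun a
 * little before the overflow is noticed.
 */
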
void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    s->gen_op_buf[0].next = 1;
    s->gen_op_buf[0].prev = 0;
    s->gen_next_op_idx = 1;
}

static inline int temp_idx(TCGContext *s, TCGTemp *ts)
{
    ptrdiff_t n = ts - s->temps;
    tcg_debug_assert(n >= 0 && n < s->nb_temps);
    return n;
}

static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    return tcg_temp_alloc(s);
}

static int tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                       TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return temp_idx(s, ts);
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    int idx;
    s->frame_start = start;
    s->frame_end = start + size;
    idx = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
    s->frame_temp = &s->temps[idx];
}

TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name)
{
    TCGContext *s = &tcg_ctx;
    int idx;

    if (tcg_regset_test_reg(s->reserved_regs, reg)) {
        tcg_abort();
    }
    idx = tcg_global_reg_new_internal(s, TCG_TYPE_I32, reg, name);
    return MAKE_TCGV_I32(idx);
}

TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name)
{
    TCGContext *s = &tcg_ctx;
    int idx;

    if (tcg_regset_test_reg(s->reserved_regs, reg)) {
        tcg_abort();
    }
    idx = tcg_global_reg_new_internal(s, TCG_TYPE_I64, reg, name);
    return MAKE_TCGV_I64(idx);
}

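/*
 * Example (illustrative only): targets register fixed-register globals
 * once at startup; the canonical case is the CPU state pointer:
 *
 *     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
 */
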
int tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                intptr_t offset, const char *name)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *base_ts = &s->temps[GET_TCGV_PTR(base)];
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return temp_idx(s, ts);
}

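/*
 * Example (illustrative only; CPUFooState is a stand-in name): front ends
 * expose guest CPU fields through the wrappers around this function:
 *
 *     cpu_pc = tcg_global_mem_new_i32(cpu_env,
 *                                     offsetof(CPUFooState, pc), "pc");
 */
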
static int tcg_temp_new_internal(TCGType type, int temp_local)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
        idx = temp_idx(s, ts);
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return idx;
}

TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
{
    int idx;

    idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
    return MAKE_TCGV_I32(idx);
}

TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
{
    int idx;

    idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
    return MAKE_TCGV_I64(idx);
}

static void tcg_temp_free_internal(int idx)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int k;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(idx >= s->nb_globals && idx < s->nb_temps);
    ts = &s->temps[idx];
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}

void tcg_temp_free_i32(TCGv_i32 arg)
{
    tcg_temp_free_internal(GET_TCGV_I32(arg));
}

void tcg_temp_free_i64(TCGv_i64 arg)
{
    tcg_temp_free_internal(GET_TCGV_I64(arg));
}

TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

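/*
 * Example (illustrative only; src is a stand-in for some existing TCGv_i32):
 * temporaries and constants obtained here are returned to the free_temps
 * bitmaps with the matching free call once the front end is done with them:
 *
 *     TCGv_i32 t = tcg_temp_new_i32();
 *     tcg_gen_addi_i32(t, src, 0x10);
 *     ...
 *     tcg_temp_free_i32(t);
 */
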
#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif

/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case NB_OPS:
        break;
    }
    g_assert_not_reached();
}

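/*
 * Example (illustrative only): generic expansion code can consult this
 * predicate before relying on an optional opcode and fall back otherwise:
 *
 *     if (tcg_op_supported(INDEX_op_ctpop_i32)) {
 *         // emit ctpop directly
 *     } else {
 *         // expand to a portable bit-counting sequence
 *     }
 */
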
/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(TCGContext *s, void *func, TCGArg ret,
                   int nargs, TCGArg *args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGArg split_args[MAX_OPC_PARAM];

    TCGV_UNUSED_I64(retl);
    TCGV_UNUSED_I64(reth);
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = GET_TCGV_I32(h);
                split_args[real_args++] = GET_TCGV_I32(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = GET_TCGV_I64(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    i = s->gen_next_op_idx;
    tcg_debug_assert(i < OPC_BUF_SIZE);
    s->gen_op_buf[0].prev = i;
    s->gen_next_op_idx = i + 1;
    op = &s->gen_op_buf[i];

    /* Set links for sequential allocation during translation.  */
    memset(op, 0, offsetof(TCGOp, args));
    op->opc = INDEX_op_call;
    op->prev = i - 1;
    op->next = i + 1;

    pi = 0;
    if (ret != TCG_CALL_DUMMY_ARG) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = GET_TCGV_I64(reth);
            op->args[pi++] = GET_TCGV_I64(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = ret;
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = ret + 1;
            op->args[pi++] = ret;
#else
            op->args[pi++] = ret;
            op->args[pi++] = ret + 1;
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = ret;
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    op->callo = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = args[i] + 1;
            op->args[pi++] = args[i];
#else
            op->args[pi++] = args[i];
            op->args[pi++] = args[i] + 1;
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = args[i];
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    op->calli = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(op->calli == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            TCGv_i32 h = MAKE_TCGV_I32(args[real_args++]);
            TCGv_i32 l = MAKE_TCGV_I32(args[real_args++]);
            tcg_temp_free_i32(h);
            tcg_temp_free_i32(l);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(MAKE_TCGV_I64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
            tcg_temp_free_i64(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}

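/*
 * Note (as used by the code above): sizemask packs two bits per value,
 * with bit 0 marking a 64-bit return; for argument i, (1 << (i+1)*2)
 * marks a 64-bit argument and (2 << (i+1)*2) a signed one.  The emitted
 * INDEX_op_call op stores outputs, then inputs, then the function
 * pointer and flags, with op->callo/op->calli recording the counts.
 */
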
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i;
    TCGTemp *ts;
    for(i = 0; i < s->nb_globals; i++) {
        ts = &s->temps[i];
        if (ts->fixed_reg) {
            ts->val_type = TEMP_VAL_REG;
        } else {
            ts->val_type = TEMP_VAL_MEM;
        }
    }
    for(i = s->nb_globals; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        if (ts->temp_local) {
            ts->val_type = TEMP_VAL_MEM;
        } else {
            ts->val_type = TEMP_VAL_DEAD;
        }
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}

static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(s, ts);

    if (idx < s->nb_globals) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str_idx(TCGContext *s, char *buf,
                                 int buf_size, int idx)
{
    tcg_debug_assert(idx >= 0 && idx < s->nb_temps);
    return tcg_get_arg_str_ptr(s, buf, buf_size, &s->temps[idx]);
}

/* Find helper name.  */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (helper_table) {
        TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}

static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[] =
{
    [MO_UB] = "ub",
    [MO_SB] = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ] = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ] = "beq",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT] = "un+",
    [MO_ALIGN >> MO_ASHIFT] = "",
#else
    [MO_UNALN >> MO_ASHIFT] = "",
    [MO_ALIGN >> MO_ASHIFT] = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT] = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT] = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT] = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};

void tcg_dump_ops(TCGContext *s)
{
    char buf[128];
    TCGOp *op;
    int oi;

    for (oi = s->gen_op_buf[0].next; oi != 0; oi = op->next) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        const TCGArg *args;
        TCGOpcode c;
        int col = 0;

        op = &s->gen_op_buf[oi];
        c = op->opc;
        def = &tcg_op_defs[c];
        args = op->args;

        if (c == INDEX_op_insn_start) {
            col += qemu_log("%s ----", oi != s->gen_op_buf[0].next ? "\n" : "");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
#else
                a = args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = op->callo;
            nb_iargs = op->calli;
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, args[nb_oargs + nb_iargs]),
                            args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                           args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str_idx(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                          args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                          args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
                if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
                    col += qemu_log(",%s", cond_name[args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "", arg_label(args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", args[k]);
            }
        }
        if (op->life) {
            unsigned life = op->life;

            for (; col < 48; ++col) {
                putc(' ', qemu_logfile);
            }

            if (life & (SYNC_ARG * 3)) {
                qemu_log(" sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log(" dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }
        qemu_log("\n");
    }
}

/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;

    int i, n;
    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG))
            return 0;
        n = 0;
        for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i))
                n++;
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}

/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for(i = 0; i < n; i++)
        def->sorted_args[start + i] = start + i;
    if (n <= 1)
        return;
    for(i = 0; i < n - 1; i++) {
        for(j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}

static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}

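/*
 * Example (illustrative only): the constraint strings parsed above come
 * from the backend's tcg_target_op_def(); an entry of the usual shape
 *
 *     static const TCGTargetOpDef add = { .args_ct_str = { "r", "r", "ri" } };
 *
 * describes one register output, one register input, and one input that
 * may be a register or an immediate ("i"), while a digit such as "0"
 * would alias an input to the corresponding output operand.
 */
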
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    int next = op->next;
    int prev = op->prev;

    /* We should never attempt to remove the list terminator.  */
    tcg_debug_assert(op != &s->gen_op_buf[0]);

    s->gen_op_buf[next].prev = prev;
    s->gen_op_buf[prev].next = next;

    memset(op, 0, sizeof(*op));

#ifdef CONFIG_PROFILER
    s->del_op_count++;
#endif
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
                            TCGOpcode opc, int nargs)
{
    int oi = s->gen_next_op_idx;
    int prev = old_op->prev;
    int next = old_op - s->gen_op_buf;
    TCGOp *new_op;

    tcg_debug_assert(oi < OPC_BUF_SIZE);
    s->gen_next_op_idx = oi + 1;

    new_op = &s->gen_op_buf[oi];
    *new_op = (TCGOp){
        .opc = opc,
        .prev = prev,
        .next = next
    };
    s->gen_op_buf[prev].next = oi;
    old_op->prev = oi;

    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
                           TCGOpcode opc, int nargs)
{
    int oi = s->gen_next_op_idx;
    int prev = old_op - s->gen_op_buf;
    int next = old_op->next;
    TCGOp *new_op;

    tcg_debug_assert(oi < OPC_BUF_SIZE);
    s->gen_next_op_idx = oi + 1;

    new_op = &s->gen_op_buf[oi];
    *new_op = (TCGOp){
        .opc = opc,
        .prev = prev,
        .next = next
    };
    s->gen_op_buf[next].prev = oi;
    old_op->next = oi;

    return new_op;
}

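/*
 * Note (summary of the code above): gen_op_buf forms a doubly linked
 * list threaded through prev/next indices, with gen_op_buf[0] acting as
 * the list terminator; tcg_op_insert_before/after splice a freshly
 * allocated op around an existing one, which is how later passes insert
 * loads and stores for indirect globals.
 */
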
c70fbf0a
RH
1613#define TS_DEAD 1
1614#define TS_MEM 2
1615
5a18407f
RH
1616#define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n)))
1617#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))
1618
9c43b68d
AJ
1619/* liveness analysis: end of function: all temps are dead, and globals
1620 should be in memory. */
c70fbf0a 1621static inline void tcg_la_func_end(TCGContext *s, uint8_t *temp_state)
c896fe29 1622{
c70fbf0a
RH
1623 memset(temp_state, TS_DEAD | TS_MEM, s->nb_globals);
1624 memset(temp_state + s->nb_globals, TS_DEAD, s->nb_temps - s->nb_globals);
c896fe29
FB
1625}
1626
9c43b68d
AJ
1627/* liveness analysis: end of basic block: all temps are dead, globals
1628 and local temps should be in memory. */
c70fbf0a 1629static inline void tcg_la_bb_end(TCGContext *s, uint8_t *temp_state)
641d5fbe 1630{
c70fbf0a 1631 int i, n;
641d5fbe 1632
c70fbf0a
RH
1633 tcg_la_func_end(s, temp_state);
1634 for (i = s->nb_globals, n = s->nb_temps; i < n; i++) {
1635 if (s->temps[i].temp_local) {
1636 temp_state[i] |= TS_MEM;
1637 }
641d5fbe
FB
1638 }
1639}
1640
a1b3c48d 1641/* Liveness analysis : update the opc_arg_life array to tell if a
c896fe29
FB
1642 given input arguments is dead. Instructions updating dead
1643 temporaries are removed. */
5a18407f 1644static void liveness_pass_1(TCGContext *s, uint8_t *temp_state)
c896fe29 1645{
c70fbf0a 1646 int nb_globals = s->nb_globals;
5a18407f 1647 int oi, oi_prev;
a1b3c48d 1648
c70fbf0a 1649 tcg_la_func_end(s, temp_state);
c896fe29 1650
dcb8e758 1651 for (oi = s->gen_op_buf[0].prev; oi != 0; oi = oi_prev) {
c45cb8bb
RH
1652 int i, nb_iargs, nb_oargs;
1653 TCGOpcode opc_new, opc_new2;
1654 bool have_opc_new2;
a1b3c48d 1655 TCGLifeData arg_life = 0;
c45cb8bb
RH
1656 TCGArg arg;
1657
1658 TCGOp * const op = &s->gen_op_buf[oi];
75e8b9b7 1659 TCGArg * const args = op->args;
c45cb8bb
RH
1660 TCGOpcode opc = op->opc;
1661 const TCGOpDef *def = &tcg_op_defs[opc];
1662
1663 oi_prev = op->prev;
1664
1665 switch (opc) {
c896fe29 1666 case INDEX_op_call:
c6e113f5
FB
1667 {
1668 int call_flags;
c896fe29 1669
c45cb8bb
RH
1670 nb_oargs = op->callo;
1671 nb_iargs = op->calli;
cf066674 1672 call_flags = args[nb_oargs + nb_iargs + 1];
c6e113f5 1673
c45cb8bb 1674 /* pure functions can be removed if their result is unused */
78505279 1675 if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
cf066674 1676 for (i = 0; i < nb_oargs; i++) {
c6e113f5 1677 arg = args[i];
c70fbf0a 1678 if (temp_state[arg] != TS_DEAD) {
c6e113f5 1679 goto do_not_remove_call;
9c43b68d 1680 }
c6e113f5 1681 }
c45cb8bb 1682 goto do_remove;
c6e113f5
FB
1683 } else {
1684 do_not_remove_call:
c896fe29 1685
c6e113f5 1686 /* output args are dead */
cf066674 1687 for (i = 0; i < nb_oargs; i++) {
c6e113f5 1688 arg = args[i];
c70fbf0a 1689 if (temp_state[arg] & TS_DEAD) {
a1b3c48d 1690 arg_life |= DEAD_ARG << i;
6b64b624 1691 }
c70fbf0a 1692 if (temp_state[arg] & TS_MEM) {
a1b3c48d 1693 arg_life |= SYNC_ARG << i;
9c43b68d 1694 }
c70fbf0a 1695 temp_state[arg] = TS_DEAD;
c6e113f5 1696 }
78505279 1697
78505279
AJ
1698 if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
1699 TCG_CALL_NO_READ_GLOBALS))) {
9c43b68d 1700 /* globals should go back to memory */
c70fbf0a
RH
1701 memset(temp_state, TS_DEAD | TS_MEM, nb_globals);
1702 } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
1703 /* globals should be synced to memory */
1704 for (i = 0; i < nb_globals; i++) {
1705 temp_state[i] |= TS_MEM;
1706 }
b9c18f56
AJ
1707 }
1708
c19f47bf 1709 /* record arguments that die in this helper */
cf066674 1710 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
866cb6cb 1711 arg = args[i];
39cf05d3 1712 if (arg != TCG_CALL_DUMMY_ARG) {
c70fbf0a 1713 if (temp_state[arg] & TS_DEAD) {
a1b3c48d 1714 arg_life |= DEAD_ARG << i;
39cf05d3 1715 }
c6e113f5 1716 }
c6e113f5 1717 }
67cc32eb 1718 /* input arguments are live for preceding opcodes */
c70fbf0a 1719 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
c19f47bf 1720 arg = args[i];
c70fbf0a
RH
1721 if (arg != TCG_CALL_DUMMY_ARG) {
1722 temp_state[arg] &= ~TS_DEAD;
1723 }
c19f47bf 1724 }
c896fe29 1725 }
c896fe29 1726 }
c896fe29 1727 break;
765b842a 1728 case INDEX_op_insn_start:
c896fe29 1729 break;
5ff9d6a4 1730 case INDEX_op_discard:
5ff9d6a4 1731 /* mark the temporary as dead */
c70fbf0a 1732 temp_state[args[0]] = TS_DEAD;
5ff9d6a4 1733 break;
1305c451
RH
1734
1735 case INDEX_op_add2_i32:
c45cb8bb 1736 opc_new = INDEX_op_add_i32;
f1fae40c 1737 goto do_addsub2;
1305c451 1738 case INDEX_op_sub2_i32:
c45cb8bb 1739 opc_new = INDEX_op_sub_i32;
f1fae40c
RH
1740 goto do_addsub2;
1741 case INDEX_op_add2_i64:
c45cb8bb 1742 opc_new = INDEX_op_add_i64;
f1fae40c
RH
1743 goto do_addsub2;
1744 case INDEX_op_sub2_i64:
c45cb8bb 1745 opc_new = INDEX_op_sub_i64;
f1fae40c 1746 do_addsub2:
1305c451
RH
1747 nb_iargs = 4;
1748 nb_oargs = 2;
1749 /* Test if the high part of the operation is dead, but not
1750 the low part. The result can be optimized to a simple
1751 add or sub. This happens often for x86_64 guest when the
1752 cpu mode is set to 32 bit. */
c70fbf0a
RH
1753 if (temp_state[args[1]] == TS_DEAD) {
1754 if (temp_state[args[0]] == TS_DEAD) {
1305c451
RH
1755 goto do_remove;
1756 }
c45cb8bb
RH
1757 /* Replace the opcode and adjust the args in place,
1758 leaving 3 unused args at the end. */
1759 op->opc = opc = opc_new;
1305c451
RH
1760 args[1] = args[2];
1761 args[2] = args[4];
1305c451
RH
1762 /* Fall through and mark the single-word operation live. */
1763 nb_iargs = 2;
1764 nb_oargs = 1;
1765 }
1766 goto do_not_remove;
1767
1414968a 1768 case INDEX_op_mulu2_i32:
c45cb8bb
RH
1769 opc_new = INDEX_op_mul_i32;
1770 opc_new2 = INDEX_op_muluh_i32;
1771 have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
03271524 1772 goto do_mul2;
f1fae40c 1773 case INDEX_op_muls2_i32:
c45cb8bb
RH
1774 opc_new = INDEX_op_mul_i32;
1775 opc_new2 = INDEX_op_mulsh_i32;
1776 have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
f1fae40c
RH
1777 goto do_mul2;
1778 case INDEX_op_mulu2_i64:
c45cb8bb
RH
1779 opc_new = INDEX_op_mul_i64;
1780 opc_new2 = INDEX_op_muluh_i64;
1781 have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
03271524 1782 goto do_mul2;
f1fae40c 1783 case INDEX_op_muls2_i64:
c45cb8bb
RH
1784 opc_new = INDEX_op_mul_i64;
1785 opc_new2 = INDEX_op_mulsh_i64;
1786 have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
03271524 1787 goto do_mul2;
f1fae40c 1788 do_mul2:
1414968a
RH
1789 nb_iargs = 2;
1790 nb_oargs = 2;
c70fbf0a
RH
1791 if (temp_state[args[1]] == TS_DEAD) {
1792 if (temp_state[args[0]] == TS_DEAD) {
03271524 1793 /* Both parts of the operation are dead. */
1414968a
RH
1794 goto do_remove;
1795 }
03271524 1796 /* The high part of the operation is dead; generate the low. */
c45cb8bb 1797 op->opc = opc = opc_new;
1414968a
RH
1798 args[1] = args[2];
1799 args[2] = args[3];
c70fbf0a 1800 } else if (temp_state[args[0]] == TS_DEAD && have_opc_new2) {
c45cb8bb
RH
1801 /* The low part of the operation is dead; generate the high. */
1802 op->opc = opc = opc_new2;
03271524
RH
1803 args[0] = args[1];
1804 args[1] = args[2];
1805 args[2] = args[3];
1806 } else {
1807 goto do_not_remove;
1414968a 1808 }
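            /* Similarly (illustrative): for "mulu2_i32 lo,hi, a,b", a dead HI
               yields "mul_i32 lo, a, b", while a dead LO yields
               "muluh_i32 hi, a, b" when the target provides the high-part
               multiply (mulsh for the signed muls2 variants). */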
03271524
RH
1809 /* Mark the single-word operation live. */
1810 nb_oargs = 1;
1414968a
RH
1811 goto do_not_remove;
1812
c896fe29 1813 default:
1305c451 1814 /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
49516bc0
AJ
1815 nb_iargs = def->nb_iargs;
1816 nb_oargs = def->nb_oargs;
c896fe29 1817
49516bc0
AJ
1818 /* Test if the operation can be removed because all
1819 its outputs are dead. We assume that nb_oargs == 0
1820 implies side effects */
1821 if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
c45cb8bb 1822 for (i = 0; i < nb_oargs; i++) {
c70fbf0a 1823 if (temp_state[args[i]] != TS_DEAD) {
49516bc0 1824 goto do_not_remove;
9c43b68d 1825 }
49516bc0 1826 }
1305c451 1827 do_remove:
0c627cdc 1828 tcg_op_remove(s, op);
49516bc0
AJ
1829 } else {
1830 do_not_remove:
49516bc0 1831 /* output args are dead */
c45cb8bb 1832 for (i = 0; i < nb_oargs; i++) {
49516bc0 1833 arg = args[i];
c70fbf0a 1834 if (temp_state[arg] & TS_DEAD) {
a1b3c48d 1835 arg_life |= DEAD_ARG << i;
6b64b624 1836 }
c70fbf0a 1837 if (temp_state[arg] & TS_MEM) {
a1b3c48d 1838 arg_life |= SYNC_ARG << i;
9c43b68d 1839 }
c70fbf0a 1840 temp_state[arg] = TS_DEAD;
49516bc0
AJ
1841 }
1842
1843 /* if end of basic block, update */
1844 if (def->flags & TCG_OPF_BB_END) {
c70fbf0a 1845 tcg_la_bb_end(s, temp_state);
3d5c5f87
AJ
1846 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1847 /* globals should be synced to memory */
c70fbf0a
RH
1848 for (i = 0; i < nb_globals; i++) {
1849 temp_state[i] |= TS_MEM;
1850 }
49516bc0
AJ
1851 }
1852
c19f47bf 1853 /* record arguments that die in this opcode */
c45cb8bb 1854 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
866cb6cb 1855 arg = args[i];
c70fbf0a 1856 if (temp_state[arg] & TS_DEAD) {
a1b3c48d 1857 arg_life |= DEAD_ARG << i;
c896fe29 1858 }
c19f47bf 1859 }
67cc32eb 1860 /* input arguments are live for preceding opcodes */
c19f47bf 1861 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
c70fbf0a 1862 temp_state[args[i]] &= ~TS_DEAD;
c896fe29 1863 }
c896fe29
FB
1864 }
1865 break;
1866 }
bee158cb 1867 op->life = arg_life;
1ff0a2c5 1868 }
c896fe29 1869}
c896fe29 1870
5a18407f
RH
1871/* Liveness analysis: Convert indirect regs to direct temporaries. */
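/* A note on the mechanics below: each indirect global gets a shadow "direct"
   temporary.  temp_state tracks, per global, whether that direct temp holds a
   valid value (TS_DEAD means it does not; TS_MEM means it does and the value
   also matches the canonical memory slot).  A load from the canonical slot is
   inserted before the first use of a dead global, and a store back is
   inserted at its last write, so the ops in between can reference the direct
   temp instead of the indirect global. */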
1872static bool liveness_pass_2(TCGContext *s, uint8_t *temp_state)
1873{
1874 int nb_globals = s->nb_globals;
1875 int16_t *dir_temps;
1876 int i, oi, oi_next;
1877 bool changes = false;
1878
1879 dir_temps = tcg_malloc(nb_globals * sizeof(int16_t));
1880 memset(dir_temps, 0, nb_globals * sizeof(int16_t));
1881
1882 /* Create a temporary for each indirect global. */
1883 for (i = 0; i < nb_globals; ++i) {
1884 TCGTemp *its = &s->temps[i];
1885 if (its->indirect_reg) {
1886 TCGTemp *dts = tcg_temp_alloc(s);
1887 dts->type = its->type;
1888 dts->base_type = its->base_type;
1889 dir_temps[i] = temp_idx(s, dts);
1890 }
1891 }
1892
1893 memset(temp_state, TS_DEAD, nb_globals);
1894
1895 for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
1896 TCGOp *op = &s->gen_op_buf[oi];
75e8b9b7 1897 TCGArg *args = op->args;
5a18407f
RH
1898 TCGOpcode opc = op->opc;
1899 const TCGOpDef *def = &tcg_op_defs[opc];
1900 TCGLifeData arg_life = op->life;
1901 int nb_iargs, nb_oargs, call_flags;
1902 TCGArg arg, dir;
1903
1904 oi_next = op->next;
1905
1906 if (opc == INDEX_op_call) {
1907 nb_oargs = op->callo;
1908 nb_iargs = op->calli;
1909 call_flags = args[nb_oargs + nb_iargs + 1];
1910 } else {
1911 nb_iargs = def->nb_iargs;
1912 nb_oargs = def->nb_oargs;
1913
1914 /* Set flags similar to how calls require. */
1915 if (def->flags & TCG_OPF_BB_END) {
1916 /* Like writing globals: save_globals */
1917 call_flags = 0;
1918 } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
1919 /* Like reading globals: sync_globals */
1920 call_flags = TCG_CALL_NO_WRITE_GLOBALS;
1921 } else {
1922 /* No effect on globals. */
1923 call_flags = (TCG_CALL_NO_READ_GLOBALS |
1924 TCG_CALL_NO_WRITE_GLOBALS);
1925 }
1926 }
1927
1928 /* Make sure that input arguments are available. */
1929 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1930 arg = args[i];
1931 /* Note this unsigned test catches TCG_CALL_DUMMY_ARG too. */
1932 if (arg < nb_globals) {
1933 dir = dir_temps[arg];
1934 if (dir != 0 && temp_state[arg] == TS_DEAD) {
1935 TCGTemp *its = &s->temps[arg];
1936 TCGOpcode lopc = (its->type == TCG_TYPE_I32
1937 ? INDEX_op_ld_i32
1938 : INDEX_op_ld_i64);
1939 TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3);
75e8b9b7 1940 TCGArg *largs = lop->args;
5a18407f
RH
1941
1942 largs[0] = dir;
1943 largs[1] = temp_idx(s, its->mem_base);
1944 largs[2] = its->mem_offset;
1945
1946 /* Loaded, but synced with memory. */
1947 temp_state[arg] = TS_MEM;
1948 }
1949 }
1950 }
1951
1952 /* Perform input replacement, and mark inputs that became dead.
1953 No action is required except keeping temp_state up to date
1954 so that we reload when needed. */
1955 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
1956 arg = args[i];
1957 if (arg < nb_globals) {
1958 dir = dir_temps[arg];
1959 if (dir != 0) {
1960 args[i] = dir;
1961 changes = true;
1962 if (IS_DEAD_ARG(i)) {
1963 temp_state[arg] = TS_DEAD;
1964 }
1965 }
1966 }
1967 }
1968
1969 /* Liveness analysis should ensure that the following are
1970 all correct, for call sites and basic block end points. */
1971 if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
1972 /* Nothing to do */
1973 } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
1974 for (i = 0; i < nb_globals; ++i) {
1975 /* Liveness should see that globals are synced back,
1976 that is, either TS_DEAD or TS_MEM. */
1977 tcg_debug_assert(dir_temps[i] == 0
1978 || temp_state[i] != 0);
1979 }
1980 } else {
1981 for (i = 0; i < nb_globals; ++i) {
1982 /* Liveness should see that globals are saved back,
1983 that is, TS_DEAD, waiting to be reloaded. */
1984 tcg_debug_assert(dir_temps[i] == 0
1985 || temp_state[i] == TS_DEAD);
1986 }
1987 }
1988
1989 /* Outputs become available. */
1990 for (i = 0; i < nb_oargs; i++) {
1991 arg = args[i];
1992 if (arg >= nb_globals) {
1993 continue;
1994 }
1995 dir = dir_temps[arg];
1996 if (dir == 0) {
1997 continue;
1998 }
1999 args[i] = dir;
2000 changes = true;
2001
2002 /* The output is now live and modified. */
2003 temp_state[arg] = 0;
2004
2005 /* Sync outputs upon their last write. */
2006 if (NEED_SYNC_ARG(i)) {
2007 TCGTemp *its = &s->temps[arg];
2008 TCGOpcode sopc = (its->type == TCG_TYPE_I32
2009 ? INDEX_op_st_i32
2010 : INDEX_op_st_i64);
2011 TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3);
75e8b9b7 2012 TCGArg *sargs = sop->args;
5a18407f
RH
2013
2014 sargs[0] = dir;
2015 sargs[1] = temp_idx(s, its->mem_base);
2016 sargs[2] = its->mem_offset;
2017
2018 temp_state[arg] = TS_MEM;
2019 }
2020 /* Drop outputs that are dead. */
2021 if (IS_DEAD_ARG(i)) {
2022 temp_state[arg] = TS_DEAD;
2023 }
2024 }
2025 }
2026
2027 return changes;
2028}
2029
8d8fdbae 2030#ifdef CONFIG_DEBUG_TCG
c896fe29
FB
2031static void dump_regs(TCGContext *s)
2032{
2033 TCGTemp *ts;
2034 int i;
2035 char buf[64];
2036
2037 for(i = 0; i < s->nb_temps; i++) {
2038 ts = &s->temps[i];
ac56dd48 2039 printf(" %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
c896fe29
FB
2040 switch(ts->val_type) {
2041 case TEMP_VAL_REG:
2042 printf("%s", tcg_target_reg_names[ts->reg]);
2043 break;
2044 case TEMP_VAL_MEM:
b3a62939
RH
2045 printf("%d(%s)", (int)ts->mem_offset,
2046 tcg_target_reg_names[ts->mem_base->reg]);
c896fe29
FB
2047 break;
2048 case TEMP_VAL_CONST:
2049 printf("$0x%" TCG_PRIlx, ts->val);
2050 break;
2051 case TEMP_VAL_DEAD:
2052 printf("D");
2053 break;
2054 default:
2055 printf("???");
2056 break;
2057 }
2058 printf("\n");
2059 }
2060
2061 for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
f8b2f202 2062 if (s->reg_to_temp[i] != NULL) {
c896fe29
FB
2063 printf("%s: %s\n",
2064 tcg_target_reg_names[i],
f8b2f202 2065 tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
c896fe29
FB
2066 }
2067 }
2068}
2069
2070static void check_regs(TCGContext *s)
2071{
869938ae 2072 int reg;
b6638662 2073 int k;
c896fe29
FB
2074 TCGTemp *ts;
2075 char buf[64];
2076
f8b2f202
RH
2077 for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
2078 ts = s->reg_to_temp[reg];
2079 if (ts != NULL) {
2080 if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
c896fe29
FB
2081 printf("Inconsistency for register %s:\n",
2082 tcg_target_reg_names[reg]);
b03cce8e 2083 goto fail;
c896fe29
FB
2084 }
2085 }
2086 }
f8b2f202 2087 for (k = 0; k < s->nb_temps; k++) {
c896fe29 2088 ts = &s->temps[k];
f8b2f202
RH
2089 if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
2090 && s->reg_to_temp[ts->reg] != ts) {
2091 printf("Inconsistency for temp %s:\n",
2092 tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
b03cce8e 2093 fail:
f8b2f202
RH
2094 printf("reg state:\n");
2095 dump_regs(s);
2096 tcg_abort();
c896fe29
FB
2097 }
2098 }
2099}
2100#endif
2101
2102static void temp_allocate_frame(TCGContext *s, int temp)
2103{
2104 TCGTemp *ts;
2105 ts = &s->temps[temp];
9b9c37c3
RH
2106#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
2107 /* Sparc64 stack is accessed with offset of 2047 */
b591dc59
BS
2108 s->current_frame_offset = (s->current_frame_offset +
2109 (tcg_target_long)sizeof(tcg_target_long) - 1) &
2110 ~(sizeof(tcg_target_long) - 1);
f44c9960 2111#endif
b591dc59
BS
2112 if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
2113 s->frame_end) {
5ff9d6a4 2114 tcg_abort();
b591dc59 2115 }
c896fe29 2116 ts->mem_offset = s->current_frame_offset;
b3a62939 2117 ts->mem_base = s->frame_temp;
c896fe29 2118 ts->mem_allocated = 1;
e2c6d1b4 2119 s->current_frame_offset += sizeof(tcg_target_long);
c896fe29
FB
2120}
2121
b3915dbb
RH
2122static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet);
2123
59d7c14e
RH
2124/* Mark a temporary as free or dead. If 'free_or_dead' is negative,
2125 mark it free; otherwise mark it dead. */
2126static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
7f6ceedf 2127{
59d7c14e
RH
2128 if (ts->fixed_reg) {
2129 return;
2130 }
2131 if (ts->val_type == TEMP_VAL_REG) {
2132 s->reg_to_temp[ts->reg] = NULL;
2133 }
2134 ts->val_type = (free_or_dead < 0
2135 || ts->temp_local
2136 || temp_idx(s, ts) < s->nb_globals
2137 ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
2138}
7f6ceedf 2139
59d7c14e
RH
2140/* Mark a temporary as dead. */
2141static inline void temp_dead(TCGContext *s, TCGTemp *ts)
2142{
2143 temp_free_or_dead(s, ts, 1);
2144}
2145
2146/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
2147 register needs to be allocated to store a constant. If 'free_or_dead'
2148 is non-zero, subsequently release the temporary; if it is positive, the
2149 temp is dead; if it is negative, the temp is free. */
2150static void temp_sync(TCGContext *s, TCGTemp *ts,
2151 TCGRegSet allocated_regs, int free_or_dead)
2152{
2153 if (ts->fixed_reg) {
2154 return;
2155 }
2156 if (!ts->mem_coherent) {
7f6ceedf 2157 if (!ts->mem_allocated) {
f8b2f202 2158 temp_allocate_frame(s, temp_idx(s, ts));
59d7c14e 2159 }
59d7c14e
RH
2160 switch (ts->val_type) {
2161 case TEMP_VAL_CONST:
2162 /* If we're going to free the temp immediately, then we won't
2163 require it later in a register, so attempt to store the
2164 constant to memory directly. */
2165 if (free_or_dead
2166 && tcg_out_sti(s, ts->type, ts->val,
2167 ts->mem_base->reg, ts->mem_offset)) {
2168 break;
2169 }
2170 temp_load(s, ts, tcg_target_available_regs[ts->type],
2171 allocated_regs);
2172 /* fallthrough */
2173
2174 case TEMP_VAL_REG:
2175 tcg_out_st(s, ts->type, ts->reg,
2176 ts->mem_base->reg, ts->mem_offset);
2177 break;
2178
2179 case TEMP_VAL_MEM:
2180 break;
2181
2182 case TEMP_VAL_DEAD:
2183 default:
2184 tcg_abort();
2185 }
2186 ts->mem_coherent = 1;
2187 }
2188 if (free_or_dead) {
2189 temp_free_or_dead(s, ts, free_or_dead);
7f6ceedf 2190 }
7f6ceedf
AJ
2191}
2192
c896fe29 2193/* free register 'reg' by spilling the corresponding temporary if necessary */
b3915dbb 2194static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
c896fe29 2195{
f8b2f202 2196 TCGTemp *ts = s->reg_to_temp[reg];
f8b2f202 2197 if (ts != NULL) {
59d7c14e 2198 temp_sync(s, ts, allocated_regs, -1);
c896fe29
FB
2199 }
2200}
2201
2202/* Allocate a register belonging to reg1 & ~reg2 */
b3915dbb 2203static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs,
91478cef 2204 TCGRegSet allocated_regs, bool rev)
c896fe29 2205{
91478cef
RH
2206 int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
2207 const int *order;
b6638662 2208 TCGReg reg;
c896fe29
FB
2209 TCGRegSet reg_ct;
2210
07ddf036 2211 reg_ct = desired_regs & ~allocated_regs;
91478cef 2212 order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;
c896fe29
FB
2213
2214 /* first try free registers */
91478cef
RH
2215 for(i = 0; i < n; i++) {
2216 reg = order[i];
f8b2f202 2217 if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
c896fe29
FB
2218 return reg;
2219 }
2220
2221 /* XXX: do better spill choice */
91478cef
RH
2222 for(i = 0; i < n; i++) {
2223 reg = order[i];
c896fe29 2224 if (tcg_regset_test_reg(reg_ct, reg)) {
b3915dbb 2225 tcg_reg_free(s, reg, allocated_regs);
c896fe29
FB
2226 return reg;
2227 }
2228 }
2229
2230 tcg_abort();
2231}
2232
40ae5c62
RH
2233/* Make sure the temporary is in a register. If needed, allocate the register
2234 from DESIRED while avoiding ALLOCATED. */
2235static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
2236 TCGRegSet allocated_regs)
2237{
2238 TCGReg reg;
2239
2240 switch (ts->val_type) {
2241 case TEMP_VAL_REG:
2242 return;
2243 case TEMP_VAL_CONST:
91478cef 2244 reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
40ae5c62
RH
2245 tcg_out_movi(s, ts->type, reg, ts->val);
2246 ts->mem_coherent = 0;
2247 break;
2248 case TEMP_VAL_MEM:
91478cef 2249 reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base);
40ae5c62
RH
2250 tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
2251 ts->mem_coherent = 1;
2252 break;
2253 case TEMP_VAL_DEAD:
2254 default:
2255 tcg_abort();
2256 }
2257 ts->reg = reg;
2258 ts->val_type = TEMP_VAL_REG;
2259 s->reg_to_temp[reg] = ts;
2260}
2261
59d7c14e
RH
2262/* Save a temporary to memory. 'allocated_regs' is used in case a
2263 temporary register needs to be allocated to store a constant. */
2264static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
1ad80729 2265{
5a18407f
RH
2266 /* The liveness analysis already ensures that globals are back
2267 in memory. Keep a tcg_debug_assert for safety. */
2268 tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
1ad80729
AJ
2269}
2270
9814dd27 2271/* save globals to their canonical location and assume they can be
e8996ee0
FB
2272 modified by the following code. 'allocated_regs' is used in case a
2273 temporary register needs to be allocated to store a constant. */
2274static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
c896fe29 2275{
641d5fbe 2276 int i;
c896fe29 2277
b13eb728
RH
2278 for (i = 0; i < s->nb_globals; i++) {
2279 temp_save(s, &s->temps[i], allocated_regs);
c896fe29 2280 }
e5097dc8
FB
2281}
2282
3d5c5f87
AJ
2283/* sync globals to their canonical location and assume they can be
2284 read by the following code. 'allocated_regs' is used in case a
2285 temporary register needs to be allocated to store a constant. */
2286static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
2287{
2288 int i;
2289
2290 for (i = 0; i < s->nb_globals; i++) {
12b9b11a 2291 TCGTemp *ts = &s->temps[i];
5a18407f
RH
2292 tcg_debug_assert(ts->val_type != TEMP_VAL_REG
2293 || ts->fixed_reg
2294 || ts->mem_coherent);
3d5c5f87
AJ
2295 }
2296}
2297
e5097dc8 2298/* at the end of a basic block, we assume all temporaries are dead and
e8996ee0
FB
2299 all globals are stored at their canonical location. */
2300static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
e5097dc8 2301{
e5097dc8
FB
2302 int i;
2303
b13eb728
RH
2304 for (i = s->nb_globals; i < s->nb_temps; i++) {
2305 TCGTemp *ts = &s->temps[i];
641d5fbe 2306 if (ts->temp_local) {
b13eb728 2307 temp_save(s, ts, allocated_regs);
641d5fbe 2308 } else {
5a18407f
RH
2309 /* The liveness analysis already ensures that temps are dead.
2310 Keep a tcg_debug_assert for safety. */
2311 tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
c896fe29
FB
2312 }
2313 }
e8996ee0
FB
2314
2315 save_globals(s, allocated_regs);
c896fe29
FB
2316}
2317
0fe4fca4
PB
2318static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
2319 tcg_target_ulong val, TCGLifeData arg_life)
e8996ee0 2320{
e8996ee0 2321 if (ots->fixed_reg) {
59d7c14e 2322 /* For fixed registers, we do not do any constant propagation. */
e8996ee0 2323 tcg_out_movi(s, ots->type, ots->reg, val);
59d7c14e 2324 return;
e8996ee0 2325 }
59d7c14e
RH
2326
2327 /* The movi is not explicitly generated here; it is materialized later by temp_load or temp_sync from the recorded constant. */
2328 if (ots->val_type == TEMP_VAL_REG) {
2329 s->reg_to_temp[ots->reg] = NULL;
ec7a869d 2330 }
59d7c14e
RH
2331 ots->val_type = TEMP_VAL_CONST;
2332 ots->val = val;
2333 ots->mem_coherent = 0;
2334 if (NEED_SYNC_ARG(0)) {
2335 temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0));
2336 } else if (IS_DEAD_ARG(0)) {
f8bf00f1 2337 temp_dead(s, ots);
4c4e1ab2 2338 }
e8996ee0
FB
2339}
2340
0fe4fca4
PB
2341static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
2342 TCGLifeData arg_life)
2343{
2344 TCGTemp *ots = &s->temps[args[0]];
2345 tcg_target_ulong val = args[1];
2346
2347 tcg_reg_alloc_do_movi(s, ots, val, arg_life);
2348}
2349
c896fe29 2350static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
a1b3c48d 2351 const TCGArg *args, TCGLifeData arg_life)
c896fe29 2352{
c29c1d7e 2353 TCGRegSet allocated_regs;
c896fe29 2354 TCGTemp *ts, *ots;
450445d5 2355 TCGType otype, itype;
c896fe29 2356
d21369f5 2357 allocated_regs = s->reserved_regs;
c896fe29
FB
2358 ots = &s->temps[args[0]];
2359 ts = &s->temps[args[1]];
450445d5
RH
2360
2361 /* Note that otype != itype for no-op truncation. */
2362 otype = ots->type;
2363 itype = ts->type;
c29c1d7e 2364
0fe4fca4
PB
2365 if (ts->val_type == TEMP_VAL_CONST) {
2366 /* propagate constant or generate sti */
2367 tcg_target_ulong val = ts->val;
2368 if (IS_DEAD_ARG(1)) {
2369 temp_dead(s, ts);
2370 }
2371 tcg_reg_alloc_do_movi(s, ots, val, arg_life);
2372 return;
2373 }
2374
2375 /* If the source value is in memory, we're going to be forced
2376 to have it in a register in order to perform the copy. Copy
2377 the SOURCE value into its own register first, so that we
2378 don't have to reload SOURCE the next time it is used. */
2379 if (ts->val_type == TEMP_VAL_MEM) {
40ae5c62 2380 temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
c29c1d7e 2381 }
c896fe29 2382
0fe4fca4 2383 tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
c29c1d7e
AJ
2384 if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
2385 /* mov to a non-saved dead register makes no sense (even with
2386 liveness analysis disabled). */
eabb7b91 2387 tcg_debug_assert(NEED_SYNC_ARG(0));
c29c1d7e
AJ
2388 if (!ots->mem_allocated) {
2389 temp_allocate_frame(s, args[0]);
2390 }
b3a62939 2391 tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
c29c1d7e 2392 if (IS_DEAD_ARG(1)) {
f8bf00f1 2393 temp_dead(s, ts);
c29c1d7e 2394 }
f8bf00f1 2395 temp_dead(s, ots);
c29c1d7e 2396 } else {
866cb6cb 2397 if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
c896fe29 2398 /* the mov can be suppressed */
c29c1d7e 2399 if (ots->val_type == TEMP_VAL_REG) {
f8b2f202 2400 s->reg_to_temp[ots->reg] = NULL;
c29c1d7e
AJ
2401 }
2402 ots->reg = ts->reg;
f8bf00f1 2403 temp_dead(s, ts);
c896fe29 2404 } else {
c29c1d7e
AJ
2405 if (ots->val_type != TEMP_VAL_REG) {
2406 /* When allocating a new register, make sure to not spill the
2407 input one. */
2408 tcg_regset_set_reg(allocated_regs, ts->reg);
450445d5 2409 ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
91478cef 2410 allocated_regs, ots->indirect_base);
c896fe29 2411 }
450445d5 2412 tcg_out_mov(s, otype, ots->reg, ts->reg);
c896fe29 2413 }
c29c1d7e
AJ
2414 ots->val_type = TEMP_VAL_REG;
2415 ots->mem_coherent = 0;
f8b2f202 2416 s->reg_to_temp[ots->reg] = ots;
c29c1d7e 2417 if (NEED_SYNC_ARG(0)) {
59d7c14e 2418 temp_sync(s, ots, allocated_regs, 0);
c896fe29 2419 }
ec7a869d 2420 }
c896fe29
FB
2421}
2422
2423static void tcg_reg_alloc_op(TCGContext *s,
a9751609 2424 const TCGOpDef *def, TCGOpcode opc,
a1b3c48d 2425 const TCGArg *args, TCGLifeData arg_life)
c896fe29 2426{
82790a87
RH
2427 TCGRegSet i_allocated_regs;
2428 TCGRegSet o_allocated_regs;
b6638662
RH
2429 int i, k, nb_iargs, nb_oargs;
2430 TCGReg reg;
c896fe29
FB
2431 TCGArg arg;
2432 const TCGArgConstraint *arg_ct;
2433 TCGTemp *ts;
2434 TCGArg new_args[TCG_MAX_OP_ARGS];
2435 int const_args[TCG_MAX_OP_ARGS];
2436
2437 nb_oargs = def->nb_oargs;
2438 nb_iargs = def->nb_iargs;
2439
2440 /* copy constants */
2441 memcpy(new_args + nb_oargs + nb_iargs,
2442 args + nb_oargs + nb_iargs,
2443 sizeof(TCGArg) * def->nb_cargs);
2444
d21369f5
RH
2445 i_allocated_regs = s->reserved_regs;
2446 o_allocated_regs = s->reserved_regs;
82790a87 2447
c896fe29 2448 /* satisfy input constraints */
c896fe29
FB
2449 for(k = 0; k < nb_iargs; k++) {
2450 i = def->sorted_args[nb_oargs + k];
2451 arg = args[i];
2452 arg_ct = &def->args_ct[i];
2453 ts = &s->temps[arg];
40ae5c62
RH
2454
2455 if (ts->val_type == TEMP_VAL_CONST
2456 && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
2457 /* constant is OK for instruction */
2458 const_args[i] = 1;
2459 new_args[i] = ts->val;
2460 goto iarg_end;
c896fe29 2461 }
40ae5c62 2462
82790a87 2463 temp_load(s, ts, arg_ct->u.regs, i_allocated_regs);
40ae5c62 2464
5ff9d6a4
FB
2465 if (arg_ct->ct & TCG_CT_IALIAS) {
2466 if (ts->fixed_reg) {
2467 /* if fixed register, we must allocate a new register
2468 if the alias is not the same register */
2469 if (arg != args[arg_ct->alias_index])
2470 goto allocate_in_reg;
2471 } else {
2472 /* if the input is aliased to an output and if it is
2473 not dead after the instruction, we must allocate
2474 a new register and move it */
866cb6cb 2475 if (!IS_DEAD_ARG(i)) {
5ff9d6a4 2476 goto allocate_in_reg;
866cb6cb 2477 }
7e1df267
AJ
2478 /* check if the current register has already been allocated
2479 for another input aliased to an output */
2480 int k2, i2;
2481 for (k2 = 0 ; k2 < k ; k2++) {
2482 i2 = def->sorted_args[nb_oargs + k2];
2483 if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
2484 (new_args[i2] == ts->reg)) {
2485 goto allocate_in_reg;
2486 }
2487 }
5ff9d6a4 2488 }
c896fe29
FB
2489 }
2490 reg = ts->reg;
2491 if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2492 /* nothing to do : the constraint is satisfied */
2493 } else {
2494 allocate_in_reg:
2495 /* allocate a new register matching the constraint
2496 and move the temporary register into it */
82790a87 2497 reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
91478cef 2498 ts->indirect_base);
3b6dac34 2499 tcg_out_mov(s, ts->type, reg, ts->reg);
c896fe29 2500 }
c896fe29
FB
2501 new_args[i] = reg;
2502 const_args[i] = 0;
82790a87 2503 tcg_regset_set_reg(i_allocated_regs, reg);
c896fe29
FB
2504 iarg_end: ;
2505 }
2506
a52ad07e
AJ
2507 /* mark dead temporaries and free the associated registers */
2508 for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
2509 if (IS_DEAD_ARG(i)) {
f8bf00f1 2510 temp_dead(s, &s->temps[args[i]]);
a52ad07e
AJ
2511 }
2512 }
2513
e8996ee0 2514 if (def->flags & TCG_OPF_BB_END) {
82790a87 2515 tcg_reg_alloc_bb_end(s, i_allocated_regs);
e8996ee0 2516 } else {
e8996ee0
FB
2517 if (def->flags & TCG_OPF_CALL_CLOBBER) {
2518 /* XXX: permit generic clobber register list ? */
c8074023
RH
2519 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2520 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
82790a87 2521 tcg_reg_free(s, i, i_allocated_regs);
e8996ee0 2522 }
c896fe29 2523 }
3d5c5f87
AJ
2524 }
2525 if (def->flags & TCG_OPF_SIDE_EFFECTS) {
2526 /* sync globals if the op has side effects and might trigger
2527 an exception. */
82790a87 2528 sync_globals(s, i_allocated_regs);
c896fe29 2529 }
e8996ee0
FB
2530
2531 /* satisfy the output constraints */
e8996ee0
FB
2532 for(k = 0; k < nb_oargs; k++) {
2533 i = def->sorted_args[k];
2534 arg = args[i];
2535 arg_ct = &def->args_ct[i];
2536 ts = &s->temps[arg];
17280ff4
RH
2537 if ((arg_ct->ct & TCG_CT_ALIAS)
2538 && !const_args[arg_ct->alias_index]) {
e8996ee0 2539 reg = new_args[arg_ct->alias_index];
82790a87
RH
2540 } else if (arg_ct->ct & TCG_CT_NEWREG) {
2541 reg = tcg_reg_alloc(s, arg_ct->u.regs,
2542 i_allocated_regs | o_allocated_regs,
2543 ts->indirect_base);
e8996ee0
FB
2544 } else {
2545 /* if fixed register, we try to use it */
2546 reg = ts->reg;
2547 if (ts->fixed_reg &&
2548 tcg_regset_test_reg(arg_ct->u.regs, reg)) {
2549 goto oarg_end;
2550 }
82790a87 2551 reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
91478cef 2552 ts->indirect_base);
c896fe29 2553 }
82790a87 2554 tcg_regset_set_reg(o_allocated_regs, reg);
e8996ee0
FB
2555 /* if a fixed register is used, then a move will be done afterwards */
2556 if (!ts->fixed_reg) {
ec7a869d 2557 if (ts->val_type == TEMP_VAL_REG) {
f8b2f202 2558 s->reg_to_temp[ts->reg] = NULL;
ec7a869d
AJ
2559 }
2560 ts->val_type = TEMP_VAL_REG;
2561 ts->reg = reg;
2562 /* temp value is modified, so the value kept in memory is
2563 potentially not the same */
2564 ts->mem_coherent = 0;
f8b2f202 2565 s->reg_to_temp[reg] = ts;
e8996ee0
FB
2566 }
2567 oarg_end:
2568 new_args[i] = reg;
c896fe29 2569 }
c896fe29
FB
2570 }
2571
c896fe29
FB
2572 /* emit instruction */
2573 tcg_out_op(s, opc, new_args, const_args);
2574
2575 /* move the outputs in the correct register if needed */
2576 for(i = 0; i < nb_oargs; i++) {
2577 ts = &s->temps[args[i]];
2578 reg = new_args[i];
2579 if (ts->fixed_reg && ts->reg != reg) {
3b6dac34 2580 tcg_out_mov(s, ts->type, ts->reg, reg);
c896fe29 2581 }
ec7a869d 2582 if (NEED_SYNC_ARG(i)) {
82790a87 2583 temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i));
59d7c14e 2584 } else if (IS_DEAD_ARG(i)) {
f8bf00f1 2585 temp_dead(s, ts);
ec7a869d 2586 }
c896fe29
FB
2587 }
2588}
2589
b03cce8e
FB
2590#ifdef TCG_TARGET_STACK_GROWSUP
2591#define STACK_DIR(x) (-(x))
2592#else
2593#define STACK_DIR(x) (x)
2594#endif
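/* STACK_DIR() flips the sign of a stack offset on hosts whose stack grows
   upward; the argument loop below open-codes the same adjustment with
   explicit #ifdefs. */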
2595
c45cb8bb 2596static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
a1b3c48d 2597 const TCGArg * const args, TCGLifeData arg_life)
c896fe29 2598{
b6638662
RH
2599 int flags, nb_regs, i;
2600 TCGReg reg;
cf066674 2601 TCGArg arg;
c896fe29 2602 TCGTemp *ts;
d3452f1f
RH
2603 intptr_t stack_offset;
2604 size_t call_stack_size;
cf066674
RH
2605 tcg_insn_unit *func_addr;
2606 int allocate_args;
c896fe29 2607 TCGRegSet allocated_regs;
c896fe29 2608
cf066674
RH
2609 func_addr = (tcg_insn_unit *)(intptr_t)args[nb_oargs + nb_iargs];
2610 flags = args[nb_oargs + nb_iargs + 1];
c896fe29 2611
6e17d0c5 2612 nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
c45cb8bb
RH
2613 if (nb_regs > nb_iargs) {
2614 nb_regs = nb_iargs;
cf066674 2615 }
c896fe29
FB
2616
2617 /* assign stack slots first */
c45cb8bb 2618 call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
c896fe29
FB
2619 call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
2620 ~(TCG_TARGET_STACK_ALIGN - 1);
b03cce8e
FB
2621 allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
2622 if (allocate_args) {
345649c0
BS
2623 /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
2624 preallocate call stack */
2625 tcg_abort();
b03cce8e 2626 }
39cf05d3
FB
2627
2628 stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
c45cb8bb 2629 for(i = nb_regs; i < nb_iargs; i++) {
c896fe29 2630 arg = args[nb_oargs + i];
39cf05d3
FB
2631#ifdef TCG_TARGET_STACK_GROWSUP
2632 stack_offset -= sizeof(tcg_target_long);
2633#endif
2634 if (arg != TCG_CALL_DUMMY_ARG) {
2635 ts = &s->temps[arg];
40ae5c62
RH
2636 temp_load(s, ts, tcg_target_available_regs[ts->type],
2637 s->reserved_regs);
2638 tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
c896fe29 2639 }
39cf05d3
FB
2640#ifndef TCG_TARGET_STACK_GROWSUP
2641 stack_offset += sizeof(tcg_target_long);
2642#endif
c896fe29
FB
2643 }
2644
2645 /* assign input registers */
d21369f5 2646 allocated_regs = s->reserved_regs;
c896fe29
FB
2647 for(i = 0; i < nb_regs; i++) {
2648 arg = args[nb_oargs + i];
39cf05d3
FB
2649 if (arg != TCG_CALL_DUMMY_ARG) {
2650 ts = &s->temps[arg];
2651 reg = tcg_target_call_iarg_regs[i];
b3915dbb 2652 tcg_reg_free(s, reg, allocated_regs);
40ae5c62 2653
39cf05d3
FB
2654 if (ts->val_type == TEMP_VAL_REG) {
2655 if (ts->reg != reg) {
3b6dac34 2656 tcg_out_mov(s, ts->type, reg, ts->reg);
39cf05d3 2657 }
39cf05d3 2658 } else {
ccb1bb66 2659 TCGRegSet arg_set = 0;
40ae5c62 2660
40ae5c62
RH
2661 tcg_regset_set_reg(arg_set, reg);
2662 temp_load(s, ts, arg_set, allocated_regs);
c896fe29 2663 }
40ae5c62 2664
39cf05d3 2665 tcg_regset_set_reg(allocated_regs, reg);
c896fe29 2666 }
c896fe29
FB
2667 }
2668
c896fe29 2669 /* mark dead temporaries and free the associated registers */
866cb6cb 2670 for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
866cb6cb 2671 if (IS_DEAD_ARG(i)) {
f8bf00f1 2672 temp_dead(s, &s->temps[args[i]]);
c896fe29
FB
2673 }
2674 }
2675
2676 /* clobber call registers */
c8074023
RH
2677 for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
2678 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
b3915dbb 2679 tcg_reg_free(s, i, allocated_regs);
c896fe29
FB
2680 }
2681 }
78505279
AJ
2682
2683 /* Save globals if they might be written by the helper, sync them if
2684 they might be read. */
2685 if (flags & TCG_CALL_NO_READ_GLOBALS) {
2686 /* Nothing to do */
2687 } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
2688 sync_globals(s, allocated_regs);
2689 } else {
b9c18f56
AJ
2690 save_globals(s, allocated_regs);
2691 }
c896fe29 2692
cf066674 2693 tcg_out_call(s, func_addr);
c896fe29
FB
2694
2695 /* assign output registers and emit moves if needed */
2696 for(i = 0; i < nb_oargs; i++) {
2697 arg = args[i];
2698 ts = &s->temps[arg];
2699 reg = tcg_target_call_oarg_regs[i];
eabb7b91 2700 tcg_debug_assert(s->reg_to_temp[reg] == NULL);
34b1a49c 2701
c896fe29
FB
2702 if (ts->fixed_reg) {
2703 if (ts->reg != reg) {
3b6dac34 2704 tcg_out_mov(s, ts->type, ts->reg, reg);
c896fe29
FB
2705 }
2706 } else {
ec7a869d 2707 if (ts->val_type == TEMP_VAL_REG) {
f8b2f202 2708 s->reg_to_temp[ts->reg] = NULL;
ec7a869d
AJ
2709 }
2710 ts->val_type = TEMP_VAL_REG;
2711 ts->reg = reg;
2712 ts->mem_coherent = 0;
f8b2f202 2713 s->reg_to_temp[reg] = ts;
ec7a869d 2714 if (NEED_SYNC_ARG(i)) {
59d7c14e
RH
2715 temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
2716 } else if (IS_DEAD_ARG(i)) {
f8bf00f1 2717 temp_dead(s, ts);
8c11ad25 2718 }
c896fe29
FB
2719 }
2720 }
c896fe29
FB
2721}
2722
2723#ifdef CONFIG_PROFILER
2724
54604f74 2725static int64_t tcg_table_op_count[NB_OPS];
c896fe29 2726
246ae24d 2727void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
c896fe29
FB
2728{
2729 int i;
d70724ce 2730
15fc7daa 2731 for (i = 0; i < NB_OPS; i++) {
246ae24d
MF
2732 cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name,
2733 tcg_table_op_count[i]);
c896fe29 2734 }
c896fe29 2735}
246ae24d
MF
2736#else
2737void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf)
2738{
2739 cpu_fprintf(f, "[TCG profiler not compiled]\n");
2740}
c896fe29
FB
2741#endif
2742
2743
5bd2ec3d 2744int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
c896fe29 2745{
fca8a500 2746 int i, oi, oi_next, num_insns;
c896fe29 2747
04fe6400
RH
2748#ifdef CONFIG_PROFILER
2749 {
2750 int n;
2751
dcb8e758 2752 n = s->gen_op_buf[0].prev + 1;
04fe6400
RH
2753 s->op_count += n;
2754 if (n > s->op_count_max) {
2755 s->op_count_max = n;
2756 }
2757
2758 n = s->nb_temps;
2759 s->temp_count += n;
2760 if (n > s->temp_count_max) {
2761 s->temp_count_max = n;
2762 }
2763 }
2764#endif
2765
c896fe29 2766#ifdef DEBUG_DISAS
d977e1c2
AB
2767 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
2768 && qemu_log_in_addr_range(tb->pc))) {
1ee73216 2769 qemu_log_lock();
93fcfe39 2770 qemu_log("OP:\n");
eeacee4d 2771 tcg_dump_ops(s);
93fcfe39 2772 qemu_log("\n");
1ee73216 2773 qemu_log_unlock();
c896fe29
FB
2774 }
2775#endif
2776
c5cc28ff
AJ
2777#ifdef CONFIG_PROFILER
2778 s->opt_time -= profile_getclock();
2779#endif
2780
8f2e8c07 2781#ifdef USE_TCG_OPTIMIZATIONS
c45cb8bb 2782 tcg_optimize(s);
8f2e8c07
KB
2783#endif
2784
a23a9ec6 2785#ifdef CONFIG_PROFILER
c5cc28ff 2786 s->opt_time += profile_getclock();
a23a9ec6
FB
2787 s->la_time -= profile_getclock();
2788#endif
c5cc28ff 2789
5a18407f
RH
2790 {
2791 uint8_t *temp_state = tcg_malloc(s->nb_temps + s->nb_indirects);
2792
2793 liveness_pass_1(s, temp_state);
2794
2795 if (s->nb_indirects > 0) {
2796#ifdef DEBUG_DISAS
2797 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
2798 && qemu_log_in_addr_range(tb->pc))) {
1ee73216 2799 qemu_log_lock();
5a18407f
RH
2800 qemu_log("OP before indirect lowering:\n");
2801 tcg_dump_ops(s);
2802 qemu_log("\n");
1ee73216 2803 qemu_log_unlock();
5a18407f
RH
2804 }
2805#endif
2806 /* Replace indirect temps with direct temps. */
2807 if (liveness_pass_2(s, temp_state)) {
2808 /* If changes were made, re-run liveness. */
2809 liveness_pass_1(s, temp_state);
2810 }
2811 }
2812 }
c5cc28ff 2813
a23a9ec6
FB
2814#ifdef CONFIG_PROFILER
2815 s->la_time += profile_getclock();
2816#endif
c896fe29
FB
2817
2818#ifdef DEBUG_DISAS
d977e1c2
AB
2819 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
2820 && qemu_log_in_addr_range(tb->pc))) {
1ee73216 2821 qemu_log_lock();
c5cc28ff 2822 qemu_log("OP after optimization and liveness analysis:\n");
eeacee4d 2823 tcg_dump_ops(s);
93fcfe39 2824 qemu_log("\n");
1ee73216 2825 qemu_log_unlock();
c896fe29
FB
2826 }
2827#endif
2828
2829 tcg_reg_alloc_start(s);
2830
e7e168f4
EC
2831 s->code_buf = tb->tc.ptr;
2832 s->code_ptr = tb->tc.ptr;
c896fe29 2833
659ef5cb
RH
2834#ifdef TCG_TARGET_NEED_LDST_LABELS
2835 s->ldst_labels = NULL;
2836#endif
57a26946
RH
2837#ifdef TCG_TARGET_NEED_POOL_LABELS
2838 s->pool_labels = NULL;
2839#endif
9ecefc84 2840
fca8a500 2841 num_insns = -1;
dcb8e758 2842 for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
c45cb8bb 2843 TCGOp * const op = &s->gen_op_buf[oi];
75e8b9b7 2844 TCGArg * const args = op->args;
c45cb8bb
RH
2845 TCGOpcode opc = op->opc;
2846 const TCGOpDef *def = &tcg_op_defs[opc];
bee158cb 2847 TCGLifeData arg_life = op->life;
b3db8758 2848
c45cb8bb 2849 oi_next = op->next;
c896fe29 2850#ifdef CONFIG_PROFILER
54604f74 2851 tcg_table_op_count[opc]++;
c896fe29 2852#endif
c45cb8bb
RH
2853
2854 switch (opc) {
c896fe29 2855 case INDEX_op_mov_i32:
c896fe29 2856 case INDEX_op_mov_i64:
a1b3c48d 2857 tcg_reg_alloc_mov(s, def, args, arg_life);
c896fe29 2858 break;
e8996ee0 2859 case INDEX_op_movi_i32:
e8996ee0 2860 case INDEX_op_movi_i64:
a1b3c48d 2861 tcg_reg_alloc_movi(s, args, arg_life);
e8996ee0 2862 break;
765b842a 2863 case INDEX_op_insn_start:
fca8a500
RH
2864 if (num_insns >= 0) {
2865 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
2866 }
2867 num_insns++;
bad729e2
RH
2868 for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
2869 target_ulong a;
2870#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
2871 a = ((target_ulong)args[i * 2 + 1] << 32) | args[i * 2];
2872#else
2873 a = args[i];
2874#endif
fca8a500 2875 s->gen_insn_data[num_insns][i] = a;
bad729e2 2876 }
c896fe29 2877 break;
5ff9d6a4 2878 case INDEX_op_discard:
f8bf00f1 2879 temp_dead(s, &s->temps[args[0]]);
5ff9d6a4 2880 break;
c896fe29 2881 case INDEX_op_set_label:
e8996ee0 2882 tcg_reg_alloc_bb_end(s, s->reserved_regs);
bec16311 2883 tcg_out_label(s, arg_label(args[0]), s->code_ptr);
c896fe29
FB
2884 break;
2885 case INDEX_op_call:
a1b3c48d 2886 tcg_reg_alloc_call(s, op->callo, op->calli, args, arg_life);
c45cb8bb 2887 break;
c896fe29 2888 default:
25c4d9cc 2889 /* Sanity check that we've not introduced any unhandled opcodes. */
be0f34b5 2890 tcg_debug_assert(tcg_op_supported(opc));
c896fe29
FB
2891 /* Note: in order to speed up the code, it would be much
2892 faster to have specialized register allocator functions for
2893 some common argument patterns */
a1b3c48d 2894 tcg_reg_alloc_op(s, def, opc, args, arg_life);
c896fe29
FB
2895 break;
2896 }
8d8fdbae 2897#ifdef CONFIG_DEBUG_TCG
c896fe29
FB
2898 check_regs(s);
2899#endif
b125f9dc
RH
2900 /* Test for (pending) buffer overflow. The assumption is that any
2901 one operation beginning below the high water mark cannot overrun
2902 the buffer completely. Thus we can test for overflow after
2903 generating code without having to check during generation. */
644da9b3 2904 if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
b125f9dc
RH
2905 return -1;
2906 }
c896fe29 2907 }
fca8a500
RH
2908 tcg_debug_assert(num_insns >= 0);
2909 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
c45cb8bb 2910
b76f0d8c 2911 /* Generate TB finalization at the end of block */
659ef5cb
RH
2912#ifdef TCG_TARGET_NEED_LDST_LABELS
2913 if (!tcg_out_ldst_finalize(s)) {
23dceda6
RH
2914 return -1;
2915 }
659ef5cb 2916#endif
57a26946
RH
2917#ifdef TCG_TARGET_NEED_POOL_LABELS
2918 if (!tcg_out_pool_finalize(s)) {
2919 return -1;
2920 }
2921#endif
c896fe29
FB
2922
2923 /* flush instruction cache */
1813e175 2924 flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
2aeabc08 2925
1813e175 2926 return tcg_current_code_size(s);
c896fe29
FB
2927}
2928
a23a9ec6 2929#ifdef CONFIG_PROFILER
405cf9ff 2930void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
a23a9ec6
FB
2931{
2932 TCGContext *s = &tcg_ctx;
fca8a500
RH
2933 int64_t tb_count = s->tb_count;
2934 int64_t tb_div_count = tb_count ? tb_count : 1;
2935 int64_t tot = s->interm_time + s->code_time;
a23a9ec6 2936
a23a9ec6
FB
2937 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
2938 tot, tot / 2.4e9);
2939 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
fca8a500
RH
2940 tb_count, s->tb_count1 - tb_count,
2941 (double)(s->tb_count1 - s->tb_count)
2942 / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
a23a9ec6 2943 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
fca8a500 2944 (double)s->op_count / tb_div_count, s->op_count_max);
a23a9ec6 2945 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
fca8a500 2946 (double)s->del_op_count / tb_div_count);
a23a9ec6 2947 cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
fca8a500
RH
2948 (double)s->temp_count / tb_div_count, s->temp_count_max);
2949 cpu_fprintf(f, "avg host code/TB %0.1f\n",
2950 (double)s->code_out_len / tb_div_count);
2951 cpu_fprintf(f, "avg search data/TB %0.1f\n",
2952 (double)s->search_out_len / tb_div_count);
a23a9ec6
FB
2953
2954 cpu_fprintf(f, "cycles/op %0.1f\n",
2955 s->op_count ? (double)tot / s->op_count : 0);
2956 cpu_fprintf(f, "cycles/in byte %0.1f\n",
2957 s->code_in_len ? (double)tot / s->code_in_len : 0);
2958 cpu_fprintf(f, "cycles/out byte %0.1f\n",
2959 s->code_out_len ? (double)tot / s->code_out_len : 0);
fca8a500
RH
2960 cpu_fprintf(f, "cycles/search byte %0.1f\n",
2961 s->search_out_len ? (double)tot / s->search_out_len : 0);
2962 if (tot == 0) {
a23a9ec6 2963 tot = 1;
fca8a500 2964 }
a23a9ec6
FB
2965 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
2966 (double)s->interm_time / tot * 100.0);
2967 cpu_fprintf(f, " gen_code time %0.1f%%\n",
2968 (double)s->code_time / tot * 100.0);
c5cc28ff
AJ
2969 cpu_fprintf(f, "optim./code time %0.1f%%\n",
2970 (double)s->opt_time / (s->code_time ? s->code_time : 1)
2971 * 100.0);
a23a9ec6
FB
2972 cpu_fprintf(f, "liveness/code time %0.1f%%\n",
2973 (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
2974 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
2975 s->restore_count);
2976 cpu_fprintf(f, " avg cycles %0.1f\n",
2977 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
a23a9ec6
FB
2978}
2979#else
405cf9ff 2980void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
a23a9ec6 2981{
24bf7b3a 2982 cpu_fprintf(f, "[TCG profiler not compiled]\n");
a23a9ec6
FB
2983}
2984#endif
813da627
RH
2985
2986#ifdef ELF_HOST_MACHINE
5872bbf2
RH
2987/* In order to use this feature, the backend needs to do three things:
2988
2989 (1) Define ELF_HOST_MACHINE to indicate both what value to
2990 put into the ELF image and to indicate support for the feature.
2991
2992 (2) Define tcg_register_jit. This should create a buffer containing
2993 the contents of a .debug_frame section that describes the post-
2994 prologue unwind info for the tcg machine.
2995
2996 (3) Call tcg_register_jit_int, with the constructed .debug_frame.
2997*/
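/* Illustrative sketch only -- each backend's tcg-target.inc.c provides the
   real DebugFrame contents with its host-specific unwind rules:

       void tcg_register_jit(void *buf, size_t buf_size)
       {
           tcg_register_jit_int(buf, buf_size,
                                &debug_frame, sizeof(debug_frame));
       }

   where debug_frame starts with a DebugFrameHeader; its FDE func_start and
   func_len fields are patched by tcg_register_jit_int() below. */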
813da627
RH
2998
2999/* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */
3000typedef enum {
3001 JIT_NOACTION = 0,
3002 JIT_REGISTER_FN,
3003 JIT_UNREGISTER_FN
3004} jit_actions_t;
3005
3006struct jit_code_entry {
3007 struct jit_code_entry *next_entry;
3008 struct jit_code_entry *prev_entry;
3009 const void *symfile_addr;
3010 uint64_t symfile_size;
3011};
3012
3013struct jit_descriptor {
3014 uint32_t version;
3015 uint32_t action_flag;
3016 struct jit_code_entry *relevant_entry;
3017 struct jit_code_entry *first_entry;
3018};
3019
3020void __jit_debug_register_code(void) __attribute__((noinline));
3021void __jit_debug_register_code(void)
3022{
3023 asm("");
3024}
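/* GDB sets a breakpoint on __jit_debug_register_code() and, each time it is
   hit, re-reads __jit_debug_descriptor to pick up the newly registered entry;
   the noinline attribute and the empty asm keep the function from being
   optimized away. */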
3025
3026/* Must statically initialize the version, because GDB may check
3027 the version before we can set it. */
3028struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
3029
3030/* End GDB interface. */
3031
3032static int find_string(const char *strtab, const char *str)
3033{
3034 const char *p = strtab + 1;
3035
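    /* The only names looked up are the ones placed in img->str below,
       so no terminating-entry check is needed here. */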
3036 while (1) {
3037 if (strcmp(p, str) == 0) {
3038 return p - strtab;
3039 }
3040 p += strlen(p) + 1;
3041 }
3042}
3043
5872bbf2 3044static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
2c90784a
RH
3045 const void *debug_frame,
3046 size_t debug_frame_size)
813da627 3047{
5872bbf2
RH
3048 struct __attribute__((packed)) DebugInfo {
3049 uint32_t len;
3050 uint16_t version;
3051 uint32_t abbrev;
3052 uint8_t ptr_size;
3053 uint8_t cu_die;
3054 uint16_t cu_lang;
3055 uintptr_t cu_low_pc;
3056 uintptr_t cu_high_pc;
3057 uint8_t fn_die;
3058 char fn_name[16];
3059 uintptr_t fn_low_pc;
3060 uintptr_t fn_high_pc;
3061 uint8_t cu_eoc;
3062 };
813da627
RH
3063
3064 struct ElfImage {
3065 ElfW(Ehdr) ehdr;
3066 ElfW(Phdr) phdr;
5872bbf2
RH
3067 ElfW(Shdr) shdr[7];
3068 ElfW(Sym) sym[2];
3069 struct DebugInfo di;
3070 uint8_t da[24];
3071 char str[80];
3072 };
3073
3074 struct ElfImage *img;
3075
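    /* The template below is a complete in-memory ELF image: one PT_LOAD
       segment and a SHT_NOBITS .text spanning the JIT buffer, plus a minimal
       .symtab/.strtab and hand-rolled DWARF (.debug_info, .debug_abbrev and
       the caller-supplied .debug_frame) -- just enough for GDB to name and
       unwind through code_gen_buffer. */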
3076 static const struct ElfImage img_template = {
3077 .ehdr = {
3078 .e_ident[EI_MAG0] = ELFMAG0,
3079 .e_ident[EI_MAG1] = ELFMAG1,
3080 .e_ident[EI_MAG2] = ELFMAG2,
3081 .e_ident[EI_MAG3] = ELFMAG3,
3082 .e_ident[EI_CLASS] = ELF_CLASS,
3083 .e_ident[EI_DATA] = ELF_DATA,
3084 .e_ident[EI_VERSION] = EV_CURRENT,
3085 .e_type = ET_EXEC,
3086 .e_machine = ELF_HOST_MACHINE,
3087 .e_version = EV_CURRENT,
3088 .e_phoff = offsetof(struct ElfImage, phdr),
3089 .e_shoff = offsetof(struct ElfImage, shdr),
3090 .e_ehsize = sizeof(ElfW(Shdr)),
3091 .e_phentsize = sizeof(ElfW(Phdr)),
3092 .e_phnum = 1,
3093 .e_shentsize = sizeof(ElfW(Shdr)),
3094 .e_shnum = ARRAY_SIZE(img->shdr),
3095 .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
abbb3eae
RH
3096#ifdef ELF_HOST_FLAGS
3097 .e_flags = ELF_HOST_FLAGS,
3098#endif
3099#ifdef ELF_OSABI
3100 .e_ident[EI_OSABI] = ELF_OSABI,
3101#endif
5872bbf2
RH
3102 },
3103 .phdr = {
3104 .p_type = PT_LOAD,
3105 .p_flags = PF_X,
3106 },
3107 .shdr = {
3108 [0] = { .sh_type = SHT_NULL },
3109 /* Trick: The contents of code_gen_buffer are not present in
3110 this fake ELF file; that got allocated elsewhere. Therefore
3111 we mark .text as SHT_NOBITS (similar to .bss) so that readers
3112 will not look for contents. We can record any address. */
3113 [1] = { /* .text */
3114 .sh_type = SHT_NOBITS,
3115 .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
3116 },
3117 [2] = { /* .debug_info */
3118 .sh_type = SHT_PROGBITS,
3119 .sh_offset = offsetof(struct ElfImage, di),
3120 .sh_size = sizeof(struct DebugInfo),
3121 },
3122 [3] = { /* .debug_abbrev */
3123 .sh_type = SHT_PROGBITS,
3124 .sh_offset = offsetof(struct ElfImage, da),
3125 .sh_size = sizeof(img->da),
3126 },
3127 [4] = { /* .debug_frame */
3128 .sh_type = SHT_PROGBITS,
3129 .sh_offset = sizeof(struct ElfImage),
3130 },
3131 [5] = { /* .symtab */
3132 .sh_type = SHT_SYMTAB,
3133 .sh_offset = offsetof(struct ElfImage, sym),
3134 .sh_size = sizeof(img->sym),
3135 .sh_info = 1,
3136 .sh_link = ARRAY_SIZE(img->shdr) - 1,
3137 .sh_entsize = sizeof(ElfW(Sym)),
3138 },
3139 [6] = { /* .strtab */
3140 .sh_type = SHT_STRTAB,
3141 .sh_offset = offsetof(struct ElfImage, str),
3142 .sh_size = sizeof(img->str),
3143 }
3144 },
3145 .sym = {
3146 [1] = { /* code_gen_buffer */
3147 .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
3148 .st_shndx = 1,
3149 }
3150 },
3151 .di = {
3152 .len = sizeof(struct DebugInfo) - 4,
3153 .version = 2,
3154 .ptr_size = sizeof(void *),
3155 .cu_die = 1,
3156 .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */
3157 .fn_die = 2,
3158 .fn_name = "code_gen_buffer"
3159 },
3160 .da = {
3161 1, /* abbrev number (the cu) */
3162 0x11, 1, /* DW_TAG_compile_unit, has children */
3163 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */
3164 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
3165 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
3166 0, 0, /* end of abbrev */
3167 2, /* abbrev number (the fn) */
3168 0x2e, 0, /* DW_TAG_subprogram, no children */
3169 0x3, 0x8, /* DW_AT_name, DW_FORM_string */
3170 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */
3171 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */
3172 0, 0, /* end of abbrev */
3173 0 /* no more abbrev */
3174 },
3175 .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
3176 ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
813da627
RH
3177 };
3178
3179 /* We only need a single jit entry; statically allocate it. */
3180 static struct jit_code_entry one_entry;
3181
5872bbf2 3182 uintptr_t buf = (uintptr_t)buf_ptr;
813da627 3183 size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
2c90784a 3184 DebugFrameHeader *dfh;
813da627 3185
5872bbf2
RH
3186 img = g_malloc(img_size);
3187 *img = img_template;
813da627 3188
5872bbf2
RH
3189 img->phdr.p_vaddr = buf;
3190 img->phdr.p_paddr = buf;
3191 img->phdr.p_memsz = buf_size;
813da627 3192
813da627 3193 img->shdr[1].sh_name = find_string(img->str, ".text");
5872bbf2 3194 img->shdr[1].sh_addr = buf;
813da627
RH
3195 img->shdr[1].sh_size = buf_size;
3196
5872bbf2
RH
3197 img->shdr[2].sh_name = find_string(img->str, ".debug_info");
3198 img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");
3199
3200 img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
3201 img->shdr[4].sh_size = debug_frame_size;
3202
3203 img->shdr[5].sh_name = find_string(img->str, ".symtab");
3204 img->shdr[6].sh_name = find_string(img->str, ".strtab");
3205
3206 img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
3207 img->sym[1].st_value = buf;
3208 img->sym[1].st_size = buf_size;
813da627 3209
5872bbf2 3210 img->di.cu_low_pc = buf;
45aba097 3211 img->di.cu_high_pc = buf + buf_size;
5872bbf2 3212 img->di.fn_low_pc = buf;
45aba097 3213 img->di.fn_high_pc = buf + buf_size;
813da627 3214
2c90784a
RH
3215 dfh = (DebugFrameHeader *)(img + 1);
3216 memcpy(dfh, debug_frame, debug_frame_size);
3217 dfh->fde.func_start = buf;
3218 dfh->fde.func_len = buf_size;
3219
813da627
RH
3220#ifdef DEBUG_JIT
3221 /* Enable this block to be able to debug the ELF image file creation.
3222 One can use readelf, objdump, or other inspection utilities. */
3223 {
3224 FILE *f = fopen("/tmp/qemu.jit", "w+b");
3225 if (f) {
5872bbf2 3226 if (fwrite(img, img_size, 1, f) != img_size) {
813da627
RH
3227 /* Avoid stupid unused return value warning for fwrite. */
3228 }
3229 fclose(f);
3230 }
3231 }
3232#endif
3233
3234 one_entry.symfile_addr = img;
3235 one_entry.symfile_size = img_size;
3236
3237 __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
3238 __jit_debug_descriptor.relevant_entry = &one_entry;
3239 __jit_debug_descriptor.first_entry = &one_entry;
3240 __jit_debug_register_code();
3241}
3242#else
5872bbf2
RH
3243/* No support for the feature. Provide the entry point expected by exec.c,
3244 and implement the internal function we declared earlier. */
813da627
RH
3245
3246static void tcg_register_jit_int(void *buf, size_t size,
2c90784a
RH
3247 const void *debug_frame,
3248 size_t debug_frame_size)
813da627
RH
3249{
3250}
3251
3252void tcg_register_jit(void *buf, size_t buf_size)
3253{
3254}
3255#endif /* ELF_HOST_MACHINE */