/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB. */
#undef DEBUG_JIT

#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"

/* Note: the long-term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS
#include "cpu.h"

#include "exec/cpu-common.h"
#include "exec/exec-all.h"

#include "tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS  ELFCLASS32
#else
# define ELF_CLASS  ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA   ELFDATA2MSB
#else
# define ELF_DATA   ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "sysemu/sysemu.h"

/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
#if TCG_TARGET_MAYBE_vec
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src);
static void tcg_out_dupi_vec(TCGContext *s, TCGType type,
                             TCGReg dst, tcg_target_long arg);
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                           unsigned vece, const TCGArg *args,
                           const int *const_args);
#else
static inline bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                                   TCGReg dst, TCGReg src)
{
    g_assert_not_reached();
}
static inline void tcg_out_dupi_vec(TCGContext *s, TCGType type,
                                    TCGReg dst, tcg_target_long arg)
{
    g_assert_not_reached();
}
static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl,
                                  unsigned vece, const TCGArg *args,
                                  const int *const_args)
{
    g_assert_not_reached();
}
#endif
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif

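/*
 * Editorial note (not in the original source): TCG_HIGHWATER below is the
 * slack kept at the end of each region; code_gen_highwater is set to the
 * region end minus this amount, and generation voluntarily flushes or
 * switches region once code_ptr crosses it, so 1024 bytes only needs to
 * exceed the output of a single opcode (see tcg_prologue_init() and
 * tcg_tb_alloc()).
 */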
#define TCG_HIGHWATER 1024

static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;

struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};
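
/*
 * An illustrative sketch of the fields above (hypothetical layout, not
 * part of the original source):
 *
 *   start  start_aligned                                    end
 *   |......|<-- size -->|guard|<-- size -->|guard| ... |<-- size -->|
 *          |<------ stride ----->|
 *
 * stride == size + one guard page; the first region also absorbs the
 * unaligned bytes between start and start_aligned, and the guard page
 * of the final region lies just past 'end'.
 */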

static struct tcg_region_state region;
/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
static TCGRegSet tcg_target_call_clobber_regs;

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
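
/*
 * A worked example of the helpers above (host choices illustrative, not
 * from this file): on a byte-stream host like x86, TCG_TARGET_INSN_UNIT_SIZE
 * is 1, so tcg_out32() takes the memcpy path and advances code_ptr by
 * 4 / 1 == 4 units; on a fixed-width host like AArch64 the unit size is 4
 * and the value is emitted as a single unit store.
 */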

/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r = tcg_malloc(sizeof(TCGRelocation));

    r->type = type;
    r->ptr = code_ptr;
    r->addend = addend;
    QSIMPLEQ_INSERT_TAIL(&l->relocs, r, next);
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    tcg_debug_assert(!l->has_value);
    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    memset(l, 0, sizeof(TCGLabel));
    l->id = s->nb_labels++;
    QSIMPLEQ_INIT(&l->relocs);

    QSIMPLEQ_INSERT_TAIL(&s->labels, l, next);

    return l;
}

static bool tcg_resolve_relocs(TCGContext *s)
{
    TCGLabel *l;

    QSIMPLEQ_FOREACH(l, &s->labels, next) {
        TCGRelocation *r;
        uintptr_t value = l->u.value;

        QSIMPLEQ_FOREACH(r, &l->relocs, next) {
            if (!patch_reloc(r->ptr, r->type, value, r->addend)) {
                return false;
            }
        }
    }
    return true;
}
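
/*
 * Putting the three helpers above together (explanatory note, not in the
 * original source): a forward branch records a TCGRelocation against its
 * label via tcg_out_reloc(); once the label's address is known,
 * tcg_out_label() fills in u.value_ptr; and at the end of code generation
 * tcg_resolve_relocs() walks every label and patches each recorded site
 * with patch_reloc().
 */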

static void set_jmp_reset_offset(TCGContext *s, int which)
{
    size_t off = tcg_current_code_size(s);
    s->tb_jmp_reset_offset[which] = off;
    /* Make sure that we didn't overflow the stored offset.  */
    assert(s->tb_jmp_reset_offset[which] == off);
}

#include "tcg-target.inc.c"

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * All lookups have their .size field set to 0.
     * From the glib sources we see that @ap is always the lookup key. However
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(void *p)
{
    size_t region_idx;

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}
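
/*
 * Worked example for the mapping above (numbers hypothetical): with
 * stride == 2 MiB, a pointer 5 MiB past start_aligned yields
 * offset / stride == 2, i.e. the third region's tree; pointers below
 * start_aligned (the unaligned head of the first region) fall back to
 * region 0, and anything past the last stride clamps to region n - 1.
 */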

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        bool err = tcg_region_initial_alloc__locked(s);

        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
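
/*
 * Worked example for tcg_n_regions() (numbers hypothetical): with a
 * 512 MiB code_gen_buffer and max_cpus == 8, the first iteration tries
 * 8 regions per thread: 512 MiB / 64 == 8 MiB >= 2 MiB, so 64 regions
 * are used. With an 8 MiB buffer, even one region per thread comes out
 * under 2 MiB, so the loop falls through and returns max_cpus == 8.
 */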

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    tcg_region_trees_init();

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = atomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < max_cpus);
    atomic_set(&tcg_ctxs[n], s);

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
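
/*
 * The capacity arithmetic spelled out (illustrative numbers, not from the
 * source): with 4 regions, one 4 KiB guard page per region and
 * TCG_HIGHWATER == 1024, guard_size == 4 KiB and the usable capacity is
 * the whole buffer span minus 4 * (4 KiB + 1024) bytes, i.e. each region
 * loses its guard page plus the high-water slack that generation never
 * writes past.
 */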

size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);

        total += atomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}

/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
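
/*
 * Usage note (not in the original source): callers allocate through the
 * tcg_malloc() fast path in tcg.h, which bumps pool_cur and only drops
 * into tcg_malloc_internal() when the current chunk is exhausted.  All
 * pool memory is released at once by tcg_pool_reset() below, so nothing
 * allocated this way may outlive the translation of one TB.
 */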

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}

typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);

void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for (op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
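
/*
 * Example of the rounding above (numbers hypothetical): with a 64-byte
 * icache line and code_gen_ptr at base + 0x130, the TB is placed at
 * base + 0x140 and the translated code starts at the first 64-byte
 * boundary past the TB struct, so the TB and its code never share a
 * cache line.
 */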

void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;
    s->code_gen_prologue = buf0;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        int result = tcg_out_pool_finalize(s);
        tcg_debug_assert(result == 0);
    }
#endif

    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ":  .quad  0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ":  .long  0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}

void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_ops = 0;
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
    QSIMPLEQ_INIT(&s->labels);
}

static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}
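
/*
 * Illustrative call (CPUFooState and its 'pc' field are hypothetical):
 *
 *     tcg_global_mem_new_internal(TCG_TYPE_I64, cpu_env,
 *                                 offsetof(CPUFooState, pc), "pc");
 *
 * On a 64-bit host this yields a single global named "pc"; on a 32-bit
 * host it yields the pair "pc_0"/"pc_1", with mem_offset biased by 4 so
 * that each half reads the correct word on either host endianness.
 */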

TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}

TCGv_vec tcg_temp_new_vec(TCGType type)
{
    TCGTemp *t;

#ifdef CONFIG_DEBUG_TCG
    switch (type) {
    case TCG_TYPE_V64:
        assert(TCG_TARGET_HAS_v64);
        break;
    case TCG_TYPE_V128:
        assert(TCG_TARGET_HAS_v128);
        break;
    case TCG_TYPE_V256:
        assert(TCG_TARGET_HAS_v256);
        break;
    default:
        g_assert_not_reached();
    }
#endif

    t = tcg_temp_new_internal(type, 0);
    return temp_tcgv_vec(t);
}

/* Create a new temp of the same type as an existing temp.  */
TCGv_vec tcg_temp_new_vec_matching(TCGv_vec match)
{
    TCGTemp *t = tcgv_vec_temp(match);

    tcg_debug_assert(t->temp_allocated != 0);

    t = tcg_temp_new_internal(t->base_type, 0);
    return temp_tcgv_vec(t);
}

void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}
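
/*
 * A note on the free-list indexing above (not in the original source):
 * free_temps is banked by base type, with local temps in a second bank
 * at offset TCG_TYPE_COUNT, so the index k computed here mirrors the one
 * in tcg_temp_new_internal() and a freed temp is only handed back to
 * requests for the same type and locality.
 */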

TCGv_i32 tcg_const_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

TCGv_i32 tcg_const_local_i32(int32_t val)
{
    TCGv_i32 t0;
    t0 = tcg_temp_local_new_i32();
    tcg_gen_movi_i32(t0, val);
    return t0;
}

TCGv_i64 tcg_const_local_i64(int64_t val)
{
    TCGv_i64 t0;
    t0 = tcg_temp_local_new_i64();
    tcg_gen_movi_i64(t0, val);
    return t0;
}

#if defined(CONFIG_DEBUG_TCG)
void tcg_clear_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    s->temps_in_use = 0;
}

int tcg_check_temp_count(void)
{
    TCGContext *s = tcg_ctx;
    if (s->temps_in_use) {
        /* Clear the count so that we don't give another
         * warning immediately next time around.
         */
        s->temps_in_use = 0;
        return 1;
    }
    return 0;
}
#endif

/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    const bool have_vec
        = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;

    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_extract2_i32:
        return TCG_TARGET_HAS_extract2_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extract2_i64:
        return TCG_TARGET_HAS_extract2_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case INDEX_op_mov_vec:
    case INDEX_op_dup_vec:
    case INDEX_op_dupi_vec:
    case INDEX_op_ld_vec:
    case INDEX_op_st_vec:
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_cmp_vec:
        return have_vec;
    case INDEX_op_dup2_vec:
        return have_vec && TCG_TARGET_REG_BITS == 32;
    case INDEX_op_not_vec:
        return have_vec && TCG_TARGET_HAS_not_vec;
    case INDEX_op_neg_vec:
        return have_vec && TCG_TARGET_HAS_neg_vec;
    case INDEX_op_andc_vec:
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
        return have_vec && TCG_TARGET_HAS_shi_vec;
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return have_vec && TCG_TARGET_HAS_shs_vec;
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
        return have_vec && TCG_TARGET_HAS_shv_vec;
    case INDEX_op_ssadd_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_ussub_vec:
        return have_vec && TCG_TARGET_HAS_sat_vec;
    case INDEX_op_smin_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_umax_vec:
        return have_vec && TCG_TARGET_HAS_minmax_vec;

    default:
        tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
        return true;
    }
}

/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

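    /*
     * Note on sizemask's layout (derived from the tests below, stated here
     * for clarity): each value gets two bits, bit 0 of the pair meaning
     * "64-bit" and bit 1 "signed".  The return value occupies bits 0..1
     * and argument i occupies bits (i+1)*2 and (i+1)*2 + 1, hence the
     * (1 << (i+1)*2) and (2 << (i+1)*2) tests.
     */
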
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
c896fe29 1817
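/*
 * Illustrative sketch of the sizemask encoding consumed above; these
 * example_* helpers are hypothetical and only document the layout.
 * Each value gets a two-bit field: pair 0 describes the return value,
 * pair i + 1 describes argument i.  Bit 0 of a pair flags a 64-bit
 * value, bit 1 a signed one; "sizemask & 1" above is thus the 64-bit
 * return test.
 */
static inline bool example_sizemask_arg_is_64bit(int sizemask, int i)
{
    return (sizemask & (1 << (i + 1) * 2)) != 0;  /* same test as above */
}

static inline bool example_sizemask_arg_is_signed(int sizemask, int i)
{
    return (sizemask & (2 << (i + 1) * 2)) != 0;
}
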
static void tcg_reg_alloc_start(TCGContext *s)
{
    int i, n;
    TCGTemp *ts;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM);
    }
    for (n = s->nb_temps; i < n; i++) {
        ts = &s->temps[i];
        ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
        ts->mem_allocated = 0;
        ts->fixed_reg = 0;
    }

    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
}

static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
                                 TCGTemp *ts)
{
    int idx = temp_idx(ts);

    if (ts->temp_global) {
        pstrcpy(buf, buf_size, ts->name);
    } else if (ts->temp_local) {
        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
    } else {
        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
    }
    return buf;
}

static char *tcg_get_arg_str(TCGContext *s, char *buf,
                             int buf_size, TCGArg arg)
{
    return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg));
}

/* Find helper name.  */
static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val)
{
    const char *ret = NULL;
    if (helper_table) {
        TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val);
        if (info) {
            ret = info->name;
        }
    }
    return ret;
}

static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};

static const char * const ldst_name[] =
{
    [MO_UB]   = "ub",
    [MO_SB]   = "sb",
    [MO_LEUW] = "leuw",
    [MO_LESW] = "lesw",
    [MO_LEUL] = "leul",
    [MO_LESL] = "lesl",
    [MO_LEQ]  = "leq",
    [MO_BEUW] = "beuw",
    [MO_BESW] = "besw",
    [MO_BEUL] = "beul",
    [MO_BESL] = "besl",
    [MO_BEQ]  = "beq",
};

static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
#ifdef ALIGNED_ONLY
    [MO_UNALN >> MO_ASHIFT]    = "un+",
    [MO_ALIGN >> MO_ASHIFT]    = "",
#else
    [MO_UNALN >> MO_ASHIFT]    = "",
    [MO_ALIGN >> MO_ASHIFT]    = "al+",
#endif
    [MO_ALIGN_2 >> MO_ASHIFT]  = "al2+",
    [MO_ALIGN_4 >> MO_ASHIFT]  = "al4+",
    [MO_ALIGN_8 >> MO_ASHIFT]  = "al8+",
    [MO_ALIGN_16 >> MO_ASHIFT] = "al16+",
    [MO_ALIGN_32 >> MO_ASHIFT] = "al32+",
    [MO_ALIGN_64 >> MO_ASHIFT] = "al64+",
};

static inline bool tcg_regset_single(TCGRegSet d)
{
    return (d & (d - 1)) == 0;
}

static inline TCGReg tcg_regset_first(TCGRegSet d)
{
    if (TCG_TARGET_NB_REGS <= 32) {
        return ctz32(d);
    } else {
        return ctz64(d);
    }
}

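/*
 * Worked example for the two helpers above (illustrative): for
 * d = 0b01000, d - 1 = 0b00111 and d & (d - 1) == 0, so a set with at
 * most one register tests as "single"; ctz32(0b01000) = 3 then recovers
 * the register number.  A set such as 0b01010 yields d & (d - 1) =
 * 0b01000 != 0 and is not single.
 */
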
static void tcg_dump_ops(TCGContext *s, bool have_prefs)
{
    char buf[128];
    TCGOp *op;

    QTAILQ_FOREACH(op, &s->ops, link) {
        int i, k, nb_oargs, nb_iargs, nb_cargs;
        const TCGOpDef *def;
        TCGOpcode c;
        int col = 0;

        c = op->opc;
        def = &tcg_op_defs[c];

        if (c == INDEX_op_insn_start) {
            nb_oargs = 0;
            col += qemu_log("\n ----");

            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                col += qemu_log(" " TARGET_FMT_lx, a);
            }
        } else if (c == INDEX_op_call) {
            /* variable number of arguments */
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            nb_cargs = def->nb_cargs;

            /* function name, flags, out args */
            col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name,
                            tcg_find_helper(s, op->args[nb_oargs + nb_iargs]),
                            op->args[nb_oargs + nb_iargs + 1], nb_oargs);
            for (i = 0; i < nb_oargs; i++) {
                col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                       op->args[i]));
            }
            for (i = 0; i < nb_iargs; i++) {
                TCGArg arg = op->args[nb_oargs + i];
                const char *t = "<dummy>";
                if (arg != TCG_CALL_DUMMY_ARG) {
                    t = tcg_get_arg_str(s, buf, sizeof(buf), arg);
                }
                col += qemu_log(",%s", t);
            }
        } else {
            col += qemu_log(" %s ", def->name);

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;

            if (def->flags & TCG_OPF_VECTOR) {
                col += qemu_log("v%d,e%d,", 64 << TCGOP_VECL(op),
                                8 << TCGOP_VECE(op));
            }

            k = 0;
            for (i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            for (i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    col += qemu_log(",");
                }
                col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf),
                                                      op->args[k++]));
            }
            switch (c) {
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
            case INDEX_op_cmp_vec:
                if (op->args[k] < ARRAY_SIZE(cond_name)
                    && cond_name[op->args[k]]) {
                    col += qemu_log(",%s", cond_name[op->args[k++]]);
                } else {
                    col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]);
                }
                i = 1;
                break;
            case INDEX_op_qemu_ld_i32:
            case INDEX_op_qemu_st_i32:
            case INDEX_op_qemu_ld_i64:
            case INDEX_op_qemu_st_i64:
                {
                    TCGMemOpIdx oi = op->args[k++];
                    TCGMemOp op = get_memop(oi);
                    unsigned ix = get_mmuidx(oi);

                    if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                        col += qemu_log(",$0x%x,%u", op, ix);
                    } else {
                        const char *s_al, *s_op;
                        s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT];
                        s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
                        col += qemu_log(",%s%s,%u", s_al, s_op, ix);
                    }
                    i = 1;
                }
                break;
            default:
                i = 0;
                break;
            }
            switch (c) {
            case INDEX_op_set_label:
            case INDEX_op_br:
            case INDEX_op_brcond_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_brcond2_i32:
                col += qemu_log("%s$L%d", k ? "," : "",
                                arg_label(op->args[k])->id);
                i++, k++;
                break;
            default:
                break;
            }
            for (; i < nb_cargs; i++, k++) {
                col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]);
            }
        }

        if (have_prefs || op->life) {
            for (; col < 40; ++col) {
                putc(' ', qemu_logfile);
            }
        }

        if (op->life) {
            unsigned life = op->life;

            if (life & (SYNC_ARG * 3)) {
                qemu_log("  sync:");
                for (i = 0; i < 2; ++i) {
                    if (life & (SYNC_ARG << i)) {
                        qemu_log(" %d", i);
                    }
                }
            }
            life /= DEAD_ARG;
            if (life) {
                qemu_log("  dead:");
                for (i = 0; life; ++i, life >>= 1) {
                    if (life & 1) {
                        qemu_log(" %d", i);
                    }
                }
            }
        }

        if (have_prefs) {
            for (i = 0; i < nb_oargs; ++i) {
                TCGRegSet set = op->output_pref[i];

                if (i == 0) {
                    qemu_log("  pref=");
                } else {
                    qemu_log(",");
                }
                if (set == 0) {
                    qemu_log("none");
                } else if (set == MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS)) {
                    qemu_log("all");
#ifdef CONFIG_DEBUG_TCG
                } else if (tcg_regset_single(set)) {
                    TCGReg reg = tcg_regset_first(set);
                    qemu_log("%s", tcg_target_reg_names[reg]);
#endif
                } else if (TCG_TARGET_NB_REGS <= 32) {
                    qemu_log("%#x", (uint32_t)set);
                } else {
                    qemu_log("%#" PRIx64, (uint64_t)set);
                }
            }
        }

        qemu_log("\n");
    }
}

/* we give more priority to constraints with fewer registers */
static int get_constraint_priority(const TCGOpDef *def, int k)
{
    const TCGArgConstraint *arg_ct;

    int i, n;
    arg_ct = &def->args_ct[k];
    if (arg_ct->ct & TCG_CT_ALIAS) {
        /* an alias is equivalent to a single register */
        n = 1;
    } else {
        if (!(arg_ct->ct & TCG_CT_REG)) {
            return 0;
        }
        n = 0;
        for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
            if (tcg_regset_test_reg(arg_ct->u.regs, i)) {
                n++;
            }
        }
    }
    return TCG_TARGET_NB_REGS - n + 1;
}

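/*
 * Worked example (illustrative): with TCG_TARGET_NB_REGS = 16, a
 * two-register constraint scores 16 - 2 + 1 = 15, a full "any register"
 * constraint scores 16 - 16 + 1 = 1, and an aliased argument counts as
 * a single register, 16 - 1 + 1 = 16.  Tighter constraints thus sort
 * first and get their registers before the easy-to-satisfy ones.
 */
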
/* sort from highest priority to lowest */
static void sort_constraints(TCGOpDef *def, int start, int n)
{
    int i, j, p1, p2, tmp;

    for (i = 0; i < n; i++) {
        def->sorted_args[start + i] = start + i;
    }
    if (n <= 1) {
        return;
    }
    for (i = 0; i < n - 1; i++) {
        for (j = i + 1; j < n; j++) {
            p1 = get_constraint_priority(def, def->sorted_args[start + i]);
            p2 = get_constraint_priority(def, def->sorted_args[start + j]);
            if (p1 < p2) {
                tmp = def->sorted_args[start + i];
                def->sorted_args[start + i] = def->sorted_args[start + j];
                def->sorted_args[start + j] = tmp;
            }
        }
    }
}

static void process_op_defs(TCGContext *s)
{
    TCGOpcode op;

    for (op = 0; op < NB_OPS; op++) {
        TCGOpDef *def = &tcg_op_defs[op];
        const TCGTargetOpDef *tdefs;
        TCGType type;
        int i, nb_args;

        if (def->flags & TCG_OPF_NOT_PRESENT) {
            continue;
        }

        nb_args = def->nb_iargs + def->nb_oargs;
        if (nb_args == 0) {
            continue;
        }

        tdefs = tcg_target_op_def(op);
        /* Missing TCGTargetOpDef entry. */
        tcg_debug_assert(tdefs != NULL);

        type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
        for (i = 0; i < nb_args; i++) {
            const char *ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry. */
            tcg_debug_assert(ct_str != NULL);

            def->args_ct[i].u.regs = 0;
            def->args_ct[i].ct = 0;
            while (*ct_str != '\0') {
                switch(*ct_str) {
                case '0' ... '9':
                    {
                        int oarg = *ct_str - '0';
                        tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
                        tcg_debug_assert(oarg < def->nb_oargs);
                        tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
                        /* TCG_CT_ALIAS is for the output arguments.
                           The input is tagged with TCG_CT_IALIAS. */
                        def->args_ct[i] = def->args_ct[oarg];
                        def->args_ct[oarg].ct |= TCG_CT_ALIAS;
                        def->args_ct[oarg].alias_index = i;
                        def->args_ct[i].ct |= TCG_CT_IALIAS;
                        def->args_ct[i].alias_index = oarg;
                    }
                    ct_str++;
                    break;
                case '&':
                    def->args_ct[i].ct |= TCG_CT_NEWREG;
                    ct_str++;
                    break;
                case 'i':
                    def->args_ct[i].ct |= TCG_CT_CONST;
                    ct_str++;
                    break;
                default:
                    ct_str = target_parse_constraint(&def->args_ct[i],
                                                     ct_str, type);
                    /* Typo in TCGTargetOpDef constraint. */
                    tcg_debug_assert(ct_str != NULL);
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just a heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);
    }
}

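/*
 * Illustrative example of the constraint strings parsed above (the
 * letters beyond '0'-'9', '&' and 'i' are target-defined); a backend
 * entry shaped like
 *     { INDEX_op_sub_i32, { "r", "0", "ri" } }
 * would say: the output wants a register, input 0 aliases output 0
 * (TCG_CT_ALIAS/TCG_CT_IALIAS), and input 1 may be a register or an
 * immediate (TCG_CT_CONST).
 */
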
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
    TCGLabel *label;

    switch (op->opc) {
    case INDEX_op_br:
        label = arg_label(op->args[0]);
        label->refs--;
        break;
    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        label = arg_label(op->args[3]);
        label->refs--;
        break;
    case INDEX_op_brcond2_i32:
        label = arg_label(op->args[5]);
        label->refs--;
        break;
    default:
        break;
    }

    QTAILQ_REMOVE(&s->ops, op, link);
    QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
    s->nb_ops--;

#ifdef CONFIG_PROFILER
    atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}

static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
    TCGContext *s = tcg_ctx;
    TCGOp *op;

    if (likely(QTAILQ_EMPTY(&s->free_ops))) {
        op = tcg_malloc(sizeof(TCGOp));
    } else {
        op = QTAILQ_FIRST(&s->free_ops);
        QTAILQ_REMOVE(&s->free_ops, op, link);
    }
    memset(op, 0, offsetof(TCGOp, link));
    op->opc = opc;
    s->nb_ops++;

    return op;
}

TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGOp *op = tcg_op_alloc(opc);
    QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
    return op;
}

TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_BEFORE(old_op, new_op, link);
    return new_op;
}

TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, TCGOpcode opc)
{
    TCGOp *new_op = tcg_op_alloc(opc);
    QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
    return new_op;
}

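/*
 * Usage sketch (illustrative) for the helpers above: a pass that wants
 * to bracket an existing op, as liveness_pass_2 below does for indirect
 * globals, writes
 *     TCGOp *lop = tcg_op_insert_before(s, op, INDEX_op_ld_i32);
 *     TCGOp *sop = tcg_op_insert_after(s, op, INDEX_op_st_i32);
 * Both paths share tcg_op_alloc, so ops recycled through s->free_ops by
 * tcg_op_remove are reused before new ones are carved from the arena.
 */
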
/* Reachable analysis: remove unreachable code.  */
static void reachable_code_pass(TCGContext *s)
{
    TCGOp *op, *op_next;
    bool dead = false;

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        bool remove = dead;
        TCGLabel *label;
        int call_flags;

        switch (op->opc) {
        case INDEX_op_set_label:
            label = arg_label(op->args[0]);
            if (label->refs == 0) {
                /*
                 * While there is an occasional backward branch, virtually
                 * all branches generated by the translators are forward.
                 * Which means that generally we will have already removed
                 * all references to the label that will be, and there is
                 * little to be gained by iterating.
                 */
                remove = true;
            } else {
                /* Once we see a label, insns become live again.  */
                dead = false;
                remove = false;

                /*
                 * Optimization can fold conditional branches to unconditional.
                 * If we find a label with one reference which is preceded by
                 * an unconditional branch to it, remove both.  This needed to
                 * wait until the dead code in between them was removed.
                 */
                if (label->refs == 1) {
                    TCGOp *op_prev = QTAILQ_PREV(op, link);
                    if (op_prev->opc == INDEX_op_br &&
                        label == arg_label(op_prev->args[0])) {
                        tcg_op_remove(s, op_prev);
                        remove = true;
                    }
                }
            }
            break;

        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_ptr:
            /* Unconditional branches; everything following is dead.  */
            dead = true;
            break;

        case INDEX_op_call:
            /* Notice noreturn helper calls, raising exceptions.  */
            call_flags = op->args[TCGOP_CALLO(op) + TCGOP_CALLI(op) + 1];
            if (call_flags & TCG_CALL_NO_RETURN) {
                dead = true;
            }
            break;

        case INDEX_op_insn_start:
            /* Never remove -- we need to keep these for unwind.  */
            remove = false;
            break;

        default:
            break;
        }

        if (remove) {
            tcg_op_remove(s, op);
        }
    }
}

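/*
 * Illustrative before/after for the single-reference fold above:
 *
 *     br $L1            <-- only reference to $L1
 *     (dead ops, removed earlier in this walk)
 *     set_label $L1
 *
 * once the dead ops between them are gone, the br and the set_label are
 * adjacent and both are deleted.
 */
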
#define TS_DEAD  1
#define TS_MEM   2

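/*
 * Summary of the state bits used by the passes below (illustrative):
 * TS_DEAD means the value is not needed by any later opcode, TS_MEM
 * means the in-memory copy is the valid one.  A global at function end
 * is TS_DEAD | TS_MEM (dead in registers, current in its canonical
 * slot); a plain temp is simply TS_DEAD.
 */
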
#define IS_DEAD_ARG(n)   (arg_life & (DEAD_ARG << (n)))
#define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n)))

/* For liveness_pass_1, the register preferences for a given temp.  */
static inline TCGRegSet *la_temp_pref(TCGTemp *ts)
{
    return ts->state_ptr;
}

/* For liveness_pass_1, reset the preferences for a given temp to the
 * maximal regset for its type.
 */
static inline void la_reset_pref(TCGTemp *ts)
{
    *la_temp_pref(ts)
        = (ts->state == TS_DEAD ? 0 : tcg_target_available_regs[ts->type]);
}

/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static void la_func_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = TS_DEAD;
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static void la_bb_end(TCGContext *s, int ng, int nt)
{
    int i;

    for (i = 0; i < ng; ++i) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
    for (i = ng; i < nt; ++i) {
        s->temps[i].state = (s->temps[i].temp_local
                             ? TS_DEAD | TS_MEM
                             : TS_DEAD);
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: sync globals back to memory.  */
static void la_global_sync(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; ++i) {
        int state = s->temps[i].state;
        s->temps[i].state = state | TS_MEM;
        if (state == TS_DEAD) {
            /* If the global was previously dead, reset prefs.  */
            la_reset_pref(&s->temps[i]);
        }
    }
}

/* liveness analysis: sync globals back to memory and kill.  */
static void la_global_kill(TCGContext *s, int ng)
{
    int i;

    for (i = 0; i < ng; i++) {
        s->temps[i].state = TS_DEAD | TS_MEM;
        la_reset_pref(&s->temps[i]);
    }
}

/* liveness analysis: note live globals crossing calls.  */
static void la_cross_call(TCGContext *s, int nt)
{
    TCGRegSet mask = ~tcg_target_call_clobber_regs;
    int i;

    for (i = 0; i < nt; i++) {
        TCGTemp *ts = &s->temps[i];
        if (!(ts->state & TS_DEAD)) {
            TCGRegSet *pset = la_temp_pref(ts);
            TCGRegSet set = *pset;

            set &= mask;
            /* If the combination is not possible, restart.  */
            if (set == 0) {
                set = tcg_target_available_regs[ts->type] & mask;
            }
            *pset = set;
        }
    }
}

/* Liveness analysis: update the opc_arg_life array to tell if a
   given input argument is dead. Instructions updating dead
   temporaries are removed. */
static void liveness_pass_1(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps = s->nb_temps;
    TCGOp *op, *op_prev;
    TCGRegSet *prefs;
    int i;

    prefs = tcg_malloc(sizeof(TCGRegSet) * nb_temps);
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = prefs + i;
    }

    /* ??? Should be redundant with the exit_tb that ends the TB.  */
    la_func_end(s, nb_globals, nb_temps);

    QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, link, op_prev) {
        int nb_iargs, nb_oargs;
        TCGOpcode opc_new, opc_new2;
        bool have_opc_new2;
        TCGLifeData arg_life = 0;
        TCGTemp *ts;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];

        switch (opc) {
        case INDEX_op_call:
            {
                int call_flags;
                int nb_call_regs;

                nb_oargs = TCGOP_CALLO(op);
                nb_iargs = TCGOP_CALLI(op);
                call_flags = op->args[nb_oargs + nb_iargs + 1];

                /* pure functions can be removed if their result is unused */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for (i = 0; i < nb_oargs; i++) {
                        ts = arg_temp(op->args[i]);
                        if (ts->state != TS_DEAD) {
                            goto do_not_remove_call;
                        }
                    }
                    goto do_remove;
                }
            do_not_remove_call:

                /* Output args are dead.  */
                for (i = 0; i < nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                    if (ts->state & TS_MEM) {
                        arg_life |= SYNC_ARG << i;
                    }
                    ts->state = TS_DEAD;
                    la_reset_pref(ts);

                    /* Not used -- it will be tcg_target_call_oarg_regs[i].  */
                    op->output_pref[i] = 0;
                }

                if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                    TCG_CALL_NO_READ_GLOBALS))) {
                    la_global_kill(s, nb_globals);
                } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                    la_global_sync(s, nb_globals);
                }

                /* Record arguments that die in this helper.  */
                for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                    ts = arg_temp(op->args[i]);
                    if (ts && ts->state & TS_DEAD) {
                        arg_life |= DEAD_ARG << i;
                    }
                }

                /* For all live registers, remove call-clobbered prefs.  */
                la_cross_call(s, nb_temps);

                nb_call_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);

                /* Input arguments are live for preceding opcodes.  */
                for (i = 0; i < nb_iargs; i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts && ts->state & TS_DEAD) {
                        /* For those arguments that die, and will be allocated
                         * in registers, clear the register set for that arg,
                         * to be filled in below.  For args that will be on
                         * the stack, reset to any available reg.
                         */
                        *la_temp_pref(ts)
                            = (i < nb_call_regs ? 0 :
                               tcg_target_available_regs[ts->type]);
                        ts->state &= ~TS_DEAD;
                    }
                }

                /* For each input argument, add its input register to prefs.
                   If a temp is used once, this produces a single set bit.  */
                for (i = 0; i < MIN(nb_call_regs, nb_iargs); i++) {
                    ts = arg_temp(op->args[i + nb_oargs]);
                    if (ts) {
                        tcg_regset_set_reg(*la_temp_pref(ts),
                                           tcg_target_call_iarg_regs[i]);
                    }
                }
            }
            break;
        case INDEX_op_insn_start:
            break;
        case INDEX_op_discard:
            /* mark the temporary as dead */
            ts = arg_temp(op->args[0]);
            ts->state = TS_DEAD;
            la_reset_pref(ts);
            break;

        case INDEX_op_add2_i32:
            opc_new = INDEX_op_add_i32;
            goto do_addsub2;
        case INDEX_op_sub2_i32:
            opc_new = INDEX_op_sub_i32;
            goto do_addsub2;
        case INDEX_op_add2_i64:
            opc_new = INDEX_op_add_i64;
            goto do_addsub2;
        case INDEX_op_sub2_i64:
            opc_new = INDEX_op_sub_i64;
        do_addsub2:
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part.  The result can be optimized to a simple
               add or sub.  This happens often for x86_64 guest when the
               cpu mode is set to 32 bit.  */
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    goto do_remove;
                }
                /* Replace the opcode and adjust the args in place,
                   leaving 3 unused args at the end.  */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[4];
                /* Fall through and mark the single-word operation live.  */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_muluh_i32;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i32;
            goto do_mul2;
        case INDEX_op_muls2_i32:
            opc_new = INDEX_op_mul_i32;
            opc_new2 = INDEX_op_mulsh_i32;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i32;
            goto do_mul2;
        case INDEX_op_mulu2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_muluh_i64;
            have_opc_new2 = TCG_TARGET_HAS_muluh_i64;
            goto do_mul2;
        case INDEX_op_muls2_i64:
            opc_new = INDEX_op_mul_i64;
            opc_new2 = INDEX_op_mulsh_i64;
            have_opc_new2 = TCG_TARGET_HAS_mulsh_i64;
            goto do_mul2;
        do_mul2:
            nb_iargs = 2;
            nb_oargs = 2;
            if (arg_temp(op->args[1])->state == TS_DEAD) {
                if (arg_temp(op->args[0])->state == TS_DEAD) {
                    /* Both parts of the operation are dead.  */
                    goto do_remove;
                }
                /* The high part of the operation is dead; generate the low. */
                op->opc = opc = opc_new;
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) {
                /* The low part of the operation is dead; generate the high. */
                op->opc = opc = opc_new2;
                op->args[0] = op->args[1];
                op->args[1] = op->args[2];
                op->args[2] = op->args[3];
            } else {
                goto do_not_remove;
            }
            /* Mark the single-word operation live.  */
            nb_oargs = 1;
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for (i = 0; i < nb_oargs; i++) {
                    if (arg_temp(op->args[i])->state != TS_DEAD) {
                        goto do_not_remove;
                    }
                }
                goto do_remove;
            }
            goto do_not_remove;

        do_remove:
            tcg_op_remove(s, op);
            break;

        do_not_remove:
            for (i = 0; i < nb_oargs; i++) {
                ts = arg_temp(op->args[i]);

                /* Remember the preference of the uses that followed.  */
                op->output_pref[i] = *la_temp_pref(ts);

                /* Output args are dead.  */
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
                if (ts->state & TS_MEM) {
                    arg_life |= SYNC_ARG << i;
                }
                ts->state = TS_DEAD;
                la_reset_pref(ts);
            }

            /* If end of basic block, update.  */
            if (def->flags & TCG_OPF_BB_EXIT) {
                la_func_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_BB_END) {
                la_bb_end(s, nb_globals, nb_temps);
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                la_global_sync(s, nb_globals);
                if (def->flags & TCG_OPF_CALL_CLOBBER) {
                    la_cross_call(s, nb_temps);
                }
            }

            /* Record arguments that die in this opcode.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    arg_life |= DEAD_ARG << i;
                }
            }

            /* Input arguments are live for preceding opcodes.  */
            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                ts = arg_temp(op->args[i]);
                if (ts->state & TS_DEAD) {
                    /* For operands that were dead, initially allow
                       all regs for the type.  */
                    *la_temp_pref(ts) = tcg_target_available_regs[ts->type];
                    ts->state &= ~TS_DEAD;
                }
            }

            /* Incorporate constraints for this operand.  */
            switch (opc) {
            case INDEX_op_mov_i32:
            case INDEX_op_mov_i64:
                /* Note that these are TCG_OPF_NOT_PRESENT and do not
                   have proper constraints.  That said, special case
                   moves to propagate preferences backward.  */
                if (IS_DEAD_ARG(1)) {
                    *la_temp_pref(arg_temp(op->args[0]))
                        = *la_temp_pref(arg_temp(op->args[1]));
                }
                break;

            default:
                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    const TCGArgConstraint *ct = &def->args_ct[i];
                    TCGRegSet set, *pset;

                    ts = arg_temp(op->args[i]);
                    pset = la_temp_pref(ts);
                    set = *pset;

                    set &= ct->u.regs;
                    if (ct->ct & TCG_CT_IALIAS) {
                        set &= op->output_pref[ct->alias_index];
                    }
                    /* If the combination is not possible, restart.  */
                    if (set == 0) {
                        set = ct->u.regs;
                    }
                    *pset = set;
                }
                break;
            }
            break;
        }
        op->life = arg_life;
    }
}

/* Liveness analysis: Convert indirect regs to direct temporaries.  */
static bool liveness_pass_2(TCGContext *s)
{
    int nb_globals = s->nb_globals;
    int nb_temps, i;
    bool changes = false;
    TCGOp *op, *op_next;

    /* Create a temporary for each indirect global.  */
    for (i = 0; i < nb_globals; ++i) {
        TCGTemp *its = &s->temps[i];
        if (its->indirect_reg) {
            TCGTemp *dts = tcg_temp_alloc(s);
            dts->type = its->type;
            dts->base_type = its->base_type;
            its->state_ptr = dts;
        } else {
            its->state_ptr = NULL;
        }
        /* All globals begin dead.  */
        its->state = TS_DEAD;
    }
    for (nb_temps = s->nb_temps; i < nb_temps; ++i) {
        TCGTemp *its = &s->temps[i];
        its->state_ptr = NULL;
        its->state = TS_DEAD;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def = &tcg_op_defs[opc];
        TCGLifeData arg_life = op->life;
        int nb_iargs, nb_oargs, call_flags;
        TCGTemp *arg_ts, *dir_ts;

        if (opc == INDEX_op_call) {
            nb_oargs = TCGOP_CALLO(op);
            nb_iargs = TCGOP_CALLI(op);
            call_flags = op->args[nb_oargs + nb_iargs + 1];
        } else {
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Set flags similar to how calls require.  */
            if (def->flags & TCG_OPF_BB_END) {
                /* Like writing globals: save_globals */
                call_flags = 0;
            } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                /* Like reading globals: sync_globals */
                call_flags = TCG_CALL_NO_WRITE_GLOBALS;
            } else {
                /* No effect on globals.  */
                call_flags = (TCG_CALL_NO_READ_GLOBALS |
                              TCG_CALL_NO_WRITE_GLOBALS);
            }
        }

        /* Make sure that input arguments are available.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts && arg_ts->state == TS_DEAD) {
                    TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32
                                      ? INDEX_op_ld_i32
                                      : INDEX_op_ld_i64);
                    TCGOp *lop = tcg_op_insert_before(s, op, lopc);

                    lop->args[0] = temp_arg(dir_ts);
                    lop->args[1] = temp_arg(arg_ts->mem_base);
                    lop->args[2] = arg_ts->mem_offset;

                    /* Loaded, but synced with memory.  */
                    arg_ts->state = TS_MEM;
                }
            }
        }

        /* Perform input replacement, and mark inputs that became dead.
           No action is required except keeping temp_state up to date
           so that we reload when needed.  */
        for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            if (arg_ts) {
                dir_ts = arg_ts->state_ptr;
                if (dir_ts) {
                    op->args[i] = temp_arg(dir_ts);
                    changes = true;
                    if (IS_DEAD_ARG(i)) {
                        arg_ts->state = TS_DEAD;
                    }
                }
            }
        }

        /* Liveness analysis should ensure that the following are
           all correct, for call sites and basic block end points.  */
        if (call_flags & TCG_CALL_NO_READ_GLOBALS) {
            /* Nothing to do */
        } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are synced back,
                   that is, either TS_DEAD or TS_MEM.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state != 0);
            }
        } else {
            for (i = 0; i < nb_globals; ++i) {
                /* Liveness should see that globals are saved back,
                   that is, TS_DEAD, waiting to be reloaded.  */
                arg_ts = &s->temps[i];
                tcg_debug_assert(arg_ts->state_ptr == 0
                                 || arg_ts->state == TS_DEAD);
            }
        }

        /* Outputs become available.  */
        for (i = 0; i < nb_oargs; i++) {
            arg_ts = arg_temp(op->args[i]);
            dir_ts = arg_ts->state_ptr;
            if (!dir_ts) {
                continue;
            }
            op->args[i] = temp_arg(dir_ts);
            changes = true;

            /* The output is now live and modified.  */
            arg_ts->state = 0;

            /* Sync outputs upon their last write.  */
            if (NEED_SYNC_ARG(i)) {
                TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32
                                  ? INDEX_op_st_i32
                                  : INDEX_op_st_i64);
                TCGOp *sop = tcg_op_insert_after(s, op, sopc);

                sop->args[0] = temp_arg(dir_ts);
                sop->args[1] = temp_arg(arg_ts->mem_base);
                sop->args[2] = arg_ts->mem_offset;

                arg_ts->state = TS_MEM;
            }
            /* Drop outputs that are dead.  */
            if (IS_DEAD_ARG(i)) {
                arg_ts->state = TS_DEAD;
            }
        }
    }

    return changes;
}

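/*
 * Illustrative effect of the rewrite above on an indirect global G,
 * with d the direct temporary allocated for it at the top of the pass:
 *
 *     add_i32 G, G, t0
 * becomes
 *     ld_i32  d, base, $off     <-- inserted while G was TS_DEAD
 *     add_i32 d, d, t0
 *     st_i32  d, base, $off     <-- inserted because NEED_SYNC_ARG(0)
 */
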
#ifdef CONFIG_DEBUG_TCG
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for (i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        switch (ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset,
                   tcg_target_reg_names[ts->mem_base->reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] != NULL) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}

static void check_regs(TCGContext *s)
{
    int reg;
    int k;
    TCGTemp *ts;
    char buf[64];

    for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        ts = s->reg_to_temp[reg];
        if (ts != NULL) {
            if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    for (k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
            && s->reg_to_temp[ts->reg] != ts) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
#endif

static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
{
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_base = s->frame_temp;
    ts->mem_allocated = 1;
    s->current_frame_offset += sizeof(tcg_target_long);
}

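/*
 * Worked example for the round-up above (illustrative): on a 64-bit
 * host sizeof(tcg_target_long) is 8, so a current_frame_offset of 12
 * becomes (12 + 7) & ~7 = 16 before the slot is carved out, keeping
 * every spill slot naturally aligned within the frame.
 */
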
static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet, TCGRegSet);

/* Mark a temporary as free or dead.  If 'free_or_dead' is negative,
   mark it free; otherwise mark it dead.  */
static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (ts->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ts->reg] = NULL;
    }
    ts->val_type = (free_or_dead < 0
                    || ts->temp_local
                    || ts->temp_global
                    ? TEMP_VAL_MEM : TEMP_VAL_DEAD);
}

/* Mark a temporary as dead.  */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
    temp_free_or_dead(s, ts, 1);
}

/* Sync a temporary to memory. 'allocated_regs' is used in case a temporary
   register needs to be allocated to store a constant.  If 'free_or_dead'
   is non-zero, subsequently release the temporary; if it is positive, the
   temp is dead; if it is negative, the temp is free.  */
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs,
                      TCGRegSet preferred_regs, int free_or_dead)
{
    if (ts->fixed_reg) {
        return;
    }
    if (!ts->mem_coherent) {
        if (!ts->mem_allocated) {
            temp_allocate_frame(s, ts);
        }
        switch (ts->val_type) {
        case TEMP_VAL_CONST:
            /* If we're going to free the temp immediately, then we won't
               require it later in a register, so attempt to store the
               constant to memory directly.  */
            if (free_or_dead
                && tcg_out_sti(s, ts->type, ts->val,
                               ts->mem_base->reg, ts->mem_offset)) {
                break;
            }
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      allocated_regs, preferred_regs);
            /* fallthrough */

        case TEMP_VAL_REG:
            tcg_out_st(s, ts->type, ts->reg,
                       ts->mem_base->reg, ts->mem_offset);
            break;

        case TEMP_VAL_MEM:
            break;

        case TEMP_VAL_DEAD:
        default:
            tcg_abort();
        }
        ts->mem_coherent = 1;
    }
    if (free_or_dead) {
        temp_free_or_dead(s, ts, free_or_dead);
    }
}

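/*
 * Illustrative uses of the free_or_dead convention above:
 *     temp_sync(s, ts, regs, 0,  0)   sync only, temp stays live
 *     temp_sync(s, ts, regs, 0,  1)   sync, then mark dead (last use)
 *     temp_sync(s, ts, regs, 0, -1)   sync, then mark free, as done by
 *                                     tcg_reg_free below when spilling
 */
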
/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs)
{
    TCGTemp *ts = s->reg_to_temp[reg];
    if (ts != NULL) {
        temp_sync(s, ts, allocated_regs, 0, -1);
    }
}

/**
 * tcg_reg_alloc:
 * @required_regs: Set of registers in which we must allocate.
 * @allocated_regs: Set of registers which must be avoided.
 * @preferred_regs: Set of registers we should prefer.
 * @rev: True if we search the registers in "indirect" order.
 *
 * The allocated register must be in @required_regs & ~@allocated_regs,
 * but if we can put it in @preferred_regs we may save a move later.
 */
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
                            TCGRegSet allocated_regs,
                            TCGRegSet preferred_regs, bool rev)
{
    int i, j, f, n = ARRAY_SIZE(tcg_target_reg_alloc_order);
    TCGRegSet reg_ct[2];
    const int *order;

    reg_ct[1] = required_regs & ~allocated_regs;
    tcg_debug_assert(reg_ct[1] != 0);
    reg_ct[0] = reg_ct[1] & preferred_regs;

    /* Skip the preferred_regs option if it cannot be satisfied,
       or if the preference made no difference.  */
    f = reg_ct[0] == 0 || reg_ct[0] == reg_ct[1];

    order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order;

    /* Try free registers, preferences first.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            if (s->reg_to_temp[reg] == NULL) {
                return reg;
            }
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (s->reg_to_temp[reg] == NULL &&
                    tcg_regset_test_reg(set, reg)) {
                    return reg;
                }
            }
        }
    }

    /* We must spill something.  */
    for (j = f; j < 2; j++) {
        TCGRegSet set = reg_ct[j];

        if (tcg_regset_single(set)) {
            /* One register in the set.  */
            TCGReg reg = tcg_regset_first(set);
            tcg_reg_free(s, reg, allocated_regs);
            return reg;
        } else {
            for (i = 0; i < n; i++) {
                TCGReg reg = order[i];
                if (tcg_regset_test_reg(set, reg)) {
                    tcg_reg_free(s, reg, allocated_regs);
                    return reg;
                }
            }
        }
    }

    tcg_abort();
}

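/*
 * Illustrative trace of the search above: with required = {r0,r1,r2},
 * allocated = {r0} and preferred = {r1}, reg_ct[1] = {r1,r2} and
 * reg_ct[0] = {r1}.  The first loop returns r1 if it is unoccupied,
 * then tries r2; only when every candidate holds a temp does the
 * second loop spill one with tcg_reg_free.
 */
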
/* Make sure the temporary is in a register.  If needed, allocate the register
   from DESIRED while avoiding ALLOCATED.  */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
                      TCGRegSet allocated_regs, TCGRegSet preferred_regs)
{
    TCGReg reg;

    switch (ts->val_type) {
    case TEMP_VAL_REG:
        return;
    case TEMP_VAL_CONST:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_movi(s, ts->type, reg, ts->val);
        ts->mem_coherent = 0;
        break;
    case TEMP_VAL_MEM:
        reg = tcg_reg_alloc(s, desired_regs, allocated_regs,
                            preferred_regs, ts->indirect_base);
        tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
        ts->mem_coherent = 1;
        break;
    case TEMP_VAL_DEAD:
    default:
        tcg_abort();
    }
    ts->reg = reg;
    ts->val_type = TEMP_VAL_REG;
    s->reg_to_temp[reg] = ts;
}

/* Save a temporary to memory. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant.  */
static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
    /* The liveness analysis already ensures that globals are back
       in memory. Keep a tcg_debug_assert for safety.  */
    tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
}

/* save globals to their canonical location and assume they can be
   modified by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        temp_save(s, &s->temps[i], allocated_regs);
    }
}

/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary register needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i, n;

    for (i = 0, n = s->nb_globals; i < n; i++) {
        TCGTemp *ts = &s->temps[i];
        tcg_debug_assert(ts->val_type != TEMP_VAL_REG
                         || ts->fixed_reg
                         || ts->mem_coherent);
    }
}

/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = s->nb_globals; i < s->nb_temps; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->temp_local) {
            temp_save(s, ts, allocated_regs);
        } else {
            /* The liveness analysis already ensures that temps are dead.
               Keep a tcg_debug_assert for safety. */
            tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD);
        }
    }

    save_globals(s, allocated_regs);
}

static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots,
                                  tcg_target_ulong val, TCGLifeData arg_life,
                                  TCGRegSet preferred_regs)
{
    /* ENV should not be modified.  */
    tcg_debug_assert(!ots->fixed_reg);

    /* The movi is not explicitly generated here.  */
    if (ots->val_type == TEMP_VAL_REG) {
        s->reg_to_temp[ots->reg] = NULL;
    }
    ots->val_type = TEMP_VAL_CONST;
    ots->val = val;
    ots->mem_coherent = 0;
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, ots, s->reserved_regs, preferred_regs, IS_DEAD_ARG(0));
    } else if (IS_DEAD_ARG(0)) {
        temp_dead(s, ots);
    }
}

static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op)
{
    TCGTemp *ots = arg_temp(op->args[0]);
    tcg_target_ulong val = op->args[1];

    tcg_reg_alloc_do_movi(s, ots, val, op->life, op->output_pref[0]);
}

static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
{
    const TCGLifeData arg_life = op->life;
    TCGRegSet allocated_regs, preferred_regs;
    TCGTemp *ts, *ots;
    TCGType otype, itype;

    allocated_regs = s->reserved_regs;
    preferred_regs = op->output_pref[0];
    ots = arg_temp(op->args[0]);
    ts = arg_temp(op->args[1]);

    /* ENV should not be modified.  */
    tcg_debug_assert(!ots->fixed_reg);

    /* Note that otype != itype for no-op truncation.  */
    otype = ots->type;
    itype = ts->type;

    if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant or generate sti */
        tcg_target_ulong val = ts->val;
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        tcg_reg_alloc_do_movi(s, ots, val, arg_life, preferred_regs);
        return;
    }

    /* If the source value is in memory we're going to be forced
       to have it in a register in order to perform the copy.  Copy
       the SOURCE value into its own register first, that way we
       don't have to reload SOURCE the next time it is used. */
    if (ts->val_type == TEMP_VAL_MEM) {
        temp_load(s, ts, tcg_target_available_regs[itype],
                  allocated_regs, preferred_regs);
    }

    tcg_debug_assert(ts->val_type == TEMP_VAL_REG);
    if (IS_DEAD_ARG(0)) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        tcg_debug_assert(NEED_SYNC_ARG(0));
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, ots);
        }
        tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, ts);
        }
        temp_dead(s, ots);
    } else {
        if (IS_DEAD_ARG(1) && !ts->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = NULL;
            }
            ots->reg = ts->reg;
            temp_dead(s, ts);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype],
                                         allocated_regs, preferred_regs,
                                         ots->indirect_base);
            }
            if (!tcg_out_mov(s, otype, ots->reg, ts->reg)) {
                /*
                 * Cross register class move not supported.
                 * Store the source register into the destination slot
                 * and leave the destination temp as TEMP_VAL_MEM.
                 */
                assert(!ots->fixed_reg);
                if (!ots->mem_allocated) {
                    temp_allocate_frame(s, ots);
                }
                tcg_out_st(s, ts->type, ts->reg,
                           ots->mem_base->reg, ots->mem_offset);
                ots->mem_coherent = 1;
                temp_free_or_dead(s, ots, -1);
                return;
            }
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = ots;
        if (NEED_SYNC_ARG(0)) {
            temp_sync(s, ots, allocated_regs, 0, 0);
        }
    }
}

dd186292 3410static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
c896fe29 3411{
dd186292
RH
3412 const TCGLifeData arg_life = op->life;
3413 const TCGOpDef * const def = &tcg_op_defs[op->opc];
82790a87
RH
3414 TCGRegSet i_allocated_regs;
3415 TCGRegSet o_allocated_regs;
b6638662
RH
3416 int i, k, nb_iargs, nb_oargs;
3417 TCGReg reg;
c896fe29
FB
3418 TCGArg arg;
3419 const TCGArgConstraint *arg_ct;
3420 TCGTemp *ts;
3421 TCGArg new_args[TCG_MAX_OP_ARGS];
3422 int const_args[TCG_MAX_OP_ARGS];
3423
3424 nb_oargs = def->nb_oargs;
3425 nb_iargs = def->nb_iargs;
3426
3427 /* copy constants */
3428 memcpy(new_args + nb_oargs + nb_iargs,
dd186292 3429 op->args + nb_oargs + nb_iargs,
c896fe29
FB
3430 sizeof(TCGArg) * def->nb_cargs);
3431
d21369f5
    i_allocated_regs = s->reserved_regs;
    o_allocated_regs = s->reserved_regs;

    /* satisfy input constraints */
    for (k = 0; k < nb_iargs; k++) {
        TCGRegSet i_preferred_regs, o_preferred_regs;

        i = def->sorted_args[nb_oargs + k];
        arg = op->args[i];
        arg_ct = &def->args_ct[i];
        ts = arg_temp(arg);

        if (ts->val_type == TEMP_VAL_CONST
            && tcg_target_const_match(ts->val, ts->type, arg_ct)) {
            /* constant is OK for instruction */
            const_args[i] = 1;
            new_args[i] = ts->val;
            continue;
        }

        i_preferred_regs = o_preferred_regs = 0;
        if (arg_ct->ct & TCG_CT_IALIAS) {
            o_preferred_regs = op->output_pref[arg_ct->alias_index];
            if (ts->fixed_reg) {
                /* if fixed register, we must allocate a new register
                   if the alias is not the same register */
                if (arg != op->args[arg_ct->alias_index]) {
                    goto allocate_in_reg;
                }
            } else {
                /* if the input is aliased to an output and if it is
                   not dead after the instruction, we must allocate
                   a new register and move it */
                if (!IS_DEAD_ARG(i)) {
                    goto allocate_in_reg;
                }

                /* check if the current register has already been allocated
                   for another input aliased to an output */
                if (ts->val_type == TEMP_VAL_REG) {
                    int k2, i2;
                    reg = ts->reg;
                    for (k2 = 0; k2 < k; k2++) {
                        i2 = def->sorted_args[nb_oargs + k2];
                        if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
                            reg == new_args[i2]) {
                            goto allocate_in_reg;
                        }
                    }
                }
                i_preferred_regs = o_preferred_regs;
            }
        }

        temp_load(s, ts, arg_ct->u.regs, i_allocated_regs, i_preferred_regs);
        reg = ts->reg;

        if (tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* nothing to do : the constraint is satisfied */
        } else {
        allocate_in_reg:
            /* allocate a new register matching the constraint
               and move the temporary register into it */
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      i_allocated_regs, 0);
            reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
                                o_preferred_regs, ts->indirect_base);
            if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                /*
                 * Cross register class move not supported.  Sync the
                 * temp back to its slot and load from there.
                 */
                temp_sync(s, ts, i_allocated_regs, 0, 0);
                tcg_out_ld(s, ts->type, reg,
                           ts->mem_base->reg, ts->mem_offset);
            }
        }
        new_args[i] = reg;
        const_args[i] = 0;
        tcg_regset_set_reg(i_allocated_regs, reg);
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    if (def->flags & TCG_OPF_BB_END) {
        tcg_reg_alloc_bb_end(s, i_allocated_regs);
    } else {
        if (def->flags & TCG_OPF_CALL_CLOBBER) {
            /* XXX: permit generic clobber register list ? */
            for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
                if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
                    tcg_reg_free(s, i, i_allocated_regs);
                }
            }
        }
        if (def->flags & TCG_OPF_SIDE_EFFECTS) {
            /* sync globals if the op has side effects and might trigger
               an exception. */
            sync_globals(s, i_allocated_regs);
        }

        /* satisfy the output constraints */
        for (k = 0; k < nb_oargs; k++) {
            i = def->sorted_args[k];
            arg = op->args[i];
            arg_ct = &def->args_ct[i];
            ts = arg_temp(arg);

            /* ENV should not be modified. */
            tcg_debug_assert(!ts->fixed_reg);

            if ((arg_ct->ct & TCG_CT_ALIAS)
                && !const_args[arg_ct->alias_index]) {
                reg = new_args[arg_ct->alias_index];
            } else if (arg_ct->ct & TCG_CT_NEWREG) {
                reg = tcg_reg_alloc(s, arg_ct->u.regs,
                                    i_allocated_regs | o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            } else {
                reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
                                    op->output_pref[k], ts->indirect_base);
            }
            tcg_regset_set_reg(o_allocated_regs, reg);
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = NULL;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            /*
             * Temp value is modified, so the value kept in memory is
             * potentially not the same.
             */
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = ts;
            new_args[i] = reg;
        }
    }

    /* emit instruction */
    if (def->flags & TCG_OPF_VECTOR) {
        tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
                       new_args, const_args);
    } else {
        tcg_out_op(s, op->opc, new_args, const_args);
    }

    /* move the outputs in the correct register if needed */
    for (i = 0; i < nb_oargs; i++) {
        ts = arg_temp(op->args[i]);

        /* ENV should not be modified. */
        tcg_debug_assert(!ts->fixed_reg);

        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, o_allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}

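/*
 * Worked example of the input aliasing logic above (illustrative; the
 * actual constraint strings live in each backend's tcg-target.inc.c):
 * a two-address host such as i386 gives add_i32 an output constraint
 * that aliases input 0.  For "add t0, t0, t1", t0 is dead after the
 * op, so its current register is handed straight to the output.  For
 * "add t0, t1, t2" with t1 still live, the allocate_in_reg path above
 * copies t1 into a fresh register so the instruction clobbers the
 * copy rather than the live value.
 */
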
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif
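
/*
 * STACK_DIR normalizes a stack offset for the host stack growth
 * direction: on the usual grow-down hosts STACK_DIR(8) is 8, while
 * with TCG_TARGET_STACK_GROWSUP it yields -8.  (tcg_reg_alloc_call
 * below open-codes the same idea with #ifdefs when laying out the
 * outgoing argument area.)
 */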

static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
    const int nb_oargs = TCGOP_CALLO(op);
    const int nb_iargs = TCGOP_CALLI(op);
    const TCGLifeData arg_life = op->life;
    int flags, nb_regs, i;
    TCGReg reg;
    TCGArg arg;
    TCGTemp *ts;
    intptr_t stack_offset;
    size_t call_stack_size;
    tcg_insn_unit *func_addr;
    int allocate_args;
    TCGRegSet allocated_regs;

    func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs];
    flags = op->args[nb_oargs + nb_iargs + 1];

    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_iargs) {
        nb_regs = nb_iargs;
    }

    /* assign stack slots first */
    call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
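    /*
     * Example of the rounding above: with three stack arguments,
     * 8-byte tcg_target_long and TCG_TARGET_STACK_ALIGN == 16
     * (illustrative values), the raw size is 24 bytes and
     * (24 + 15) & ~15 rounds up to 32.  The mask trick assumes
     * TCG_TARGET_STACK_ALIGN is a power of two.
     */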
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for (i = nb_regs; i < nb_iargs; i++) {
        arg = op->args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            temp_load(s, ts, tcg_target_available_regs[ts->type],
                      s->reserved_regs, 0);
            tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    allocated_regs = s->reserved_regs;
    for (i = 0; i < nb_regs; i++) {
        arg = op->args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = arg_temp(arg);
            reg = tcg_target_call_iarg_regs[i];

            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_reg_free(s, reg, allocated_regs);
                    if (!tcg_out_mov(s, ts->type, reg, ts->reg)) {
                        /*
                         * Cross register class move not supported.  Sync the
                         * temp back to its slot and load from there.
                         */
                        temp_sync(s, ts, allocated_regs, 0, 0);
                        tcg_out_ld(s, ts->type, reg,
                                   ts->mem_base->reg, ts->mem_offset);
                    }
                }
            } else {
                TCGRegSet arg_set = 0;

                tcg_reg_free(s, reg, allocated_regs);
                tcg_regset_set_reg(arg_set, reg);
                temp_load(s, ts, arg_set, allocated_regs, 0);
            }

            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* mark dead temporaries and free the associated registers */
    for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, arg_temp(op->args[i]));
        }
    }

    /* clobber call registers */
    for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
            tcg_reg_free(s, i, allocated_regs);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    tcg_out_call(s, func_addr);

    /* assign output registers and emit moves if needed */
    for (i = 0; i < nb_oargs; i++) {
        arg = op->args[i];
        ts = arg_temp(arg);

        /* ENV should not be modified. */
        tcg_debug_assert(!ts->fixed_reg);

        reg = tcg_target_call_oarg_regs[i];
        tcg_debug_assert(s->reg_to_temp[reg] == NULL);
        if (ts->val_type == TEMP_VAL_REG) {
            s->reg_to_temp[ts->reg] = NULL;
        }
        ts->val_type = TEMP_VAL_REG;
        ts->reg = reg;
        ts->mem_coherent = 0;
        s->reg_to_temp[reg] = ts;
        if (NEED_SYNC_ARG(i)) {
            temp_sync(s, ts, allocated_regs, 0, IS_DEAD_ARG(i));
        } else if (IS_DEAD_ARG(i)) {
            temp_dead(s, ts);
        }
    }
}
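
/*
 * Argument layout assumed above for a call op: op->args holds the
 * nb_oargs output temps, then the nb_iargs input temps, then the
 * callee address at args[nb_oargs + nb_iargs], with the TCG_CALL_*
 * flags word immediately after it.
 */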

#ifdef CONFIG_PROFILER

/* avoid copy/paste errors */
#define PROF_ADD(to, from, field)                               \
    do {                                                        \
        (to)->field += atomic_read(&((from)->field));           \
    } while (0)

#define PROF_MAX(to, from, field)                                    \
    do {                                                             \
        typeof((from)->field) val__ = atomic_read(&((from)->field)); \
        if (val__ > (to)->field) {                                   \
            (to)->field = val__;                                     \
        }                                                            \
    } while (0)
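
/*
 * For example, PROF_ADD(prof, orig, tb_count) expands to
 *     (prof)->tb_count += atomic_read(&((orig)->tb_count));
 * accumulating one per-context counter into the snapshot while
 * tolerating concurrent updates from the context's owning thread.
 */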

/* Pass in a zero'ed @prof */
static inline
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        const TCGProfile *orig = &s->prof;

        if (counters) {
            PROF_ADD(prof, orig, cpu_exec_time);
            PROF_ADD(prof, orig, tb_count1);
            PROF_ADD(prof, orig, tb_count);
            PROF_ADD(prof, orig, op_count);
            PROF_MAX(prof, orig, op_count_max);
            PROF_ADD(prof, orig, temp_count);
            PROF_MAX(prof, orig, temp_count_max);
            PROF_ADD(prof, orig, del_op_count);
            PROF_ADD(prof, orig, code_in_len);
            PROF_ADD(prof, orig, code_out_len);
            PROF_ADD(prof, orig, search_out_len);
            PROF_ADD(prof, orig, interm_time);
            PROF_ADD(prof, orig, code_time);
            PROF_ADD(prof, orig, la_time);
            PROF_ADD(prof, orig, opt_time);
            PROF_ADD(prof, orig, restore_count);
            PROF_ADD(prof, orig, restore_time);
        }
        if (table) {
            int i;

            for (i = 0; i < NB_OPS; i++) {
                PROF_ADD(prof, orig, table_op_count[i]);
            }
        }
    }
}

#undef PROF_ADD
#undef PROF_MAX

static void tcg_profile_snapshot_counters(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, true, false);
}

static void tcg_profile_snapshot_table(TCGProfile *prof)
{
    tcg_profile_snapshot(prof, false, true);
}

void tcg_dump_op_count(void)
{
    TCGProfile prof = {};
    int i;

    tcg_profile_snapshot_table(&prof);
    for (i = 0; i < NB_OPS; i++) {
        qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name,
                    prof.table_op_count[i]);
    }
}

int64_t tcg_cpu_exec_time(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    int64_t ret = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        const TCGProfile *prof = &s->prof;

        ret += atomic_read(&prof->cpu_exec_time);
    }
    return ret;
}
#else
void tcg_dump_op_count(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}

int64_t tcg_cpu_exec_time(void)
{
    error_report("%s: TCG profiler not compiled", __func__);
    exit(EXIT_FAILURE);
}
#endif


int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
#ifdef CONFIG_PROFILER
    TCGProfile *prof = &s->prof;
#endif
    int i, num_insns;
    TCGOp *op;

#ifdef CONFIG_PROFILER
    {
        int n = 0;

        QTAILQ_FOREACH(op, &s->ops, link) {
            n++;
        }
        atomic_set(&prof->op_count, prof->op_count + n);
        if (n > prof->op_count_max) {
            atomic_set(&prof->op_count_max, n);
        }

        n = s->nb_temps;
        atomic_set(&prof->temp_count, prof->temp_count + n);
        if (n > prof->temp_count_max) {
            atomic_set(&prof->temp_count_max, n);
        }
    }
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP:\n");
        tcg_dump_ops(s, false);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

#ifdef CONFIG_DEBUG_TCG
    /* Ensure all labels referenced have been emitted. */
    {
        TCGLabel *l;
        bool error = false;

        QSIMPLEQ_FOREACH(l, &s->labels, next) {
            if (unlikely(!l->present) && l->refs) {
                qemu_log_mask(CPU_LOG_TB_OP,
                              "$L%d referenced but not present.\n", l->id);
                error = true;
            }
        }
        assert(!error);
    }
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    tcg_optimize(s);
#endif

#ifdef CONFIG_PROFILER
    atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
    atomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif

    reachable_code_pass(s);
    liveness_pass_1(s);

    if (s->nb_indirects > 0) {
#ifdef DEBUG_DISAS
        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
                     && qemu_log_in_addr_range(tb->pc))) {
            qemu_log_lock();
            qemu_log("OP before indirect lowering:\n");
            tcg_dump_ops(s, false);
            qemu_log("\n");
            qemu_log_unlock();
        }
#endif
        /* Replace indirect temps with direct temps. */
        if (liveness_pass_2(s)) {
            /* If changes were made, re-run liveness. */
            liveness_pass_1(s);
        }
    }

#ifdef CONFIG_PROFILER
    atomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
                 && qemu_log_in_addr_range(tb->pc))) {
        qemu_log_lock();
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s, true);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = tb->tc.ptr;
    s->code_ptr = tb->tc.ptr;

#ifdef TCG_TARGET_NEED_LDST_LABELS
    QSIMPLEQ_INIT(&s->ldst_labels);
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    num_insns = -1;
    QTAILQ_FOREACH(op, &s->ops, link) {
        TCGOpcode opc = op->opc;

#ifdef CONFIG_PROFILER
        atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif

        switch (opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
        case INDEX_op_mov_vec:
            tcg_reg_alloc_mov(s, op);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
        case INDEX_op_dupi_vec:
            tcg_reg_alloc_movi(s, op);
            break;
        case INDEX_op_insn_start:
            if (num_insns >= 0) {
                size_t off = tcg_current_code_size(s);
                s->gen_insn_end_off[num_insns] = off;
                /* Assert that we do not overflow our stored offset. */
                assert(s->gen_insn_end_off[num_insns] == off);
            }
            num_insns++;
            for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
                target_ulong a;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
                a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]);
#else
                a = op->args[i];
#endif
                s->gen_insn_data[num_insns][i] = a;
            }
            break;
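        /*
         * In the insn_start case above, when the guest address is wider
         * than a host register (a 64-bit guest on a 32-bit host), each
         * target_ulong arrives split across two 32-bit op args, and
         * deposit64(lo, 32, 32, hi) reassembles them as
         * lo | ((uint64_t)hi << 32) before the store into gen_insn_data.
         */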
        case INDEX_op_discard:
            temp_dead(s, arg_temp(op->args[0]));
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, arg_label(op->args[0]), s->code_ptr);
            break;
        case INDEX_op_call:
            tcg_reg_alloc_call(s, op);
            break;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            tcg_debug_assert(tcg_op_supported(opc));
            /* Note: it would be faster to have specialized register
               allocator functions for some common argument patterns. */
            tcg_reg_alloc_op(s, op);
            break;
        }
#ifdef CONFIG_DEBUG_TCG
        check_regs(s);
#endif
        /* Test for (pending) buffer overflow.  The assumption is that any
           one operation beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           generating code without having to check during generation. */
        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
            return -1;
        }
        /* Test for TB overflow, as seen by gen_insn_end_off. */
        if (unlikely(tcg_current_code_size(s) > UINT16_MAX)) {
            return -2;
        }
    }
    tcg_debug_assert(num_insns >= 0);
    s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

    /* Generate TB finalization at the end of block */
#ifdef TCG_TARGET_NEED_LDST_LABELS
    i = tcg_out_ldst_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
#ifdef TCG_TARGET_NEED_POOL_LABELS
    i = tcg_out_pool_finalize(s);
    if (i < 0) {
        return i;
    }
#endif
    if (!tcg_resolve_relocs(s)) {
        return -2;
    }

    /* flush instruction cache */
    flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

    return tcg_current_code_size(s);
}
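
/*
 * How callers are expected to treat the result (a summary, not an API
 * contract beyond what the code above implements): a negative value
 * means the TB did not fit, -1 when the code buffer high-water mark
 * was crossed and -2 when the TB outgrew what gen_insn_end_off or the
 * relocations can represent, so the caller must retry, typically after
 * flushing the code buffer or splitting the TB; otherwise the return
 * is the size in bytes of the generated host code.
 */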

#ifdef CONFIG_PROFILER
void tcg_dump_info(void)
{
    TCGProfile prof = {};
    const TCGProfile *s;
    int64_t tb_count;
    int64_t tb_div_count;
    int64_t tot;

    tcg_profile_snapshot_counters(&prof);
    s = &prof;
    tb_count = s->tb_count;
    tb_div_count = tb_count ? tb_count : 1;
    tot = s->interm_time + s->code_time;

    qemu_printf("JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    qemu_printf("translated TBs      %" PRId64 " (aborted=%" PRId64
                " %0.1f%%)\n",
                tb_count, s->tb_count1 - tb_count,
                (double)(s->tb_count1 - s->tb_count)
                / (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
    qemu_printf("avg ops/TB          %0.1f max=%d\n",
                (double)s->op_count / tb_div_count, s->op_count_max);
    qemu_printf("deleted ops/TB      %0.2f\n",
                (double)s->del_op_count / tb_div_count);
    qemu_printf("avg temps/TB        %0.2f max=%d\n",
                (double)s->temp_count / tb_div_count, s->temp_count_max);
    qemu_printf("avg host code/TB    %0.1f\n",
                (double)s->code_out_len / tb_div_count);
    qemu_printf("avg search data/TB  %0.1f\n",
                (double)s->search_out_len / tb_div_count);

    qemu_printf("cycles/op           %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    qemu_printf("cycles/in byte      %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    qemu_printf("cycles/out byte     %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    qemu_printf("cycles/search byte  %0.1f\n",
                s->search_out_len ? (double)tot / s->search_out_len : 0);
    if (tot == 0) {
        tot = 1;
    }
    qemu_printf("  gen_interm time   %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    qemu_printf("  gen_code time     %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    qemu_printf("optim./code time    %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    qemu_printf("liveness/code time  %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    qemu_printf("cpu_restore count   %" PRId64 "\n",
                s->restore_count);
    qemu_printf("  avg cycles        %0.1f\n",
                s->restore_count ?
                (double)s->restore_time / s->restore_count : 0);
}
#else
void tcg_dump_info(void)
{
    qemu_printf("[TCG profiler not compiled]\n");
}
#endif

#ifdef ELF_HOST_MACHINE
/* In order to use this feature, the backend needs to do three things:

   (1) Define ELF_HOST_MACHINE to indicate both what value to
       put into the ELF image and to indicate support for the feature.

   (2) Define tcg_register_jit.  This should create a buffer containing
       the contents of a .debug_frame section that describes the post-
       prologue unwind info for the tcg machine.

   (3) Call tcg_register_jit_int, with the constructed .debug_frame.
*/
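
/* A minimal backend sketch (illustrative only; the DebugFrame contents,
   register numbers and CFA offsets are host-specific, see any
   tcg/<host>/tcg-target.inc.c for a real instance):

       void tcg_register_jit(void *buf, size_t buf_size)
       {
           static const DebugFrame frame = { ... CIE/FDE unwind data ... };
           tcg_register_jit_int(buf, buf_size, &frame, sizeof(frame));
       }
*/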

/* Begin GDB interface.  THE FOLLOWING MUST MATCH GDB DOCS. */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;

struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;
    uint64_t symfile_size;
};

struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
};

void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}

/* Must statically initialize the version, because GDB may check
   the version before we can set it. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };

/* End GDB interface. */

static int find_string(const char *strtab, const char *str)
{
    const char *p = strtab + 1;

    while (1) {
        if (strcmp(p, str) == 0) {
            return p - strtab;
        }
        p += strlen(p) + 1;
    }
}
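
/*
 * find_string assumes @str is always present in @strtab (there is no
 * failure return); that holds here because every name looked up below
 * is baked into img_template.str.  E.g. with the table
 * "\0.text\0.debug_info\0...", find_string(img->str, ".text") returns
 * 1, the offset just past the leading NUL.
 */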

static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };
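
    /*
     * The whole fake ELF image lives in a single heap allocation:
     * this ElfImage block with the .debug_frame contents appended
     * right behind it (the (img + 1) arithmetic below), which lets
     * every sh_offset in img_template be a compile-time offsetof()
     * constant.
     */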

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            .e_ehsize = sizeof(ElfW(Ehdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address. */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it. */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;
    DebugFrameHeader *dfh;

    img = g_malloc(img_size);
    *img = img_template;

    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf + buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf + buf_size;

    dfh = (DebugFrameHeader *)(img + 1);
    memcpy(dfh, debug_frame, debug_frame_size);
    dfh->fde.func_start = buf;
    dfh->fde.func_len = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities. */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            if (fwrite(img, img_size, 1, f) != 1) {
                /* Avoid stupid unused return value warning for fwrite. */
            }
            fclose(f);
        }
    }
#endif

    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
#else
/* No support for the feature.  Provide the entry point expected by exec.c,
   and implement the internal function we declared earlier. */

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
{
}

void tcg_register_jit(void *buf, size_t buf_size)
{
}
#endif /* ELF_HOST_MACHINE */

#if !TCG_TARGET_MAYBE_vec
void tcg_expand_vec_op(TCGOpcode o, TCGType t, unsigned e, TCGArg a0, ...)
{
    g_assert_not_reached();
}
#endif