/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* define it to use liveness analysis (better code) */
#define USE_TCG_OPTIMIZATIONS

#include "qemu/osdep.h"

/* Define to dump the ELF file used to communicate with GDB.  */
#undef DEBUG_JIT

#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"

/* Note: the long term plan is to reduce the dependencies on the QEMU
   CPU definitions. Currently they are used for qemu_ld/st
   instructions */
#define NO_CPU_IO_DEFS
#include "cpu.h"

#include "exec/cpu-common.h"
#include "exec/exec-all.h"

#include "tcg-op.h"

#if UINTPTR_MAX == UINT32_MAX
# define ELF_CLASS ELFCLASS32
#else
# define ELF_CLASS ELFCLASS64
#endif
#ifdef HOST_WORDS_BIGENDIAN
# define ELF_DATA ELFDATA2MSB
#else
# define ELF_DATA ELFDATA2LSB
#endif

#include "elf.h"
#include "exec/log.h"
#include "sysemu/sysemu.h"

/* Forward declarations for functions declared in tcg-target.inc.c and
   used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend);

/* The CIE and FDE header definitions will be common to all hosts.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct QEMU_PACKED {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    uintptr_t func_start;
    uintptr_t func_len;
} DebugFrameFDEHeader;

typedef struct QEMU_PACKED {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
} DebugFrameHeader;

static void tcg_register_jit_int(void *buf, size_t size,
                                 const void *debug_frame,
                                 size_t debug_frame_size)
    __attribute__((unused));

/* Forward declarations for functions declared and used in tcg-target.inc.c. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg);
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args);
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2);
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs);
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static bool tcg_out_ldst_finalize(TCGContext *s);
#endif

#define TCG_HIGHWATER 1024

static TCGContext **tcg_ctxs;
static unsigned int n_tcg_ctxs;
TCGv_env cpu_env = 0;

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start;
    void *start_aligned;
    void *end;
    size_t n;
    size_t size; /* size of one region */
    size_t stride; /* .size + guard size */

    /* fields protected by the lock */
    size_t current; /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;

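/*
 * Illustrative sketch, not part of the original file: the fields above carve
 * code_gen_buffer up as
 *
 *   start   start_aligned
 *   |.......|== region 0 ==|guard|== region 1 ==|guard| ... |== n-1 ==|guard|
 *                                                                          end
 *
 * Consecutive regions begin 'stride' bytes apart (stride == size + guard
 * page); region 0 also owns the unaligned prefix before start_aligned, and
 * the last region absorbs any leftover pages before end.  The hypothetical
 * helper below only restates that arithmetic; the real lookup is
 * tcg_region_bounds() further down.
 */
#if 0
static void *tcg_region_start_sketch(size_t i)   /* illustration only */
{
    return i == 0 ? region.start : region.start_aligned + i * region.stride;
}
#endif
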
static TCGRegSet tcg_target_available_regs[2];
static TCGRegSet tcg_target_call_clobber_regs;

#if TCG_TARGET_INSN_UNIT_SIZE == 1
static __attribute__((unused)) inline void tcg_out8(TCGContext *s, uint8_t v)
{
    *s->code_ptr++ = v;
}

static __attribute__((unused)) inline void tcg_patch8(tcg_insn_unit *p,
                                                      uint8_t v)
{
    *p = v;
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 2
static __attribute__((unused)) inline void tcg_out16(TCGContext *s, uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (2 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch16(tcg_insn_unit *p,
                                                       uint16_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 2) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 4
static __attribute__((unused)) inline void tcg_out32(TCGContext *s, uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (4 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch32(tcg_insn_unit *p,
                                                       uint32_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 4) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif

#if TCG_TARGET_INSN_UNIT_SIZE <= 8
static __attribute__((unused)) inline void tcg_out64(TCGContext *s, uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *s->code_ptr++ = v;
    } else {
        tcg_insn_unit *p = s->code_ptr;
        memcpy(p, &v, sizeof(v));
        s->code_ptr = p + (8 / TCG_TARGET_INSN_UNIT_SIZE);
    }
}

static __attribute__((unused)) inline void tcg_patch64(tcg_insn_unit *p,
                                                       uint64_t v)
{
    if (TCG_TARGET_INSN_UNIT_SIZE == 8) {
        *p = v;
    } else {
        memcpy(p, &v, sizeof(v));
    }
}
#endif
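
/*
 * Illustrative example, not part of the original file: how a backend uses
 * the emitters above.  On a host whose tcg_insn_unit is 4 bytes wide,
 * tcg_out32() is a single unit store; on a byte-stream host (unit size 1)
 * the same call memcpy()s the value and advances code_ptr by 4 units.
 * The instruction encoding below is hypothetical filler.
 */
#if 0
static void tcg_out_nop_example(TCGContext *s)   /* illustration only */
{
    tcg_out32(s, 0xd503201f);    /* e.g. an AArch64 NOP */
}
#endif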

/* label relocation processing */

static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
                          TCGLabel *l, intptr_t addend)
{
    TCGRelocation *r;

    if (l->has_value) {
        /* FIXME: This may break relocations on RISC targets that
           modify instruction fields in place.  The caller may not have
           written the initial value.  */
        patch_reloc(code_ptr, type, l->u.value, addend);
    } else {
        /* add a new relocation entry */
        r = tcg_malloc(sizeof(TCGRelocation));
        r->type = type;
        r->ptr = code_ptr;
        r->addend = addend;
        r->next = l->u.first_reloc;
        l->u.first_reloc = r;
    }
}

static void tcg_out_label(TCGContext *s, TCGLabel *l, tcg_insn_unit *ptr)
{
    intptr_t value = (intptr_t)ptr;
    TCGRelocation *r;

    tcg_debug_assert(!l->has_value);

    for (r = l->u.first_reloc; r != NULL; r = r->next) {
        patch_reloc(r->ptr, r->type, value, r->addend);
    }

    l->has_value = 1;
    l->u.value_ptr = ptr;
}

TCGLabel *gen_new_label(void)
{
    TCGContext *s = tcg_ctx;
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));

    *l = (TCGLabel){
        .id = s->nb_labels++
    };

    return l;
}

#include "tcg-target.inc.c"

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.start;
    }
    if (curr_region == region.n - 1) {
        end = region.end;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
static bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
{
    return tcg_region_alloc__locked(s);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = atomic_read(&tcg_ctxs[i]);
        bool err = tcg_region_initial_alloc__locked(s);

        g_assert(!err);
    }
    qemu_mutex_unlock(&region.lock);
}

#ifdef CONFIG_USER_ONLY
static size_t tcg_n_regions(void)
{
    return 1;
}
#else
/*
 * It is likely that some vCPUs will translate more code than others, so we
 * first try to set more regions than max_cpus, with those regions being of
 * reasonable size. If that's not possible we make do by evenly dividing
 * the code_gen_buffer among the vCPUs.
 */
static size_t tcg_n_regions(void)
{
    size_t i;

    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /* Try to have more regions than max_cpus, with each region being >= 2 MB */
    for (i = 8; i > 0; i--) {
        size_t regions_per_thread = i;
        size_t region_size;

        region_size = tcg_init_ctx.code_gen_buffer_size;
        region_size /= max_cpus * regions_per_thread;

        if (region_size >= 2 * 1024u * 1024) {
            return max_cpus * regions_per_thread;
        }
    }
    /* If we can't, then just allocate one region per vCPU thread */
    return max_cpus;
}
#endif
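
/*
 * Illustrative worked example with hypothetical numbers, not part of the
 * original file: with max_cpus == 8 and a 256 MiB code_gen_buffer, the first
 * iteration above (8 regions per thread) gives 256 MiB / (8 * 8) = 4 MiB,
 * which meets the 2 MiB minimum, so tcg_n_regions() returns 64.  With an
 * 8 MiB buffer even one region per thread stays below 2 MiB, so the
 * fallback of max_cpus (8) regions is used instead.
 */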

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(void)
{
    void *buf = tcg_init_ctx.code_gen_buffer;
    void *aligned;
    size_t size = tcg_init_ctx.code_gen_buffer_size;
    size_t page_size = qemu_real_host_page_size;
    size_t region_size;
    size_t n_regions;
    size_t i;

    n_regions = tcg_n_regions();

    /* The first region will be 'aligned - buf' bytes larger than the others */
    aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
    g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
    /*
     * Make region_size a multiple of page_size, using aligned as the start.
     * As a result of this we might end up with a few extra pages at the end of
     * the buffer; we will assign those to the last region.
     */
    region_size = (size - (aligned - buf)) / n_regions;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);

    /* init the region struct */
    qemu_mutex_init(&region.lock);
    region.n = n_regions;
    region.size = region_size - page_size;
    region.stride = region_size;
    region.start = buf;
    region.start_aligned = aligned;
    /* page-align the end, since its last page will be a guard page */
    region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
    /* account for that last guard page */
    region.end -= page_size;

    /* set guard pages */
    for (i = 0; i < region.n; i++) {
        void *start, *end;
        int rc;

        tcg_region_bounds(i, &start, &end);
        rc = qemu_mprotect_none(end, page_size);
        g_assert(!rc);
    }

    /* In user-mode we support only one ctx, so do the initial allocation now */
#ifdef CONFIG_USER_ONLY
    {
        bool err = tcg_region_initial_alloc__locked(tcg_ctx);

        g_assert(!err);
    }
#endif
}
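
/*
 * Illustrative worked example with hypothetical numbers, not part of the
 * original file: with a page-aligned 1 GiB buffer, 4 KiB pages and
 * n_regions == 64, region_size is 16 MiB, so region.size becomes
 * 16 MiB - 4 KiB (the last page of each region is its guard page) and
 * region.stride stays 16 MiB.  Pages left over after aligning the start and
 * end are absorbed by the first and last regions respectively.
 */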

/*
 * All TCG threads except the parent (i.e. the one that called tcg_context_init
 * and registered the target's TCG globals) must register with this function
 * before initiating translation.
 *
 * In user-mode we just point tcg_ctx to tcg_init_ctx. See the documentation
 * of tcg_region_init() for the reasoning behind this.
 *
 * In softmmu each caller registers its context in tcg_ctxs[]. Note that in
 * softmmu tcg_ctxs[] does not track tcg_init_ctx, since the initial context
 * is not used anymore for translation once this function is called.
 *
 * Not tracking tcg_init_ctx in tcg_ctxs[] in softmmu keeps code that iterates
 * over the array (e.g. tcg_code_size()) the same for both softmmu and
 * user-mode.
 */
#ifdef CONFIG_USER_ONLY
void tcg_register_thread(void)
{
    tcg_ctx = &tcg_init_ctx;
}
#else
void tcg_register_thread(void)
{
    TCGContext *s = g_malloc(sizeof(*s));
    unsigned int i, n;
    bool err;

    *s = tcg_init_ctx;

    /* Relink mem_base.  */
    for (i = 0, n = tcg_init_ctx.nb_globals; i < n; ++i) {
        if (tcg_init_ctx.temps[i].mem_base) {
            ptrdiff_t b = tcg_init_ctx.temps[i].mem_base - tcg_init_ctx.temps;
            tcg_debug_assert(b >= 0 && b < n);
            s->temps[i].mem_base = &s->temps[b];
        }
    }

    /* Claim an entry in tcg_ctxs */
    n = atomic_fetch_inc(&n_tcg_ctxs);
    g_assert(n < max_cpus);
    atomic_set(&tcg_ctxs[n], s);

    tcg_ctx = s;
    qemu_mutex_lock(&region.lock);
    err = tcg_region_initial_alloc__locked(tcg_ctx);
    g_assert(!err);
    qemu_mutex_unlock(&region.lock);
}
#endif /* !CONFIG_USER_ONLY */

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
        size_t size;

        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.end + guard_size - region.start;
    capacity -= region.n * (guard_size + TCG_HIGHWATER);
    return capacity;
}
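
/*
 * Illustrative note, not part of the original file: continuing the
 * hypothetical 1 GiB / 64-region example above, tcg_code_capacity() reports
 * 1 GiB - 64 * (4 KiB + TCG_HIGHWATER), i.e. the buffer minus one guard page
 * and one high-water reserve per region.
 */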

/* pool based memory allocation */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                p = p->next;
            }
        }
    }
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}

void tcg_pool_reset(TCGContext *s)
{
    TCGPool *p, *t;
    for (p = s->pool_first_large; p; p = t) {
        t = p->next;
        g_free(p);
    }
    s->pool_first_large = NULL;
    s->pool_cur = s->pool_end = NULL;
    s->pool_current = NULL;
}
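
/*
 * Illustrative usage sketch, not part of the original file: pool memory is
 * never freed individually.  Objects such as the TCGRelocation and TCGLabel
 * allocations above live until the pool is recycled by tcg_pool_reset(),
 * which releases only the oversized "large" chunks and keeps the fixed-size
 * chunks for reuse.
 */
#if 0
static void pool_usage_example(TCGContext *s)    /* illustration only */
{
    TCGLabel *l = tcg_malloc(sizeof(TCGLabel));  /* carved from the pool */
    (void)l;
    tcg_pool_reset(s);                           /* recycles every chunk */
}
#endif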

typedef struct TCGHelperInfo {
    void *func;
    const char *name;
    unsigned flags;
    unsigned sizemask;
} TCGHelperInfo;

#include "exec/helper-proto.h"

static const TCGHelperInfo all_helpers[] = {
#include "exec/helper-tcg.h"
};
static GHashTable *helper_table;

static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
static void process_op_defs(TCGContext *s);
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name);

void tcg_context_init(TCGContext *s)
{
    int op, total_args, n, i;
    TCGOpDef *def;
    TCGArgConstraint *args_ct;
    int *sorted_args;
    TCGTemp *ts;

    memset(s, 0, sizeof(*s));
    s->nb_globals = 0;

    /* Count total number of arguments and allocate the corresponding
       space */
    total_args = 0;
    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        n = def->nb_iargs + def->nb_oargs;
        total_args += n;
    }

    args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args);
    sorted_args = g_malloc(sizeof(int) * total_args);

    for(op = 0; op < NB_OPS; op++) {
        def = &tcg_op_defs[op];
        def->args_ct = args_ct;
        def->sorted_args = sorted_args;
        n = def->nb_iargs + def->nb_oargs;
        sorted_args += n;
        args_ct += n;
    }

    /* Register helpers.  */
    /* Use g_direct_hash/equal for direct pointer comparisons on func.  */
    helper_table = g_hash_table_new(NULL, NULL);

    for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
        g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
                            (gpointer)&all_helpers[i]);
    }

    tcg_target_init(s);
    process_op_defs(s);

    /* Reverse the order of the saved registers, assuming they're all at
       the start of tcg_target_reg_alloc_order.  */
    for (n = 0; n < ARRAY_SIZE(tcg_target_reg_alloc_order); ++n) {
        int r = tcg_target_reg_alloc_order[n];
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, r)) {
            break;
        }
    }
    for (i = 0; i < n; ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[n - 1 - i];
    }
    for (; i < ARRAY_SIZE(tcg_target_reg_alloc_order); ++i) {
        indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
    }

    tcg_ctx = s;
    /*
     * In user-mode we simply share the init context among threads, since we
     * use a single region. See the documentation of tcg_region_init() for
     * the reasoning behind this.
     * In softmmu we will have at most max_cpus TCG threads.
     */
#ifdef CONFIG_USER_ONLY
    tcg_ctxs = &tcg_ctx;
    n_tcg_ctxs = 1;
#else
    tcg_ctxs = g_new(TCGContext *, max_cpus);
#endif

    tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
    ts = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_env = temp_tcgv_ptr(ts);
}
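
/*
 * Illustrative example with hypothetical register names, not part of the
 * original file: if tcg_target_reg_alloc_order begins with three call-saved
 * registers { A, B, C, ... }, the loops above set indirect_reg_alloc_order
 * to { C, B, A, ... }, presumably so that indirect base registers are drawn
 * from the opposite end of the saved set than ordinary allocations.
 */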

/*
 * Allocate TBs right before their corresponding translated code, making
 * sure that TBs and code are on different cache lines.
 */
TranslationBlock *tcg_tb_alloc(TCGContext *s)
{
    uintptr_t align = qemu_icache_linesize;
    TranslationBlock *tb;
    void *next;

 retry:
    tb = (void *)ROUND_UP((uintptr_t)s->code_gen_ptr, align);
    next = (void *)ROUND_UP((uintptr_t)(tb + 1), align);

    if (unlikely(next > s->code_gen_highwater)) {
        if (tcg_region_alloc(s)) {
            return NULL;
        }
        goto retry;
    }
    atomic_set(&s->code_gen_ptr, next);
    s->data_gen_ptr = NULL;
    return tb;
}
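
/*
 * Illustrative sketch, not part of the original file: the resulting layout
 * inside a region is
 *
 *   ... | TB struct | pad to icache line | translated code | next TB ...
 *
 * so a TB and its code never share a cache line, and the highwater check
 * above switches to a fresh region (or returns NULL once all regions are
 * full) before the current one can overflow.
 */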

void tcg_prologue_init(TCGContext *s)
{
    size_t prologue_size, total_size;
    void *buf0, *buf1;

    /* Put the prologue at the beginning of code_gen_buffer.  */
    buf0 = s->code_gen_buffer;
    total_size = s->code_gen_buffer_size;
    s->code_ptr = buf0;
    s->code_buf = buf0;
    s->data_gen_ptr = NULL;
    s->code_gen_prologue = buf0;

    /* Compute a high-water mark, at which we voluntarily flush the buffer
       and start over.  The size here is arbitrary, significantly larger
       than we expect the code generation for any one opcode to require.  */
    s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    s->pool_labels = NULL;
#endif

    /* Generate the prologue.  */
    tcg_target_qemu_prologue(s);

#ifdef TCG_TARGET_NEED_POOL_LABELS
    /* Allow the prologue to put e.g. guest_base into a pool entry.  */
    {
        bool ok = tcg_out_pool_finalize(s);
        tcg_debug_assert(ok);
    }
#endif

    buf1 = s->code_ptr;
    flush_icache_range((uintptr_t)buf0, (uintptr_t)buf1);

    /* Deduct the prologue from the buffer.  */
    prologue_size = tcg_current_code_size(s);
    s->code_gen_ptr = buf1;
    s->code_gen_buffer = buf1;
    s->code_buf = buf1;
    total_size -= prologue_size;
    s->code_gen_buffer_size = total_size;

    tcg_register_jit(s->code_gen_buffer, total_size);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log_lock();
        qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
        if (s->data_gen_ptr) {
            size_t code_size = s->data_gen_ptr - buf0;
            size_t data_size = prologue_size - code_size;
            size_t i;

            log_disas(buf0, code_size);

            for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
                if (sizeof(tcg_target_ulong) == 8) {
                    qemu_log("0x%08" PRIxPTR ": .quad 0x%016" PRIx64 "\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint64_t *)(s->data_gen_ptr + i));
                } else {
                    qemu_log("0x%08" PRIxPTR ": .long 0x%08x\n",
                             (uintptr_t)s->data_gen_ptr + i,
                             *(uint32_t *)(s->data_gen_ptr + i));
                }
            }
        } else {
            log_disas(buf0, prologue_size);
        }
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    /* Assert that goto_ptr is implemented completely.  */
    if (TCG_TARGET_HAS_goto_ptr) {
        tcg_debug_assert(s->code_gen_epilogue != NULL);
    }
}

void tcg_func_start(TCGContext *s)
{
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;

    /* No temps have been previously allocated for size or locality.  */
    memset(s->free_temps, 0, sizeof(s->free_temps));

    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    QTAILQ_INIT(&s->ops);
    QTAILQ_INIT(&s->free_ops);
}

static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
{
    int n = s->nb_temps++;
    tcg_debug_assert(n < TCG_MAX_TEMPS);
    return memset(&s->temps[n], 0, sizeof(TCGTemp));
}

static inline TCGTemp *tcg_global_alloc(TCGContext *s)
{
    TCGTemp *ts;

    tcg_debug_assert(s->nb_globals == s->nb_temps);
    s->nb_globals++;
    ts = tcg_temp_alloc(s);
    ts->temp_global = 1;

    return ts;
}

static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
                                            TCGReg reg, const char *name)
{
    TCGTemp *ts;

    if (TCG_TARGET_REG_BITS == 32 && type != TCG_TYPE_I32) {
        tcg_abort();
    }

    ts = tcg_global_alloc(s);
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    tcg_regset_set_reg(s->reserved_regs, reg);

    return ts;
}

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size)
{
    s->frame_start = start;
    s->frame_end = start + size;
    s->frame_temp
        = tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame");
}

TCGTemp *tcg_global_mem_new_internal(TCGType type, TCGv_ptr base,
                                     intptr_t offset, const char *name)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *base_ts = tcgv_ptr_temp(base);
    TCGTemp *ts = tcg_global_alloc(s);
    int indirect_reg = 0, bigendian = 0;
#ifdef HOST_WORDS_BIGENDIAN
    bigendian = 1;
#endif

    if (!base_ts->fixed_reg) {
        /* We do not support double-indirect registers.  */
        tcg_debug_assert(!base_ts->indirect_reg);
        base_ts->indirect_base = 1;
        s->nb_indirects += (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64
                            ? 2 : 1);
        indirect_reg = 1;
    }

    if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
        TCGTemp *ts2 = tcg_global_alloc(s);
        char buf[64];

        ts->base_type = TCG_TYPE_I64;
        ts->type = TCG_TYPE_I32;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset + bigendian * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);

        tcg_debug_assert(ts2 == ts + 1);
        ts2->base_type = TCG_TYPE_I64;
        ts2->type = TCG_TYPE_I32;
        ts2->indirect_reg = indirect_reg;
        ts2->mem_allocated = 1;
        ts2->mem_base = base_ts;
        ts2->mem_offset = offset + (1 - bigendian) * 4;
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts2->name = strdup(buf);
    } else {
        ts->base_type = type;
        ts->type = type;
        ts->indirect_reg = indirect_reg;
        ts->mem_allocated = 1;
        ts->mem_base = base_ts;
        ts->mem_offset = offset;
        ts->name = name;
    }
    return ts;
}

static TCGTemp *tcg_temp_new_internal(TCGType type, int temp_local)
{
    TCGContext *s = tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    k = type + (temp_local ? TCG_TYPE_COUNT : 0);
    idx = find_first_bit(s->free_temps[k].l, TCG_MAX_TEMPS);
    if (idx < TCG_MAX_TEMPS) {
        /* There is already an available temp with the right type.  */
        clear_bit(idx, s->free_temps[k].l);

        ts = &s->temps[idx];
        ts->temp_allocated = 1;
        tcg_debug_assert(ts->base_type == type);
        tcg_debug_assert(ts->temp_local == temp_local);
    } else {
        ts = tcg_temp_alloc(s);
        if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
            TCGTemp *ts2 = tcg_temp_alloc(s);

            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;

            tcg_debug_assert(ts2 == ts + 1);
            ts2->base_type = TCG_TYPE_I64;
            ts2->type = TCG_TYPE_I32;
            ts2->temp_allocated = 1;
            ts2->temp_local = temp_local;
        } else {
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return ts;
}

TCGv_i32 tcg_temp_new_internal_i32(int temp_local)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I32, temp_local);
    return temp_tcgv_i32(t);
}

TCGv_i64 tcg_temp_new_internal_i64(int temp_local)
{
    TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I64, temp_local);
    return temp_tcgv_i64(t);
}

static void tcg_temp_free_internal(TCGTemp *ts)
{
    TCGContext *s = tcg_ctx;
    int k, idx;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    tcg_debug_assert(ts->temp_global == 0);
    tcg_debug_assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;

    idx = temp_idx(ts);
    k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0);
    set_bit(idx, s->free_temps[k].l);
}
a7812ae4 PB |
1050 | void tcg_temp_free_i32(TCGv_i32 arg) |
1051 | { | |
085272b3 | 1052 | tcg_temp_free_internal(tcgv_i32_temp(arg)); |
a7812ae4 PB |
1053 | } |
1054 | ||
1055 | void tcg_temp_free_i64(TCGv_i64 arg) | |
1056 | { | |
085272b3 | 1057 | tcg_temp_free_internal(tcgv_i64_temp(arg)); |
a7812ae4 | 1058 | } |
e8996ee0 | 1059 | |
a7812ae4 | 1060 | TCGv_i32 tcg_const_i32(int32_t val) |
c896fe29 | 1061 | { |
a7812ae4 PB |
1062 | TCGv_i32 t0; |
1063 | t0 = tcg_temp_new_i32(); | |
e8996ee0 FB |
1064 | tcg_gen_movi_i32(t0, val); |
1065 | return t0; | |
1066 | } | |
c896fe29 | 1067 | |
a7812ae4 | 1068 | TCGv_i64 tcg_const_i64(int64_t val) |
e8996ee0 | 1069 | { |
a7812ae4 PB |
1070 | TCGv_i64 t0; |
1071 | t0 = tcg_temp_new_i64(); | |
e8996ee0 FB |
1072 | tcg_gen_movi_i64(t0, val); |
1073 | return t0; | |
c896fe29 FB |
1074 | } |
1075 | ||
a7812ae4 | 1076 | TCGv_i32 tcg_const_local_i32(int32_t val) |
bdffd4a9 | 1077 | { |
a7812ae4 PB |
1078 | TCGv_i32 t0; |
1079 | t0 = tcg_temp_local_new_i32(); | |
bdffd4a9 AJ |
1080 | tcg_gen_movi_i32(t0, val); |
1081 | return t0; | |
1082 | } | |
1083 | ||
a7812ae4 | 1084 | TCGv_i64 tcg_const_local_i64(int64_t val) |
bdffd4a9 | 1085 | { |
a7812ae4 PB |
1086 | TCGv_i64 t0; |
1087 | t0 = tcg_temp_local_new_i64(); | |
bdffd4a9 AJ |
1088 | tcg_gen_movi_i64(t0, val); |
1089 | return t0; | |
1090 | } | |
1091 | ||
27bfd83c PM |
1092 | #if defined(CONFIG_DEBUG_TCG) |
1093 | void tcg_clear_temp_count(void) | |
1094 | { | |
b1311c4a | 1095 | TCGContext *s = tcg_ctx; |
27bfd83c PM |
1096 | s->temps_in_use = 0; |
1097 | } | |
1098 | ||
1099 | int tcg_check_temp_count(void) | |
1100 | { | |
b1311c4a | 1101 | TCGContext *s = tcg_ctx; |
27bfd83c PM |
1102 | if (s->temps_in_use) { |
1103 | /* Clear the count so that we don't give another | |
1104 | * warning immediately next time around. | |
1105 | */ | |
1106 | s->temps_in_use = 0; | |
1107 | return 1; | |
1108 | } | |
1109 | return 0; | |
1110 | } | |
1111 | #endif | |
1112 | ||
/* Return true if OP may appear in the opcode stream.
   Test the runtime variable that controls each opcode.  */
bool tcg_op_supported(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_discard:
    case INDEX_op_set_label:
    case INDEX_op_call:
    case INDEX_op_br:
    case INDEX_op_mb:
    case INDEX_op_insn_start:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        return true;

    case INDEX_op_goto_ptr:
        return TCG_TARGET_HAS_goto_ptr;

    case INDEX_op_mov_i32:
    case INDEX_op_movi_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_brcond_i32:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return true;

    case INDEX_op_movcond_i32:
        return TCG_TARGET_HAS_movcond_i32;
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
        return TCG_TARGET_HAS_div_i32;
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        return TCG_TARGET_HAS_rem_i32;
    case INDEX_op_div2_i32:
    case INDEX_op_divu2_i32:
        return TCG_TARGET_HAS_div2_i32;
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
        return TCG_TARGET_HAS_rot_i32;
    case INDEX_op_deposit_i32:
        return TCG_TARGET_HAS_deposit_i32;
    case INDEX_op_extract_i32:
        return TCG_TARGET_HAS_extract_i32;
    case INDEX_op_sextract_i32:
        return TCG_TARGET_HAS_sextract_i32;
    case INDEX_op_add2_i32:
        return TCG_TARGET_HAS_add2_i32;
    case INDEX_op_sub2_i32:
        return TCG_TARGET_HAS_sub2_i32;
    case INDEX_op_mulu2_i32:
        return TCG_TARGET_HAS_mulu2_i32;
    case INDEX_op_muls2_i32:
        return TCG_TARGET_HAS_muls2_i32;
    case INDEX_op_muluh_i32:
        return TCG_TARGET_HAS_muluh_i32;
    case INDEX_op_mulsh_i32:
        return TCG_TARGET_HAS_mulsh_i32;
    case INDEX_op_ext8s_i32:
        return TCG_TARGET_HAS_ext8s_i32;
    case INDEX_op_ext16s_i32:
        return TCG_TARGET_HAS_ext16s_i32;
    case INDEX_op_ext8u_i32:
        return TCG_TARGET_HAS_ext8u_i32;
    case INDEX_op_ext16u_i32:
        return TCG_TARGET_HAS_ext16u_i32;
    case INDEX_op_bswap16_i32:
        return TCG_TARGET_HAS_bswap16_i32;
    case INDEX_op_bswap32_i32:
        return TCG_TARGET_HAS_bswap32_i32;
    case INDEX_op_not_i32:
        return TCG_TARGET_HAS_not_i32;
    case INDEX_op_neg_i32:
        return TCG_TARGET_HAS_neg_i32;
    case INDEX_op_andc_i32:
        return TCG_TARGET_HAS_andc_i32;
    case INDEX_op_orc_i32:
        return TCG_TARGET_HAS_orc_i32;
    case INDEX_op_eqv_i32:
        return TCG_TARGET_HAS_eqv_i32;
    case INDEX_op_nand_i32:
        return TCG_TARGET_HAS_nand_i32;
    case INDEX_op_nor_i32:
        return TCG_TARGET_HAS_nor_i32;
    case INDEX_op_clz_i32:
        return TCG_TARGET_HAS_clz_i32;
    case INDEX_op_ctz_i32:
        return TCG_TARGET_HAS_ctz_i32;
    case INDEX_op_ctpop_i32:
        return TCG_TARGET_HAS_ctpop_i32;

    case INDEX_op_brcond2_i32:
    case INDEX_op_setcond2_i32:
        return TCG_TARGET_REG_BITS == 32;

    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
    case INDEX_op_setcond_i64:
    case INDEX_op_brcond_i64:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return TCG_TARGET_REG_BITS == 64;

    case INDEX_op_movcond_i64:
        return TCG_TARGET_HAS_movcond_i64;
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
        return TCG_TARGET_HAS_div_i64;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        return TCG_TARGET_HAS_rem_i64;
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
        return TCG_TARGET_HAS_div2_i64;
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
        return TCG_TARGET_HAS_rot_i64;
    case INDEX_op_deposit_i64:
        return TCG_TARGET_HAS_deposit_i64;
    case INDEX_op_extract_i64:
        return TCG_TARGET_HAS_extract_i64;
    case INDEX_op_sextract_i64:
        return TCG_TARGET_HAS_sextract_i64;
    case INDEX_op_extrl_i64_i32:
        return TCG_TARGET_HAS_extrl_i64_i32;
    case INDEX_op_extrh_i64_i32:
        return TCG_TARGET_HAS_extrh_i64_i32;
    case INDEX_op_ext8s_i64:
        return TCG_TARGET_HAS_ext8s_i64;
    case INDEX_op_ext16s_i64:
        return TCG_TARGET_HAS_ext16s_i64;
    case INDEX_op_ext32s_i64:
        return TCG_TARGET_HAS_ext32s_i64;
    case INDEX_op_ext8u_i64:
        return TCG_TARGET_HAS_ext8u_i64;
    case INDEX_op_ext16u_i64:
        return TCG_TARGET_HAS_ext16u_i64;
    case INDEX_op_ext32u_i64:
        return TCG_TARGET_HAS_ext32u_i64;
    case INDEX_op_bswap16_i64:
        return TCG_TARGET_HAS_bswap16_i64;
    case INDEX_op_bswap32_i64:
        return TCG_TARGET_HAS_bswap32_i64;
    case INDEX_op_bswap64_i64:
        return TCG_TARGET_HAS_bswap64_i64;
    case INDEX_op_not_i64:
        return TCG_TARGET_HAS_not_i64;
    case INDEX_op_neg_i64:
        return TCG_TARGET_HAS_neg_i64;
    case INDEX_op_andc_i64:
        return TCG_TARGET_HAS_andc_i64;
    case INDEX_op_orc_i64:
        return TCG_TARGET_HAS_orc_i64;
    case INDEX_op_eqv_i64:
        return TCG_TARGET_HAS_eqv_i64;
    case INDEX_op_nand_i64:
        return TCG_TARGET_HAS_nand_i64;
    case INDEX_op_nor_i64:
        return TCG_TARGET_HAS_nor_i64;
    case INDEX_op_clz_i64:
        return TCG_TARGET_HAS_clz_i64;
    case INDEX_op_ctz_i64:
        return TCG_TARGET_HAS_ctz_i64;
    case INDEX_op_ctpop_i64:
        return TCG_TARGET_HAS_ctpop_i64;
    case INDEX_op_add2_i64:
        return TCG_TARGET_HAS_add2_i64;
    case INDEX_op_sub2_i64:
        return TCG_TARGET_HAS_sub2_i64;
    case INDEX_op_mulu2_i64:
        return TCG_TARGET_HAS_mulu2_i64;
    case INDEX_op_muls2_i64:
        return TCG_TARGET_HAS_muls2_i64;
    case INDEX_op_muluh_i64:
        return TCG_TARGET_HAS_muluh_i64;
    case INDEX_op_mulsh_i64:
        return TCG_TARGET_HAS_mulsh_i64;

    case NB_OPS:
        break;
    }
    g_assert_not_reached();
}

/* Note: we convert the 64 bit args to 32 bit and do some alignment
   and endian swap. Maybe it would be better to do the alignment
   and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
    int i, real_args, nb_rets, pi;
    unsigned sizemask, flags;
    TCGHelperInfo *info;
    TCGOp *op;

    info = g_hash_table_lookup(helper_table, (gpointer)func);
    flags = info->flags;
    sizemask = info->sizemask;

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* We have 64-bit values in one register, but need to pass as two
       separate parameters.  Split them.  */
    int orig_sizemask = sizemask;
    int orig_nargs = nargs;
    TCGv_i64 retl, reth;
    TCGTemp *split_args[MAX_OPC_PARAM];

    retl = NULL;
    reth = NULL;
    if (sizemask != 0) {
        for (i = real_args = 0; i < nargs; ++i) {
            int is_64bit = sizemask & (1 << (i+1)*2);
            if (is_64bit) {
                TCGv_i64 orig = temp_tcgv_i64(args[i]);
                TCGv_i32 h = tcg_temp_new_i32();
                TCGv_i32 l = tcg_temp_new_i32();
                tcg_gen_extr_i64_i32(l, h, orig);
                split_args[real_args++] = tcgv_i32_temp(h);
                split_args[real_args++] = tcgv_i32_temp(l);
            } else {
                split_args[real_args++] = args[i];
            }
        }
        nargs = real_args;
        args = split_args;
        sizemask = 0;
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = temp_tcgv_i64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = tcgv_i64_temp(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    op = tcg_emit_op(INDEX_op_call);

    pi = 0;
    if (ret != NULL) {
#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
        if (orig_sizemask & 1) {
            /* The 32-bit ABI is going to return the 64-bit value in
               the %o0/%o1 register pair.  Prepare for this by using
               two return temporaries, and reassemble below.  */
            retl = tcg_temp_new_i64();
            reth = tcg_temp_new_i64();
            op->args[pi++] = tcgv_i64_arg(reth);
            op->args[pi++] = tcgv_i64_arg(retl);
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#else
        if (TCG_TARGET_REG_BITS < 64 && (sizemask & 1)) {
#ifdef HOST_WORDS_BIGENDIAN
            op->args[pi++] = temp_arg(ret + 1);
            op->args[pi++] = temp_arg(ret);
#else
            op->args[pi++] = temp_arg(ret);
            op->args[pi++] = temp_arg(ret + 1);
#endif
            nb_rets = 2;
        } else {
            op->args[pi++] = temp_arg(ret);
            nb_rets = 1;
        }
#endif
    } else {
        nb_rets = 0;
    }
    TCGOP_CALLO(op) = nb_rets;

    real_args = 0;
    for (i = 0; i < nargs; i++) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (TCG_TARGET_REG_BITS < 64 && is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                op->args[pi++] = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            op->args[pi++] = temp_arg(args[i] + 1);
            op->args[pi++] = temp_arg(args[i]);
#else
            op->args[pi++] = temp_arg(args[i]);
            op->args[pi++] = temp_arg(args[i] + 1);
#endif
            real_args += 2;
            continue;
        }

        op->args[pi++] = temp_arg(args[i]);
        real_args++;
    }
    op->args[pi++] = (uintptr_t)func;
    op->args[pi++] = flags;
    TCGOP_CALLI(op) = real_args;

    /* Make sure the fields didn't overflow.  */
    tcg_debug_assert(TCGOP_CALLI(op) == real_args);
    tcg_debug_assert(pi <= ARRAY_SIZE(op->args));

#if defined(__sparc__) && !defined(__arch64__) \
    && !defined(CONFIG_TCG_INTERPRETER)
    /* Free all of the parts we allocated above.  */
    for (i = real_args = 0; i < orig_nargs; ++i) {
        int is_64bit = orig_sizemask & (1 << (i+1)*2);
        if (is_64bit) {
            tcg_temp_free_internal(args[real_args++]);
            tcg_temp_free_internal(args[real_args++]);
        } else {
            real_args++;
        }
    }
    if (orig_sizemask & 1) {
        /* The 32-bit ABI returned two 32-bit pieces.  Re-assemble them.
           Note that describing these as TCGv_i64 eliminates an unnecessary
           zero-extension that tcg_gen_concat_i32_i64 would create.  */
        tcg_gen_concat32_i64(temp_tcgv_i64(ret), retl, reth);
        tcg_temp_free_i64(retl);
        tcg_temp_free_i64(reth);
    }
#elif defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            tcg_temp_free_internal(args[i]);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
c896fe29 | 1507 | |
8fcd3692 | 1508 | static void tcg_reg_alloc_start(TCGContext *s) |
c896fe29 | 1509 | { |
ac3b8891 | 1510 | int i, n; |
c896fe29 | 1511 | TCGTemp *ts; |
ac3b8891 RH |
1512 | |
1513 | for (i = 0, n = s->nb_globals; i < n; i++) { | |
c896fe29 | 1514 | ts = &s->temps[i]; |
ac3b8891 | 1515 | ts->val_type = (ts->fixed_reg ? TEMP_VAL_REG : TEMP_VAL_MEM); |
c896fe29 | 1516 | } |
ac3b8891 | 1517 | for (n = s->nb_temps; i < n; i++) { |
e8996ee0 | 1518 | ts = &s->temps[i]; |
ac3b8891 | 1519 | ts->val_type = (ts->temp_local ? TEMP_VAL_MEM : TEMP_VAL_DEAD); |
e8996ee0 FB |
1520 | ts->mem_allocated = 0; |
1521 | ts->fixed_reg = 0; | |
1522 | } | |
f8b2f202 RH |
1523 | |
1524 | memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp)); | |
c896fe29 FB |
1525 | } |
1526 | ||
f8b2f202 RH |
1527 | static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size, |
1528 | TCGTemp *ts) | |
c896fe29 | 1529 | { |
1807f4c4 | 1530 | int idx = temp_idx(ts); |
ac56dd48 | 1531 | |
fa477d25 | 1532 | if (ts->temp_global) { |
ac56dd48 | 1533 | pstrcpy(buf, buf_size, ts->name); |
f8b2f202 RH |
1534 | } else if (ts->temp_local) { |
1535 | snprintf(buf, buf_size, "loc%d", idx - s->nb_globals); | |
c896fe29 | 1536 | } else { |
f8b2f202 | 1537 | snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals); |
c896fe29 FB |
1538 | } |
1539 | return buf; | |
1540 | } | |
1541 | ||
43439139 RH |
1542 | static char *tcg_get_arg_str(TCGContext *s, char *buf, |
1543 | int buf_size, TCGArg arg) | |
f8b2f202 | 1544 | { |
43439139 | 1545 | return tcg_get_arg_str_ptr(s, buf, buf_size, arg_temp(arg)); |
f8b2f202 RH |
1546 | } |
1547 | ||
6e085f72 RH |
1548 | /* Find helper name. */ |
1549 | static inline const char *tcg_find_helper(TCGContext *s, uintptr_t val) | |
4dc81f28 | 1550 | { |
6e085f72 | 1551 | const char *ret = NULL; |
619205fd EC |
1552 | if (helper_table) { |
1553 | TCGHelperInfo *info = g_hash_table_lookup(helper_table, (gpointer)val); | |
72866e82 RH |
1554 | if (info) { |
1555 | ret = info->name; | |
1556 | } | |
4dc81f28 | 1557 | } |
6e085f72 | 1558 | return ret; |
4dc81f28 FB |
1559 | } |
1560 | ||
f48f3ede BS |
1561 | static const char * const cond_name[] = |
1562 | { | |
0aed257f RH |
1563 | [TCG_COND_NEVER] = "never", |
1564 | [TCG_COND_ALWAYS] = "always", | |
f48f3ede BS |
1565 | [TCG_COND_EQ] = "eq", |
1566 | [TCG_COND_NE] = "ne", | |
1567 | [TCG_COND_LT] = "lt", | |
1568 | [TCG_COND_GE] = "ge", | |
1569 | [TCG_COND_LE] = "le", | |
1570 | [TCG_COND_GT] = "gt", | |
1571 | [TCG_COND_LTU] = "ltu", | |
1572 | [TCG_COND_GEU] = "geu", | |
1573 | [TCG_COND_LEU] = "leu", | |
1574 | [TCG_COND_GTU] = "gtu" | |
1575 | }; | |
1576 | ||
f713d6ad RH |
1577 | static const char * const ldst_name[] = |
1578 | { | |
1579 | [MO_UB] = "ub", | |
1580 | [MO_SB] = "sb", | |
1581 | [MO_LEUW] = "leuw", | |
1582 | [MO_LESW] = "lesw", | |
1583 | [MO_LEUL] = "leul", | |
1584 | [MO_LESL] = "lesl", | |
1585 | [MO_LEQ] = "leq", | |
1586 | [MO_BEUW] = "beuw", | |
1587 | [MO_BESW] = "besw", | |
1588 | [MO_BEUL] = "beul", | |
1589 | [MO_BESL] = "besl", | |
1590 | [MO_BEQ] = "beq", | |
1591 | }; | |
1592 | ||
1f00b27f SS |
1593 | static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = { |
1594 | #ifdef ALIGNED_ONLY | |
1595 | [MO_UNALN >> MO_ASHIFT] = "un+", | |
1596 | [MO_ALIGN >> MO_ASHIFT] = "", | |
1597 | #else | |
1598 | [MO_UNALN >> MO_ASHIFT] = "", | |
1599 | [MO_ALIGN >> MO_ASHIFT] = "al+", | |
1600 | #endif | |
1601 | [MO_ALIGN_2 >> MO_ASHIFT] = "al2+", | |
1602 | [MO_ALIGN_4 >> MO_ASHIFT] = "al4+", | |
1603 | [MO_ALIGN_8 >> MO_ASHIFT] = "al8+", | |
1604 | [MO_ALIGN_16 >> MO_ASHIFT] = "al16+", | |
1605 | [MO_ALIGN_32 >> MO_ASHIFT] = "al32+", | |
1606 | [MO_ALIGN_64 >> MO_ASHIFT] = "al64+", | |
1607 | }; | |
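/*
 * Editor's note -- illustrative sketch only, not part of tcg.c.  It shows how
 * a memory-op string such as "al4+leul,1" is composed for the dump below: an
 * alignment prefix from alignment_name[], a size/endian name from ldst_name[],
 * and the mmu index.  The bit layout and table contents here are invented for
 * the example; the real encoding comes from the MO_* constants in tcg.h.
 */
#include <stdio.h>

enum { EX_ALIGN_SHIFT = 4, EX_SIZE_MASK = 0xf };   /* hypothetical layout */

static const char *const ex_align[] = { "", "al+", "al2+", "al4+" };
static const char *const ex_size[]  = { "ub", "leuw", "leul", "leq" };

static void ex_print_memop(unsigned memop, unsigned mmu_idx)
{
    printf("%s%s,%u\n",
           ex_align[memop >> EX_ALIGN_SHIFT],   /* alignment prefix */
           ex_size[memop & EX_SIZE_MASK],       /* size/endian name */
           mmu_idx);
}

/* ex_print_memop((3 << EX_ALIGN_SHIFT) | 2, 1) prints "al4+leul,1". */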
1608 | ||
eeacee4d | 1609 | void tcg_dump_ops(TCGContext *s) |
c896fe29 | 1610 | { |
c896fe29 | 1611 | char buf[128]; |
c45cb8bb | 1612 | TCGOp *op; |
c45cb8bb | 1613 | |
15fa08f8 | 1614 | QTAILQ_FOREACH(op, &s->ops, link) { |
c45cb8bb RH |
1615 | int i, k, nb_oargs, nb_iargs, nb_cargs; |
1616 | const TCGOpDef *def; | |
c45cb8bb | 1617 | TCGOpcode c; |
bdfb460e | 1618 | int col = 0; |
c896fe29 | 1619 | |
c45cb8bb | 1620 | c = op->opc; |
c896fe29 | 1621 | def = &tcg_op_defs[c]; |
c45cb8bb | 1622 | |
765b842a | 1623 | if (c == INDEX_op_insn_start) { |
15fa08f8 | 1624 | col += qemu_log("\n ----"); |
9aef40ed RH |
1625 | |
1626 | for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { | |
1627 | target_ulong a; | |
7e4597d7 | 1628 | #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS |
efee3746 | 1629 | a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]); |
7e4597d7 | 1630 | #else |
efee3746 | 1631 | a = op->args[i]; |
7e4597d7 | 1632 | #endif |
bdfb460e | 1633 | col += qemu_log(" " TARGET_FMT_lx, a); |
eeacee4d | 1634 | } |
7e4597d7 | 1635 | } else if (c == INDEX_op_call) { |
c896fe29 | 1636 | /* variable number of arguments */ |
cd9090aa RH |
1637 | nb_oargs = TCGOP_CALLO(op); |
1638 | nb_iargs = TCGOP_CALLI(op); | |
c896fe29 | 1639 | nb_cargs = def->nb_cargs; |
c896fe29 | 1640 | |
cf066674 | 1641 | /* function name, flags, out args */ |
bdfb460e | 1642 | col += qemu_log(" %s %s,$0x%" TCG_PRIlx ",$%d", def->name, |
efee3746 RH |
1643 | tcg_find_helper(s, op->args[nb_oargs + nb_iargs]), |
1644 | op->args[nb_oargs + nb_iargs + 1], nb_oargs); | |
cf066674 | 1645 | for (i = 0; i < nb_oargs; i++) { |
43439139 RH |
1646 | col += qemu_log(",%s", tcg_get_arg_str(s, buf, sizeof(buf), |
1647 | op->args[i])); | |
b03cce8e | 1648 | } |
cf066674 | 1649 | for (i = 0; i < nb_iargs; i++) { |
efee3746 | 1650 | TCGArg arg = op->args[nb_oargs + i]; |
cf066674 RH |
1651 | const char *t = "<dummy>"; |
1652 | if (arg != TCG_CALL_DUMMY_ARG) { | |
43439139 | 1653 | t = tcg_get_arg_str(s, buf, sizeof(buf), arg); |
eeacee4d | 1654 | } |
bdfb460e | 1655 | col += qemu_log(",%s", t); |
e8996ee0 | 1656 | } |
b03cce8e | 1657 | } else { |
bdfb460e | 1658 | col += qemu_log(" %s ", def->name); |
c45cb8bb RH |
1659 | |
1660 | nb_oargs = def->nb_oargs; | |
1661 | nb_iargs = def->nb_iargs; | |
1662 | nb_cargs = def->nb_cargs; | |
1663 | ||
b03cce8e | 1664 | k = 0; |
c45cb8bb | 1665 | for (i = 0; i < nb_oargs; i++) { |
eeacee4d | 1666 | if (k != 0) { |
bdfb460e | 1667 | col += qemu_log(","); |
eeacee4d | 1668 | } |
43439139 RH |
1669 | col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf), |
1670 | op->args[k++])); | |
b03cce8e | 1671 | } |
c45cb8bb | 1672 | for (i = 0; i < nb_iargs; i++) { |
eeacee4d | 1673 | if (k != 0) { |
bdfb460e | 1674 | col += qemu_log(","); |
eeacee4d | 1675 | } |
43439139 RH |
1676 | col += qemu_log("%s", tcg_get_arg_str(s, buf, sizeof(buf), |
1677 | op->args[k++])); | |
b03cce8e | 1678 | } |
be210acb RH |
1679 | switch (c) { |
1680 | case INDEX_op_brcond_i32: | |
be210acb | 1681 | case INDEX_op_setcond_i32: |
ffc5ea09 | 1682 | case INDEX_op_movcond_i32: |
ffc5ea09 | 1683 | case INDEX_op_brcond2_i32: |
be210acb | 1684 | case INDEX_op_setcond2_i32: |
ffc5ea09 | 1685 | case INDEX_op_brcond_i64: |
be210acb | 1686 | case INDEX_op_setcond_i64: |
ffc5ea09 | 1687 | case INDEX_op_movcond_i64: |
efee3746 RH |
1688 | if (op->args[k] < ARRAY_SIZE(cond_name) |
1689 | && cond_name[op->args[k]]) { | |
1690 | col += qemu_log(",%s", cond_name[op->args[k++]]); | |
eeacee4d | 1691 | } else { |
efee3746 | 1692 | col += qemu_log(",$0x%" TCG_PRIlx, op->args[k++]); |
eeacee4d | 1693 | } |
f48f3ede | 1694 | i = 1; |
be210acb | 1695 | break; |
f713d6ad RH |
1696 | case INDEX_op_qemu_ld_i32: |
1697 | case INDEX_op_qemu_st_i32: | |
1698 | case INDEX_op_qemu_ld_i64: | |
1699 | case INDEX_op_qemu_st_i64: | |
59227d5d | 1700 | { |
efee3746 | 1701 | TCGMemOpIdx oi = op->args[k++]; |
59227d5d RH |
1702 | TCGMemOp op = get_memop(oi); |
1703 | unsigned ix = get_mmuidx(oi); | |
1704 | ||
59c4b7e8 | 1705 | if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) { |
bdfb460e | 1706 | col += qemu_log(",$0x%x,%u", op, ix); |
59c4b7e8 | 1707 | } else { |
1f00b27f SS |
1708 | const char *s_al, *s_op; |
1709 | s_al = alignment_name[(op & MO_AMASK) >> MO_ASHIFT]; | |
59c4b7e8 | 1710 | s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)]; |
bdfb460e | 1711 | col += qemu_log(",%s%s,%u", s_al, s_op, ix); |
59227d5d RH |
1712 | } |
1713 | i = 1; | |
f713d6ad | 1714 | } |
f713d6ad | 1715 | break; |
be210acb | 1716 | default: |
f48f3ede | 1717 | i = 0; |
be210acb RH |
1718 | break; |
1719 | } | |
51e3972c RH |
1720 | switch (c) { |
1721 | case INDEX_op_set_label: | |
1722 | case INDEX_op_br: | |
1723 | case INDEX_op_brcond_i32: | |
1724 | case INDEX_op_brcond_i64: | |
1725 | case INDEX_op_brcond2_i32: | |
efee3746 RH |
1726 | col += qemu_log("%s$L%d", k ? "," : "", |
1727 | arg_label(op->args[k])->id); | |
51e3972c RH |
1728 | i++, k++; |
1729 | break; | |
1730 | default: | |
1731 | break; | |
1732 | } | |
1733 | for (; i < nb_cargs; i++, k++) { | |
efee3746 | 1734 | col += qemu_log("%s$0x%" TCG_PRIlx, k ? "," : "", op->args[k]); |
bdfb460e RH |
1735 | } |
1736 | } | |
1737 | if (op->life) { | |
1738 | unsigned life = op->life; | |
1739 | ||
1740 | for (; col < 48; ++col) { | |
1741 | putc(' ', qemu_logfile); | |
1742 | } | |
1743 | ||
1744 | if (life & (SYNC_ARG * 3)) { | |
1745 | qemu_log(" sync:"); | |
1746 | for (i = 0; i < 2; ++i) { | |
1747 | if (life & (SYNC_ARG << i)) { | |
1748 | qemu_log(" %d", i); | |
1749 | } | |
1750 | } | |
1751 | } | |
1752 | life /= DEAD_ARG; | |
1753 | if (life) { | |
1754 | qemu_log(" dead:"); | |
1755 | for (i = 0; life; ++i, life >>= 1) { | |
1756 | if (life & 1) { | |
1757 | qemu_log(" %d", i); | |
1758 | } | |
1759 | } | |
b03cce8e | 1760 | } |
c896fe29 | 1761 | } |
eeacee4d | 1762 | qemu_log("\n"); |
c896fe29 FB |
1763 | } |
1764 | } | |
1765 | ||
1766 | /* we give more priority to constraints with fewer registers */ | |
1767 | static int get_constraint_priority(const TCGOpDef *def, int k) | |
1768 | { | |
1769 | const TCGArgConstraint *arg_ct; | |
1770 | ||
1771 | int i, n; | |
1772 | arg_ct = &def->args_ct[k]; | |
1773 | if (arg_ct->ct & TCG_CT_ALIAS) { | |
1774 | /* an alias is equivalent to a single register */ | |
1775 | n = 1; | |
1776 | } else { | |
1777 | if (!(arg_ct->ct & TCG_CT_REG)) | |
1778 | return 0; | |
1779 | n = 0; | |
1780 | for(i = 0; i < TCG_TARGET_NB_REGS; i++) { | |
1781 | if (tcg_regset_test_reg(arg_ct->u.regs, i)) | |
1782 | n++; | |
1783 | } | |
1784 | } | |
1785 | return TCG_TARGET_NB_REGS - n + 1; | |
1786 | } | |
1787 | ||
1788 | /* sort from highest priority to lowest */ | |
1789 | static void sort_constraints(TCGOpDef *def, int start, int n) | |
1790 | { | |
1791 | int i, j, p1, p2, tmp; | |
1792 | ||
1793 | for(i = 0; i < n; i++) | |
1794 | def->sorted_args[start + i] = start + i; | |
1795 | if (n <= 1) | |
1796 | return; | |
1797 | for(i = 0; i < n - 1; i++) { | |
1798 | for(j = i + 1; j < n; j++) { | |
1799 | p1 = get_constraint_priority(def, def->sorted_args[start + i]); | |
1800 | p2 = get_constraint_priority(def, def->sorted_args[start + j]); | |
1801 | if (p1 < p2) { | |
1802 | tmp = def->sorted_args[start + i]; | |
1803 | def->sorted_args[start + i] = def->sorted_args[start + j]; | |
1804 | def->sorted_args[start + j] = tmp; | |
1805 | } | |
1806 | } | |
1807 | } | |
1808 | } | |
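/*
 * Editor's note -- standalone sketch, not part of tcg.c.  It mirrors the idea
 * behind get_constraint_priority() and sort_constraints() above: an argument
 * whose constraint admits fewer registers gets a higher priority value and is
 * therefore allocated first.  Register sets are plain bitmasks here.
 */
#include <stdio.h>

#define EX_NB_REGS 8

static int ex_priority(unsigned regset)
{
    int n = __builtin_popcount(regset & ((1u << EX_NB_REGS) - 1));
    return EX_NB_REGS - n + 1;      /* fewer candidate regs -> larger value */
}

static void ex_demo(void)
{
    unsigned any_reg    = 0xff;     /* "r": all eight registers allowed */
    unsigned single_reg = 0x04;     /* an argument pinned to one register */

    /* The pinned argument sorts before the unconstrained one: prints "8 > 1". */
    printf("%d > %d\n", ex_priority(single_reg), ex_priority(any_reg));
}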
1809 | ||
f69d277e | 1810 | static void process_op_defs(TCGContext *s) |
c896fe29 | 1811 | { |
a9751609 | 1812 | TCGOpcode op; |
c896fe29 | 1813 | |
f69d277e RH |
1814 | for (op = 0; op < NB_OPS; op++) { |
1815 | TCGOpDef *def = &tcg_op_defs[op]; | |
1816 | const TCGTargetOpDef *tdefs; | |
069ea736 RH |
1817 | TCGType type; |
1818 | int i, nb_args; | |
f69d277e RH |
1819 | |
1820 | if (def->flags & TCG_OPF_NOT_PRESENT) { | |
1821 | continue; | |
1822 | } | |
1823 | ||
c896fe29 | 1824 | nb_args = def->nb_iargs + def->nb_oargs; |
f69d277e RH |
1825 | if (nb_args == 0) { |
1826 | continue; | |
1827 | } | |
1828 | ||
1829 | tdefs = tcg_target_op_def(op); | |
1830 | /* Missing TCGTargetOpDef entry. */ | |
1831 | tcg_debug_assert(tdefs != NULL); | |
1832 | ||
069ea736 | 1833 | type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32); |
f69d277e RH |
1834 | for (i = 0; i < nb_args; i++) { |
1835 | const char *ct_str = tdefs->args_ct_str[i]; | |
1836 | /* Incomplete TCGTargetOpDef entry. */ | |
eabb7b91 | 1837 | tcg_debug_assert(ct_str != NULL); |
f69d277e | 1838 | |
ccb1bb66 | 1839 | def->args_ct[i].u.regs = 0; |
c896fe29 | 1840 | def->args_ct[i].ct = 0; |
17280ff4 RH |
1841 | while (*ct_str != '\0') { |
1842 | switch(*ct_str) { | |
1843 | case '0' ... '9': | |
1844 | { | |
1845 | int oarg = *ct_str - '0'; | |
1846 | tcg_debug_assert(ct_str == tdefs->args_ct_str[i]); | |
1847 | tcg_debug_assert(oarg < def->nb_oargs); | |
1848 | tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG); | |
1849 | /* TCG_CT_ALIAS is for the output arguments. | |
1850 | The input is tagged with TCG_CT_IALIAS. */ | |
1851 | def->args_ct[i] = def->args_ct[oarg]; | |
1852 | def->args_ct[oarg].ct |= TCG_CT_ALIAS; | |
1853 | def->args_ct[oarg].alias_index = i; | |
1854 | def->args_ct[i].ct |= TCG_CT_IALIAS; | |
1855 | def->args_ct[i].alias_index = oarg; | |
c896fe29 | 1856 | } |
17280ff4 RH |
1857 | ct_str++; |
1858 | break; | |
1859 | case '&': | |
1860 | def->args_ct[i].ct |= TCG_CT_NEWREG; | |
1861 | ct_str++; | |
1862 | break; | |
1863 | case 'i': | |
1864 | def->args_ct[i].ct |= TCG_CT_CONST; | |
1865 | ct_str++; | |
1866 | break; | |
1867 | default: | |
1868 | ct_str = target_parse_constraint(&def->args_ct[i], | |
1869 | ct_str, type); | |
1870 | /* Typo in TCGTargetOpDef constraint. */ | |
1871 | tcg_debug_assert(ct_str != NULL); | |
c896fe29 FB |
1872 | } |
1873 | } | |
1874 | } | |
1875 | ||
c68aaa18 | 1876 | /* TCGTargetOpDef entry with too much information? */ |
eabb7b91 | 1877 | tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL); |
c68aaa18 | 1878 | |
c896fe29 FB |
1879 | /* sort the constraints (XXX: this is just a heuristic) */ | |
1880 | sort_constraints(def, 0, def->nb_oargs); | |
1881 | sort_constraints(def, def->nb_oargs, def->nb_iargs); | |
a9751609 | 1882 | } |
c896fe29 FB |
1883 | } |
1884 | ||
0c627cdc RH |
1885 | void tcg_op_remove(TCGContext *s, TCGOp *op) |
1886 | { | |
15fa08f8 RH |
1887 | QTAILQ_REMOVE(&s->ops, op, link); |
1888 | QTAILQ_INSERT_TAIL(&s->free_ops, op, link); | |
0c627cdc RH |
1889 | |
1890 | #ifdef CONFIG_PROFILER | |
c3fac113 | 1891 | atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1); |
0c627cdc RH |
1892 | #endif |
1893 | } | |
1894 | ||
15fa08f8 | 1895 | static TCGOp *tcg_op_alloc(TCGOpcode opc) |
5a18407f | 1896 | { |
15fa08f8 RH |
1897 | TCGContext *s = tcg_ctx; |
1898 | TCGOp *op; | |
5a18407f | 1899 | |
15fa08f8 RH |
1900 | if (likely(QTAILQ_EMPTY(&s->free_ops))) { |
1901 | op = tcg_malloc(sizeof(TCGOp)); | |
1902 | } else { | |
1903 | op = QTAILQ_FIRST(&s->free_ops); | |
1904 | QTAILQ_REMOVE(&s->free_ops, op, link); | |
1905 | } | |
1906 | memset(op, 0, offsetof(TCGOp, link)); | |
1907 | op->opc = opc; | |
5a18407f | 1908 | |
15fa08f8 RH |
1909 | return op; |
1910 | } | |
1911 | ||
1912 | TCGOp *tcg_emit_op(TCGOpcode opc) | |
1913 | { | |
1914 | TCGOp *op = tcg_op_alloc(opc); | |
1915 | QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link); | |
1916 | return op; | |
1917 | } | |
5a18407f | 1918 | |
15fa08f8 RH |
1919 | TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op, |
1920 | TCGOpcode opc, int nargs) | |
1921 | { | |
1922 | TCGOp *new_op = tcg_op_alloc(opc); | |
1923 | QTAILQ_INSERT_BEFORE(old_op, new_op, link); | |
5a18407f RH |
1924 | return new_op; |
1925 | } | |
1926 | ||
1927 | TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op, | |
1928 | TCGOpcode opc, int nargs) | |
1929 | { | |
15fa08f8 RH |
1930 | TCGOp *new_op = tcg_op_alloc(opc); |
1931 | QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link); | |
5a18407f RH |
1932 | return new_op; |
1933 | } | |
1934 | ||
c70fbf0a RH |
1935 | #define TS_DEAD 1 |
1936 | #define TS_MEM 2 | |
1937 | ||
5a18407f RH |
1938 | #define IS_DEAD_ARG(n) (arg_life & (DEAD_ARG << (n))) |
1939 | #define NEED_SYNC_ARG(n) (arg_life & (SYNC_ARG << (n))) | |
1940 | ||
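/*
 * Editor's note -- illustrative sketch, not part of tcg.c.  It shows how a
 * TCGLifeData word is consumed: the low bits (SYNC_ARG << i) say output i must
 * be synced to memory, and the bits from DEAD_ARG upward say argument i makes
 * its last appearance at this op.  The EX_* values are assumed from the dump
 * logic earlier in this file (two sync bits, then one dead bit per argument).
 */
#include <stdio.h>

#define EX_SYNC_ARG 1
#define EX_DEAD_ARG 4

static void ex_dump_life(unsigned life)
{
    int i;

    if (life & (EX_SYNC_ARG * 3)) {          /* outputs 0 and 1 */
        printf(" sync:");
        for (i = 0; i < 2; ++i) {
            if (life & (EX_SYNC_ARG << i)) {
                printf(" %d", i);
            }
        }
    }
    life /= EX_DEAD_ARG;                     /* shift down to the dead bits */
    if (life) {
        printf(" dead:");
        for (i = 0; life; ++i, life >>= 1) {
            if (life & 1) {
                printf(" %d", i);
            }
        }
    }
    printf("\n");
}

/* ex_dump_life(EX_SYNC_ARG | (EX_DEAD_ARG << 1)) prints " sync: 0 dead: 1". */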
9c43b68d AJ |
1941 | /* liveness analysis: end of function: all temps are dead, and globals |
1942 | should be in memory. */ | |
b83eabea | 1943 | static void tcg_la_func_end(TCGContext *s) |
c896fe29 | 1944 | { |
b83eabea RH |
1945 | int ng = s->nb_globals; |
1946 | int nt = s->nb_temps; | |
1947 | int i; | |
1948 | ||
1949 | for (i = 0; i < ng; ++i) { | |
1950 | s->temps[i].state = TS_DEAD | TS_MEM; | |
1951 | } | |
1952 | for (i = ng; i < nt; ++i) { | |
1953 | s->temps[i].state = TS_DEAD; | |
1954 | } | |
c896fe29 FB |
1955 | } |
1956 | ||
9c43b68d AJ |
1957 | /* liveness analysis: end of basic block: all temps are dead, globals |
1958 | and local temps should be in memory. */ | |
b83eabea | 1959 | static void tcg_la_bb_end(TCGContext *s) |
641d5fbe | 1960 | { |
b83eabea RH |
1961 | int ng = s->nb_globals; |
1962 | int nt = s->nb_temps; | |
1963 | int i; | |
641d5fbe | 1964 | |
b83eabea RH |
1965 | for (i = 0; i < ng; ++i) { |
1966 | s->temps[i].state = TS_DEAD | TS_MEM; | |
1967 | } | |
1968 | for (i = ng; i < nt; ++i) { | |
1969 | s->temps[i].state = (s->temps[i].temp_local | |
1970 | ? TS_DEAD | TS_MEM | |
1971 | : TS_DEAD); | |
641d5fbe FB |
1972 | } |
1973 | } | |
1974 | ||
a1b3c48d | 1975 | /* Liveness analysis: update the opc_arg_life array to tell if a |
c896fe29 FB |
1976 | given input argument is dead. Instructions updating dead |
1977 | temporaries are removed. */ | |
b83eabea | 1978 | static void liveness_pass_1(TCGContext *s) |
c896fe29 | 1979 | { |
c70fbf0a | 1980 | int nb_globals = s->nb_globals; |
15fa08f8 | 1981 | TCGOp *op, *op_prev; |
a1b3c48d | 1982 | |
b83eabea | 1983 | tcg_la_func_end(s); |
c896fe29 | 1984 | |
15fa08f8 | 1985 | QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, TCGOpHead, link, op_prev) { |
c45cb8bb RH |
1986 | int i, nb_iargs, nb_oargs; |
1987 | TCGOpcode opc_new, opc_new2; | |
1988 | bool have_opc_new2; | |
a1b3c48d | 1989 | TCGLifeData arg_life = 0; |
b83eabea | 1990 | TCGTemp *arg_ts; |
c45cb8bb RH |
1991 | TCGOpcode opc = op->opc; |
1992 | const TCGOpDef *def = &tcg_op_defs[opc]; | |
1993 | ||
c45cb8bb | 1994 | switch (opc) { |
c896fe29 | 1995 | case INDEX_op_call: |
c6e113f5 FB |
1996 | { |
1997 | int call_flags; | |
c896fe29 | 1998 | |
cd9090aa RH |
1999 | nb_oargs = TCGOP_CALLO(op); |
2000 | nb_iargs = TCGOP_CALLI(op); | |
efee3746 | 2001 | call_flags = op->args[nb_oargs + nb_iargs + 1]; |
c6e113f5 | 2002 | |
c45cb8bb | 2003 | /* pure functions can be removed if their result is unused */ |
78505279 | 2004 | if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) { |
cf066674 | 2005 | for (i = 0; i < nb_oargs; i++) { |
b83eabea RH |
2006 | arg_ts = arg_temp(op->args[i]); |
2007 | if (arg_ts->state != TS_DEAD) { | |
c6e113f5 | 2008 | goto do_not_remove_call; |
9c43b68d | 2009 | } |
c6e113f5 | 2010 | } |
c45cb8bb | 2011 | goto do_remove; |
c6e113f5 FB |
2012 | } else { |
2013 | do_not_remove_call: | |
c896fe29 | 2014 | |
c6e113f5 | 2015 | /* output args are dead */ |
cf066674 | 2016 | for (i = 0; i < nb_oargs; i++) { |
b83eabea RH |
2017 | arg_ts = arg_temp(op->args[i]); |
2018 | if (arg_ts->state & TS_DEAD) { | |
a1b3c48d | 2019 | arg_life |= DEAD_ARG << i; |
6b64b624 | 2020 | } |
b83eabea | 2021 | if (arg_ts->state & TS_MEM) { |
a1b3c48d | 2022 | arg_life |= SYNC_ARG << i; |
9c43b68d | 2023 | } |
b83eabea | 2024 | arg_ts->state = TS_DEAD; |
c6e113f5 | 2025 | } |
78505279 | 2026 | |
78505279 AJ |
2027 | if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS | |
2028 | TCG_CALL_NO_READ_GLOBALS))) { | |
9c43b68d | 2029 | /* globals should go back to memory */ |
b83eabea RH |
2030 | for (i = 0; i < nb_globals; i++) { |
2031 | s->temps[i].state = TS_DEAD | TS_MEM; | |
2032 | } | |
c70fbf0a RH |
2033 | } else if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) { |
2034 | /* globals should be synced to memory */ | |
2035 | for (i = 0; i < nb_globals; i++) { | |
b83eabea | 2036 | s->temps[i].state |= TS_MEM; |
c70fbf0a | 2037 | } |
b9c18f56 AJ |
2038 | } |
2039 | ||
c19f47bf | 2040 | /* record arguments that die in this helper */ |
cf066674 | 2041 | for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { |
b83eabea RH |
2042 | arg_ts = arg_temp(op->args[i]); |
2043 | if (arg_ts && arg_ts->state & TS_DEAD) { | |
2044 | arg_life |= DEAD_ARG << i; | |
c6e113f5 | 2045 | } |
c6e113f5 | 2046 | } |
67cc32eb | 2047 | /* input arguments are live for preceding opcodes */ |
c70fbf0a | 2048 | for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { |
b83eabea RH |
2049 | arg_ts = arg_temp(op->args[i]); |
2050 | if (arg_ts) { | |
2051 | arg_ts->state &= ~TS_DEAD; | |
c70fbf0a | 2052 | } |
c19f47bf | 2053 | } |
c896fe29 | 2054 | } |
c896fe29 | 2055 | } |
c896fe29 | 2056 | break; |
765b842a | 2057 | case INDEX_op_insn_start: |
c896fe29 | 2058 | break; |
5ff9d6a4 | 2059 | case INDEX_op_discard: |
5ff9d6a4 | 2060 | /* mark the temporary as dead */ |
b83eabea | 2061 | arg_temp(op->args[0])->state = TS_DEAD; |
5ff9d6a4 | 2062 | break; |
1305c451 RH |
2063 | |
2064 | case INDEX_op_add2_i32: | |
c45cb8bb | 2065 | opc_new = INDEX_op_add_i32; |
f1fae40c | 2066 | goto do_addsub2; |
1305c451 | 2067 | case INDEX_op_sub2_i32: |
c45cb8bb | 2068 | opc_new = INDEX_op_sub_i32; |
f1fae40c RH |
2069 | goto do_addsub2; |
2070 | case INDEX_op_add2_i64: | |
c45cb8bb | 2071 | opc_new = INDEX_op_add_i64; |
f1fae40c RH |
2072 | goto do_addsub2; |
2073 | case INDEX_op_sub2_i64: | |
c45cb8bb | 2074 | opc_new = INDEX_op_sub_i64; |
f1fae40c | 2075 | do_addsub2: |
1305c451 RH |
2076 | nb_iargs = 4; |
2077 | nb_oargs = 2; | |
2078 | /* Test if the high part of the operation is dead, but not | |
2079 | the low part. The result can be optimized to a simple | |
2080 | add or sub. This often happens for an x86_64 guest when the |
2081 | CPU mode is set to 32 bit. */ | |
b83eabea RH |
2082 | if (arg_temp(op->args[1])->state == TS_DEAD) { |
2083 | if (arg_temp(op->args[0])->state == TS_DEAD) { | |
1305c451 RH |
2084 | goto do_remove; |
2085 | } | |
c45cb8bb RH |
2086 | /* Replace the opcode and adjust the args in place, |
2087 | leaving 3 unused args at the end. */ | |
2088 | op->opc = opc = opc_new; | |
efee3746 RH |
2089 | op->args[1] = op->args[2]; |
2090 | op->args[2] = op->args[4]; | |
1305c451 RH |
2091 | /* Fall through and mark the single-word operation live. */ |
2092 | nb_iargs = 2; | |
2093 | nb_oargs = 1; | |
2094 | } | |
2095 | goto do_not_remove; | |
2096 | ||
1414968a | 2097 | case INDEX_op_mulu2_i32: |
c45cb8bb RH |
2098 | opc_new = INDEX_op_mul_i32; |
2099 | opc_new2 = INDEX_op_muluh_i32; | |
2100 | have_opc_new2 = TCG_TARGET_HAS_muluh_i32; | |
03271524 | 2101 | goto do_mul2; |
f1fae40c | 2102 | case INDEX_op_muls2_i32: |
c45cb8bb RH |
2103 | opc_new = INDEX_op_mul_i32; |
2104 | opc_new2 = INDEX_op_mulsh_i32; | |
2105 | have_opc_new2 = TCG_TARGET_HAS_mulsh_i32; | |
f1fae40c RH |
2106 | goto do_mul2; |
2107 | case INDEX_op_mulu2_i64: | |
c45cb8bb RH |
2108 | opc_new = INDEX_op_mul_i64; |
2109 | opc_new2 = INDEX_op_muluh_i64; | |
2110 | have_opc_new2 = TCG_TARGET_HAS_muluh_i64; | |
03271524 | 2111 | goto do_mul2; |
f1fae40c | 2112 | case INDEX_op_muls2_i64: |
c45cb8bb RH |
2113 | opc_new = INDEX_op_mul_i64; |
2114 | opc_new2 = INDEX_op_mulsh_i64; | |
2115 | have_opc_new2 = TCG_TARGET_HAS_mulsh_i64; | |
03271524 | 2116 | goto do_mul2; |
f1fae40c | 2117 | do_mul2: |
1414968a RH |
2118 | nb_iargs = 2; |
2119 | nb_oargs = 2; | |
b83eabea RH |
2120 | if (arg_temp(op->args[1])->state == TS_DEAD) { |
2121 | if (arg_temp(op->args[0])->state == TS_DEAD) { | |
03271524 | 2122 | /* Both parts of the operation are dead. */ |
1414968a RH |
2123 | goto do_remove; |
2124 | } | |
03271524 | 2125 | /* The high part of the operation is dead; generate the low. */ |
c45cb8bb | 2126 | op->opc = opc = opc_new; |
efee3746 RH |
2127 | op->args[1] = op->args[2]; |
2128 | op->args[2] = op->args[3]; | |
b83eabea | 2129 | } else if (arg_temp(op->args[0])->state == TS_DEAD && have_opc_new2) { |
c45cb8bb RH |
2130 | /* The low part of the operation is dead; generate the high. */ |
2131 | op->opc = opc = opc_new2; | |
efee3746 RH |
2132 | op->args[0] = op->args[1]; |
2133 | op->args[1] = op->args[2]; | |
2134 | op->args[2] = op->args[3]; | |
03271524 RH |
2135 | } else { |
2136 | goto do_not_remove; | |
1414968a | 2137 | } |
03271524 RH |
2138 | /* Mark the single-word operation live. */ |
2139 | nb_oargs = 1; | |
1414968a RH |
2140 | goto do_not_remove; |
2141 | ||
c896fe29 | 2142 | default: |
1305c451 | 2143 | /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */ |
49516bc0 AJ |
2144 | nb_iargs = def->nb_iargs; |
2145 | nb_oargs = def->nb_oargs; | |
c896fe29 | 2146 | |
49516bc0 AJ |
2147 | /* Test if the operation can be removed because all |
2148 | its outputs are dead. We assume that nb_oargs == 0 | |
2149 | implies side effects. */ | |
2150 | if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) { | |
c45cb8bb | 2151 | for (i = 0; i < nb_oargs; i++) { |
b83eabea | 2152 | if (arg_temp(op->args[i])->state != TS_DEAD) { |
49516bc0 | 2153 | goto do_not_remove; |
9c43b68d | 2154 | } |
49516bc0 | 2155 | } |
1305c451 | 2156 | do_remove: |
0c627cdc | 2157 | tcg_op_remove(s, op); |
49516bc0 AJ |
2158 | } else { |
2159 | do_not_remove: | |
49516bc0 | 2160 | /* output args are dead */ |
c45cb8bb | 2161 | for (i = 0; i < nb_oargs; i++) { |
b83eabea RH |
2162 | arg_ts = arg_temp(op->args[i]); |
2163 | if (arg_ts->state & TS_DEAD) { | |
a1b3c48d | 2164 | arg_life |= DEAD_ARG << i; |
6b64b624 | 2165 | } |
b83eabea | 2166 | if (arg_ts->state & TS_MEM) { |
a1b3c48d | 2167 | arg_life |= SYNC_ARG << i; |
9c43b68d | 2168 | } |
b83eabea | 2169 | arg_ts->state = TS_DEAD; |
49516bc0 AJ |
2170 | } |
2171 | ||
2172 | /* if end of basic block, update */ | |
2173 | if (def->flags & TCG_OPF_BB_END) { | |
b83eabea | 2174 | tcg_la_bb_end(s); |
3d5c5f87 AJ |
2175 | } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { |
2176 | /* globals should be synced to memory */ | |
c70fbf0a | 2177 | for (i = 0; i < nb_globals; i++) { |
b83eabea | 2178 | s->temps[i].state |= TS_MEM; |
c70fbf0a | 2179 | } |
49516bc0 AJ |
2180 | } |
2181 | ||
c19f47bf | 2182 | /* record arguments that die in this opcode */ |
c45cb8bb | 2183 | for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { |
b83eabea RH |
2184 | arg_ts = arg_temp(op->args[i]); |
2185 | if (arg_ts->state & TS_DEAD) { | |
a1b3c48d | 2186 | arg_life |= DEAD_ARG << i; |
c896fe29 | 2187 | } |
c19f47bf | 2188 | } |
67cc32eb | 2189 | /* input arguments are live for preceding opcodes */ |
c19f47bf | 2190 | for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { |
b83eabea | 2191 | arg_temp(op->args[i])->state &= ~TS_DEAD; |
c896fe29 | 2192 | } |
c896fe29 FB |
2193 | } |
2194 | break; | |
2195 | } | |
bee158cb | 2196 | op->life = arg_life; |
1ff0a2c5 | 2197 | } |
c896fe29 | 2198 | } |
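/*
 * Editor's note -- a minimal sketch of the add2/sub2 simplification performed
 * by liveness_pass_1() above, written outside of TCG: when the high half of a
 * double-word result is dead (but the low half is not), only the low-half
 * addition needs to be kept.  Plain C stands in for the opcode rewrite.
 */
#include <stdint.h>
#include <stdbool.h>

static uint32_t ex_add2_low(uint32_t al, uint32_t ah,
                            uint32_t bl, uint32_t bh,
                            bool high_result_dead, uint32_t *out_high)
{
    uint32_t lo = al + bl;

    if (!high_result_dead) {
        /* Full double-word form: propagate the carry into the high half. */
        *out_high = ah + bh + (lo < al);
    }
    /* Otherwise the op degenerates to a single-word add: only 'lo' matters. */
    return lo;
}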
c896fe29 | 2199 | |
5a18407f | 2200 | /* Liveness analysis: Convert indirect regs to direct temporaries. */ |
b83eabea | 2201 | static bool liveness_pass_2(TCGContext *s) |
5a18407f RH |
2202 | { |
2203 | int nb_globals = s->nb_globals; | |
15fa08f8 | 2204 | int nb_temps, i; |
5a18407f | 2205 | bool changes = false; |
15fa08f8 | 2206 | TCGOp *op, *op_next; |
5a18407f | 2207 | |
5a18407f RH |
2208 | /* Create a temporary for each indirect global. */ |
2209 | for (i = 0; i < nb_globals; ++i) { | |
2210 | TCGTemp *its = &s->temps[i]; | |
2211 | if (its->indirect_reg) { | |
2212 | TCGTemp *dts = tcg_temp_alloc(s); | |
2213 | dts->type = its->type; | |
2214 | dts->base_type = its->base_type; | |
b83eabea RH |
2215 | its->state_ptr = dts; |
2216 | } else { | |
2217 | its->state_ptr = NULL; | |
5a18407f | 2218 | } |
b83eabea RH |
2219 | /* All globals begin dead. */ |
2220 | its->state = TS_DEAD; | |
2221 | } | |
2222 | for (nb_temps = s->nb_temps; i < nb_temps; ++i) { | |
2223 | TCGTemp *its = &s->temps[i]; | |
2224 | its->state_ptr = NULL; | |
2225 | its->state = TS_DEAD; | |
5a18407f | 2226 | } |
5a18407f | 2227 | |
15fa08f8 | 2228 | QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { |
5a18407f RH |
2229 | TCGOpcode opc = op->opc; |
2230 | const TCGOpDef *def = &tcg_op_defs[opc]; | |
2231 | TCGLifeData arg_life = op->life; | |
2232 | int nb_iargs, nb_oargs, call_flags; | |
b83eabea | 2233 | TCGTemp *arg_ts, *dir_ts; |
5a18407f | 2234 | |
5a18407f | 2235 | if (opc == INDEX_op_call) { |
cd9090aa RH |
2236 | nb_oargs = TCGOP_CALLO(op); |
2237 | nb_iargs = TCGOP_CALLI(op); | |
efee3746 | 2238 | call_flags = op->args[nb_oargs + nb_iargs + 1]; |
5a18407f RH |
2239 | } else { |
2240 | nb_iargs = def->nb_iargs; | |
2241 | nb_oargs = def->nb_oargs; | |
2242 | ||
2243 | /* Set flags similar to how calls require. */ | |
2244 | if (def->flags & TCG_OPF_BB_END) { | |
2245 | /* Like writing globals: save_globals */ | |
2246 | call_flags = 0; | |
2247 | } else if (def->flags & TCG_OPF_SIDE_EFFECTS) { | |
2248 | /* Like reading globals: sync_globals */ | |
2249 | call_flags = TCG_CALL_NO_WRITE_GLOBALS; | |
2250 | } else { | |
2251 | /* No effect on globals. */ | |
2252 | call_flags = (TCG_CALL_NO_READ_GLOBALS | | |
2253 | TCG_CALL_NO_WRITE_GLOBALS); | |
2254 | } | |
2255 | } | |
2256 | ||
2257 | /* Make sure that input arguments are available. */ | |
2258 | for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { | |
b83eabea RH |
2259 | arg_ts = arg_temp(op->args[i]); |
2260 | if (arg_ts) { | |
2261 | dir_ts = arg_ts->state_ptr; | |
2262 | if (dir_ts && arg_ts->state == TS_DEAD) { | |
2263 | TCGOpcode lopc = (arg_ts->type == TCG_TYPE_I32 | |
5a18407f RH |
2264 | ? INDEX_op_ld_i32 |
2265 | : INDEX_op_ld_i64); | |
2266 | TCGOp *lop = tcg_op_insert_before(s, op, lopc, 3); | |
5a18407f | 2267 | |
b83eabea RH |
2268 | lop->args[0] = temp_arg(dir_ts); |
2269 | lop->args[1] = temp_arg(arg_ts->mem_base); | |
2270 | lop->args[2] = arg_ts->mem_offset; | |
5a18407f RH |
2271 | |
2272 | /* Loaded, but synced with memory. */ | |
b83eabea | 2273 | arg_ts->state = TS_MEM; |
5a18407f RH |
2274 | } |
2275 | } | |
2276 | } | |
2277 | ||
2278 | /* Perform input replacement, and mark inputs that became dead. | |
2279 | No action is required except keeping temp_state up to date | |
2280 | so that we reload when needed. */ | |
2281 | for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { | |
b83eabea RH |
2282 | arg_ts = arg_temp(op->args[i]); |
2283 | if (arg_ts) { | |
2284 | dir_ts = arg_ts->state_ptr; | |
2285 | if (dir_ts) { | |
2286 | op->args[i] = temp_arg(dir_ts); | |
5a18407f RH |
2287 | changes = true; |
2288 | if (IS_DEAD_ARG(i)) { | |
b83eabea | 2289 | arg_ts->state = TS_DEAD; |
5a18407f RH |
2290 | } |
2291 | } | |
2292 | } | |
2293 | } | |
2294 | ||
2295 | /* Liveness analysis should ensure that the following are | |
2296 | all correct, for call sites and basic block end points. */ | |
2297 | if (call_flags & TCG_CALL_NO_READ_GLOBALS) { | |
2298 | /* Nothing to do */ | |
2299 | } else if (call_flags & TCG_CALL_NO_WRITE_GLOBALS) { | |
2300 | for (i = 0; i < nb_globals; ++i) { | |
2301 | /* Liveness should see that globals are synced back, | |
2302 | that is, either TS_DEAD or TS_MEM. */ | |
b83eabea RH |
2303 | arg_ts = &s->temps[i]; |
2304 | tcg_debug_assert(arg_ts->state_ptr == 0 | |
2305 | || arg_ts->state != 0); | |
5a18407f RH |
2306 | } |
2307 | } else { | |
2308 | for (i = 0; i < nb_globals; ++i) { | |
2309 | /* Liveness should see that globals are saved back, | |
2310 | that is, TS_DEAD, waiting to be reloaded. */ | |
b83eabea RH |
2311 | arg_ts = &s->temps[i]; |
2312 | tcg_debug_assert(arg_ts->state_ptr == 0 | |
2313 | || arg_ts->state == TS_DEAD); | |
5a18407f RH |
2314 | } |
2315 | } | |
2316 | ||
2317 | /* Outputs become available. */ | |
2318 | for (i = 0; i < nb_oargs; i++) { | |
b83eabea RH |
2319 | arg_ts = arg_temp(op->args[i]); |
2320 | dir_ts = arg_ts->state_ptr; | |
2321 | if (!dir_ts) { | |
5a18407f RH |
2322 | continue; |
2323 | } | |
b83eabea | 2324 | op->args[i] = temp_arg(dir_ts); |
5a18407f RH |
2325 | changes = true; |
2326 | ||
2327 | /* The output is now live and modified. */ | |
b83eabea | 2328 | arg_ts->state = 0; |
5a18407f RH |
2329 | |
2330 | /* Sync outputs upon their last write. */ | |
2331 | if (NEED_SYNC_ARG(i)) { | |
b83eabea | 2332 | TCGOpcode sopc = (arg_ts->type == TCG_TYPE_I32 |
5a18407f RH |
2333 | ? INDEX_op_st_i32 |
2334 | : INDEX_op_st_i64); | |
2335 | TCGOp *sop = tcg_op_insert_after(s, op, sopc, 3); | |
5a18407f | 2336 | |
b83eabea RH |
2337 | sop->args[0] = temp_arg(dir_ts); |
2338 | sop->args[1] = temp_arg(arg_ts->mem_base); | |
2339 | sop->args[2] = arg_ts->mem_offset; | |
5a18407f | 2340 | |
b83eabea | 2341 | arg_ts->state = TS_MEM; |
5a18407f RH |
2342 | } |
2343 | /* Drop outputs that are dead. */ | |
2344 | if (IS_DEAD_ARG(i)) { | |
b83eabea | 2345 | arg_ts->state = TS_DEAD; |
5a18407f RH |
2346 | } |
2347 | } | |
2348 | } | |
2349 | ||
2350 | return changes; | |
2351 | } | |
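/*
 * Editor's note -- conceptual sketch only, not part of tcg.c.  liveness_pass_2()
 * above gives each indirect global a direct shadow temporary, loads the shadow
 * before the first read after it went dead, and stores it back when the value
 * must be synced.  The little cache below imitates that pattern for a single
 * value in plain C.
 */
#include <stdbool.h>

struct ex_shadow {
    int *backing;      /* the "indirect" home location (e.g. in env)  */
    int  value;        /* the direct temporary standing in for it     */
    bool loaded;       /* roughly: not TS_DEAD                        */
    bool dirty;        /* needs a store back at the next sync point   */
};

static int ex_read(struct ex_shadow *s)
{
    if (!s->loaded) {              /* the inserted ld before first use */
        s->value = *s->backing;
        s->loaded = true;
    }
    return s->value;
}

static void ex_write(struct ex_shadow *s, int v)
{
    s->value = v;                  /* writes go to the shadow temp */
    s->loaded = true;
    s->dirty = true;
}

static void ex_sync(struct ex_shadow *s)
{
    if (s->dirty) {                /* the inserted st at the sync point */
        *s->backing = s->value;
        s->dirty = false;
    }
}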
2352 | ||
8d8fdbae | 2353 | #ifdef CONFIG_DEBUG_TCG |
c896fe29 FB |
2354 | static void dump_regs(TCGContext *s) |
2355 | { | |
2356 | TCGTemp *ts; | |
2357 | int i; | |
2358 | char buf[64]; | |
2359 | ||
2360 | for(i = 0; i < s->nb_temps; i++) { | |
2361 | ts = &s->temps[i]; | |
43439139 | 2362 | printf(" %10s: ", tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts)); |
c896fe29 FB |
2363 | switch(ts->val_type) { |
2364 | case TEMP_VAL_REG: | |
2365 | printf("%s", tcg_target_reg_names[ts->reg]); | |
2366 | break; | |
2367 | case TEMP_VAL_MEM: | |
b3a62939 RH |
2368 | printf("%d(%s)", (int)ts->mem_offset, |
2369 | tcg_target_reg_names[ts->mem_base->reg]); | |
c896fe29 FB |
2370 | break; |
2371 | case TEMP_VAL_CONST: | |
2372 | printf("$0x%" TCG_PRIlx, ts->val); | |
2373 | break; | |
2374 | case TEMP_VAL_DEAD: | |
2375 | printf("D"); | |
2376 | break; | |
2377 | default: | |
2378 | printf("???"); | |
2379 | break; | |
2380 | } | |
2381 | printf("\n"); | |
2382 | } | |
2383 | ||
2384 | for(i = 0; i < TCG_TARGET_NB_REGS; i++) { | |
f8b2f202 | 2385 | if (s->reg_to_temp[i] != NULL) { |
c896fe29 FB |
2386 | printf("%s: %s\n", |
2387 | tcg_target_reg_names[i], | |
f8b2f202 | 2388 | tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i])); |
c896fe29 FB |
2389 | } |
2390 | } | |
2391 | } | |
2392 | ||
2393 | static void check_regs(TCGContext *s) | |
2394 | { | |
869938ae | 2395 | int reg; |
b6638662 | 2396 | int k; |
c896fe29 FB |
2397 | TCGTemp *ts; |
2398 | char buf[64]; | |
2399 | ||
f8b2f202 RH |
2400 | for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { |
2401 | ts = s->reg_to_temp[reg]; | |
2402 | if (ts != NULL) { | |
2403 | if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) { | |
c896fe29 FB |
2404 | printf("Inconsistency for register %s:\n", |
2405 | tcg_target_reg_names[reg]); | |
b03cce8e | 2406 | goto fail; |
c896fe29 FB |
2407 | } |
2408 | } | |
2409 | } | |
f8b2f202 | 2410 | for (k = 0; k < s->nb_temps; k++) { |
c896fe29 | 2411 | ts = &s->temps[k]; |
f8b2f202 RH |
2412 | if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg |
2413 | && s->reg_to_temp[ts->reg] != ts) { | |
2414 | printf("Inconsistency for temp %s:\n", | |
2415 | tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts)); | |
b03cce8e | 2416 | fail: |
f8b2f202 RH |
2417 | printf("reg state:\n"); |
2418 | dump_regs(s); | |
2419 | tcg_abort(); | |
c896fe29 FB |
2420 | } |
2421 | } | |
2422 | } | |
2423 | #endif | |
2424 | ||
2272e4a7 | 2425 | static void temp_allocate_frame(TCGContext *s, TCGTemp *ts) |
c896fe29 | 2426 | { |
9b9c37c3 RH |
2427 | #if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64) |
2428 | /* Sparc64 stack is accessed with offset of 2047 */ | |
b591dc59 BS |
2429 | s->current_frame_offset = (s->current_frame_offset + |
2430 | (tcg_target_long)sizeof(tcg_target_long) - 1) & | |
2431 | ~(sizeof(tcg_target_long) - 1); | |
f44c9960 | 2432 | #endif |
b591dc59 BS |
2433 | if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) > |
2434 | s->frame_end) { | |
5ff9d6a4 | 2435 | tcg_abort(); |
b591dc59 | 2436 | } |
c896fe29 | 2437 | ts->mem_offset = s->current_frame_offset; |
b3a62939 | 2438 | ts->mem_base = s->frame_temp; |
c896fe29 | 2439 | ts->mem_allocated = 1; |
e2c6d1b4 | 2440 | s->current_frame_offset += sizeof(tcg_target_long); |
c896fe29 FB |
2441 | } |
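/*
 * Editor's note -- standalone sketch, not part of tcg.c.  It shows the
 * alignment arithmetic temp_allocate_frame() uses above: the running frame
 * offset is rounded up to the natural slot size before a new spill slot is
 * handed out.  The 8-byte slot size is an example value standing in for
 * sizeof(tcg_target_long).
 */
#define EX_SLOT_SIZE 8L

static long ex_alloc_slot(long *frame_offset)
{
    /* Round the current offset up to a multiple of the slot size. */
    long off = (*frame_offset + EX_SLOT_SIZE - 1) & ~(EX_SLOT_SIZE - 1);

    *frame_offset = off + EX_SLOT_SIZE;   /* next slot starts past this one */
    return off;                           /* e.g. an offset of 13 yields 16 */
}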
2442 | ||
b3915dbb RH |
2443 | static void temp_load(TCGContext *, TCGTemp *, TCGRegSet, TCGRegSet); |
2444 | ||
59d7c14e RH |
2445 | /* Mark a temporary as free or dead. If 'free_or_dead' is negative, |
2446 | mark it free; otherwise mark it dead. */ | |
2447 | static void temp_free_or_dead(TCGContext *s, TCGTemp *ts, int free_or_dead) | |
7f6ceedf | 2448 | { |
59d7c14e RH |
2449 | if (ts->fixed_reg) { |
2450 | return; | |
2451 | } | |
2452 | if (ts->val_type == TEMP_VAL_REG) { | |
2453 | s->reg_to_temp[ts->reg] = NULL; | |
2454 | } | |
2455 | ts->val_type = (free_or_dead < 0 | |
2456 | || ts->temp_local | |
fa477d25 | 2457 | || ts->temp_global |
59d7c14e RH |
2458 | ? TEMP_VAL_MEM : TEMP_VAL_DEAD); |
2459 | } | |
7f6ceedf | 2460 | |
59d7c14e RH |
2461 | /* Mark a temporary as dead. */ |
2462 | static inline void temp_dead(TCGContext *s, TCGTemp *ts) | |
2463 | { | |
2464 | temp_free_or_dead(s, ts, 1); | |
2465 | } | |
2466 | ||
2467 | /* Sync a temporary to memory. 'allocated_regs' is used in case a temporary | |
2468 | register needs to be allocated to store a constant. If 'free_or_dead' |
2469 | is non-zero, subsequently release the temporary; if it is positive, the | |
2470 | temp is dead; if it is negative, the temp is free. */ | |
2471 | static void temp_sync(TCGContext *s, TCGTemp *ts, | |
2472 | TCGRegSet allocated_regs, int free_or_dead) | |
2473 | { | |
2474 | if (ts->fixed_reg) { | |
2475 | return; | |
2476 | } | |
2477 | if (!ts->mem_coherent) { | |
7f6ceedf | 2478 | if (!ts->mem_allocated) { |
2272e4a7 | 2479 | temp_allocate_frame(s, ts); |
59d7c14e | 2480 | } |
59d7c14e RH |
2481 | switch (ts->val_type) { |
2482 | case TEMP_VAL_CONST: | |
2483 | /* If we're going to free the temp immediately, then we won't | |
2484 | require it later in a register, so attempt to store the | |
2485 | constant to memory directly. */ | |
2486 | if (free_or_dead | |
2487 | && tcg_out_sti(s, ts->type, ts->val, | |
2488 | ts->mem_base->reg, ts->mem_offset)) { | |
2489 | break; | |
2490 | } | |
2491 | temp_load(s, ts, tcg_target_available_regs[ts->type], | |
2492 | allocated_regs); | |
2493 | /* fallthrough */ | |
2494 | ||
2495 | case TEMP_VAL_REG: | |
2496 | tcg_out_st(s, ts->type, ts->reg, | |
2497 | ts->mem_base->reg, ts->mem_offset); | |
2498 | break; | |
2499 | ||
2500 | case TEMP_VAL_MEM: | |
2501 | break; | |
2502 | ||
2503 | case TEMP_VAL_DEAD: | |
2504 | default: | |
2505 | tcg_abort(); | |
2506 | } | |
2507 | ts->mem_coherent = 1; | |
2508 | } | |
2509 | if (free_or_dead) { | |
2510 | temp_free_or_dead(s, ts, free_or_dead); | |
7f6ceedf | 2511 | } |
7f6ceedf AJ |
2512 | } |
2513 | ||
c896fe29 | 2514 | /* free register 'reg' by spilling the corresponding temporary if necessary */ |
b3915dbb | 2515 | static void tcg_reg_free(TCGContext *s, TCGReg reg, TCGRegSet allocated_regs) |
c896fe29 | 2516 | { |
f8b2f202 | 2517 | TCGTemp *ts = s->reg_to_temp[reg]; |
f8b2f202 | 2518 | if (ts != NULL) { |
59d7c14e | 2519 | temp_sync(s, ts, allocated_regs, -1); |
c896fe29 FB |
2520 | } |
2521 | } | |
2522 | ||
2523 | /* Allocate a register from 'desired_regs' that is not in 'allocated_regs'. */ | |
b3915dbb | 2524 | static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet desired_regs, |
91478cef | 2525 | TCGRegSet allocated_regs, bool rev) |
c896fe29 | 2526 | { |
91478cef RH |
2527 | int i, n = ARRAY_SIZE(tcg_target_reg_alloc_order); |
2528 | const int *order; | |
b6638662 | 2529 | TCGReg reg; |
c896fe29 FB |
2530 | TCGRegSet reg_ct; |
2531 | ||
07ddf036 | 2532 | reg_ct = desired_regs & ~allocated_regs; |
91478cef | 2533 | order = rev ? indirect_reg_alloc_order : tcg_target_reg_alloc_order; |
c896fe29 FB |
2534 | |
2535 | /* first try free registers */ | |
91478cef RH |
2536 | for(i = 0; i < n; i++) { |
2537 | reg = order[i]; | |
f8b2f202 | 2538 | if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL) |
c896fe29 FB |
2539 | return reg; |
2540 | } | |
2541 | ||
2542 | /* XXX: do better spill choice */ | |
91478cef RH |
2543 | for(i = 0; i < n; i++) { |
2544 | reg = order[i]; | |
c896fe29 | 2545 | if (tcg_regset_test_reg(reg_ct, reg)) { |
b3915dbb | 2546 | tcg_reg_free(s, reg, allocated_regs); |
c896fe29 FB |
2547 | return reg; |
2548 | } | |
2549 | } | |
2550 | ||
2551 | tcg_abort(); | |
2552 | } | |
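/*
 * Editor's note -- illustrative sketch, not part of tcg.c.  tcg_reg_alloc()
 * above scans the preferred allocation order twice: first for a register that
 * is both allowed and currently free, then for any allowed register, whose
 * current contents would then be spilled.  Here register sets are bitmasks,
 * "in use" is a second bitmask, and the spill itself is left out.
 */
static int ex_pick_reg(unsigned allowed, unsigned in_use,
                       const int *order, int n_regs, int *need_spill)
{
    int i;

    for (i = 0; i < n_regs; i++) {          /* pass 1: a free register */
        int r = order[i];
        if ((allowed & (1u << r)) && !(in_use & (1u << r))) {
            *need_spill = 0;
            return r;
        }
    }
    for (i = 0; i < n_regs; i++) {          /* pass 2: a spill candidate */
        int r = order[i];
        if (allowed & (1u << r)) {
            *need_spill = 1;
            return r;
        }
    }
    return -1;                              /* no register satisfies the constraint */
}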
2553 | ||
40ae5c62 RH |
2554 | /* Make sure the temporary is in a register. If needed, allocate the register |
2555 | from DESIRED while avoiding ALLOCATED. */ | |
2556 | static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs, | |
2557 | TCGRegSet allocated_regs) | |
2558 | { | |
2559 | TCGReg reg; | |
2560 | ||
2561 | switch (ts->val_type) { | |
2562 | case TEMP_VAL_REG: | |
2563 | return; | |
2564 | case TEMP_VAL_CONST: | |
91478cef | 2565 | reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base); |
40ae5c62 RH |
2566 | tcg_out_movi(s, ts->type, reg, ts->val); |
2567 | ts->mem_coherent = 0; | |
2568 | break; | |
2569 | case TEMP_VAL_MEM: | |
91478cef | 2570 | reg = tcg_reg_alloc(s, desired_regs, allocated_regs, ts->indirect_base); |
40ae5c62 RH |
2571 | tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset); |
2572 | ts->mem_coherent = 1; | |
2573 | break; | |
2574 | case TEMP_VAL_DEAD: | |
2575 | default: | |
2576 | tcg_abort(); | |
2577 | } | |
2578 | ts->reg = reg; | |
2579 | ts->val_type = TEMP_VAL_REG; | |
2580 | s->reg_to_temp[reg] = ts; | |
2581 | } | |
2582 | ||
59d7c14e RH |
2583 | /* Save a temporary to memory. 'allocated_regs' is used in case a |
2584 | temporary register needs to be allocated to store a constant. */ |
2585 | static void temp_save(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs) | |
1ad80729 | 2586 | { |
5a18407f RH |
2587 | /* The liveness analysis already ensures that globals are back |
2588 | in memory. Keep a tcg_debug_assert for safety. */ |
2589 | tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg); | |
1ad80729 AJ |
2590 | } |
2591 | ||
9814dd27 | 2592 | /* save globals to their canonical location and assume they can be |
e8996ee0 FB |
2593 | modified by the following code. 'allocated_regs' is used in case a |
2594 | temporary register needs to be allocated to store a constant. */ |
2595 | static void save_globals(TCGContext *s, TCGRegSet allocated_regs) | |
c896fe29 | 2596 | { |
ac3b8891 | 2597 | int i, n; |
c896fe29 | 2598 | |
ac3b8891 | 2599 | for (i = 0, n = s->nb_globals; i < n; i++) { |
b13eb728 | 2600 | temp_save(s, &s->temps[i], allocated_regs); |
c896fe29 | 2601 | } |
e5097dc8 FB |
2602 | } |
2603 | ||
3d5c5f87 AJ |
2604 | /* sync globals to their canonical location and assume they can be |
2605 | read by the following code. 'allocated_regs' is used in case a | |
2606 | temporary register needs to be allocated to store a constant. */ |
2607 | static void sync_globals(TCGContext *s, TCGRegSet allocated_regs) | |
2608 | { | |
ac3b8891 | 2609 | int i, n; |
3d5c5f87 | 2610 | |
ac3b8891 | 2611 | for (i = 0, n = s->nb_globals; i < n; i++) { |
12b9b11a | 2612 | TCGTemp *ts = &s->temps[i]; |
5a18407f RH |
2613 | tcg_debug_assert(ts->val_type != TEMP_VAL_REG |
2614 | || ts->fixed_reg | |
2615 | || ts->mem_coherent); | |
3d5c5f87 AJ |
2616 | } |
2617 | } | |
2618 | ||
e5097dc8 | 2619 | /* at the end of a basic block, we assume all temporaries are dead and |
e8996ee0 FB |
2620 | all globals are stored at their canonical location. */ |
2621 | static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs) | |
e5097dc8 | 2622 | { |
e5097dc8 FB |
2623 | int i; |
2624 | ||
b13eb728 RH |
2625 | for (i = s->nb_globals; i < s->nb_temps; i++) { |
2626 | TCGTemp *ts = &s->temps[i]; | |
641d5fbe | 2627 | if (ts->temp_local) { |
b13eb728 | 2628 | temp_save(s, ts, allocated_regs); |
641d5fbe | 2629 | } else { |
5a18407f RH |
2630 | /* The liveness analysis already ensures that temps are dead. |
2631 | Keep a tcg_debug_assert for safety. */ |
2632 | tcg_debug_assert(ts->val_type == TEMP_VAL_DEAD); | |
c896fe29 FB |
2633 | } |
2634 | } | |
e8996ee0 FB |
2635 | |
2636 | save_globals(s, allocated_regs); | |
c896fe29 FB |
2637 | } |
2638 | ||
0fe4fca4 PB |
2639 | static void tcg_reg_alloc_do_movi(TCGContext *s, TCGTemp *ots, |
2640 | tcg_target_ulong val, TCGLifeData arg_life) | |
e8996ee0 | 2641 | { |
e8996ee0 | 2642 | if (ots->fixed_reg) { |
59d7c14e | 2643 | /* For fixed registers, we do not do any constant propagation. */ |
e8996ee0 | 2644 | tcg_out_movi(s, ots->type, ots->reg, val); |
59d7c14e | 2645 | return; |
e8996ee0 | 2646 | } |
59d7c14e RH |
2647 | |
2648 | /* The movi is not explicitly generated here. */ | |
2649 | if (ots->val_type == TEMP_VAL_REG) { | |
2650 | s->reg_to_temp[ots->reg] = NULL; | |
ec7a869d | 2651 | } |
59d7c14e RH |
2652 | ots->val_type = TEMP_VAL_CONST; |
2653 | ots->val = val; | |
2654 | ots->mem_coherent = 0; | |
2655 | if (NEED_SYNC_ARG(0)) { | |
2656 | temp_sync(s, ots, s->reserved_regs, IS_DEAD_ARG(0)); | |
2657 | } else if (IS_DEAD_ARG(0)) { | |
f8bf00f1 | 2658 | temp_dead(s, ots); |
4c4e1ab2 | 2659 | } |
e8996ee0 FB |
2660 | } |
2661 | ||
dd186292 | 2662 | static void tcg_reg_alloc_movi(TCGContext *s, const TCGOp *op) |
0fe4fca4 | 2663 | { |
43439139 | 2664 | TCGTemp *ots = arg_temp(op->args[0]); |
dd186292 | 2665 | tcg_target_ulong val = op->args[1]; |
0fe4fca4 | 2666 | |
dd186292 | 2667 | tcg_reg_alloc_do_movi(s, ots, val, op->life); |
0fe4fca4 PB |
2668 | } |
2669 | ||
dd186292 | 2670 | static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op) |
c896fe29 | 2671 | { |
dd186292 | 2672 | const TCGLifeData arg_life = op->life; |
c29c1d7e | 2673 | TCGRegSet allocated_regs; |
c896fe29 | 2674 | TCGTemp *ts, *ots; |
450445d5 | 2675 | TCGType otype, itype; |
c896fe29 | 2676 | |
d21369f5 | 2677 | allocated_regs = s->reserved_regs; |
43439139 RH |
2678 | ots = arg_temp(op->args[0]); |
2679 | ts = arg_temp(op->args[1]); | |
450445d5 RH |
2680 | |
2681 | /* Note that otype != itype for no-op truncation. */ | |
2682 | otype = ots->type; | |
2683 | itype = ts->type; | |
c29c1d7e | 2684 | |
0fe4fca4 PB |
2685 | if (ts->val_type == TEMP_VAL_CONST) { |
2686 | /* propagate constant or generate sti */ | |
2687 | tcg_target_ulong val = ts->val; | |
2688 | if (IS_DEAD_ARG(1)) { | |
2689 | temp_dead(s, ts); | |
2690 | } | |
2691 | tcg_reg_alloc_do_movi(s, ots, val, arg_life); | |
2692 | return; | |
2693 | } | |
2694 | ||
2695 | /* If the source value is in memory, we're going to be forced |
2696 | to have it in a register in order to perform the copy. Copy |
2697 | the SOURCE value into its own register first; that way we |
2698 | don't have to reload SOURCE the next time it is used. */ | |
2699 | if (ts->val_type == TEMP_VAL_MEM) { | |
40ae5c62 | 2700 | temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs); |
c29c1d7e | 2701 | } |
c896fe29 | 2702 | |
0fe4fca4 | 2703 | tcg_debug_assert(ts->val_type == TEMP_VAL_REG); |
c29c1d7e AJ |
2704 | if (IS_DEAD_ARG(0) && !ots->fixed_reg) { |
2705 | /* mov to a non-saved dead register makes no sense (even with | |
2706 | liveness analysis disabled). */ | |
eabb7b91 | 2707 | tcg_debug_assert(NEED_SYNC_ARG(0)); |
c29c1d7e | 2708 | if (!ots->mem_allocated) { |
2272e4a7 | 2709 | temp_allocate_frame(s, ots); |
c29c1d7e | 2710 | } |
b3a62939 | 2711 | tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset); |
c29c1d7e | 2712 | if (IS_DEAD_ARG(1)) { |
f8bf00f1 | 2713 | temp_dead(s, ts); |
c29c1d7e | 2714 | } |
f8bf00f1 | 2715 | temp_dead(s, ots); |
c29c1d7e | 2716 | } else { |
866cb6cb | 2717 | if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) { |
c896fe29 | 2718 | /* the mov can be suppressed */ |
c29c1d7e | 2719 | if (ots->val_type == TEMP_VAL_REG) { |
f8b2f202 | 2720 | s->reg_to_temp[ots->reg] = NULL; |
c29c1d7e AJ |
2721 | } |
2722 | ots->reg = ts->reg; | |
f8bf00f1 | 2723 | temp_dead(s, ts); |
c896fe29 | 2724 | } else { |
c29c1d7e AJ |
2725 | if (ots->val_type != TEMP_VAL_REG) { |
2726 | /* When allocating a new register, make sure not to spill the |
2727 | input one. */ | |
2728 | tcg_regset_set_reg(allocated_regs, ts->reg); | |
450445d5 | 2729 | ots->reg = tcg_reg_alloc(s, tcg_target_available_regs[otype], |
91478cef | 2730 | allocated_regs, ots->indirect_base); |
c896fe29 | 2731 | } |
450445d5 | 2732 | tcg_out_mov(s, otype, ots->reg, ts->reg); |
c896fe29 | 2733 | } |
c29c1d7e AJ |
2734 | ots->val_type = TEMP_VAL_REG; |
2735 | ots->mem_coherent = 0; | |
f8b2f202 | 2736 | s->reg_to_temp[ots->reg] = ots; |
c29c1d7e | 2737 | if (NEED_SYNC_ARG(0)) { |
59d7c14e | 2738 | temp_sync(s, ots, allocated_regs, 0); |
c896fe29 | 2739 | } |
ec7a869d | 2740 | } |
c896fe29 FB |
2741 | } |
2742 | ||
dd186292 | 2743 | static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) |
c896fe29 | 2744 | { |
dd186292 RH |
2745 | const TCGLifeData arg_life = op->life; |
2746 | const TCGOpDef * const def = &tcg_op_defs[op->opc]; | |
82790a87 RH |
2747 | TCGRegSet i_allocated_regs; |
2748 | TCGRegSet o_allocated_regs; | |
b6638662 RH |
2749 | int i, k, nb_iargs, nb_oargs; |
2750 | TCGReg reg; | |
c896fe29 FB |
2751 | TCGArg arg; |
2752 | const TCGArgConstraint *arg_ct; | |
2753 | TCGTemp *ts; | |
2754 | TCGArg new_args[TCG_MAX_OP_ARGS]; | |
2755 | int const_args[TCG_MAX_OP_ARGS]; | |
2756 | ||
2757 | nb_oargs = def->nb_oargs; | |
2758 | nb_iargs = def->nb_iargs; | |
2759 | ||
2760 | /* copy constants */ | |
2761 | memcpy(new_args + nb_oargs + nb_iargs, | |
dd186292 | 2762 | op->args + nb_oargs + nb_iargs, |
c896fe29 FB |
2763 | sizeof(TCGArg) * def->nb_cargs); |
2764 | ||
d21369f5 RH |
2765 | i_allocated_regs = s->reserved_regs; |
2766 | o_allocated_regs = s->reserved_regs; | |
82790a87 | 2767 | |
c896fe29 | 2768 | /* satisfy input constraints */ |
dd186292 | 2769 | for (k = 0; k < nb_iargs; k++) { |
c896fe29 | 2770 | i = def->sorted_args[nb_oargs + k]; |
dd186292 | 2771 | arg = op->args[i]; |
c896fe29 | 2772 | arg_ct = &def->args_ct[i]; |
43439139 | 2773 | ts = arg_temp(arg); |
40ae5c62 RH |
2774 | |
2775 | if (ts->val_type == TEMP_VAL_CONST | |
2776 | && tcg_target_const_match(ts->val, ts->type, arg_ct)) { | |
2777 | /* constant is OK for instruction */ | |
2778 | const_args[i] = 1; | |
2779 | new_args[i] = ts->val; | |
2780 | goto iarg_end; | |
c896fe29 | 2781 | } |
40ae5c62 | 2782 | |
82790a87 | 2783 | temp_load(s, ts, arg_ct->u.regs, i_allocated_regs); |
40ae5c62 | 2784 | |
5ff9d6a4 FB |
2785 | if (arg_ct->ct & TCG_CT_IALIAS) { |
2786 | if (ts->fixed_reg) { | |
2787 | /* if fixed register, we must allocate a new register | |
2788 | if the alias is not the same register */ | |
dd186292 | 2789 | if (arg != op->args[arg_ct->alias_index]) |
5ff9d6a4 FB |
2790 | goto allocate_in_reg; |
2791 | } else { | |
2792 | /* if the input is aliased to an output and if it is | |
2793 | not dead after the instruction, we must allocate | |
2794 | a new register and move it */ | |
866cb6cb | 2795 | if (!IS_DEAD_ARG(i)) { |
5ff9d6a4 | 2796 | goto allocate_in_reg; |
866cb6cb | 2797 | } |
7e1df267 AJ |
2798 | /* check if the current register has already been allocated |
2799 | for another input aliased to an output */ | |
2800 | int k2, i2; | |
2801 | for (k2 = 0 ; k2 < k ; k2++) { | |
2802 | i2 = def->sorted_args[nb_oargs + k2]; | |
2803 | if ((def->args_ct[i2].ct & TCG_CT_IALIAS) && | |
2804 | (new_args[i2] == ts->reg)) { | |
2805 | goto allocate_in_reg; | |
2806 | } | |
2807 | } | |
5ff9d6a4 | 2808 | } |
c896fe29 FB |
2809 | } |
2810 | reg = ts->reg; | |
2811 | if (tcg_regset_test_reg(arg_ct->u.regs, reg)) { | |
2812 | /* nothing to do : the constraint is satisfied */ | |
2813 | } else { | |
2814 | allocate_in_reg: | |
2815 | /* allocate a new register matching the constraint | |
2816 | and move the temporary register into it */ | |
82790a87 | 2817 | reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs, |
91478cef | 2818 | ts->indirect_base); |
3b6dac34 | 2819 | tcg_out_mov(s, ts->type, reg, ts->reg); |
c896fe29 | 2820 | } |
c896fe29 FB |
2821 | new_args[i] = reg; |
2822 | const_args[i] = 0; | |
82790a87 | 2823 | tcg_regset_set_reg(i_allocated_regs, reg); |
c896fe29 FB |
2824 | iarg_end: ; |
2825 | } | |
2826 | ||
a52ad07e AJ |
2827 | /* mark dead temporaries and free the associated registers */ |
2828 | for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { | |
2829 | if (IS_DEAD_ARG(i)) { | |
43439139 | 2830 | temp_dead(s, arg_temp(op->args[i])); |
a52ad07e AJ |
2831 | } |
2832 | } | |
2833 | ||
e8996ee0 | 2834 | if (def->flags & TCG_OPF_BB_END) { |
82790a87 | 2835 | tcg_reg_alloc_bb_end(s, i_allocated_regs); |
e8996ee0 | 2836 | } else { |
e8996ee0 FB |
2837 | if (def->flags & TCG_OPF_CALL_CLOBBER) { |
2838 | /* XXX: permit generic clobber register list ? */ | |
c8074023 RH |
2839 | for (i = 0; i < TCG_TARGET_NB_REGS; i++) { |
2840 | if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) { | |
82790a87 | 2841 | tcg_reg_free(s, i, i_allocated_regs); |
e8996ee0 | 2842 | } |
c896fe29 | 2843 | } |
3d5c5f87 AJ |
2844 | } |
2845 | if (def->flags & TCG_OPF_SIDE_EFFECTS) { | |
2846 | /* sync globals if the op has side effects and might trigger | |
2847 | an exception. */ | |
82790a87 | 2848 | sync_globals(s, i_allocated_regs); |
c896fe29 | 2849 | } |
e8996ee0 FB |
2850 | |
2851 | /* satisfy the output constraints */ | |
e8996ee0 FB |
2852 | for(k = 0; k < nb_oargs; k++) { |
2853 | i = def->sorted_args[k]; | |
dd186292 | 2854 | arg = op->args[i]; |
e8996ee0 | 2855 | arg_ct = &def->args_ct[i]; |
43439139 | 2856 | ts = arg_temp(arg); |
17280ff4 RH |
2857 | if ((arg_ct->ct & TCG_CT_ALIAS) |
2858 | && !const_args[arg_ct->alias_index]) { | |
e8996ee0 | 2859 | reg = new_args[arg_ct->alias_index]; |
82790a87 RH |
2860 | } else if (arg_ct->ct & TCG_CT_NEWREG) { |
2861 | reg = tcg_reg_alloc(s, arg_ct->u.regs, | |
2862 | i_allocated_regs | o_allocated_regs, | |
2863 | ts->indirect_base); | |
e8996ee0 FB |
2864 | } else { |
2865 | /* if fixed register, we try to use it */ | |
2866 | reg = ts->reg; | |
2867 | if (ts->fixed_reg && | |
2868 | tcg_regset_test_reg(arg_ct->u.regs, reg)) { | |
2869 | goto oarg_end; | |
2870 | } | |
82790a87 | 2871 | reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs, |
91478cef | 2872 | ts->indirect_base); |
c896fe29 | 2873 | } |
82790a87 | 2874 | tcg_regset_set_reg(o_allocated_regs, reg); |
e8996ee0 FB |
2875 | /* if a fixed register is used, then a move will be done afterwards */ |
2876 | if (!ts->fixed_reg) { | |
ec7a869d | 2877 | if (ts->val_type == TEMP_VAL_REG) { |
f8b2f202 | 2878 | s->reg_to_temp[ts->reg] = NULL; |
ec7a869d AJ |
2879 | } |
2880 | ts->val_type = TEMP_VAL_REG; | |
2881 | ts->reg = reg; | |
2882 | /* temp value is modified, so the value kept in memory is | |
2883 | potentially not the same */ | |
2884 | ts->mem_coherent = 0; | |
f8b2f202 | 2885 | s->reg_to_temp[reg] = ts; |
e8996ee0 FB |
2886 | } |
2887 | oarg_end: | |
2888 | new_args[i] = reg; | |
c896fe29 | 2889 | } |
c896fe29 FB |
2890 | } |
2891 | ||
c896fe29 | 2892 | /* emit instruction */ |
dd186292 | 2893 | tcg_out_op(s, op->opc, new_args, const_args); |
c896fe29 FB |
2894 | |
2895 | /* move the outputs in the correct register if needed */ | |
2896 | for(i = 0; i < nb_oargs; i++) { | |
43439139 | 2897 | ts = arg_temp(op->args[i]); |
c896fe29 FB |
2898 | reg = new_args[i]; |
2899 | if (ts->fixed_reg && ts->reg != reg) { | |
3b6dac34 | 2900 | tcg_out_mov(s, ts->type, ts->reg, reg); |
c896fe29 | 2901 | } |
ec7a869d | 2902 | if (NEED_SYNC_ARG(i)) { |
82790a87 | 2903 | temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i)); |
59d7c14e | 2904 | } else if (IS_DEAD_ARG(i)) { |
f8bf00f1 | 2905 | temp_dead(s, ts); |
ec7a869d | 2906 | } |
c896fe29 FB |
2907 | } |
2908 | } | |
2909 | ||
b03cce8e FB |
2910 | #ifdef TCG_TARGET_STACK_GROWSUP |
2911 | #define STACK_DIR(x) (-(x)) | |
2912 | #else | |
2913 | #define STACK_DIR(x) (x) | |
2914 | #endif | |
2915 | ||
dd186292 | 2916 | static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op) |
c896fe29 | 2917 | { |
cd9090aa RH |
2918 | const int nb_oargs = TCGOP_CALLO(op); |
2919 | const int nb_iargs = TCGOP_CALLI(op); | |
dd186292 | 2920 | const TCGLifeData arg_life = op->life; |
b6638662 RH |
2921 | int flags, nb_regs, i; |
2922 | TCGReg reg; | |
cf066674 | 2923 | TCGArg arg; |
c896fe29 | 2924 | TCGTemp *ts; |
d3452f1f RH |
2925 | intptr_t stack_offset; |
2926 | size_t call_stack_size; | |
cf066674 RH |
2927 | tcg_insn_unit *func_addr; |
2928 | int allocate_args; | |
c896fe29 | 2929 | TCGRegSet allocated_regs; |
c896fe29 | 2930 | |
dd186292 RH |
2931 | func_addr = (tcg_insn_unit *)(intptr_t)op->args[nb_oargs + nb_iargs]; |
2932 | flags = op->args[nb_oargs + nb_iargs + 1]; | |
c896fe29 | 2933 | |
6e17d0c5 | 2934 | nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs); |
c45cb8bb RH |
2935 | if (nb_regs > nb_iargs) { |
2936 | nb_regs = nb_iargs; | |
cf066674 | 2937 | } |
c896fe29 FB |
2938 | |
2939 | /* assign stack slots first */ | |
c45cb8bb | 2940 | call_stack_size = (nb_iargs - nb_regs) * sizeof(tcg_target_long); |
c896fe29 FB |
2941 | call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) & |
2942 | ~(TCG_TARGET_STACK_ALIGN - 1); | |
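/* Editorial note: the arithmetic above is the usual round-up-to-alignment
   idiom: adding (TCG_TARGET_STACK_ALIGN - 1) and clearing the low bits
   yields the next multiple of TCG_TARGET_STACK_ALIGN. */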
b03cce8e FB |
2943 | allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE); |
2944 | if (allocate_args) { | |
345649c0 BS |
2945 | /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed, |
2946 | preallocate call stack */ | |
2947 | tcg_abort(); | |
b03cce8e | 2948 | } |
39cf05d3 FB |
2949 | |
2950 | stack_offset = TCG_TARGET_CALL_STACK_OFFSET; | |
dd186292 RH |
2951 | for (i = nb_regs; i < nb_iargs; i++) { |
2952 | arg = op->args[nb_oargs + i]; | |
39cf05d3 FB |
2953 | #ifdef TCG_TARGET_STACK_GROWSUP |
2954 | stack_offset -= sizeof(tcg_target_long); | |
2955 | #endif | |
2956 | if (arg != TCG_CALL_DUMMY_ARG) { | |
43439139 | 2957 | ts = arg_temp(arg); |
40ae5c62 RH |
2958 | temp_load(s, ts, tcg_target_available_regs[ts->type], |
2959 | s->reserved_regs); | |
2960 | tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset); | |
c896fe29 | 2961 | } |
39cf05d3 FB |
2962 | #ifndef TCG_TARGET_STACK_GROWSUP |
2963 | stack_offset += sizeof(tcg_target_long); | |
2964 | #endif | |
c896fe29 FB |
2965 | } |
2966 | ||
2967 | /* assign input registers */ | |
d21369f5 | 2968 | allocated_regs = s->reserved_regs; |
dd186292 RH |
2969 | for (i = 0; i < nb_regs; i++) { |
2970 | arg = op->args[nb_oargs + i]; | |
39cf05d3 | 2971 | if (arg != TCG_CALL_DUMMY_ARG) { |
43439139 | 2972 | ts = arg_temp(arg); |
39cf05d3 | 2973 | reg = tcg_target_call_iarg_regs[i]; |
b3915dbb | 2974 | tcg_reg_free(s, reg, allocated_regs); |
40ae5c62 | 2975 | |
39cf05d3 FB |
2976 | if (ts->val_type == TEMP_VAL_REG) { |
2977 | if (ts->reg != reg) { | |
3b6dac34 | 2978 | tcg_out_mov(s, ts->type, reg, ts->reg); |
39cf05d3 | 2979 | } |
39cf05d3 | 2980 | } else { |
ccb1bb66 | 2981 | TCGRegSet arg_set = 0; |
40ae5c62 | 2982 | |
40ae5c62 RH |
2983 | tcg_regset_set_reg(arg_set, reg); |
2984 | temp_load(s, ts, arg_set, allocated_regs); | |
c896fe29 | 2985 | } |
40ae5c62 | 2986 | |
39cf05d3 | 2987 | tcg_regset_set_reg(allocated_regs, reg); |
c896fe29 | 2988 | } |
c896fe29 FB |
2989 | } |
2990 | ||
c896fe29 | 2991 | /* mark dead temporaries and free the associated registers */ |
dd186292 | 2992 | for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) { |
866cb6cb | 2993 | if (IS_DEAD_ARG(i)) { |
43439139 | 2994 | temp_dead(s, arg_temp(op->args[i])); |
c896fe29 FB |
2995 | } |
2996 | } | |
2997 | ||
2998 | /* clobber call registers */ | |
c8074023 RH |
2999 | for (i = 0; i < TCG_TARGET_NB_REGS; i++) { |
3000 | if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) { | |
b3915dbb | 3001 | tcg_reg_free(s, i, allocated_regs); |
c896fe29 FB |
3002 | } |
3003 | } | |
78505279 AJ |
3004 | |
3005 | /* Save globals if they might be written by the helper, sync them if | |
3006 | they might be read. */ | |
3007 | if (flags & TCG_CALL_NO_READ_GLOBALS) { | |
3008 | /* Nothing to do */ | |
3009 | } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) { | |
3010 | sync_globals(s, allocated_regs); | |
3011 | } else { | |
b9c18f56 AJ |
3012 | save_globals(s, allocated_regs); |
3013 | } | |
c896fe29 | 3014 | |
cf066674 | 3015 | tcg_out_call(s, func_addr); |
c896fe29 FB |
3016 | |
3017 | /* assign output registers and emit moves if needed */ | |
3018 | for (i = 0; i < nb_oargs; i++) { |
dd186292 | 3019 | arg = op->args[i]; |
43439139 | 3020 | ts = arg_temp(arg); |
c896fe29 | 3021 | reg = tcg_target_call_oarg_regs[i]; |
eabb7b91 | 3022 | tcg_debug_assert(s->reg_to_temp[reg] == NULL); |
34b1a49c | 3023 | |
c896fe29 FB |
3024 | if (ts->fixed_reg) { |
3025 | if (ts->reg != reg) { | |
3b6dac34 | 3026 | tcg_out_mov(s, ts->type, ts->reg, reg); |
c896fe29 FB |
3027 | } |
3028 | } else { | |
ec7a869d | 3029 | if (ts->val_type == TEMP_VAL_REG) { |
f8b2f202 | 3030 | s->reg_to_temp[ts->reg] = NULL; |
ec7a869d AJ |
3031 | } |
3032 | ts->val_type = TEMP_VAL_REG; | |
3033 | ts->reg = reg; | |
3034 | ts->mem_coherent = 0; | |
f8b2f202 | 3035 | s->reg_to_temp[reg] = ts; |
ec7a869d | 3036 | if (NEED_SYNC_ARG(i)) { |
59d7c14e RH |
3037 | temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i)); |
3038 | } else if (IS_DEAD_ARG(i)) { | |
f8bf00f1 | 3039 | temp_dead(s, ts); |
8c11ad25 | 3040 | } |
c896fe29 FB |
3041 | } |
3042 | } | |
c896fe29 FB |
3043 | } |
3044 | ||
3045 | #ifdef CONFIG_PROFILER | |
3046 | ||
c3fac113 EC |
3047 | /* avoid copy/paste errors */ |
3048 | #define PROF_ADD(to, from, field) \ | |
3049 | do { \ | |
3050 | (to)->field += atomic_read(&((from)->field)); \ | |
3051 | } while (0) | |
3052 | ||
3053 | #define PROF_MAX(to, from, field) \ | |
3054 | do { \ | |
3055 | typeof((from)->field) val__ = atomic_read(&((from)->field)); \ | |
3056 | if (val__ > (to)->field) { \ | |
3057 | (to)->field = val__; \ | |
3058 | } \ | |
3059 | } while (0) | |
3060 | ||
3061 | /* Pass in a zeroed @prof; counters from every TCG context are accumulated into it. */ |
3062 | static inline | |
3063 | void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table) | |
3064 | { | |
3468b59e | 3065 | unsigned int n_ctxs = atomic_read(&n_tcg_ctxs); |
c3fac113 EC |
3066 | unsigned int i; |
3067 | ||
3468b59e EC |
3068 | for (i = 0; i < n_ctxs; i++) { |
3069 | TCGContext *s = atomic_read(&tcg_ctxs[i]); | |
3070 | const TCGProfile *orig = &s->prof; | |
c3fac113 EC |
3071 | |
3072 | if (counters) { | |
3073 | PROF_ADD(prof, orig, tb_count1); | |
3074 | PROF_ADD(prof, orig, tb_count); | |
3075 | PROF_ADD(prof, orig, op_count); | |
3076 | PROF_MAX(prof, orig, op_count_max); | |
3077 | PROF_ADD(prof, orig, temp_count); | |
3078 | PROF_MAX(prof, orig, temp_count_max); | |
3079 | PROF_ADD(prof, orig, del_op_count); | |
3080 | PROF_ADD(prof, orig, code_in_len); | |
3081 | PROF_ADD(prof, orig, code_out_len); | |
3082 | PROF_ADD(prof, orig, search_out_len); | |
3083 | PROF_ADD(prof, orig, interm_time); | |
3084 | PROF_ADD(prof, orig, code_time); | |
3085 | PROF_ADD(prof, orig, la_time); | |
3086 | PROF_ADD(prof, orig, opt_time); | |
3087 | PROF_ADD(prof, orig, restore_count); | |
3088 | PROF_ADD(prof, orig, restore_time); | |
3089 | } | |
3090 | if (table) { | |
3091 | int i; | |
3092 | ||
3093 | for (i = 0; i < NB_OPS; i++) { | |
3094 | PROF_ADD(prof, orig, table_op_count[i]); | |
3095 | } | |
3096 | } | |
3097 | } | |
3098 | } | |
3099 | ||
3100 | #undef PROF_ADD | |
3101 | #undef PROF_MAX | |
3102 | ||
3103 | static void tcg_profile_snapshot_counters(TCGProfile *prof) | |
3104 | { | |
3105 | tcg_profile_snapshot(prof, true, false); | |
3106 | } | |
3107 | ||
3108 | static void tcg_profile_snapshot_table(TCGProfile *prof) | |
3109 | { | |
3110 | tcg_profile_snapshot(prof, false, true); | |
3111 | } | |
c896fe29 | 3112 | |
246ae24d | 3113 | void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf) |
c896fe29 | 3114 | { |
c3fac113 | 3115 | TCGProfile prof = {}; |
c896fe29 | 3116 | int i; |
d70724ce | 3117 | |
c3fac113 | 3118 | tcg_profile_snapshot_table(&prof); |
15fc7daa | 3119 | for (i = 0; i < NB_OPS; i++) { |
246ae24d | 3120 | cpu_fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, |
c3fac113 | 3121 | prof.table_op_count[i]); |
c896fe29 | 3122 | } |
c896fe29 | 3123 | } |
246ae24d MF |
3124 | #else |
3125 | void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf) | |
3126 | { | |
3127 | cpu_fprintf(f, "[TCG profiler not compiled]\n"); | |
3128 | } | |
c896fe29 FB |
3129 | #endif |
3130 | ||
3131 | ||
5bd2ec3d | 3132 | int tcg_gen_code(TCGContext *s, TranslationBlock *tb) |
c896fe29 | 3133 | { |
c3fac113 EC |
3134 | #ifdef CONFIG_PROFILER |
3135 | TCGProfile *prof = &s->prof; | |
3136 | #endif | |
15fa08f8 RH |
3137 | int i, num_insns; |
3138 | TCGOp *op; | |
c896fe29 | 3139 | |
04fe6400 RH |
3140 | #ifdef CONFIG_PROFILER |
3141 | { | |
3142 | int n = 0; |
3143 | ||
15fa08f8 RH |
3144 | QTAILQ_FOREACH(op, &s->ops, link) { |
3145 | n++; | |
3146 | } | |
c3fac113 EC |
3147 | atomic_set(&prof->op_count, prof->op_count + n); |
3148 | if (n > prof->op_count_max) { | |
3149 | atomic_set(&prof->op_count_max, n); | |
04fe6400 RH |
3150 | } |
3151 | ||
3152 | n = s->nb_temps; | |
c3fac113 EC |
3153 | atomic_set(&prof->temp_count, prof->temp_count + n); |
3154 | if (n > prof->temp_count_max) { | |
3155 | atomic_set(&prof->temp_count_max, n); | |
04fe6400 RH |
3156 | } |
3157 | } | |
3158 | #endif | |
3159 | ||
c896fe29 | 3160 | #ifdef DEBUG_DISAS |
d977e1c2 AB |
3161 | if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP) |
3162 | && qemu_log_in_addr_range(tb->pc))) { | |
1ee73216 | 3163 | qemu_log_lock(); |
93fcfe39 | 3164 | qemu_log("OP:\n"); |
eeacee4d | 3165 | tcg_dump_ops(s); |
93fcfe39 | 3166 | qemu_log("\n"); |
1ee73216 | 3167 | qemu_log_unlock(); |
c896fe29 FB |
3168 | } |
3169 | #endif | |
3170 | ||
c5cc28ff | 3171 | #ifdef CONFIG_PROFILER |
c3fac113 | 3172 | atomic_set(&prof->opt_time, prof->opt_time - profile_getclock()); |
c5cc28ff AJ |
3173 | #endif |
3174 | ||
8f2e8c07 | 3175 | #ifdef USE_TCG_OPTIMIZATIONS |
c45cb8bb | 3176 | tcg_optimize(s); |
8f2e8c07 KB |
3177 | #endif |
3178 | ||
a23a9ec6 | 3179 | #ifdef CONFIG_PROFILER |
c3fac113 EC |
3180 | atomic_set(&prof->opt_time, prof->opt_time + profile_getclock()); |
3181 | atomic_set(&prof->la_time, prof->la_time - profile_getclock()); | |
a23a9ec6 | 3182 | #endif |
c5cc28ff | 3183 | |
b83eabea | 3184 | liveness_pass_1(s); |
5a18407f | 3185 | |
b83eabea | 3186 | if (s->nb_indirects > 0) { |
5a18407f | 3187 | #ifdef DEBUG_DISAS |
b83eabea RH |
3188 | if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND) |
3189 | && qemu_log_in_addr_range(tb->pc))) { | |
3190 | qemu_log_lock(); | |
3191 | qemu_log("OP before indirect lowering:\n"); | |
3192 | tcg_dump_ops(s); | |
3193 | qemu_log("\n"); | |
3194 | qemu_log_unlock(); | |
3195 | } | |
5a18407f | 3196 | #endif |
b83eabea RH |
3197 | /* Replace indirect temps with direct temps. */ |
3198 | if (liveness_pass_2(s)) { | |
3199 | /* If changes were made, re-run liveness. */ | |
3200 | liveness_pass_1(s); | |
5a18407f RH |
3201 | } |
3202 | } | |
c5cc28ff | 3203 | |
a23a9ec6 | 3204 | #ifdef CONFIG_PROFILER |
c3fac113 | 3205 | atomic_set(&prof->la_time, prof->la_time + profile_getclock()); |
a23a9ec6 | 3206 | #endif |
c896fe29 FB |
3207 | |
3208 | #ifdef DEBUG_DISAS | |
d977e1c2 AB |
3209 | if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT) |
3210 | && qemu_log_in_addr_range(tb->pc))) { | |
1ee73216 | 3211 | qemu_log_lock(); |
c5cc28ff | 3212 | qemu_log("OP after optimization and liveness analysis:\n"); |
eeacee4d | 3213 | tcg_dump_ops(s); |
93fcfe39 | 3214 | qemu_log("\n"); |
1ee73216 | 3215 | qemu_log_unlock(); |
c896fe29 FB |
3216 | } |
3217 | #endif | |
3218 | ||
3219 | tcg_reg_alloc_start(s); | |
3220 | ||
e7e168f4 EC |
3221 | s->code_buf = tb->tc.ptr; |
3222 | s->code_ptr = tb->tc.ptr; | |
c896fe29 | 3223 | |
659ef5cb RH |
3224 | #ifdef TCG_TARGET_NEED_LDST_LABELS |
3225 | s->ldst_labels = NULL; | |
3226 | #endif | |
57a26946 RH |
3227 | #ifdef TCG_TARGET_NEED_POOL_LABELS |
3228 | s->pool_labels = NULL; | |
3229 | #endif | |
9ecefc84 | 3230 | |
fca8a500 | 3231 | num_insns = -1; |
15fa08f8 | 3232 | QTAILQ_FOREACH(op, &s->ops, link) { |
c45cb8bb | 3233 | TCGOpcode opc = op->opc; |
b3db8758 | 3234 | |
c896fe29 | 3235 | #ifdef CONFIG_PROFILER |
c3fac113 | 3236 | atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1); |
c896fe29 | 3237 | #endif |
c45cb8bb RH |
3238 | |
3239 | switch (opc) { | |
c896fe29 | 3240 | case INDEX_op_mov_i32: |
c896fe29 | 3241 | case INDEX_op_mov_i64: |
dd186292 | 3242 | tcg_reg_alloc_mov(s, op); |
c896fe29 | 3243 | break; |
e8996ee0 | 3244 | case INDEX_op_movi_i32: |
e8996ee0 | 3245 | case INDEX_op_movi_i64: |
dd186292 | 3246 | tcg_reg_alloc_movi(s, op); |
e8996ee0 | 3247 | break; |
765b842a | 3248 | case INDEX_op_insn_start: |
fca8a500 RH |
3249 | if (num_insns >= 0) { |
3250 | s->gen_insn_end_off[num_insns] = tcg_current_code_size(s); | |
3251 | } | |
3252 | num_insns++; | |
bad729e2 RH |
3253 | for (i = 0; i < TARGET_INSN_START_WORDS; ++i) { |
3254 | target_ulong a; | |
3255 | #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS | |
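/* Editorial note: when the guest address is wider than a host register,
   each guest PC is carried in two adjacent args; deposit64() below
   reassembles it from the low half (args[i * 2]) and the high half
   (args[i * 2 + 1]). */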
efee3746 | 3256 | a = deposit64(op->args[i * 2], 32, 32, op->args[i * 2 + 1]); |
bad729e2 | 3257 | #else |
efee3746 | 3258 | a = op->args[i]; |
bad729e2 | 3259 | #endif |
fca8a500 | 3260 | s->gen_insn_data[num_insns][i] = a; |
bad729e2 | 3261 | } |
c896fe29 | 3262 | break; |
5ff9d6a4 | 3263 | case INDEX_op_discard: |
43439139 | 3264 | temp_dead(s, arg_temp(op->args[0])); |
5ff9d6a4 | 3265 | break; |
c896fe29 | 3266 | case INDEX_op_set_label: |
e8996ee0 | 3267 | tcg_reg_alloc_bb_end(s, s->reserved_regs); |
efee3746 | 3268 | tcg_out_label(s, arg_label(op->args[0]), s->code_ptr); |
c896fe29 FB |
3269 | break; |
3270 | case INDEX_op_call: | |
dd186292 | 3271 | tcg_reg_alloc_call(s, op); |
c45cb8bb | 3272 | break; |
c896fe29 | 3273 | default: |
25c4d9cc | 3274 | /* Sanity check that we've not introduced any unhandled opcodes. */ |
be0f34b5 | 3275 | tcg_debug_assert(tcg_op_supported(opc)); |
c896fe29 FB |
3276 | /* Note: in order to speed up the code, it would be much |
3277 | faster to have specialized register allocator functions for | |
3278 | some common argument patterns */ | |
dd186292 | 3279 | tcg_reg_alloc_op(s, op); |
c896fe29 FB |
3280 | break; |
3281 | } | |
8d8fdbae | 3282 | #ifdef CONFIG_DEBUG_TCG |
c896fe29 FB |
3283 | check_regs(s); |
3284 | #endif | |
b125f9dc RH |
3285 | /* Test for (pending) buffer overflow. The assumption is that any |
3286 | one operation beginning below the high water mark cannot overrun | |
3287 | the buffer completely. Thus we can test for overflow after | |
3288 | generating code without having to check during generation. */ | |
644da9b3 | 3289 | if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) { |
b125f9dc RH |
3290 | return -1; |
3291 | } | |
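/* Editorial sketch: code_gen_highwater is assumed to be established when
   the code buffer is sized, e.g. roughly
       s->code_gen_highwater = buffer_end - margin;
   with the margin chosen to exceed the worst-case size of a single op,
   which is what makes the one check above sufficient. */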
c896fe29 | 3292 | } |
fca8a500 RH |
3293 | tcg_debug_assert(num_insns >= 0); |
3294 | s->gen_insn_end_off[num_insns] = tcg_current_code_size(s); | |
c45cb8bb | 3295 | |
b76f0d8c | 3296 | /* Generate TB finalization at the end of block */ |
659ef5cb RH |
3297 | #ifdef TCG_TARGET_NEED_LDST_LABELS |
3298 | if (!tcg_out_ldst_finalize(s)) { | |
23dceda6 RH |
3299 | return -1; |
3300 | } | |
659ef5cb | 3301 | #endif |
57a26946 RH |
3302 | #ifdef TCG_TARGET_NEED_POOL_LABELS |
3303 | if (!tcg_out_pool_finalize(s)) { | |
3304 | return -1; | |
3305 | } | |
3306 | #endif | |
c896fe29 FB |
3307 | |
3308 | /* flush instruction cache */ | |
1813e175 | 3309 | flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr); |
2aeabc08 | 3310 | |
1813e175 | 3311 | return tcg_current_code_size(s); |
c896fe29 FB |
3312 | } |
3313 | ||
a23a9ec6 | 3314 | #ifdef CONFIG_PROFILER |
405cf9ff | 3315 | void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf) |
a23a9ec6 | 3316 | { |
c3fac113 EC |
3317 | TCGProfile prof = {}; |
3318 | const TCGProfile *s; | |
3319 | int64_t tb_count; | |
3320 | int64_t tb_div_count; | |
3321 | int64_t tot; | |
3322 | ||
3323 | tcg_profile_snapshot_counters(&prof); | |
3324 | s = &prof; | |
3325 | tb_count = s->tb_count; | |
3326 | tb_div_count = tb_count ? tb_count : 1; | |
3327 | tot = s->interm_time + s->code_time; | |
a23a9ec6 | 3328 | |
a23a9ec6 FB |
3329 | cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n", |
3330 | tot, tot / 2.4e9); | |
3331 | cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n", | |
fca8a500 RH |
3332 | tb_count, s->tb_count1 - tb_count, |
3333 | (double)(s->tb_count1 - s->tb_count) | |
3334 | / (s->tb_count1 ? s->tb_count1 : 1) * 100.0); | |
a23a9ec6 | 3335 | cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n", |
fca8a500 | 3336 | (double)s->op_count / tb_div_count, s->op_count_max); |
a23a9ec6 | 3337 | cpu_fprintf(f, "deleted ops/TB %0.2f\n", |
fca8a500 | 3338 | (double)s->del_op_count / tb_div_count); |
a23a9ec6 | 3339 | cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n", |
fca8a500 RH |
3340 | (double)s->temp_count / tb_div_count, s->temp_count_max); |
3341 | cpu_fprintf(f, "avg host code/TB %0.1f\n", | |
3342 | (double)s->code_out_len / tb_div_count); | |
3343 | cpu_fprintf(f, "avg search data/TB %0.1f\n", | |
3344 | (double)s->search_out_len / tb_div_count); | |
a23a9ec6 FB |
3345 | |
3346 | cpu_fprintf(f, "cycles/op %0.1f\n", | |
3347 | s->op_count ? (double)tot / s->op_count : 0); | |
3348 | cpu_fprintf(f, "cycles/in byte %0.1f\n", | |
3349 | s->code_in_len ? (double)tot / s->code_in_len : 0); | |
3350 | cpu_fprintf(f, "cycles/out byte %0.1f\n", | |
3351 | s->code_out_len ? (double)tot / s->code_out_len : 0); | |
fca8a500 RH |
3352 | cpu_fprintf(f, "cycles/search byte %0.1f\n", |
3353 | s->search_out_len ? (double)tot / s->search_out_len : 0); | |
3354 | if (tot == 0) { | |
a23a9ec6 | 3355 | tot = 1; |
fca8a500 | 3356 | } |
a23a9ec6 FB |
3357 | cpu_fprintf(f, " gen_interm time %0.1f%%\n", |
3358 | (double)s->interm_time / tot * 100.0); | |
3359 | cpu_fprintf(f, " gen_code time %0.1f%%\n", | |
3360 | (double)s->code_time / tot * 100.0); | |
c5cc28ff AJ |
3361 | cpu_fprintf(f, "optim./code time %0.1f%%\n", |
3362 | (double)s->opt_time / (s->code_time ? s->code_time : 1) | |
3363 | * 100.0); | |
a23a9ec6 FB |
3364 | cpu_fprintf(f, "liveness/code time %0.1f%%\n", |
3365 | (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0); | |
3366 | cpu_fprintf(f, "cpu_restore count %" PRId64 "\n", | |
3367 | s->restore_count); | |
3368 | cpu_fprintf(f, " avg cycles %0.1f\n", | |
3369 | s->restore_count ? (double)s->restore_time / s->restore_count : 0); | |
a23a9ec6 FB |
3370 | } |
3371 | #else | |
405cf9ff | 3372 | void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf) |
a23a9ec6 | 3373 | { |
24bf7b3a | 3374 | cpu_fprintf(f, "[TCG profiler not compiled]\n"); |
a23a9ec6 FB |
3375 | } |
3376 | #endif | |
813da627 RH |
3377 | |
3378 | #ifdef ELF_HOST_MACHINE | |
5872bbf2 RH |
3379 | /* In order to use this feature, the backend needs to do three things: |
3380 | ||
3381 | (1) Define ELF_HOST_MACHINE to indicate both what value to | |
3382 | put into the ELF image and to indicate support for the feature. | |
3383 | ||
3384 | (2) Define tcg_register_jit. This should create a buffer containing | |
3385 | the contents of a .debug_frame section that describes the post- | |
3386 | prologue unwind info for the tcg machine. | |
3387 | ||
3388 | (3) Call tcg_register_jit_int, with the constructed .debug_frame. | |
3389 | */ | |
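
/* Editorial sketch of steps (2) and (3) above for a hypothetical backend;
   the DebugFrame bytes are placeholders, not a real target's CFI, and the
   block is not compiled. */
#if 0
void tcg_register_jit(void *buf, size_t buf_size)
{
    /* (2) A target-specific .debug_frame image (CIE + FDE) describing the
       prologue's frame; a single zero byte stands in for the real data. */
    static const uint8_t debug_frame[] = { 0 };

    /* (3) Hand the code buffer and unwind info to the common helper
       defined below. */
    tcg_register_jit_int(buf, buf_size, debug_frame, sizeof(debug_frame));
}
#endif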
813da627 RH |
3390 | |
3391 | /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */ | |
3392 | typedef enum { | |
3393 | JIT_NOACTION = 0, | |
3394 | JIT_REGISTER_FN, | |
3395 | JIT_UNREGISTER_FN | |
3396 | } jit_actions_t; | |
3397 | ||
3398 | struct jit_code_entry { | |
3399 | struct jit_code_entry *next_entry; | |
3400 | struct jit_code_entry *prev_entry; | |
3401 | const void *symfile_addr; | |
3402 | uint64_t symfile_size; | |
3403 | }; | |
3404 | ||
3405 | struct jit_descriptor { | |
3406 | uint32_t version; | |
3407 | uint32_t action_flag; | |
3408 | struct jit_code_entry *relevant_entry; | |
3409 | struct jit_code_entry *first_entry; | |
3410 | }; | |
3411 | ||
3412 | void __jit_debug_register_code(void) __attribute__((noinline)); | |
3413 | void __jit_debug_register_code(void) | |
3414 | { | |
3415 | asm(""); | |
3416 | } | |
3417 | ||
3418 | /* Must statically initialize the version, because GDB may check | |
3419 | the version before we can set it. */ | |
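/* Fields in declaration order: version, action_flag, relevant_entry,
   first_entry. */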
3420 | struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 }; | |
3421 | ||
3422 | /* End GDB interface. */ | |
3423 | ||
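/* Return the offset of @str within @strtab.  The caller must guarantee
   that @str is present: there is no terminating bounds check. */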
3424 | static int find_string(const char *strtab, const char *str) | |
3425 | { | |
3426 | const char *p = strtab + 1; | |
3427 | ||
3428 | while (1) { | |
3429 | if (strcmp(p, str) == 0) { | |
3430 | return p - strtab; | |
3431 | } | |
3432 | p += strlen(p) + 1; | |
3433 | } | |
3434 | } | |
3435 | ||
5872bbf2 | 3436 | static void tcg_register_jit_int(void *buf_ptr, size_t buf_size, |
2c90784a RH |
3437 | const void *debug_frame, |
3438 | size_t debug_frame_size) | |
813da627 | 3439 | { |
5872bbf2 RH |
3440 | struct __attribute__((packed)) DebugInfo { |
3441 | uint32_t len; | |
3442 | uint16_t version; | |
3443 | uint32_t abbrev; | |
3444 | uint8_t ptr_size; | |
3445 | uint8_t cu_die; | |
3446 | uint16_t cu_lang; | |
3447 | uintptr_t cu_low_pc; | |
3448 | uintptr_t cu_high_pc; | |
3449 | uint8_t fn_die; | |
3450 | char fn_name[16]; | |
3451 | uintptr_t fn_low_pc; | |
3452 | uintptr_t fn_high_pc; | |
3453 | uint8_t cu_eoc; | |
3454 | }; | |
813da627 RH |
3455 | |
3456 | struct ElfImage { | |
3457 | ElfW(Ehdr) ehdr; | |
3458 | ElfW(Phdr) phdr; | |
5872bbf2 RH |
3459 | ElfW(Shdr) shdr[7]; |
3460 | ElfW(Sym) sym[2]; | |
3461 | struct DebugInfo di; | |
3462 | uint8_t da[24]; | |
3463 | char str[80]; | |
3464 | }; | |
3465 | ||
3466 | struct ElfImage *img; | |
3467 | ||
3468 | static const struct ElfImage img_template = { | |
3469 | .ehdr = { | |
3470 | .e_ident[EI_MAG0] = ELFMAG0, | |
3471 | .e_ident[EI_MAG1] = ELFMAG1, | |
3472 | .e_ident[EI_MAG2] = ELFMAG2, | |
3473 | .e_ident[EI_MAG3] = ELFMAG3, | |
3474 | .e_ident[EI_CLASS] = ELF_CLASS, | |
3475 | .e_ident[EI_DATA] = ELF_DATA, | |
3476 | .e_ident[EI_VERSION] = EV_CURRENT, | |
3477 | .e_type = ET_EXEC, | |
3478 | .e_machine = ELF_HOST_MACHINE, | |
3479 | .e_version = EV_CURRENT, | |
3480 | .e_phoff = offsetof(struct ElfImage, phdr), | |
3481 | .e_shoff = offsetof(struct ElfImage, shdr), | |
3482 | .e_ehsize = sizeof(ElfW(Ehdr)), |
3483 | .e_phentsize = sizeof(ElfW(Phdr)), | |
3484 | .e_phnum = 1, | |
3485 | .e_shentsize = sizeof(ElfW(Shdr)), | |
3486 | .e_shnum = ARRAY_SIZE(img->shdr), | |
3487 | .e_shstrndx = ARRAY_SIZE(img->shdr) - 1, | |
abbb3eae RH |
3488 | #ifdef ELF_HOST_FLAGS |
3489 | .e_flags = ELF_HOST_FLAGS, | |
3490 | #endif | |
3491 | #ifdef ELF_OSABI | |
3492 | .e_ident[EI_OSABI] = ELF_OSABI, | |
3493 | #endif | |
5872bbf2 RH |
3494 | }, |
3495 | .phdr = { | |
3496 | .p_type = PT_LOAD, | |
3497 | .p_flags = PF_X, | |
3498 | }, | |
3499 | .shdr = { | |
3500 | [0] = { .sh_type = SHT_NULL }, | |
3501 | /* Trick: The contents of code_gen_buffer are not present in | |
3502 | this fake ELF file; that got allocated elsewhere. Therefore | |
3503 | we mark .text as SHT_NOBITS (similar to .bss) so that readers | |
3504 | will not look for contents. We can record any address. */ | |
3505 | [1] = { /* .text */ | |
3506 | .sh_type = SHT_NOBITS, | |
3507 | .sh_flags = SHF_EXECINSTR | SHF_ALLOC, | |
3508 | }, | |
3509 | [2] = { /* .debug_info */ | |
3510 | .sh_type = SHT_PROGBITS, | |
3511 | .sh_offset = offsetof(struct ElfImage, di), | |
3512 | .sh_size = sizeof(struct DebugInfo), | |
3513 | }, | |
3514 | [3] = { /* .debug_abbrev */ | |
3515 | .sh_type = SHT_PROGBITS, | |
3516 | .sh_offset = offsetof(struct ElfImage, da), | |
3517 | .sh_size = sizeof(img->da), | |
3518 | }, | |
3519 | [4] = { /* .debug_frame */ | |
3520 | .sh_type = SHT_PROGBITS, | |
3521 | .sh_offset = sizeof(struct ElfImage), | |
3522 | }, | |
3523 | [5] = { /* .symtab */ | |
3524 | .sh_type = SHT_SYMTAB, | |
3525 | .sh_offset = offsetof(struct ElfImage, sym), | |
3526 | .sh_size = sizeof(img->sym), | |
3527 | .sh_info = 1, | |
3528 | .sh_link = ARRAY_SIZE(img->shdr) - 1, | |
3529 | .sh_entsize = sizeof(ElfW(Sym)), | |
3530 | }, | |
3531 | [6] = { /* .strtab */ | |
3532 | .sh_type = SHT_STRTAB, | |
3533 | .sh_offset = offsetof(struct ElfImage, str), | |
3534 | .sh_size = sizeof(img->str), | |
3535 | } | |
3536 | }, | |
3537 | .sym = { | |
3538 | [1] = { /* code_gen_buffer */ | |
3539 | .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC), | |
3540 | .st_shndx = 1, | |
3541 | } | |
3542 | }, | |
3543 | .di = { | |
3544 | .len = sizeof(struct DebugInfo) - 4, | |
3545 | .version = 2, | |
3546 | .ptr_size = sizeof(void *), | |
3547 | .cu_die = 1, | |
3548 | .cu_lang = 0x8001, /* DW_LANG_Mips_Assembler */ | |
3549 | .fn_die = 2, | |
3550 | .fn_name = "code_gen_buffer" | |
3551 | }, | |
3552 | .da = { | |
3553 | 1, /* abbrev number (the cu) */ | |
3554 | 0x11, 1, /* DW_TAG_compile_unit, has children */ | |
3555 | 0x13, 0x5, /* DW_AT_language, DW_FORM_data2 */ | |
3556 | 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */ | |
3557 | 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */ | |
3558 | 0, 0, /* end of abbrev */ | |
3559 | 2, /* abbrev number (the fn) */ | |
3560 | 0x2e, 0, /* DW_TAG_subprogram, no children */ | |
3561 | 0x3, 0x8, /* DW_AT_name, DW_FORM_string */ | |
3562 | 0x11, 0x1, /* DW_AT_low_pc, DW_FORM_addr */ | |
3563 | 0x12, 0x1, /* DW_AT_high_pc, DW_FORM_addr */ | |
3564 | 0, 0, /* end of abbrev */ | |
3565 | 0 /* no more abbrev */ | |
3566 | }, | |
3567 | .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0" | |
3568 | ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer", | |
813da627 RH |
3569 | }; |
3570 | ||
3571 | /* We only need a single jit entry; statically allocate it. */ | |
3572 | static struct jit_code_entry one_entry; | |
3573 | ||
5872bbf2 | 3574 | uintptr_t buf = (uintptr_t)buf_ptr; |
813da627 | 3575 | size_t img_size = sizeof(struct ElfImage) + debug_frame_size; |
2c90784a | 3576 | DebugFrameHeader *dfh; |
813da627 | 3577 | |
5872bbf2 RH |
3578 | img = g_malloc(img_size); |
3579 | *img = img_template; | |
813da627 | 3580 | |
5872bbf2 RH |
3581 | img->phdr.p_vaddr = buf; |
3582 | img->phdr.p_paddr = buf; | |
3583 | img->phdr.p_memsz = buf_size; | |
813da627 | 3584 | |
813da627 | 3585 | img->shdr[1].sh_name = find_string(img->str, ".text"); |
5872bbf2 | 3586 | img->shdr[1].sh_addr = buf; |
813da627 RH |
3587 | img->shdr[1].sh_size = buf_size; |
3588 | ||
5872bbf2 RH |
3589 | img->shdr[2].sh_name = find_string(img->str, ".debug_info"); |
3590 | img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev"); | |
3591 | ||
3592 | img->shdr[4].sh_name = find_string(img->str, ".debug_frame"); | |
3593 | img->shdr[4].sh_size = debug_frame_size; | |
3594 | ||
3595 | img->shdr[5].sh_name = find_string(img->str, ".symtab"); | |
3596 | img->shdr[6].sh_name = find_string(img->str, ".strtab"); | |
3597 | ||
3598 | img->sym[1].st_name = find_string(img->str, "code_gen_buffer"); | |
3599 | img->sym[1].st_value = buf; | |
3600 | img->sym[1].st_size = buf_size; | |
813da627 | 3601 | |
5872bbf2 | 3602 | img->di.cu_low_pc = buf; |
45aba097 | 3603 | img->di.cu_high_pc = buf + buf_size; |
5872bbf2 | 3604 | img->di.fn_low_pc = buf; |
45aba097 | 3605 | img->di.fn_high_pc = buf + buf_size; |
813da627 | 3606 | |
2c90784a RH |
3607 | dfh = (DebugFrameHeader *)(img + 1); |
3608 | memcpy(dfh, debug_frame, debug_frame_size); | |
3609 | dfh->fde.func_start = buf; | |
3610 | dfh->fde.func_len = buf_size; | |
3611 | ||
813da627 RH |
3612 | #ifdef DEBUG_JIT |
3613 | /* Enable this block to be able to debug the ELF image file creation. | |
3614 | One can use readelf, objdump, or other inspection utilities. */ | |
3615 | { | |
3616 | FILE *f = fopen("/tmp/qemu.jit", "w+b"); | |
3617 | if (f) { | |
5872bbf2 | 3618 | if (fwrite(img, img_size, 1, f) != 1) { |
813da627 RH |
3619 | /* Avoid stupid unused return value warning for fwrite. */ |
3620 | } | |
3621 | fclose(f); | |
3622 | } | |
3623 | } | |
3624 | #endif | |
3625 | ||
3626 | one_entry.symfile_addr = img; | |
3627 | one_entry.symfile_size = img_size; | |
3628 | ||
3629 | __jit_debug_descriptor.action_flag = JIT_REGISTER_FN; | |
3630 | __jit_debug_descriptor.relevant_entry = &one_entry; | |
3631 | __jit_debug_descriptor.first_entry = &one_entry; | |
3632 | __jit_debug_register_code(); | |
3633 | } | |
3634 | #else | |
5872bbf2 RH |
3635 | /* No support for the feature. Provide the entry point expected by exec.c, |
3636 | and implement the internal function we declared earlier. */ | |
813da627 RH |
3637 | |
3638 | static void tcg_register_jit_int(void *buf, size_t size, | |
2c90784a RH |
3639 | const void *debug_frame, |
3640 | size_t debug_frame_size) | |
813da627 RH |
3641 | { |
3642 | } | |
3643 | ||
3644 | void tcg_register_jit(void *buf, size_t buf_size) | |
3645 | { | |
3646 | } | |
3647 | #endif /* ELF_HOST_MACHINE */ |