/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef TCG_H
#define TCG_H

#include "qemu-common.h"
#include "cpu.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "tcg-mo.h"
#include "tcg-target.h"

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
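
/* Worked example (illustrative): with MAX_OPC_PARAM_IARGS = 5 and
 * MAX_OPC_PARAM_OARGS = 1, MAX_OPC_PARAM_ARGS is 6, so a call op can
 * need up to 4 + 2 * 6 = 16 parameter slots on a 32-bit host (each
 * argument may occupy two slots) and 4 + 1 * 6 = 10 slots on a
 * 64-bit host.  */
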
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

#define CPU_TEMP_BUF_NLONGS 128

/* Default target word size to pointer size. */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
#endif

#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

/* Oversized TCG guests make things like MTTCG hard
 * as we can't use atomics for cputlb updates.
 */
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
#define TCG_OVERSIZED_GUEST 1
#else
#define TCG_OVERSIZED_GUEST 0
#endif

#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif

#if TCG_TARGET_REG_BITS == 32
/* Turn some undef macros into false macros. */
#define TCG_TARGET_HAS_extrl_i64_i32 0
#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_div_i64 0
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_div2_i64 0
#define TCG_TARGET_HAS_rot_i64 0
#define TCG_TARGET_HAS_ext8s_i64 0
#define TCG_TARGET_HAS_ext16s_i64 0
#define TCG_TARGET_HAS_ext32s_i64 0
#define TCG_TARGET_HAS_ext8u_i64 0
#define TCG_TARGET_HAS_ext16u_i64 0
#define TCG_TARGET_HAS_ext32u_i64 0
#define TCG_TARGET_HAS_bswap16_i64 0
#define TCG_TARGET_HAS_bswap32_i64 0
#define TCG_TARGET_HAS_bswap64_i64 0
#define TCG_TARGET_HAS_neg_i64 0
#define TCG_TARGET_HAS_not_i64 0
#define TCG_TARGET_HAS_andc_i64 0
#define TCG_TARGET_HAS_orc_i64 0
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
#define TCG_TARGET_HAS_clz_i64 0
#define TCG_TARGET_HAS_ctz_i64 0
#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_deposit_i64 0
#define TCG_TARGET_HAS_extract_i64 0
#define TCG_TARGET_HAS_sextract_i64 0
#define TCG_TARGET_HAS_movcond_i64 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muls2_i64 0
#define TCG_TARGET_HAS_muluh_i64 0
#define TCG_TARGET_HAS_mulsh_i64 0
/* Turn some undef macros into true macros. */
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
#endif

#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i32_valid
#define TCG_TARGET_extract_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_extract_i64_valid
#define TCG_TARGET_extract_i64_valid(ofs, len) 1
#endif

/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32 0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32 0
#define TCG_TARGET_HAS_rem_i32 0
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64 0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64 0
#define TCG_TARGET_HAS_rem_i64 0
#endif

/* For 32-bit targets, some sort of unsigned widening multiply is required. */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif

#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif

typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;

#define tcg_regset_clear(d) (d) = 0
#define tcg_regset_set(d, s) (d) = (s)
#define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg)
#define tcg_regset_set_reg(d, r) (d) |= 1L << (r)
#define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r))
#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1)
#define tcg_regset_or(d, a, b) (d) = (a) | (b)
#define tcg_regset_and(d, a, b) (d) = (a) & (b)
#define tcg_regset_andnot(d, a, b) (d) = (a) & ~(b)
#define tcg_regset_not(d, a) (d) = ~(a)
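
/* Example (illustrative) use of the register-set macros when building an
 * allocation mask; TCG_REG_CALL_STACK stands in for a register name
 * provided by the per-host tcg-target.h:
 *
 *     TCGRegSet allocatable;
 *     tcg_regset_clear(allocatable);
 *     tcg_regset_set32(allocatable, 0, 0xffff);    // registers 0..15
 *     tcg_regset_reset_reg(allocatable, TCG_REG_CALL_STACK);
 *     if (tcg_regset_test_reg(allocatable, r)) { ... }
 */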

#ifndef TCG_TARGET_INSN_UNIT_SIZE
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif


#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
# define tcg_debug_assert(X) do { assert(X); } while (0)
#elif QEMU_GNUC_PREREQ(4, 5)
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#else
# define tcg_debug_assert(X) do { (void)(X); } while (0)
#endif

typedef struct TCGRelocation {
    struct TCGRelocation *next;
    int type;
    tcg_insn_unit *ptr;
    intptr_t addend;
} TCGRelocation;

typedef struct TCGLabel {
    unsigned has_value : 1;
    unsigned id : 31;
    union {
        uintptr_t value;
        tcg_insn_unit *value_ptr;
        TCGRelocation *first_reloc;
    } u;
} TCGLabel;

typedef struct TCGPool {
    struct TCGPool *next;
    int size;
    uint8_t data[0] __attribute__ ((aligned));
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

#define TCG_MAX_TEMPS 512
#define TCG_MAX_INSNS 512

/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128

typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,
    TCG_TYPE_COUNT, /* number of different types */

    /* An alias for the size of the host register.  */
#if TCG_TARGET_REG_BITS == 32
    TCG_TYPE_REG = TCG_TYPE_I32,
#else
    TCG_TYPE_REG = TCG_TYPE_I64,
#endif

    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif

    /* An alias for the size of the target "long", aka register.  */
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
#else
    TCG_TYPE_TL = TCG_TYPE_I32,
#endif
} TCGType;

/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
typedef enum TCGMemOp {
    MO_8 = 0,
    MO_16 = 1,
    MO_32 = 2,
    MO_64 = 3,
    MO_SIZE = 3,    /* Mask for the above.  */

    MO_SIGN = 4,    /* Sign-extended, otherwise zero-extended.  */

    MO_BSWAP = 8,   /* Host reverse endian.  */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE = MO_BSWAP,
    MO_BE = 0,
#else
    MO_LE = 0,
    MO_BE = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE = MO_BE,
#else
    MO_TE = MO_LE,
#endif

    /* MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines ALIGNED_ONLY.
     *
     * Some architectures (e.g. ARMv8) need the address to be aligned
     * to a size larger than the size of the memory access.
     * Some architectures (e.g. SPARCv9) need an address which is aligned,
     * but less strictly than the natural alignment.
     *
     * MO_ALIGN assumes the alignment size is the size of the memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN).
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
     */
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
#ifdef ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
    MO_ALIGN_2 = 1 << MO_ASHIFT,
    MO_ALIGN_4 = 2 << MO_ASHIFT,
    MO_ALIGN_8 = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,

    /* Combinations of the above, for ease of use.  */
    MO_UB = MO_8,
    MO_UW = MO_16,
    MO_UL = MO_32,
    MO_SB = MO_SIGN | MO_8,
    MO_SW = MO_SIGN | MO_16,
    MO_SL = MO_SIGN | MO_32,
    MO_Q = MO_64,

    MO_LEUW = MO_LE | MO_UW,
    MO_LEUL = MO_LE | MO_UL,
    MO_LESW = MO_LE | MO_SW,
    MO_LESL = MO_LE | MO_SL,
    MO_LEQ = MO_LE | MO_Q,

    MO_BEUW = MO_BE | MO_UW,
    MO_BEUL = MO_BE | MO_UL,
    MO_BESW = MO_BE | MO_SW,
    MO_BESL = MO_BE | MO_SL,
    MO_BEQ = MO_BE | MO_Q,

    MO_TEUW = MO_TE | MO_UW,
    MO_TEUL = MO_TE | MO_UL,
    MO_TESW = MO_TE | MO_SW,
    MO_TESL = MO_TE | MO_SL,
    MO_TEQ = MO_TE | MO_Q,

    MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;
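
/* Example (illustrative): a memory operation is built by OR-ing the pieces
 * above.  A target-endian, sign-extended 16-bit load with a natural
 * (2-byte) alignment check would be expressed as
 *
 *     TCGMemOp op = MO_TESW | MO_ALIGN;  // == MO_TE | MO_SIGN | MO_16 | MO_ALIGN
 */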

/**
 * get_alignment_bits
 * @memop: TCGMemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned get_alignment_bits(TCGMemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
#endif
    return a;
}
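
/* Example (illustrative) results for the helper above:
 *     get_alignment_bits(MO_UB)                -> 0  (no alignment check)
 *     get_alignment_bits(MO_TEUL | MO_ALIGN)   -> 2  (natural, 4-byte)
 *     get_alignment_bits(MO_TEUL | MO_ALIGN_8) -> 3  (explicit, 8-byte)
 */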

typedef tcg_target_ulong TCGArg;

/* Define type and accessor macros for TCG variables.

   TCG variables are the inputs and outputs of TCG ops, as described
   in tcg/README. Target CPU front-end code uses these types to deal
   with TCG variables as it emits TCG code via the tcg_gen_* functions.
   They come in several flavours:
    * TCGv_i32 : 32 bit integer type
    * TCGv_i64 : 64 bit integer type
    * TCGv_ptr : a host pointer type
    * TCGv : an integer type the same size as target_ulong
             (an alias for either TCGv_i32 or TCGv_i64)
   The compiler's type checking will complain if you mix them
   up and pass the wrong sized TCGv to a function.

   Users of tcg_gen_* don't need to know about any of the internal
   details of these, and should treat them as opaque types.
   You won't be able to look inside them in a debugger either.

   Internal implementation details follow:

   Note that there is no definition of the structs TCGv_i32_d etc anywhere.
   This is deliberate, because the values we store in variables of type
   TCGv_i32 are not really pointers-to-structures. They're just small
   integers, but keeping them in pointer types like this means that the
   compiler will complain if you accidentally pass a TCGv_i32 to a
   function which takes a TCGv_i64, and so on. Only the internals of
   TCG need to care about the actual contents of the types, and they always
   box and unbox via the MAKE_TCGV_* and GET_TCGV_* functions.
   Converting to and from intptr_t rather than int reduces the number
   of sign-extension instructions that get implied on 64-bit hosts.  */

typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
typedef TCGv_ptr TCGv_env;
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif

static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i)
{
    return (TCGv_i32)i;
}

static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i)
{
    return (TCGv_i64)i;
}

static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i)
{
    return (TCGv_ptr)i;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t)
{
    return (intptr_t)t;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t)
{
    return (intptr_t)t;
}

static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
{
    return (intptr_t)t;
}
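
/* Example (illustrative): the boxing helpers above simply round-trip a
 * small integer through the opaque pointer type, so
 *
 *     TCGv_i32 t = MAKE_TCGV_I32(5);
 *     intptr_t idx = GET_TCGV_I32(t);   // idx == 5
 *
 * while passing 't' where a TCGv_i64 is expected is a compile error.  */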

#if TCG_TARGET_REG_BITS == 32
#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t))
#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1)
#endif

#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b))

/* Dummy definition to avoid compiler warnings.  */
#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1)
#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1)
#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1)

#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1)
#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1)
#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1)

/* call flags */
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS 0x0010
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS 0x0020
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS 0x0040

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

/* used to align parameters */
#define TCG_CALL_DUMMY_TCGV MAKE_TCGV_I32(-1)
#define TCG_CALL_DUMMY_ARG ((TCGArg)(-1))

/* Conditions.  Note that these are laid out for easy manipulation by
   the functions below:
     bit 0 is used for inverting;
     bit 1 is signed,
     bit 2 is unsigned,
     bit 3 is used with bit 0 for swapping signed/unsigned.  */
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;

/* Invert the sense of the comparison.  */
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

/* Swap the operands in a comparison.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    return c & 6 ? (TCGCond)(c ^ 9) : c;
}

/* Create an "unsigned" version of a "signed" comparison.  */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    return c & 2 ? (TCGCond)(c ^ 6) : c;
}

/* Must a comparison be considered unsigned?  */
static inline bool is_unsigned_cond(TCGCond c)
{
    return (c & 4) != 0;
}

/* Create a "high" version of a double-word comparison.
   This removes equality from a LTE or GTE comparison.  */
static inline TCGCond tcg_high_cond(TCGCond c)
{
    switch (c) {
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
        return (TCGCond)(c ^ 8);
    default:
        return c;
    }
}
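
/* Example (illustrative) of the bit manipulations above:
 *     tcg_invert_cond(TCG_COND_LT)   == TCG_COND_GE   (flip bit 0)
 *     tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT   (a < b  <=>  b > a)
 *     tcg_unsigned_cond(TCG_COND_LT) == TCG_COND_LTU  (swap bits 1 and 2)
 *     tcg_high_cond(TCG_COND_LE)     == TCG_COND_LT   (drop equality)
 */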

typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;

typedef struct TCGTemp {
    TCGReg reg:8;
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
    unsigned int fixed_reg:1;
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
    unsigned int temp_local:1; /* If true, the temp is saved across
                                  basic blocks. Otherwise, it is not
                                  preserved across basic blocks. */
    unsigned int temp_allocated:1; /* never used for code gen */

    tcg_target_long val;
    struct TCGTemp *mem_base;
    intptr_t mem_offset;
    const char *name;
} TCGTemp;

typedef struct TCGContext TCGContext;

typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this implies a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG 4
#define SYNC_ARG 1
typedef uint16_t TCGLifeData;
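
/* Worked example (illustrative): with at most 2 output operands needing a
 * sync flag and up to 14 operands needing a dead flag, 2 + 14 = 16 flags
 * fit exactly in the 16-bit TCGLifeData value; the consumers of DEAD_ARG
 * and SYNC_ARG live in tcg.c.  */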

/* The layout here is designed to avoid crossing of a 32-bit boundary.
   If we do so, gcc adds padding, expanding the size to 12.  */
typedef struct TCGOp {
    TCGOpcode opc : 8;   /*  8 */

    /* Index of the prev/next op, or 0 for the end of the list.  */
    unsigned prev : 10;  /* 18 */
    unsigned next : 10;  /* 28 */

    /* The number of out and in parameter for a call.  */
    unsigned calli : 4;  /* 32 */
    unsigned callo : 2;  /* 34 */

    /* Index of the arguments for this op, or 0 for zero-operand ops.  */
    unsigned args : 14;  /* 48 */

    /* Lifetime data of the operands.  */
    unsigned life : 16;  /* 64 */
} TCGOp;

/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 10));
QEMU_BUILD_BUG_ON(OPPARAM_BUF_SIZE > (1 << 14));

/* Make sure that we don't overflow 64 bits without noticing.  */
QEMU_BUILD_BUG_ON(sizeof(TCGOp) > 8);
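
/* Worked example (illustrative): the bitfield widths above sum to
 * 8 + 10 + 10 + 4 + 2 + 14 + 16 = 64 bits, and no field straddles a
 * 32-bit boundary (8 + 10 + 10 + 4 = 32 exactly), so the struct packs
 * into a single 8-byte word as the build-time assertions check.  */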

struct TCGContext {
    uint8_t *pool_cur, *pool_end;
    TCGPool *pool_first, *pool_current, *pool_first_large;
    int nb_labels;
    int nb_globals;
    int nb_temps;
    int nb_indirects;

    /* goto_tb support */
    tcg_insn_unit *code_buf;
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */

    TCGRegSet reserved_regs;
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
    TCGTemp *frame_temp;

    tcg_insn_unit *code_ptr;

    GHashTable *helpers;

#ifdef CONFIG_PROFILER
    /* profiling info */
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int64_t temp_count;
    int temp_count_max;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
    int64_t search_out_len;
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
    int64_t opt_time;
    int64_t restore_count;
    int64_t restore_time;
#endif

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
    int goto_tb_issue_mask;
#endif

    int gen_next_op_idx;
    int gen_next_parm_idx;

    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
    int code_gen_max_blocks;
    void *code_gen_prologue;
    void *code_gen_buffer;
    size_t code_gen_buffer_size;
    void *code_gen_ptr;

    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

    TBContext tb_ctx;

    /* Track which vCPU triggers events */
    CPUState *cpu; /* *_trans */
    TCGv_env tcg_env; /* *_exec */

    /* The TCGBackendData structure is private to tcg-target.inc.c.  */
    struct TCGBackendData *be;

    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

    TCGOp gen_op_buf[OPC_BUF_SIZE];
    TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];

    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};

extern TCGContext tcg_ctx;
extern bool parallel_cpus;

static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
{
    int op_argi = tcg_ctx.gen_op_buf[op_idx].args;
    tcg_ctx.gen_opparam_buf[op_argi + arg] = v;
}

/* The number of opcodes emitted so far.  */
static inline int tcg_op_buf_count(void)
{
    return tcg_ctx.gen_next_op_idx;
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    return tcg_op_buf_count() >= OPC_MAX_SIZE;
}

/* pool based memory allocation */

/* tb_lock must be held for tcg_malloc_internal. */
void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);

void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

/* Called with tb_lock held.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *ptr, *ptr_end;
    size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        return tcg_malloc_internal(&tcg_ctx, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}
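
/* Example (illustrative): tcg_malloc rounds the request up to the host
 * 'long' size and bumps the pool pointer, so on a 64-bit host a request
 * of 13 bytes is rounded to 16 and served from the current pool chunk;
 * only when the chunk is exhausted does it fall back to
 * tcg_malloc_internal(), which obtains a fresh pool (see
 * TCG_POOL_CHUNK_SIZE above).  */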

void tcg_context_init(TCGContext *s);
void tcg_prologue_init(TCGContext *s);
void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, TranslationBlock *tb);

void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

int tcg_global_mem_new_internal(TCGType, TCGv_ptr, intptr_t, const char *);

TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name);
TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name);

TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);

void tcg_temp_free_i32(TCGv_i32 arg);
void tcg_temp_free_i64(TCGv_i64 arg);

static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return MAKE_TCGV_I32(idx);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
    return tcg_temp_new_internal_i32(0);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    return tcg_temp_new_internal_i32(1);
}

static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return MAKE_TCGV_I64(idx);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
    return tcg_temp_new_internal_i64(0);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    return tcg_temp_new_internal_i64(1);
}

#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif
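
/* Example (illustrative) use of the leak check around a front-end
 * translation step, assuming a caller that can report the failure:
 *
 *     tcg_clear_temp_count();
 *     ... emit TCG ops for one guest instruction ...
 *     if (tcg_check_temp_count()) {
 *         // a TCGv temporary was created but never freed
 *     }
 */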

void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);

#define TCG_CT_ALIAS 0x80
#define TCG_CT_IALIAS 0x40
#define TCG_CT_NEWREG 0x20 /* output requires a new register */
#define TCG_CT_REG 0x01
#define TCG_CT_CONST 0x02 /* any constant of register size */

typedef struct TCGArgConstraint {
    uint16_t ct;
    uint8_t alias_index;
    union {
        TCGRegSet regs;
    } u;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

/* Bits for TCGOpDef->flags, 8 bits available.  */
enum {
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x01,
    /* Instruction clobbers call registers and potentially updates globals.  */
    TCG_OPF_CALL_CLOBBER = 0x02,
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
    TCG_OPF_SIDE_EFFECTS = 0x04,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x08,
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemented by the host.  */
    TCG_OPF_NOT_PRESENT  = 0x10,
};

typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
    int *sorted_args;
#if defined(CONFIG_DEBUG_TCG)
    int used;
#endif
} TCGOpDef;

extern TCGOpDef tcg_op_defs[];
extern const size_t tcg_op_defs_max;

typedef struct TCGTargetOpDef {
    TCGOpcode op;
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;

#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

#if UINTPTR_MAX == UINT32_MAX
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n))

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
    TCGV_NAT_TO_PTR(tcg_global_reg_new_i32((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
#else
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n))

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
    TCGV_NAT_TO_PTR(tcg_global_reg_new_i64((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
#endif

void tcg_gen_callN(TCGContext *s, void *func,
                   TCGArg ret, int nargs, TCGArg *args);

void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);

void tcg_optimize(TCGContext *s);

/* only used for debugging purposes */
void tcg_dump_ops(TCGContext *s);

TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);

TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

static inline TCGLabel *arg_label(TCGArg i)
{
    return (TCGLabel *)(uintptr_t)i;
}
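
/* Example (illustrative): a label round-trips through the argument stream
 * unchanged, i.e. for any TCGLabel *l created by gen_new_label(),
 * arg_label(label_arg(l)) == l.  */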

/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Produce a pc-relative difference, from the current code_ptr
 * to the destination address.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
{
    return tcg_ptr_byte_diff(target, s->code_ptr);
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Compute the current code size within the translation block.
 * This is used to fill in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;

/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Extract the memory operation from the combined value.
 */
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
{
    return oi >> 4;
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Extract the mmu index from the combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi & 15;
}
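
/* Example (illustrative): encoding and decoding a combined op/idx value:
 *
 *     TCGMemOpIdx oi = make_memop_idx(MO_TEUL, 1);
 *     TCGMemOp op    = get_memop(oi);    // == MO_TEUL
 *     unsigned idx   = get_mmuidx(oi);   // == 1
 */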

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK 3
#define TB_EXIT_IDX0 0
#define TB_EXIT_IDX1 1
#define TB_EXIT_ICOUNT_EXPIRED 2
#define TB_EXIT_REQUESTED 3
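
/* Example (illustrative): decoding the value returned by tcg_qemu_tb_exec:
 *
 *     uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
 *     TranslationBlock *last_tb = (void *)(ret & ~TB_EXIT_MASK);
 *     int tb_exit = ret & TB_EXIT_MASK;   // one of the TB_EXIT_* values
 */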

#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr)
#endif

void tcg_register_jit(void *buf, size_t buf_size);

/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);

/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);

void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr);

uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);

/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
#endif

uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);

#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     TCGMemOpIdx oi, uintptr_t retaddr);

#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif
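
/* Example (illustrative): GEN_ATOMIC_HELPER_ALL(fetch_add) below expands to
 * a family of prototypes, one per access size and endianness listed in the
 * macro, such as
 *
 *     uint32_t helper_atomic_fetch_addl_le_mmu(CPUArchState *env,
 *                                              target_ulong addr, uint32_t val,
 *                                              TCGMemOpIdx oi, uintptr_t retaddr);
 */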

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
#endif /* CONFIG_SOFTMMU */

#ifdef CONFIG_ATOMIC128
#include "qemu/int128.h"

/* These aren't really "proper" helpers because TCG cannot manage Int128.
   However, use the same format as the others, for use by the backends.  */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);

Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);

#endif /* CONFIG_ATOMIC128 */

#endif /* TCG_H */