/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-ldst.h"
/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
#define SZP  ((int)sizeof(void *))

/* Shorthand for size of a register.  */
#define SZR  (TCG_TARGET_REG_BITS / 8)

#define TCG_CT_CONST_S16  0x100
#define TCG_CT_CONST_U16  0x200
#define TCG_CT_CONST_S32  0x400
#define TCG_CT_CONST_U32  0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000

static tcg_insn_unit *tb_ret_addr;

static bool have_isa_2_06;
#define HAVE_ISA_2_06  have_isa_2_06
#define HAVE_ISEL      have_isa_2_06
#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG 30
#else
#define TCG_GUEST_BASE_REG 0
#endif
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    /* ... "r0" through "r31" ... */
};

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14,  /* call saved registers */
    /* ... */
    TCG_REG_R12,  /* call clobbered, non-arguments */
    /* ... */
    TCG_REG_R10,  /* call clobbered, arguments */
    /* ... */
};

static const int tcg_target_call_iarg_regs[] = {
    /* ... R3 through R10 ... */
};

static const int tcg_target_call_oarg_regs[] = {
    /* ... */
};

static const int tcg_target_callee_save_regs[] = {
    /* ... */
    TCG_REG_R27,  /* currently used for the global env */
    /* ... */
};
static inline bool in_range_b(tcg_target_long target)
{
    return target == sextract64(target, 0, 26);
}

static uint32_t reloc_pc24_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    assert(in_range_b(disp));
    return disp & 0x3fffffc;
}

static void reloc_pc24(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = (*pc & ~0x3fffffc) | reloc_pc24_val(pc, target);
}

static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    assert(disp == (int16_t) disp);
    return disp & 0xfffc;
}

static void reloc_pc14(tcg_insn_unit *pc, tcg_insn_unit *target)
{
    *pc = (*pc & ~0xfffc) | reloc_pc14_val(pc, target);
}
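/* A worked example of the relocation encoding (illustrative, not part of
   the original source): the I-form branch keeps a signed 24-bit word
   displacement in bits 6..29, hence the mask 0x3fffffc.  A branch 8 bytes
   forward stores 0x0000008 in that field; 4 bytes backward stores
   -4 & 0x3fffffc = 0x3fffffc.  The B-form conditional branch plays the
   same game with its 16-bit displacement field, masked to 0xfffc.  */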
static inline void tcg_out_b_noaddr(TCGContext *s, int insn)
{
    unsigned retrans = *s->code_ptr & 0x3fffffc;
    tcg_out32(s, insn | retrans);
}

static inline void tcg_out_bc_noaddr(TCGContext *s, int insn)
{
    unsigned retrans = *s->code_ptr & 0xfffc;
    tcg_out32(s, insn | retrans);
}

static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_insn_unit *target = (tcg_insn_unit *)value;

    switch (type) {
    case R_PPC_REL14:
        reloc_pc14(code_ptr, target);
        break;
    case R_PPC_REL24:
        reloc_pc24(code_ptr, target);
        break;
    default:
        tcg_abort();
    }
}
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'A': case 'B': case 'C': case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L':                   /* qemu_ld constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
#endif
        break;
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_MONE;
        break;
    case 'T':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'U':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    /* The only 32-bit constraint we use aside from
       TCG_CT_CONST is TCG_CT_CONST_S16.  */
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    }
    return 0;
}
#define OPCD(opc) ((opc)<<26)
#define XO19(opc) (OPCD(19)|((opc)<<1))
#define MD30(opc) (OPCD(30)|((opc)<<2))
#define MDS30(opc) (OPCD(30)|((opc)<<1))
#define XO31(opc) (OPCD(31)|((opc)<<1))
#define XO58(opc) (OPCD(58)|(opc))
#define XO62(opc) (OPCD(62)|(opc))
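/* Illustration: XO31(266) builds the X-form ADD opcode,
   (31 << 26) | (266 << 1) = 0x7c000214, so that
   ADD | TAB(3, 4, 5) == 0x7c642a14 == "add r3,r4,r5".  */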
#define B      OPCD( 18)
#define BC     OPCD( 16)
#define LBZ    OPCD( 34)
#define LHZ    OPCD( 40)
#define LHA    OPCD( 42)
#define LWZ    OPCD( 32)
#define STB    OPCD( 38)
#define STH    OPCD( 44)
#define STW    OPCD( 36)

#define STD    XO62(  0)
#define STDU   XO62(  1)
#define STDX   XO31(149)

#define LD     XO58(  0)
#define LDX    XO31( 21)
#define LWA    XO58(  2)
#define LWAX   XO31(341)
#define ADDIC  OPCD( 12)
#define ADDI   OPCD( 14)
#define ADDIS  OPCD( 15)
#define ORI    OPCD( 24)
#define ORIS   OPCD( 25)
#define XORI   OPCD( 26)
#define XORIS  OPCD( 27)
#define ANDI   OPCD( 28)
#define ANDIS  OPCD( 29)
#define MULLI  OPCD(  7)
#define CMPLI  OPCD( 10)
#define CMPI   OPCD( 11)
#define SUBFIC OPCD(  8)

#define LWZU   OPCD( 33)
#define STWU   OPCD( 37)

#define RLWIMI OPCD( 20)
#define RLWINM OPCD( 21)
#define RLWNM  OPCD( 23)

#define RLDICL MD30(  0)
#define RLDICR MD30(  1)
#define RLDIMI MD30(  3)
#define RLDCL  MDS30( 8)

#define BCLR   XO19( 16)
#define BCCTR  XO19(528)
#define CRAND  XO19(257)
#define CRANDC XO19(129)
#define CRNAND XO19(225)
#define CROR   XO19(449)
#define CRNOR  XO19( 33)
#define EXTSB  XO31(954)
#define EXTSH  XO31(922)
#define EXTSW  XO31(986)
#define ADD    XO31(266)
#define ADDE   XO31(138)
#define ADDME  XO31(234)
#define ADDZE  XO31(202)
#define ADDC   XO31( 10)
#define AND    XO31( 28)
#define SUBF   XO31( 40)
#define SUBFC  XO31(  8)
#define SUBFE  XO31(136)
#define SUBFME XO31(232)
#define SUBFZE XO31(200)
#define OR     XO31(444)
#define XOR    XO31(316)
#define MULLW  XO31(235)
#define MULHWU XO31( 11)
#define DIVW   XO31(491)
#define DIVWU  XO31(459)
#define CMP    XO31(  0)
#define CMPL   XO31( 32)
#define LHBRX  XO31(790)
#define LWBRX  XO31(534)
#define LDBRX  XO31(532)
#define STHBRX XO31(918)
#define STWBRX XO31(662)
#define STDBRX XO31(660)
#define MFSPR  XO31(339)
#define MTSPR  XO31(467)
#define SRAWI  XO31(824)
#define NEG    XO31(104)
#define MFCR   XO31( 19)
#define MFOCRF (MFCR | (1u << 20))
#define NOR    XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
#define ANDC   XO31( 60)
#define ORC    XO31(412)
#define EQV    XO31(284)
#define NAND   XO31(476)
#define ISEL   XO31( 15)

#define MULLD  XO31(233)
#define MULHD  XO31( 73)
#define MULHDU XO31(  9)
#define DIVD   XO31(489)
#define DIVDU  XO31(457)

#define LBZX   XO31( 87)
#define LHZX   XO31(279)
#define LHAX   XO31(343)
#define LWZX   XO31( 23)
#define STBX   XO31(215)
#define STHX   XO31(407)
#define STWX   XO31(151)
#define SPR(a, b) ((((a)<<5)|(b))<<11)
#define LR     SPR(8, 0)
#define CTR    SPR(9, 0)

#define SLW    XO31( 24)
#define SRW    XO31(536)
#define SRAW   XO31(792)

#define SLD    XO31( 27)
#define SRD    XO31(539)
#define SRAD   XO31(794)
#define SRADI  XO31(413<<1)

#define LK     1

#define TW     XO31(  4)
#define TRAP   (TW | TO(31))
#define RT(r)  ((r)<<21)
#define RS(r)  ((r)<<21)
#define RA(r)  ((r)<<16)
#define RB(r)  ((r)<<11)
#define TO(t)  ((t)<<21)
#define SH(s)  ((s)<<11)
#define MB(b)  ((b)<<6)
#define ME(e)  ((e)<<1)
#define BO(o)  ((o)<<21)
#define MB64(b) ((b)<<5)
#define FXM(b) (1 << (19 - (b)))

#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))

#define BF(n)    ((n)<<23)
#define BI(n, c) (((c)+((n)*4))<<16)
#define BT(n, c) (((c)+((n)*4))<<21)
#define BA(n, c) (((c)+((n)*4))<<16)
#define BB(n, c) (((c)+((n)*4))<<11)
#define BC_(n, c) (((c)+((n)*4))<<6)

#define BO_COND_TRUE  BO(12)
#define BO_COND_FALSE BO( 4)
#define BO_ALWAYS     BO(20)
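/* BO field semantics, per the Power ISA (for reference): 12 (0b01100)
   branches when the tested CR bit is 1, 4 (0b00100) when it is 0, and
   20 (0b10100) branches unconditionally, ignoring both CR and CTR.  */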
enum {
    CR_LT,
    CR_GT,
    CR_EQ,
    CR_SO
};

static const uint32_t tcg_to_bc[] = {
    [TCG_COND_EQ]  = BC | BI(7, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE]  = BC | BI(7, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT]  = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE]  = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE]  = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT]  = BC | BI(7, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
};

/* The low bit here is set if the RA and RB fields must be inverted.  */
static const uint32_t tcg_to_isel[] = {
    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
};
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out32(s, OR | SAB(arg, ret, arg));
    }
}
static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb)
{
    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
}

static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
}

static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}

static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}

static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}
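/* These match the standard extended mnemonics: e.g.
   tcg_out_shli64(s, dst, src, 8) emits RLDICR dst,src,8,55 (rotate left 8,
   keep bits 0..55, clearing the low byte), which is "sldi dst,src,8";
   tcg_out_shri64(s, dst, src, 8) emits RLDICL dst,src,56,8, i.e. "srdi".  */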
static void tcg_out_movi32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (arg == (int16_t) arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
    } else {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    if (type == TCG_TYPE_I32 || arg == (int32_t)arg) {
        tcg_out_movi32(s, ret, arg);
    } else if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    } else {
        int32_t high = arg >> 32;
        tcg_out_movi32(s, ret, high);
        if (high) {
            tcg_out_shli64(s, ret, ret, 32);
        }
        if (arg & 0xffff0000) {
            tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        }
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}
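/* For example, arg = 0x123456789abcdef0 assembles in five instructions:
   addis ret,0,0x1234; ori ret,ret,0x5678; sldi ret,ret,32;
   oris ret,ret,0x9abc; ori ret,ret,0xdef0.  */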
static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t lsb, test;

    /* Accept a bit pattern like:
           0....01....1
           1....10....0
           0..01..10..0
       Keep track of the transitions.  */
    if (c == 0 || c == -1) {
        return false;
    }
    test = c;
    lsb = test & -test;
    test += lsb;
    if (test & (test - 1)) {
        return false;
    }

    *me = clz32(lsb);
    *mb = test ? clz32(test & -test) + 1 : 0;
    return true;
}

static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }
    lsb = c & -c;

    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = clz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}
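/* For example, mask_operand(0x00ffff00, &mb, &me) yields mb = 8 and
   me = 23 (IBM bit numbering, MSB = bit 0), so "x & 0x00ffff00" becomes a
   single RLWINM with sh = 0; mask64_operand(0xffffffff00000000, ...)
   yields mb = 0, me = 31, a single RLDICR.  */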
static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}
static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}

static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}

static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}
static void tcg_out_b(TCGContext *s, int mask, tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, target);
    if (in_range_b(disp)) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
    }
}
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    bool is_store = false;
    TCGReg rs = TCG_REG_R2;

    switch (opi) {
    case LD: case LWA:
        align = 3;
        /* FALLTHRU */
    default:
        if (rt != TCG_REG_R0) {
            rs = rt;
        }
        break;
    case STD:
        align = 3;
        /* FALLTHRU */
    case STB: case STH: case STW:
        is_store = true;
        break;
    }

    /* For unaligned, or very large offsets, use the indexed form.  */
    if (offset & align || offset != (int32_t)offset) {
        tcg_debug_assert(rs != base && (!is_store || rs != rt));
        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
        tcg_out32(s, opx | TAB(rt, base, rs));
        return;
    }

    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;

    if (l1 < 0 && orig >= 0) {
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt, base, l0));
    }
}
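/* For example, a load at base + 0x12345678 is split as l1 = 0x1234,
   l0 = 0x5678: "addis r2,base,0x1234" followed by the D-form load with
   displacement 0x5678.  The 0x4000 adjustment covers offsets whose low
   half is negative as an int16_t, which would otherwise borrow from l1.  */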
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    int opi, opx;

    if (type == TCG_TYPE_I32) {
        opi = LWZ, opx = LWZX;
    } else {
        opi = LD, opx = LDX;
    }
    tcg_out_mem_long(s, opi, opx, ret, arg1, arg2);
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    int opi, opx;

    if (type == TCG_TYPE_I32) {
        opi = STW, opx = STWX;
    } else {
        opi = STD, opx = STDX;
    }
    tcg_out_mem_long(s, opi, opx, arg, arg1, arg2);
}
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int cr, TCGType type)
{
    int imm;
    uint32_t op;

    /* Simplify the comparisons below wrt CMPI.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (int32_t)arg2;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI, imm = 1;
                break;
            } else if ((uint16_t) arg2 == arg2) {
                op = CMPLI, imm = 1;
                break;
            }
        }
        op = CMPL, imm = 0;
        break;
    case TCG_COND_LT: case TCG_COND_GE:
    case TCG_COND_LE: case TCG_COND_GT:
        if (const_arg2 && (int16_t) arg2 == arg2) {
            op = CMPI, imm = 1;
            break;
        }
        op = CMP, imm = 0;
        break;
    case TCG_COND_LTU: case TCG_COND_GEU:
    case TCG_COND_LEU: case TCG_COND_GTU:
        if (const_arg2 && (uint16_t) arg2 == arg2) {
            op = CMPLI, imm = 1;
            break;
        }
        op = CMPL, imm = 0;
        break;
    default:
        tcg_abort();
    }

    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);

    if (imm) {
        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
    } else {
        if (const_arg2) {
            tcg_out_movi(s, type, TCG_REG_R0, arg2);
            arg2 = TCG_REG_R0;
        }
        tcg_out32(s, op | RA(arg1) | RB(arg2));
    }
}
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src)
{
    tcg_out32(s, (type == TCG_TYPE_I64 ? CNTLZD : CNTLZW) | RS(src) | RA(dst));
    tcg_out_shri64(s, dst, dst, type == TCG_TYPE_I64 ? 6 : 5);
}

static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
{
    /* X != 0 implies X + -1 generates a carry.  Extra addition
       trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.  */
    if (dst != src) {
        tcg_out32(s, ADDIC | TAI(dst, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, dst, src));
    } else {
        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
    }
}
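/* A concrete trace of the carry trick: for X = 5, ADDIC computes 4 with
   CA = 1, and SUBFE yields ~4 + 5 + 1 = 1; for X = 0, ADDIC computes -1
   with CA = 0, and SUBFE yields ~(-1) + 0 + 0 = 0.  */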
static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
                                  bool const_arg2)
{
    if (const_arg2) {
        if ((uint32_t)arg2 == arg2) {
            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
        }
    } else {
        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
    }
    return TCG_REG_R0;
}
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
                            int const_arg2)
{
    int crop, sh;

    /* Ignore high bits of a potential constant arg2.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (uint32_t)arg2;
    }

    /* Handle common and trivial cases before handling anything else.  */
    if (arg2 == 0) {
        switch (cond) {
        case TCG_COND_EQ:
            tcg_out_setcond_eq0(s, type, arg0, arg1);
            return;
        case TCG_COND_NE:
            if (type == TCG_TYPE_I32) {
                tcg_out_ext32u(s, TCG_REG_R0, arg1);
                arg1 = TCG_REG_R0;
            }
            tcg_out_setcond_ne0(s, arg0, arg1);
            return;
        case TCG_COND_GE:
            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
            arg1 = arg0;
            /* FALLTHRU */
        case TCG_COND_LT:
            /* Extract the sign bit.  */
            tcg_out_rld(s, RLDICL, arg0, arg1,
                        type == TCG_TYPE_I64 ? 1 : 33, 63);
            return;
        default:
            break;
        }
    }

    /* If we have ISEL, we can implement everything with 3 or 4 insns.
       All other cases below are also at least 3 insns, so speed up the
       code generator by not considering them and always using ISEL.  */
    if (HAVE_ISEL) {
        int isel, tab;

        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);

        isel = tcg_to_isel[cond];

        tcg_out_movi(s, type, arg0, 1);
        if (isel & 1) {
            /* arg0 = (bc ? 0 : 1) */
            tab = TAB(arg0, 0, arg0);
            isel &= ~1;
        } else {
            /* arg0 = (bc ? 1 : 0) */
            tcg_out_movi(s, type, TCG_REG_R0, 0);
            tab = TAB(arg0, arg0, TCG_REG_R0);
        }
        tcg_out32(s, isel | tab);
        return;
    }

    switch (cond) {
    case TCG_COND_EQ:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        tcg_out_setcond_eq0(s, type, arg0, arg1);
        return;

    case TCG_COND_NE:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        /* Discard the high bits only once, rather than both inputs.  */
        if (type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_R0, arg1);
            arg1 = TCG_REG_R0;
        }
        tcg_out_setcond_ne0(s, arg0, arg1);
        return;

    case TCG_COND_GT: case TCG_COND_GTU:
        sh = 30;
        crop = 0;
        goto crtest;

    case TCG_COND_LT: case TCG_COND_LTU:
        sh = 29;
        crop = 0;
        goto crtest;

    case TCG_COND_GE: case TCG_COND_GEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
        goto crtest;

    case TCG_COND_LE: case TCG_COND_LEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
    crtest:
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
        if (crop) {
            tcg_out32(s, crop);
        }
        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
        break;

    default:
        tcg_abort();
    }
}
static void tcg_out_bc(TCGContext *s, int bc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out32(s, bc | reloc_pc14_val(s->code_ptr, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, label_index, 0);
        tcg_out_bc_noaddr(s, bc);
    }
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index, TCGType type)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
    tcg_out_bc(s, tcg_to_bc[cond], label_index);
}
static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
                            TCGArg v2, bool const_c2)
{
    /* If for some reason both inputs are zero, don't produce bad code.  */
    if (v1 == 0 && v2 == 0) {
        tcg_out_movi(s, type, dest, 0);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);

    if (HAVE_ISEL) {
        int isel = tcg_to_isel[cond];

        /* Swap the V operands if the operation indicates inversion.  */
        if (isel & 1) {
            int t = v1;
            v1 = v2;
            v2 = t;
            isel &= ~1;
        }
        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand.  */
        if (v2 == 0) {
            tcg_out_movi(s, type, TCG_REG_R0, 0);
        }
        tcg_out32(s, isel | TAB(dest, v1, v2));
    } else {
        if (dest == v2) {
            cond = tcg_invert_cond(cond);
            v2 = v1;
        } else if (dest != v1) {
            if (v1 == 0) {
                tcg_out_movi(s, type, dest, 0);
            } else {
                tcg_out_mov(s, type, dest, v1);
            }
        }
        /* Branch forward over one insn */
        tcg_out32(s, tcg_to_bc[cond] | 8);
        if (v2 == 0) {
            tcg_out_movi(s, type, dest, 0);
        } else {
            tcg_out_mov(s, type, dest, v2);
        }
    }
}
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
    TCGContext s;

    s.code_buf = s.code_ptr = (tcg_insn_unit *)jmp_addr;
    tcg_out_b(&s, 0, (tcg_insn_unit *)addr);
    flush_icache_range(jmp_addr, jmp_addr + tcg_current_code_size(&s));
}
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
{
#ifdef __APPLE__
    tcg_out_b(s, LK, target);
#else
    /* Look through the descriptor.  If the branch is in range and the
       toc value fits in 32 bits, we need not spend much effort here.  */
    void *tgt = ((void **)target)[0];
    uintptr_t toc = ((uintptr_t *)target)[1];
    intptr_t diff = tcg_pcrel_diff(s, tgt);

    if (in_range_b(diff) && toc == (uint32_t)toc) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, toc);
        tcg_out_b(s, LK, tgt);
    } else {
        /* Fold the low bits of the constant into the addresses below.  */
        intptr_t arg = (intptr_t)target;
        int ofs = (int16_t)arg;

        if (ofs + 8 < 0x8000) {
            arg -= ofs;
        } else {
            ofs = 0;
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, arg);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R2, ofs);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_R2, ofs + SZP);
        tcg_out32(s, BCCTR | BO_ALWAYS | LK);
    }
#endif
}
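/* Background for the descriptor path above: on descriptor-based ABIs a
   function pointer addresses a descriptor rather than code.  A sketch of
   the layout assumed here:

       word 0   entry point         (loaded into CTR above)
       word 1   TOC base for R2     (loaded at ofs + SZP)
       word 2   environment pointer (unused by this backend)           */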
static const uint32_t qemu_ldx_opc[16] = {
    [MO_UB] = LBZX,
    [MO_UW] = LHZX,
    [MO_UL] = LWZX,
    [MO_Q]  = LDX,
    [MO_SW] = LHAX,
    [MO_SL] = LWAX,
    [MO_BSWAP | MO_UB] = LBZX,
    [MO_BSWAP | MO_UW] = LHBRX,
    [MO_BSWAP | MO_UL] = LWBRX,
    [MO_BSWAP | MO_Q]  = LDBRX,
};

static const uint32_t qemu_stx_opc[16] = {
    [MO_UB] = STBX,
    [MO_UW] = STHX,
    [MO_UL] = STWX,
    [MO_Q]  = STDX,
    [MO_BSWAP | MO_UB] = STBX,
    [MO_BSWAP | MO_UW] = STHBRX,
    [MO_BSWAP | MO_UL] = STWBRX,
    [MO_BSWAP | MO_Q]  = STDBRX,
};

static const uint32_t qemu_exts_opc[4] = {
    EXTSB, EXTSH, EXTSW, 0
};
#if defined (CONFIG_SOFTMMU)
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
 *                                 int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
 *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
/* Perform the TLB load and compare.  Places the result of the comparison
   in CR7, loads the addend of the TLB into R3, and returns the register
   containing the guest address (zero-extended into R4).  Clobbers R0 and R2. */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits, TCGReg addr_reg,
                               int mem_index, bool is_read)
{
    int cmp_off
        = (is_read
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    TCGReg base = TCG_AREG0;

    /* Extract the page index, shifted into place for tlb index.  */
    if (TARGET_LONG_BITS == 32) {
        /* Zero-extend the address into a place helpful for further use.  */
        tcg_out_ext32u(s, TCG_REG_R4, addr_reg);
        addr_reg = TCG_REG_R4;
    } else {
        tcg_out_rld(s, RLDICL, TCG_REG_R3, addr_reg,
                    64 - TARGET_PAGE_BITS, 64 - CPU_TLB_BITS);
    }

    /* Compensate for very large offsets.  */
    if (add_off >= 0x8000) {
        /* Most target env are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);
        tcg_out32(s, ADDI | TAI(TCG_REG_R2, base, 0x7ff0));
        base = TCG_REG_R2;
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* Extraction and shifting, part 2.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_rlw(s, RLWINM, TCG_REG_R3, addr_reg,
                    32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS),
                    31 - CPU_TLB_ENTRY_BITS);
    } else {
        tcg_out_shli64(s, TCG_REG_R3, TCG_REG_R3, CPU_TLB_ENTRY_BITS);
    }

    tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, base));

    /* Load the tlb comparator.  */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_R2, TCG_REG_R3, cmp_off);

    /* Load the TLB addend for use on the fast path.  Do this asap
       to minimize any load use delay.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, add_off);

    /* Clear the non-page, non-alignment bits from the address.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addr_reg, 0,
                    (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
    } else if (!s_bits) {
        tcg_out_rld(s, RLDICR, TCG_REG_R0, addr_reg, 0, 63 - TARGET_PAGE_BITS);
    } else {
        tcg_out_rld(s, RLDICL, TCG_REG_R0, addr_reg,
                    64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
        tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
    }

    tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_R2, 0, 7, TCG_TYPE_TL);

    return addr_reg;
}
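/* A worked instance, assuming typical values TARGET_PAGE_BITS = 12 and
   CPU_TLB_BITS = 8 (both target-dependent): the 64-bit path computes
   R3 = ((addr >> 12) & 0xff) << CPU_TLB_ENTRY_BITS, the byte offset of
   the CPUTLBEntry, and then adds env so that R3 points into
   env->tlb_table[mem_index][].  */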
/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                int data_reg, int addr_reg, int mem_index,
                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = data_reg;
    label->addrlo_reg = addr_reg;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGMemOp opc = lb->opc;

    reloc_pc14(lb->label_ptr[0], s->code_ptr);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0);

    /* If the address needed to be zero-extended, we'll have already
       placed it in R4.  The only remaining case is 64-bit guest.  */
    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, lb->addrlo_reg);

    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, lb->mem_index);
    tcg_out32(s, MFSPR | RT(TCG_REG_R6) | LR);

    tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]);

    if (opc & MO_SIGN) {
        uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
        tcg_out32(s, insn | RA(lb->datalo_reg) | RS(TCG_REG_R3));
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, lb->datalo_reg, TCG_REG_R3);
    }

    tcg_out_b(s, 0, lb->raddr);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGMemOp opc = lb->opc;
    TCGMemOp s_bits = opc & MO_SIZE;

    reloc_pc14(lb->label_ptr[0], s->code_ptr);

    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, TCG_AREG0);

    /* If the address needed to be zero-extended, we'll have already
       placed it in R4.  The only remaining case is 64-bit guest.  */
    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, lb->addrlo_reg);

    tcg_out_rld(s, RLDICL, TCG_REG_R5, lb->datalo_reg,
                0, 64 - (1 << (3 + s_bits)));
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R6, lb->mem_index);
    tcg_out32(s, MFSPR | RT(TCG_REG_R7) | LR);

    tcg_out_call(s, qemu_st_helpers[opc]);

    tcg_out_b(s, 0, lb->raddr);
}
#endif /* SOFTMMU */
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOp opc, int mem_index)
{
    TCGReg rbase;
    uint32_t insn;
    TCGMemOp s_bits = opc & MO_SIZE;
#ifdef CONFIG_SOFTMMU
    tcg_insn_unit *label_ptr;
#endif

#ifdef CONFIG_SOFTMMU
    addr_reg = tcg_out_tlb_read(s, s_bits, addr_reg, mem_index, true);

    /* Emit a conditional branch-link to the slow path; its destination
       is patched in later.  */
    label_ptr = s->code_ptr;
    tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else  /* !CONFIG_SOFTMMU */
    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_R2, addr_reg);
        addr_reg = TCG_REG_R2;
    }
#endif

    insn = qemu_ldx_opc[opc];
    if (!HAVE_ISA_2_06 && insn == LDBRX) {
        tcg_out32(s, ADDI | TAI(TCG_REG_R0, addr_reg, 4));
        tcg_out32(s, LWBRX | TAB(data_reg, rbase, addr_reg));
        tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
        tcg_out_rld(s, RLDIMI, data_reg, TCG_REG_R0, 32, 0);
    } else if (insn) {
        tcg_out32(s, insn | TAB(data_reg, rbase, addr_reg));
    } else {
        insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
        tcg_out32(s, insn | TAB(data_reg, rbase, addr_reg));
        insn = qemu_exts_opc[s_bits];
        tcg_out32(s, insn | RA(data_reg) | RS(data_reg));
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, true, opc, data_reg, addr_reg, mem_index,
                        s->code_ptr, label_ptr);
#endif
}
static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            TCGMemOp opc, int mem_index)
{
    TCGReg rbase;
    uint32_t insn;
#ifdef CONFIG_SOFTMMU
    tcg_insn_unit *label_ptr;
#endif

#ifdef CONFIG_SOFTMMU
    addr_reg = tcg_out_tlb_read(s, opc & MO_SIZE, addr_reg, mem_index, false);

    /* Emit a conditional branch-link to the slow path; its destination
       is patched in later.  */
    label_ptr = s->code_ptr;
    tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else  /* !CONFIG_SOFTMMU */
    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_R2, addr_reg);
        addr_reg = TCG_REG_R2;
    }
#endif

    insn = qemu_stx_opc[opc];
    if (!HAVE_ISA_2_06 && insn == STDBRX) {
        tcg_out32(s, STWBRX | SAB(data_reg, rbase, addr_reg));
        tcg_out32(s, ADDI | TAI(TCG_REG_R2, addr_reg, 4));
        tcg_out_shri64(s, TCG_REG_R0, data_reg, 32);
        tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_R2));
    } else {
        tcg_out32(s, insn | SAB(data_reg, rbase, addr_reg));
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, false, opc, data_reg, addr_reg, mem_index,
                        s->code_ptr, label_ptr);
#endif
}
/* Parameters for function call generation, used in tcg.c.  */
#define TCG_TARGET_STACK_ALIGN 16
#define TCG_TARGET_EXTEND_ARGS 1

/* ABI-dependent link area and LR save slot.  */
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (1 * SZR)
# define TCG_TARGET_CALL_STACK_OFFSET  (LINK_AREA_SIZE + 8 * SZR)

#define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)

#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
                     + TCG_STATIC_CALL_ARGS_SIZE    \
                     + CPU_TEMP_BUF_SIZE            \
                     + REG_SAVE_SIZE                \
                     + TCG_TARGET_STACK_ALIGN - 1)  \
                    & -TCG_TARGET_STACK_ALIGN)

#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
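/* The resulting frame layout, low addresses first (a sketch; the exact
   sizes depend on the ABI macros above):

       SP + 0                      back chain / link area
       SP + CALL_STACK_OFFSET      outgoing TCG call arguments
       ...                         CPU temp buffer (tcg_set_frame below)
       SP + REG_SAVE_BOT           callee-saved registers
       SP + FRAME_SIZE             caller frame; LR saved at +LR_OFFSET  */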
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
                  CPU_TEMP_BUF_SIZE);

#ifdef _CALL_AIX
    {
        void **desc = (void **)s->code_ptr;
        desc[0] = desc + 2;                   /* entry point */
        desc[1] = 0;                          /* environment pointer */
        s->code_ptr = (void *)(desc + 2);     /* skip over descriptor */
    }
#endif

    /* Prologue */
    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
    tcg_out32(s, STDU | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
    tcg_out32(s, BCCTR | BO_ALWAYS);

    /* Epilogue */
    tb_ret_addr = s->code_ptr;

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
    tcg_out32(s, BCLR | BO_ALWAYS);
}
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    TCGArg a0, a1, a2;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
        tcg_out_b(s, 0, tb_ret_addr);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method.  */
            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
            /* ... reserve space for the branch to be patched in ... */
        } else {
            /* Indirect jump method.  */
            tcg_abort();
        }
        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        {
            TCGLabel *l = &s->labels[args[0]];

            if (l->has_value) {
                tcg_out_b(s, 0, l->u.value_ptr);
            } else {
                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, args[0], 0);
                tcg_out_b_noaddr(s, B);
            }
        }
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_32:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;
    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_or_i64:
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_ori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, OR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_xor_i64:
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_xori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_orc_i32:
        if (const_args[2]) {
            tcg_out_ori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_orc_i64:
        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* FALLTHRU */
    case INDEX_op_eqv_i64:
        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
        break;

    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLW | TAB(a0, a1, a2));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31 - args[2]);
        } else {
            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], args[2], 31);
        } else {
            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
        } else {
            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
        } else {
            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
                         | MB(0) | ME(31));
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], TCG_TYPE_I32);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       args[3], TCG_TYPE_I64);
        break;

    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
        break;
    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;

    case INDEX_op_shl_i64:
        if (const_args[2]) {
            tcg_out_shli64(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i64:
        if (const_args[2]) {
            tcg_out_shri64(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i64:
        if (const_args[2]) {
            int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
            tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
        } else {
            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
        } else {
            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
        }
        break;

    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_div_i64:
        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        c = EXTSB;
        goto gen_ext;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        c = EXTSH;
        goto gen_ext;
    case INDEX_op_ext32s_i64:
        c = EXTSW;
        goto gen_ext;
    gen_ext:
        tcg_out32(s, c | RS(args[1]) | RA(args[0]));
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        a0 = args[0], a1 = args[1];
        /* a1 = abcd */
        if (a0 != a1) {
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
            tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
        } else {
            /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
            tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
            /* a0 = (a1 r<< 24) & 0xff # 000c */
            tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
            /* a0 = a0 | r0 # 00dc */
            tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
        }
        break;

    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        /* Stolen from gcc's builtin_bswap32 */
        a1 = args[1];
        a0 = args[0] == a1 ? TCG_REG_R0 : args[0];

        /* a1 = args[1] # abcd */
        /* a0 = rotate_left (a1, 8) # bcda */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
        if (a0 == a1) {
            a0 = TCG_REG_R0;
            a2 = a1;
        }

        /* a1 = # abcd efgh */
        /* a0 = rl32(a1, 8) # 0000 fghe */
        tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
        /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
        /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
        tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);

        /* a0 = rl64(a0, 32) # hgfe 0000 */
        /* a2 = rl64(a1, 32) # efgh abcd */
        tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
        tcg_out_rld(s, RLDICL, a2, a1, 32, 0);

        /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
        /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
        /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
        tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);

        if (a0 == TCG_REG_R0) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi32(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
                        32 - args[3] - args[4], 31 - args[3]);
        }
        break;
    case INDEX_op_deposit_i64:
        if (const_args[2]) {
            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi64(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
                        64 - args[3] - args[4]);
        }
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;

    case INDEX_op_add2_i64:
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit.  */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

    case INDEX_op_sub2_i64:
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[4] && a0 == args[4])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[3], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[3], args[2]));
        }
        if (const_args[4]) {
            tcg_out32(s, (args[4] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[4]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;

    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32:  /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:      /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
static const TCGTargetOpDef ppc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },

    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_mul_i32, { "r", "r", "rI" } },
    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "r", "ri" } },
    { INDEX_op_or_i32, { "r", "r", "ri" } },
    { INDEX_op_xor_i32, { "r", "r", "ri" } },
    { INDEX_op_andc_i32, { "r", "r", "ri" } },
    { INDEX_op_orc_i32, { "r", "r", "ri" } },
    { INDEX_op_eqv_i32, { "r", "r", "ri" } },
    { INDEX_op_nand_i32, { "r", "r", "r" } },
    { INDEX_op_nor_i32, { "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },
    { INDEX_op_brcond_i64, { "r", "ri" } },

    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rT" } },
    { INDEX_op_sub_i64, { "r", "rI", "rT" } },
    { INDEX_op_and_i64, { "r", "r", "ri" } },
    { INDEX_op_or_i64, { "r", "r", "rU" } },
    { INDEX_op_xor_i64, { "r", "r", "rU" } },
    { INDEX_op_andc_i64, { "r", "r", "ri" } },
    { INDEX_op_orc_i64, { "r", "r", "r" } },
    { INDEX_op_eqv_i64, { "r", "r", "r" } },
    { INDEX_op_nand_i64, { "r", "r", "r" } },
    { INDEX_op_nor_i64, { "r", "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "ri" } },
    { INDEX_op_shr_i64, { "r", "r", "ri" } },
    { INDEX_op_sar_i64, { "r", "r", "ri" } },
    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "ri" } },

    { INDEX_op_mul_i64, { "r", "r", "rI" } },
    { INDEX_op_div_i64, { "r", "r", "r" } },
    { INDEX_op_divu_i64, { "r", "r", "r" } },

    { INDEX_op_neg_i64, { "r", "r" } },
    { INDEX_op_not_i64, { "r", "r" } },

    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "S", "S" } },
    { INDEX_op_qemu_st_i64, { "S", "S" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },

    { INDEX_op_setcond_i32, { "r", "r", "ri" } },
    { INDEX_op_setcond_i64, { "r", "r", "ri" } },
    { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } },
    { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
    { INDEX_op_deposit_i64, { "r", "0", "rZ" } },

    { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } },
    { INDEX_op_sub2_i64, { "r", "r", "rI", "r", "rZM", "r" } },
    { INDEX_op_mulsh_i64, { "r", "r", "r" } },
    { INDEX_op_muluh_i64, { "r", "r", "r" } },

    { -1 },
};
static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa_2_06 = true;
    }

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R4) |
                     (1 << TCG_REG_R5) |
                     (1 << TCG_REG_R6) |
                     (1 << TCG_REG_R7) |
                     (1 << TCG_REG_R8) |
                     (1 << TCG_REG_R9) |
                     (1 << TCG_REG_R10) |
                     (1 << TCG_REG_R11) |
                     (1 << TCG_REG_R12));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);  /* mem temp */
#ifdef __APPLE__
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R11); /* ??? */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */

    tcg_add_target_add_op_defs(ppc_op_defs);
}
typedef struct {
    DebugFrameCIE   cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#define ELF_HOST_MACHINE EM_PPC64
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),         /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};
void tcg_register_jit(void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}