2 * Tiny Code Generator for QEMU
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 /* ??? The translation blocks produced by TCG are generally small enough to
28 be entirely reachable with a 16-bit displacement. Leaving the option for
29 a 32-bit displacement here Just In Case. */
30 #define USE_LONG_BRANCHES 0
32 #define TCG_CT_CONST_32 0x0100
33 #define TCG_CT_CONST_NEG 0x0200
34 #define TCG_CT_CONST_ADDI 0x0400
35 #define TCG_CT_CONST_MULI 0x0800
36 #define TCG_CT_CONST_ANDI 0x1000
37 #define TCG_CT_CONST_ORI 0x2000
38 #define TCG_CT_CONST_XORI 0x4000
39 #define TCG_CT_CONST_CMPI 0x8000
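/* Extra constant-constraint bits: OR'd into TCGArgConstraint.ct by
   target_parse_constraint() and tested in tcg_target_const_match() below. */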
41 /* In several places within the instruction set, 0 means "no register"
42 rather than TCG_REG_R0. */
43 #define TCG_REG_NONE 0
45 /* A scratch register that may be used throughout the backend. */
46 #define TCG_TMP0 TCG_REG_R14
48 #ifdef CONFIG_USE_GUEST_BASE
49 #define TCG_GUEST_BASE_REG TCG_REG_R13
51 #define TCG_GUEST_BASE_REG TCG_REG_R0
59 /* All of the following instructions are prefixed with their instruction
60 format, and are defined as 8- or 16-bit quantities, even when the two
61 halves of the 16-bit quantity may appear 32 bits apart in the insn.
62 This makes it easy to copy the values from the tables in Appendix B. */
63 typedef enum S390Opcode {
207 #define LD_SIGNED 0x04
208 #define LD_UINT8 0x00
209 #define LD_INT8 (LD_UINT8 | LD_SIGNED)
210 #define LD_UINT16 0x01
211 #define LD_INT16 (LD_UINT16 | LD_SIGNED)
212 #define LD_UINT32 0x02
213 #define LD_INT32 (LD_UINT32 | LD_SIGNED)
214 #define LD_UINT64 0x03
215 #define LD_INT64 (LD_UINT64 | LD_SIGNED)
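/* The low two bits of these values give the access size (0=8, 1=16, 2=32,
   3=64 bits) and bit 2 selects sign extension; e.g. LD_INT16 == 0x05.
   The size bits also index the qemu_ld_helpers/qemu_st_helpers tables. */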
218 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
219 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
220 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
224 /* Since R6 is a potential argument register, choose it last of the
225 call-saved registers. Likewise prefer the call-clobbered registers
226 in reverse order to maximize the chance of avoiding the arguments. */
227 static const int tcg_target_reg_alloc_order[] = {
245 static const int tcg_target_call_iarg_regs[] = {
253 static const int tcg_target_call_oarg_regs[] = {
255 #if TCG_TARGET_REG_BITS == 32
264 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
265 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
266 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
267 #define S390_CC_NEVER 0
268 #define S390_CC_ALWAYS 15
270 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
271 static const uint8_t tcg_cond_to_s390_cond[10] = {
272 [TCG_COND_EQ] = S390_CC_EQ,
273 [TCG_COND_NE] = S390_CC_NE,
274 [TCG_COND_LT] = S390_CC_LT,
275 [TCG_COND_LE] = S390_CC_LE,
276 [TCG_COND_GT] = S390_CC_GT,
277 [TCG_COND_GE] = S390_CC_GE,
278 [TCG_COND_LTU] = S390_CC_LT,
279 [TCG_COND_LEU] = S390_CC_LE,
280 [TCG_COND_GTU] = S390_CC_GT,
281 [TCG_COND_GEU] = S390_CC_GE,
284 /* Condition codes that result from a LOAD AND TEST. Here, we have no
285 unsigned instruction variation; however, since the test is against zero
286 we can re-map the outcomes appropriately. */
287 static const uint8_t tcg_cond_to_ltr_cond[10] = {
288 [TCG_COND_EQ] = S390_CC_EQ,
289 [TCG_COND_NE] = S390_CC_NE,
290 [TCG_COND_LT] = S390_CC_LT,
291 [TCG_COND_LE] = S390_CC_LE,
292 [TCG_COND_GT] = S390_CC_GT,
293 [TCG_COND_GE] = S390_CC_GE,
294 [TCG_COND_LTU] = S390_CC_NEVER,
295 [TCG_COND_LEU] = S390_CC_EQ,
296 [TCG_COND_GTU] = S390_CC_NE,
297 [TCG_COND_GEU] = S390_CC_ALWAYS,
300 #ifdef CONFIG_SOFTMMU
302 #include "../../softmmu_defs.h"
304 static void *qemu_ld_helpers[4] = {
311 static void *qemu_st_helpers[4] = {
319 static uint8_t *tb_ret_addr;
321 /* A list of relevant facilities used by this translator. Some of these
322 are required for proper operation, and these are checked at startup. */
324 #define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
325 #define FACILITY_LONG_DISP (1ULL << (63 - 18))
326 #define FACILITY_EXT_IMM (1ULL << (63 - 21))
327 #define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
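/* The facility list numbers bits from the most-significant end of each
   doubleword, so facility bit N maps to 1ULL << (63 - N); e.g. the
   long-displacement facility is bit 18. The facilities named above are
   collected into "facilities" by query_facilities() below. */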
329 static uint64_t facilities;
331 static void patch_reloc(uint8_t *code_ptr, int type,
332 tcg_target_long value, tcg_target_long addend)
334 tcg_target_long code_ptr_tl = (tcg_target_long)code_ptr;
335 tcg_target_long pcrel2;
337 /* ??? Not the usual definition of "addend". */
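/* Relative branch displacements on S/390 are counted in halfwords,
   hence the shift right by one. */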
338 pcrel2 = (value - (code_ptr_tl + addend)) >> 1;
342 assert(pcrel2 == (int16_t)pcrel2);
343 *(int16_t *)code_ptr = pcrel2;
346 assert(pcrel2 == (int32_t)pcrel2);
347 *(int32_t *)code_ptr = pcrel2;
355 static int tcg_target_get_call_iarg_regs_count(int flags)
357 return sizeof(tcg_target_call_iarg_regs) / sizeof(int);
360 /* parse target specific constraints */
361 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
363 const char *ct_str = *pct_str;
366 case 'r': /* all registers */
367 ct->ct |= TCG_CT_REG;
368 tcg_regset_set32(ct->u.regs, 0, 0xffff);
370 case 'R': /* not R0 */
371 ct->ct |= TCG_CT_REG;
372 tcg_regset_set32(ct->u.regs, 0, 0xffff);
373 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
375 case 'L': /* qemu_ld/st constraint */
376 ct->ct |= TCG_CT_REG;
377 tcg_regset_set32(ct->u.regs, 0, 0xffff);
378 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
379 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
381 case 'a': /* force R2 for division */
382 ct->ct |= TCG_CT_REG;
383 tcg_regset_clear(ct->u.regs);
384 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
386 case 'b': /* force R3 for division */
387 ct->ct |= TCG_CT_REG;
388 tcg_regset_clear(ct->u.regs);
389 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
391 case 'N': /* force immediate negate */
392 ct->ct |= TCG_CT_CONST_NEG;
394 case 'W': /* force 32-bit ("word") immediate */
395 ct->ct |= TCG_CT_CONST_32;
398 ct->ct |= TCG_CT_CONST_ADDI;
401 ct->ct |= TCG_CT_CONST_MULI;
404 ct->ct |= TCG_CT_CONST_ANDI;
407 ct->ct |= TCG_CT_CONST_ORI;
410 ct->ct |= TCG_CT_CONST_XORI;
413 ct->ct |= TCG_CT_CONST_CMPI;
424 /* Immediates to be used with logical AND. This is an optimization only,
425 since a full 64-bit immediate AND can always be performed with 4 sequential
426 NI[LH][LH] instructions. What we're looking for is immediates that we
427 can load efficiently, and the immediate load plus the reg-reg AND is
428 smaller than the sequential NI's. */
430 static int tcg_match_andi(int ct, tcg_target_ulong val)
434 if (facilities & FACILITY_EXT_IMM) {
435 if (ct & TCG_CT_CONST_32) {
436 /* All 32-bit ANDs can be performed with 1 48-bit insn. */
440 /* Zero-extensions. */
441 if (val == 0xff || val == 0xffff || val == 0xffffffff) {
445 if (ct & TCG_CT_CONST_32) {
447 } else if (val == 0xffffffff) {
452 /* Try all 32-bit insns that can perform it in one go. */
453 for (i = 0; i < 4; i++) {
454 tcg_target_ulong mask = ~(0xffffull << i*16);
455 if ((val & mask) == mask) {
460 /* Look for 16-bit values performing the mask. These are better
461 to load with LLI[LH][LH]. */
462 for (i = 0; i < 4; i++) {
463 tcg_target_ulong mask = 0xffffull << i*16;
464 if ((val & mask) == val) {
469 /* Look for 32-bit values performing the 64-bit mask. These
470 are better to load with LLI[LH]F, or if extended immediates are
471 not available, with a pair of LLI insns. */
472 if ((ct & TCG_CT_CONST_32) == 0) {
473 if (val <= 0xffffffff || (val & 0xffffffff) == 0) {
481 /* Immediates to be used with logical OR. This is an optimization only,
482 since a full 64-bit immediate OR can always be performed with 4 sequential
483 OI[LH][LH] instructions. What we're looking for is immediates that we
484 can load efficiently, and the immediate load plus the reg-reg OR is
485 smaller than the sequential OI's. */
487 static int tcg_match_ori(int ct, tcg_target_long val)
489 if (facilities & FACILITY_EXT_IMM) {
490 if (ct & TCG_CT_CONST_32) {
491 /* All 32-bit ORs can be performed with 1 48-bit insn. */
496 /* Look for negative values. These are best to load with LGHI. */
498 if (val == (int16_t)val) {
501 if (facilities & FACILITY_EXT_IMM) {
502 if (val == (int32_t)val) {
511 /* Immediates to be used with logical XOR. This is almost, but not quite,
512 only an optimization. XOR with immediate is only supported with the
513 extended-immediate facility. That said, there are a few patterns for
514 which it is better to load the value into a register first. */
516 static int tcg_match_xori(int ct, tcg_target_long val)
518 if ((facilities & FACILITY_EXT_IMM) == 0) {
522 if (ct & TCG_CT_CONST_32) {
523 /* All 32-bit XORs can be performed with 1 48-bit insn. */
527 /* Look for negative values. These are best to load with LGHI. */
528 if (val < 0 && val == (int32_t)val) {
535 /* Immediates to be used with comparisons. */
537 static int tcg_match_cmpi(int ct, tcg_target_long val)
539 if (facilities & FACILITY_EXT_IMM) {
540 /* The COMPARE IMMEDIATE instruction is available. */
541 if (ct & TCG_CT_CONST_32) {
542 /* We have a 32-bit immediate and can compare against anything. */
545 /* ??? We have no insight here into whether the comparison is
546 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
547 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
548 a 32-bit unsigned immediate. If we were to use the (semi)
549 obvious "val == (int32_t)val" we would be enabling unsigned
550 comparisons vs very large numbers. The only solution is to
551 take the intersection of the ranges. */
552 /* ??? Another possible solution is to simply lie and allow all
553 constants here and force the out-of-range values into a temp
554 register in tgen_cmp when we have knowledge of the actual
555 comparison code in use. */
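/* For now, accept only the intersection of the two ranges: [0, 0x7fffffff]. */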
556 return val >= 0 && val <= 0x7fffffff;
559 /* Only the LOAD AND TEST instruction is available. */
564 /* Test if a constant matches the constraint. */
565 static int tcg_target_const_match(tcg_target_long val,
566 const TCGArgConstraint *arg_ct)
570 if (ct & TCG_CT_CONST) {
574 /* Handle the modifiers. */
575 if (ct & TCG_CT_CONST_NEG) {
578 if (ct & TCG_CT_CONST_32) {
582 /* The following are mutually exclusive. */
583 if (ct & TCG_CT_CONST_ADDI) {
584 /* Immediates that may be used with add. If we have the
585 extended-immediates facility then we have ADD IMMEDIATE
586 with signed and unsigned 32-bit, otherwise we have only
587 ADD HALFWORD IMMEDIATE with a signed 16-bit. */
588 if (facilities & FACILITY_EXT_IMM) {
589 return val == (int32_t)val || val == (uint32_t)val;
591 return val == (int16_t)val;
593 } else if (ct & TCG_CT_CONST_MULI) {
594 /* Immediates that may be used with multiply. If we have the
595 general-instruction-extensions, then we have MULTIPLY SINGLE
596 IMMEDIATE with a signed 32-bit, otherwise we have only
597 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
598 if (facilities & FACILITY_GEN_INST_EXT) {
599 return val == (int32_t)val;
601 return val == (int16_t)val;
603 } else if (ct & TCG_CT_CONST_ANDI) {
604 return tcg_match_andi(ct, val);
605 } else if (ct & TCG_CT_CONST_ORI) {
606 return tcg_match_ori(ct, val);
607 } else if (ct & TCG_CT_CONST_XORI) {
608 return tcg_match_xori(ct, val);
609 } else if (ct & TCG_CT_CONST_CMPI) {
610 return tcg_match_cmpi(ct, val);
616 /* Emit instructions according to the given instruction format. */
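/* E.g. in the RR format, LR %r1,%r2 assembles to 0x1812: the 8-bit opcode
   0x18 in the top byte, with r1 and r2 in the two low nibbles. */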
618 static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
620 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
623 static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
624 TCGReg r1, TCGReg r2)
626 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
629 static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
631 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
634 static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
636 tcg_out16(s, op | (r1 << 4));
640 static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
641 TCGReg b2, TCGReg r3, int disp)
643 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
647 static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
648 TCGReg b2, TCGReg r3, int disp)
650 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
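/* The 20-bit signed displacement is split across the RSY DL (low 12 bits)
   and DH (high 8 bits) fields. */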
651 tcg_out32(s, (op & 0xff) | (b2 << 28)
652 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
655 #define tcg_out_insn_RX tcg_out_insn_RS
656 #define tcg_out_insn_RXY tcg_out_insn_RSY
658 /* Emit an opcode with "type-checking" of the format. */
659 #define tcg_out_insn(S, FMT, OP, ...) \
660 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
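/* E.g. tcg_out_insn(s, RI, AHI, dest, val) expands to
   tcg_out_insn_RI(s, RI_AHI, dest, val), so the opcode name must match
   the emitter's format. */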
663 /* emit 64-bit shifts */
664 static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
665 TCGReg src, TCGReg sh_reg, int sh_imm)
667 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
670 /* emit 32-bit shifts */
671 static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
672 TCGReg sh_reg, int sh_imm)
674 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
677 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
680 if (type == TCG_TYPE_I32) {
681 tcg_out_insn(s, RR, LR, dst, src);
683 tcg_out_insn(s, RRE, LGR, dst, src);
688 /* load a register with an immediate value */
689 static void tcg_out_movi(TCGContext *s, TCGType type,
690 TCGReg ret, tcg_target_long sval)
692 static const S390Opcode lli_insns[4] = {
693 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
696 tcg_target_ulong uval = sval;
699 if (type == TCG_TYPE_I32) {
700 uval = (uint32_t)sval;
701 sval = (int32_t)sval;
704 /* Try all 32-bit insns that can load it in one go. */
705 if (sval >= -0x8000 && sval < 0x8000) {
706 tcg_out_insn(s, RI, LGHI, ret, sval);
710 for (i = 0; i < 4; i++) {
711 tcg_target_long mask = 0xffffull << i*16;
712 if ((uval & mask) == uval) {
713 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
718 /* Try all 48-bit insns that can load it in one go. */
719 if (facilities & FACILITY_EXT_IMM) {
720 if (sval == (int32_t)sval) {
721 tcg_out_insn(s, RIL, LGFI, ret, sval);
724 if (uval <= 0xffffffff) {
725 tcg_out_insn(s, RIL, LLILF, ret, uval);
728 if ((uval & 0xffffffff) == 0) {
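/* Shifting in two steps keeps each shift count below 32, which matters
   when tcg_target_ulong is only 32 bits wide. */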
729 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
734 /* Try for PC-relative address load. */
735 if ((sval & 1) == 0) {
736 intptr_t off = (sval - (intptr_t)s->code_ptr) >> 1;
737 if (off == (int32_t)off) {
738 tcg_out_insn(s, RIL, LARL, ret, off);
743 /* If extended immediates are not present, then we may have to issue
744 several instructions to load the low 32 bits. */
745 if (!(facilities & FACILITY_EXT_IMM)) {
746 /* A 32-bit unsigned value can be loaded in 2 insns. And given
747 that the lli_insns loop above did not succeed, we know that
748 both insns are required. */
749 if (uval <= 0xffffffff) {
750 tcg_out_insn(s, RI, LLILL, ret, uval);
751 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
755 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
756 We first want to make sure that all the high bits get set. With
757 luck the low 16-bits can be considered negative to perform that for
758 free, otherwise we load an explicit -1. */
759 if (sval >> 31 >> 1 == -1) {
761 tcg_out_insn(s, RI, LGHI, ret, uval);
763 tcg_out_insn(s, RI, LGHI, ret, -1);
764 tcg_out_insn(s, RI, IILL, ret, uval);
766 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
771 /* If we get here, both the high and low parts have non-zero bits. */
773 /* Recurse to load the lower 32-bits. */
774 tcg_out_movi(s, TCG_TYPE_I32, ret, sval);
776 /* Insert data into the high 32-bits. */
777 uval = uval >> 31 >> 1;
778 if (facilities & FACILITY_EXT_IMM) {
779 if (uval < 0x10000) {
780 tcg_out_insn(s, RI, IIHL, ret, uval);
781 } else if ((uval & 0xffff) == 0) {
782 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
784 tcg_out_insn(s, RIL, IIHF, ret, uval);
788 tcg_out_insn(s, RI, IIHL, ret, uval);
790 if (uval & 0xffff0000) {
791 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
797 /* Emit a load/store type instruction. Inputs are:
798 DATA: The register to be loaded or stored.
799 BASE+OFS: The effective address.
800 OPC_RX: The RX format opcode for the operation, if any (e.g. STC); otherwise 0.
801 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
803 static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
804 TCGReg data, TCGReg base, TCGReg index,
807 if (ofs < -0x80000 || ofs >= 0x80000) {
808 /* Combine the low 16 bits of the offset with the actual load insn;
809 the high 48 bits must come from an immediate load. */
810 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs & ~0xffff);
813 /* If we were already given an index register, add it in. */
814 if (index != TCG_REG_NONE) {
815 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
820 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
821 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
823 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
828 /* load data without address translation or endianness conversion */
829 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
830 TCGReg base, tcg_target_long ofs)
832 if (type == TCG_TYPE_I32) {
833 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
835 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
839 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
840 TCGReg base, tcg_target_long ofs)
842 if (type == TCG_TYPE_I32) {
843 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
845 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
849 /* load data from an absolute host address */
850 static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
852 tcg_target_long addr = (tcg_target_long)abs;
854 if (facilities & FACILITY_GEN_INST_EXT) {
855 tcg_target_long disp = (addr - (tcg_target_long)s->code_ptr) >> 1;
856 if (disp == (int32_t)disp) {
857 if (type == TCG_TYPE_I32) {
858 tcg_out_insn(s, RIL, LRL, dest, disp);
860 tcg_out_insn(s, RIL, LGRL, dest, disp);
866 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
867 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
870 static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
872 if (facilities & FACILITY_EXT_IMM) {
873 tcg_out_insn(s, RRE, LGBR, dest, src);
877 if (type == TCG_TYPE_I32) {
879 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
881 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
883 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
885 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
886 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
890 static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
892 if (facilities & FACILITY_EXT_IMM) {
893 tcg_out_insn(s, RRE, LLGCR, dest, src);
898 tcg_out_movi(s, type, TCG_TMP0, 0xff);
901 tcg_out_movi(s, type, dest, 0xff);
903 if (type == TCG_TYPE_I32) {
904 tcg_out_insn(s, RR, NR, dest, src);
906 tcg_out_insn(s, RRE, NGR, dest, src);
910 static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
912 if (facilities & FACILITY_EXT_IMM) {
913 tcg_out_insn(s, RRE, LGHR, dest, src);
917 if (type == TCG_TYPE_I32) {
919 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
921 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
923 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
925 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
926 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
930 static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
932 if (facilities & FACILITY_EXT_IMM) {
933 tcg_out_insn(s, RRE, LLGHR, dest, src);
938 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
941 tcg_out_movi(s, type, dest, 0xffff);
943 if (type == TCG_TYPE_I32) {
944 tcg_out_insn(s, RR, NR, dest, src);
946 tcg_out_insn(s, RRE, NGR, dest, src);
950 static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
952 tcg_out_insn(s, RRE, LGFR, dest, src);
955 static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
957 tcg_out_insn(s, RRE, LLGFR, dest, src);
960 static inline void tgen32_addi(TCGContext *s, TCGReg dest, int32_t val)
962 if (val == (int16_t)val) {
963 tcg_out_insn(s, RI, AHI, dest, val);
965 tcg_out_insn(s, RIL, AFI, dest, val);
969 static inline void tgen64_addi(TCGContext *s, TCGReg dest, int64_t val)
971 if (val == (int16_t)val) {
972 tcg_out_insn(s, RI, AGHI, dest, val);
973 } else if (val == (int32_t)val) {
974 tcg_out_insn(s, RIL, AGFI, dest, val);
975 } else if (val == (uint32_t)val) {
976 tcg_out_insn(s, RIL, ALGFI, dest, val);
983 static void tgen64_andi(TCGContext *s, TCGReg dest, tcg_target_ulong val)
985 static const S390Opcode ni_insns[4] = {
986 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
988 static const S390Opcode nif_insns[2] = {
994 /* Look for no-op. */
999 /* Look for the zero-extensions. */
1000 if (val == 0xffffffff) {
1001 tgen_ext32u(s, dest, dest);
1005 if (facilities & FACILITY_EXT_IMM) {
1007 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
1010 if (val == 0xffff) {
1011 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
1015 /* Try all 32-bit insns that can perform it in one go. */
1016 for (i = 0; i < 4; i++) {
1017 tcg_target_ulong mask = ~(0xffffull << i*16);
1018 if ((val & mask) == mask) {
1019 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
1024 /* Try all 48-bit insns that can perform it in one go. */
1025 if (facilities & FACILITY_EXT_IMM) {
1026 for (i = 0; i < 2; i++) {
1027 tcg_target_ulong mask = ~(0xffffffffull << i*32);
1028 if ((val & mask) == mask) {
1029 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1035 /* Perform the AND via sequential modifications to the high and low
1036 parts. Do this via recursion to handle 16-bit vs 32-bit masks in each half. */
1038 tgen64_andi(s, dest, val | 0xffffffff00000000ull);
1039 tgen64_andi(s, dest, val | 0x00000000ffffffffull);
1041 /* With no extended-immediate facility, just emit the sequence. */
1042 for (i = 0; i < 4; i++) {
1043 tcg_target_ulong mask = 0xffffull << i*16;
1044 if ((val & mask) != mask) {
1045 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
1051 static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1053 static const S390Opcode oi_insns[4] = {
1054 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1056 static const S390Opcode nif_insns[2] = {
1062 /* Look for no-op. */
1067 if (facilities & FACILITY_EXT_IMM) {
1068 /* Try all 32-bit insns that can perform it in one go. */
1069 for (i = 0; i < 4; i++) {
1070 tcg_target_ulong mask = (0xffffull << i*16);
1071 if ((val & mask) != 0 && (val & ~mask) == 0) {
1072 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1077 /* Try all 48-bit insns that can perform it in one go. */
1078 for (i = 0; i < 2; i++) {
1079 tcg_target_ulong mask = (0xffffffffull << i*32);
1080 if ((val & mask) != 0 && (val & ~mask) == 0) {
1081 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1086 /* Perform the OR via sequential modifications to the high and
1087 low parts. Do this via recursion to handle 16-bit vs 32-bit
1088 masks in each half. */
1089 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1090 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1092 /* With no extended-immediate facility, we don't need to be so
1093 clever. Just iterate over the insns and OR in the constant. */
1094 for (i = 0; i < 4; i++) {
1095 tcg_target_ulong mask = (0xffffull << i*16);
1096 if ((val & mask) != 0) {
1097 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1103 static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1105 /* Perform the xor by parts. */
1106 if (val & 0xffffffff) {
1107 tcg_out_insn(s, RIL, XILF, dest, val);
1109 if (val > 0xffffffff) {
1110 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1114 static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1115 TCGArg c2, int c2const)
1117 bool is_unsigned = (c > TCG_COND_GT);
1120 if (type == TCG_TYPE_I32) {
1121 tcg_out_insn(s, RR, LTR, r1, r1);
1123 tcg_out_insn(s, RRE, LTGR, r1, r1);
1125 return tcg_cond_to_ltr_cond[c];
1128 if (type == TCG_TYPE_I32) {
1129 tcg_out_insn(s, RIL, CLFI, r1, c2);
1131 tcg_out_insn(s, RIL, CLGFI, r1, c2);
1134 if (type == TCG_TYPE_I32) {
1135 tcg_out_insn(s, RIL, CFI, r1, c2);
1137 tcg_out_insn(s, RIL, CGFI, r1, c2);
1143 if (type == TCG_TYPE_I32) {
1144 tcg_out_insn(s, RR, CLR, r1, c2);
1146 tcg_out_insn(s, RRE, CLGR, r1, c2);
1149 if (type == TCG_TYPE_I32) {
1150 tcg_out_insn(s, RR, CR, r1, c2);
1152 tcg_out_insn(s, RRE, CGR, r1, c2);
1156 return tcg_cond_to_s390_cond[c];
1159 static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
1160 TCGReg dest, TCGReg r1, TCGArg c2, int c2const)
1162 int cc = tgen_cmp(s, type, c, r1, c2, c2const);
1164 /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over: */
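/* The BRC offset is counted in halfwords from the branch itself:
   skip the 4-byte BRC plus the 4-byte LGHI that loads 0. */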
1165 tcg_out_movi(s, type, dest, 1);
1166 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1167 tcg_out_movi(s, type, dest, 0);
1170 static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
1172 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1173 if (off > -0x8000 && off < 0x7fff) {
1174 tcg_out_insn(s, RI, BRC, cc, off);
1175 } else if (off == (int32_t)off) {
1176 tcg_out_insn(s, RIL, BRCL, cc, off);
1178 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1179 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1183 static void tgen_branch(TCGContext *s, int cc, int labelno)
1185 TCGLabel* l = &s->labels[labelno];
1187 tgen_gotoi(s, cc, l->u.value);
1188 } else if (USE_LONG_BRANCHES) {
1189 tcg_out16(s, RIL_BRCL | (cc << 4));
1190 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
1193 tcg_out16(s, RI_BRC | (cc << 4));
1194 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
1199 static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1200 TCGReg r1, TCGReg r2, int labelno)
1202 TCGLabel* l = &s->labels[labelno];
1203 tcg_target_long off;
1206 off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
1208 /* We need to keep the offset unchanged for retranslation. */
1209 off = ((int16_t *)s->code_ptr)[1];
1210 tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
1213 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1215 tcg_out16(s, cc << 12 | (opc & 0xff));
1218 static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1219 TCGReg r1, int i2, int labelno)
1221 TCGLabel* l = &s->labels[labelno];
1222 tcg_target_long off;
1225 off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
1227 /* We need to keep the offset unchanged for retranslation. */
1228 off = ((int16_t *)s->code_ptr)[1];
1229 tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
1232 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1234 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1237 static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1238 TCGReg r1, TCGArg c2, int c2const, int labelno)
1242 if (facilities & FACILITY_GEN_INST_EXT) {
1243 bool is_unsigned = (c > TCG_COND_GT);
1247 cc = tcg_cond_to_s390_cond[c];
1250 opc = (type == TCG_TYPE_I32
1251 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1252 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
1253 tgen_compare_branch(s, opc, cc, r1, c2, labelno);
1257 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1258 If the immediate we've been given does not fit that range, we'll
1259 fall back to separate compare and branch instructions using the
1260 larger comparison range afforded by COMPARE IMMEDIATE. */
1261 if (type == TCG_TYPE_I32) {
1264 in_range = (uint32_t)c2 == (uint8_t)c2;
1267 in_range = (int32_t)c2 == (int8_t)c2;
1272 in_range = (uint64_t)c2 == (uint8_t)c2;
1275 in_range = (int64_t)c2 == (int8_t)c2;
1279 tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
1284 cc = tgen_cmp(s, type, c, r1, c2, c2const);
1285 tgen_branch(s, cc, labelno);
1288 static void tgen_calli(TCGContext *s, tcg_target_long dest)
1290 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1291 if (off == (int32_t)off) {
1292 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1294 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1295 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1299 static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
1300 TCGReg base, TCGReg index, int disp)
1302 #ifdef TARGET_WORDS_BIGENDIAN
1303 const int bswap = 0;
1305 const int bswap = 1;
1309 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1312 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1316 /* swapped unsigned halfword load with upper bits zeroed */
1317 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1318 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1320 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1325 /* swapped sign-extended halfword load */
1326 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1327 tgen_ext16s(s, TCG_TYPE_I64, data, data);
1329 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1334 /* swapped unsigned int load with upper bits zeroed */
1335 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1336 tgen_ext32u(s, data, data);
1338 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1343 /* swapped sign-extended int load */
1344 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1345 tgen_ext32s(s, data, data);
1347 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1352 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
1354 tcg_out_insn(s, RXY, LG, data, base, index, disp);
1362 static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
1363 TCGReg base, TCGReg index, int disp)
1365 #ifdef TARGET_WORDS_BIGENDIAN
1366 const int bswap = 0;
1368 const int bswap = 1;
1372 if (disp >= 0 && disp < 0x1000) {
1373 tcg_out_insn(s, RX, STC, data, base, index, disp);
1375 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1380 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1381 } else if (disp >= 0 && disp < 0x1000) {
1382 tcg_out_insn(s, RX, STH, data, base, index, disp);
1384 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1389 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1390 } else if (disp >= 0 && disp < 0x1000) {
1391 tcg_out_insn(s, RX, ST, data, base, index, disp);
1393 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1398 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1400 tcg_out_insn(s, RXY, STG, data, base, index, disp);
1408 #if defined(CONFIG_SOFTMMU)
1409 static void tgen64_andi_tmp(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1411 if (tcg_match_andi(0, val)) {
1412 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, val);
1413 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
1415 tgen64_andi(s, dest, val);
1419 static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
1420 TCGReg addr_reg, int mem_index, int opc,
1421 uint16_t **label2_ptr_p, int is_store)
1423 const TCGReg arg0 = TCG_REG_R2;
1424 const TCGReg arg1 = TCG_REG_R3;
1425 int s_bits = opc & 3;
1426 uint16_t *label1_ptr;
1427 tcg_target_long ofs;
1429 if (TARGET_LONG_BITS == 32) {
1430 tgen_ext32u(s, arg0, addr_reg);
1432 tcg_out_mov(s, TCG_TYPE_I64, arg0, addr_reg);
1435 tcg_out_sh64(s, RSY_SRLG, arg1, addr_reg, TCG_REG_NONE,
1436 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1438 tgen64_andi_tmp(s, arg0, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
1439 tgen64_andi_tmp(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
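/* arg0 now holds the page part of the address plus the low alignment bits
   (so unaligned accesses fail the compare); arg1 holds the byte offset of
   the TLB entry within tlb_table[mem_index]. */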
1442 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
1444 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1446 assert(ofs < 0x80000);
1448 if (TARGET_LONG_BITS == 32) {
1449 tcg_out_mem(s, RX_C, RXY_CY, arg0, arg1, TCG_AREG0, ofs);
1451 tcg_out_mem(s, 0, RXY_CG, arg0, arg1, TCG_AREG0, ofs);
1454 if (TARGET_LONG_BITS == 32) {
1455 tgen_ext32u(s, arg0, addr_reg);
1457 tcg_out_mov(s, TCG_TYPE_I64, arg0, addr_reg);
1460 label1_ptr = (uint16_t*)s->code_ptr;
1462 /* je label1 (offset will be patched in later) */
1463 tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);
1465 /* call load/store helper */
1467 /* Make sure to zero-extend the value to the full register
1468 for the calling convention. */
1471 tgen_ext8u(s, TCG_TYPE_I64, arg1, data_reg);
1474 tgen_ext16u(s, TCG_TYPE_I64, arg1, data_reg);
1477 tgen_ext32u(s, arg1, data_reg);
1480 tcg_out_mov(s, TCG_TYPE_I64, arg1, data_reg);
1485 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, mem_index);
1486 tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
1488 tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
1489 tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);
1491 /* sign extension */
1494 tgen_ext8s(s, TCG_TYPE_I64, data_reg, arg0);
1497 tgen_ext16s(s, TCG_TYPE_I64, data_reg, arg0);
1500 tgen_ext32s(s, data_reg, arg0);
1503 /* unsigned -> just copy */
1504 tcg_out_mov(s, TCG_TYPE_I64, data_reg, arg0);
1509 /* jump to label2 (end) */
1510 *label2_ptr_p = (uint16_t*)s->code_ptr;
1512 tcg_out_insn(s, RI, BRC, S390_CC_ALWAYS, 0);
1514 /* this is label1, patch branch */
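/* The second halfword of the BRC holds its relative offset, in halfwords. */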
1515 *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
1516 (unsigned long)label1_ptr) >> 1;
1518 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1519 assert(ofs < 0x80000);
1521 tcg_out_mem(s, 0, RXY_AG, arg0, arg1, TCG_AREG0, ofs);
1524 static void tcg_finish_qemu_ldst(TCGContext* s, uint16_t *label2_ptr)
1527 *(label2_ptr + 1) = ((unsigned long)s->code_ptr -
1528 (unsigned long)label2_ptr) >> 1;
1531 static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1532 TCGReg *index_reg, tcg_target_long *disp)
1534 if (TARGET_LONG_BITS == 32) {
1535 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1536 *addr_reg = TCG_TMP0;
1538 if (GUEST_BASE < 0x80000) {
1539 *index_reg = TCG_REG_NONE;
1542 *index_reg = TCG_GUEST_BASE_REG;
1546 #endif /* CONFIG_SOFTMMU */
1548 /* load data with address translation (if applicable)
1549 and endianness conversion */
1550 static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
1552 TCGReg addr_reg, data_reg;
1553 #if defined(CONFIG_SOFTMMU)
1555 uint16_t *label2_ptr;
1558 tcg_target_long disp;
1564 #if defined(CONFIG_SOFTMMU)
1567 tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1568 opc, &label2_ptr, 0);
1570 tcg_out_qemu_ld_direct(s, opc, data_reg, TCG_REG_R2, TCG_REG_NONE, 0);
1572 tcg_finish_qemu_ldst(s, label2_ptr);
1574 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1575 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1579 static void tcg_out_qemu_st(TCGContext* s, const TCGArg* args, int opc)
1581 TCGReg addr_reg, data_reg;
1582 #if defined(CONFIG_SOFTMMU)
1584 uint16_t *label2_ptr;
1587 tcg_target_long disp;
1593 #if defined(CONFIG_SOFTMMU)
1596 tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1597 opc, &label2_ptr, 1);
1599 tcg_out_qemu_st_direct(s, opc, data_reg, TCG_REG_R2, TCG_REG_NONE, 0);
1601 tcg_finish_qemu_ldst(s, label2_ptr);
1603 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1604 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1608 #if TCG_TARGET_REG_BITS == 64
1609 # define OP_32_64(x) \
1610 case glue(glue(INDEX_op_,x),_i32): \
1611 case glue(glue(INDEX_op_,x),_i64)
1613 # define OP_32_64(x) \
1614 case glue(glue(INDEX_op_,x),_i32)
1617 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1618 const TCGArg *args, const int *const_args)
1623 case INDEX_op_exit_tb:
1625 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
1626 tgen_gotoi(s, S390_CC_ALWAYS, (unsigned long)tb_ret_addr);
1629 case INDEX_op_goto_tb:
1630 if (s->tb_jmp_offset) {
1633 /* load address stored at s->tb_next + args[0] */
1634 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
1636 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1638 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1642 if (const_args[0]) {
1643 tgen_calli(s, args[0]);
1645 tcg_out_insn(s, RR, BASR, TCG_REG_R14, args[0]);
1649 case INDEX_op_mov_i32:
1650 tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]);
1652 case INDEX_op_movi_i32:
1653 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1657 /* ??? LLC (RXY format) is only present with the extended-immediate
1658 facility, whereas LLGC is always present. */
1659 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1663 /* ??? LB is no smaller than LGB, so no point to using it. */
1664 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1668 /* ??? LLH (RXY format) is only present with the extended-immediate
1669 facility, whereas LLGH is always present. */
1670 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1673 case INDEX_op_ld16s_i32:
1674 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1677 case INDEX_op_ld_i32:
1678 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1682 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1683 TCG_REG_NONE, args[2]);
1687 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1688 TCG_REG_NONE, args[2]);
1691 case INDEX_op_st_i32:
1692 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1695 case INDEX_op_add_i32:
1696 if (const_args[2]) {
1697 tgen32_addi(s, args[0], args[2]);
1699 tcg_out_insn(s, RR, AR, args[0], args[2]);
1702 case INDEX_op_sub_i32:
1703 if (const_args[2]) {
1704 tgen32_addi(s, args[0], -args[2]);
1706 tcg_out_insn(s, RR, SR, args[0], args[2]);
1710 case INDEX_op_and_i32:
1711 if (const_args[2]) {
1712 tgen64_andi(s, args[0], args[2] | 0xffffffff00000000ull);
1714 tcg_out_insn(s, RR, NR, args[0], args[2]);
1717 case INDEX_op_or_i32:
1718 if (const_args[2]) {
1719 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1721 tcg_out_insn(s, RR, OR, args[0], args[2]);
1724 case INDEX_op_xor_i32:
1725 if (const_args[2]) {
1726 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1728 tcg_out_insn(s, RR, XR, args[0], args[2]);
1732 case INDEX_op_neg_i32:
1733 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1736 case INDEX_op_mul_i32:
1737 if (const_args[2]) {
1738 if ((int32_t)args[2] == (int16_t)args[2]) {
1739 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1741 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1744 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1748 case INDEX_op_div2_i32:
1749 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1751 case INDEX_op_divu2_i32:
1752 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1755 case INDEX_op_shl_i32:
1758 if (const_args[2]) {
1759 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1761 tcg_out_sh32(s, op, args[0], args[2], 0);
1764 case INDEX_op_shr_i32:
1767 case INDEX_op_sar_i32:
1771 case INDEX_op_rotl_i32:
1772 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1773 if (const_args[2]) {
1774 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1776 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1779 case INDEX_op_rotr_i32:
1780 if (const_args[2]) {
1781 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1782 TCG_REG_NONE, (32 - args[2]) & 31);
1784 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1785 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1789 case INDEX_op_ext8s_i32:
1790 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1792 case INDEX_op_ext16s_i32:
1793 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1795 case INDEX_op_ext8u_i32:
1796 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1798 case INDEX_op_ext16u_i32:
1799 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1803 /* The TCG bswap definition requires bits 0-47 already be zero.
1804 Thus we don't need the G-type insns to implement bswap16_i64. */
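/* LRVR reverses all four bytes, leaving the swapped halfword in the upper
   half of the 32-bit register; the SRL moves it back down. */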
1805 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1806 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1809 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1813 tgen_branch(s, S390_CC_ALWAYS, args[0]);
1816 case INDEX_op_brcond_i32:
1817 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1818 args[1], const_args[1], args[3]);
1820 case INDEX_op_setcond_i32:
1821 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1822 args[2], const_args[2]);
1825 case INDEX_op_qemu_ld8u:
1826 tcg_out_qemu_ld(s, args, LD_UINT8);
1828 case INDEX_op_qemu_ld8s:
1829 tcg_out_qemu_ld(s, args, LD_INT8);
1831 case INDEX_op_qemu_ld16u:
1832 tcg_out_qemu_ld(s, args, LD_UINT16);
1834 case INDEX_op_qemu_ld16s:
1835 tcg_out_qemu_ld(s, args, LD_INT16);
1837 case INDEX_op_qemu_ld32:
1838 /* ??? Technically we can use a non-extending instruction. */
1839 tcg_out_qemu_ld(s, args, LD_UINT32);
1841 case INDEX_op_qemu_ld64:
1842 tcg_out_qemu_ld(s, args, LD_UINT64);
1845 case INDEX_op_qemu_st8:
1846 tcg_out_qemu_st(s, args, LD_UINT8);
1848 case INDEX_op_qemu_st16:
1849 tcg_out_qemu_st(s, args, LD_UINT16);
1851 case INDEX_op_qemu_st32:
1852 tcg_out_qemu_st(s, args, LD_UINT32);
1854 case INDEX_op_qemu_st64:
1855 tcg_out_qemu_st(s, args, LD_UINT64);
1858 #if TCG_TARGET_REG_BITS == 64
1859 case INDEX_op_mov_i64:
1860 tcg_out_mov(s, TCG_TYPE_I64, args[0], args[1]);
1862 case INDEX_op_movi_i64:
1863 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1866 case INDEX_op_ld16s_i64:
1867 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1869 case INDEX_op_ld32u_i64:
1870 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1872 case INDEX_op_ld32s_i64:
1873 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1875 case INDEX_op_ld_i64:
1876 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1879 case INDEX_op_st32_i64:
1880 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1882 case INDEX_op_st_i64:
1883 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1886 case INDEX_op_add_i64:
1887 if (const_args[2]) {
1888 tgen64_addi(s, args[0], args[2]);
1890 tcg_out_insn(s, RRE, AGR, args[0], args[2]);
1893 case INDEX_op_sub_i64:
1894 if (const_args[2]) {
1895 tgen64_addi(s, args[0], -args[2]);
1897 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
1901 case INDEX_op_and_i64:
1902 if (const_args[2]) {
1903 tgen64_andi(s, args[0], args[2]);
1905 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
1908 case INDEX_op_or_i64:
1909 if (const_args[2]) {
1910 tgen64_ori(s, args[0], args[2]);
1912 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
1915 case INDEX_op_xor_i64:
1916 if (const_args[2]) {
1917 tgen64_xori(s, args[0], args[2]);
1919 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
1923 case INDEX_op_neg_i64:
1924 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
1926 case INDEX_op_bswap64_i64:
1927 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
1930 case INDEX_op_mul_i64:
1931 if (const_args[2]) {
1932 if (args[2] == (int16_t)args[2]) {
1933 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
1935 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
1938 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
1942 case INDEX_op_div2_i64:
1943 /* ??? We get an unnecessary sign-extension of the dividend
1944 into R3 with this definition, but since we do in fact always
1945 produce both quotient and remainder, using INDEX_op_div_i64
1946 instead would require jumping through even more hoops. */
1947 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
1949 case INDEX_op_divu2_i64:
1950 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
1953 case INDEX_op_shl_i64:
1956 if (const_args[2]) {
1957 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
1959 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
1962 case INDEX_op_shr_i64:
1965 case INDEX_op_sar_i64:
1969 case INDEX_op_rotl_i64:
1970 if (const_args[2]) {
1971 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
1972 TCG_REG_NONE, args[2]);
1974 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
1977 case INDEX_op_rotr_i64:
1978 if (const_args[2]) {
1979 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
1980 TCG_REG_NONE, (64 - args[2]) & 63);
1982 /* We can use the smaller 32-bit negate because only the
1983 low 6 bits are examined for the rotate. */
1984 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1985 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
1989 case INDEX_op_ext8s_i64:
1990 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
1992 case INDEX_op_ext16s_i64:
1993 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
1995 case INDEX_op_ext32s_i64:
1996 tgen_ext32s(s, args[0], args[1]);
1998 case INDEX_op_ext8u_i64:
1999 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2001 case INDEX_op_ext16u_i64:
2002 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2004 case INDEX_op_ext32u_i64:
2005 tgen_ext32u(s, args[0], args[1]);
2008 case INDEX_op_brcond_i64:
2009 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2010 args[1], const_args[1], args[3]);
2012 case INDEX_op_setcond_i64:
2013 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2014 args[2], const_args[2]);
2017 case INDEX_op_qemu_ld32u:
2018 tcg_out_qemu_ld(s, args, LD_UINT32);
2020 case INDEX_op_qemu_ld32s:
2021 tcg_out_qemu_ld(s, args, LD_INT32);
2023 #endif /* TCG_TARGET_REG_BITS == 64 */
2026 /* This one is obsolete and never emitted. */
2031 fprintf(stderr, "unimplemented opc 0x%x\n", opc);
2036 static const TCGTargetOpDef s390_op_defs[] = {
2037 { INDEX_op_exit_tb, { } },
2038 { INDEX_op_goto_tb, { } },
2039 { INDEX_op_call, { "ri" } },
2040 { INDEX_op_jmp, { "ri" } },
2041 { INDEX_op_br, { } },
2043 { INDEX_op_mov_i32, { "r", "r" } },
2044 { INDEX_op_movi_i32, { "r" } },
2046 { INDEX_op_ld8u_i32, { "r", "r" } },
2047 { INDEX_op_ld8s_i32, { "r", "r" } },
2048 { INDEX_op_ld16u_i32, { "r", "r" } },
2049 { INDEX_op_ld16s_i32, { "r", "r" } },
2050 { INDEX_op_ld_i32, { "r", "r" } },
2051 { INDEX_op_st8_i32, { "r", "r" } },
2052 { INDEX_op_st16_i32, { "r", "r" } },
2053 { INDEX_op_st_i32, { "r", "r" } },
2055 { INDEX_op_add_i32, { "r", "0", "rWI" } },
2056 { INDEX_op_sub_i32, { "r", "0", "rWNI" } },
2057 { INDEX_op_mul_i32, { "r", "0", "rK" } },
2059 { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
2060 { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
2062 { INDEX_op_and_i32, { "r", "0", "rWA" } },
2063 { INDEX_op_or_i32, { "r", "0", "rWO" } },
2064 { INDEX_op_xor_i32, { "r", "0", "rWX" } },
2066 { INDEX_op_neg_i32, { "r", "r" } },
2068 { INDEX_op_shl_i32, { "r", "0", "Ri" } },
2069 { INDEX_op_shr_i32, { "r", "0", "Ri" } },
2070 { INDEX_op_sar_i32, { "r", "0", "Ri" } },
2072 { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
2073 { INDEX_op_rotr_i32, { "r", "r", "Ri" } },
2075 { INDEX_op_ext8s_i32, { "r", "r" } },
2076 { INDEX_op_ext8u_i32, { "r", "r" } },
2077 { INDEX_op_ext16s_i32, { "r", "r" } },
2078 { INDEX_op_ext16u_i32, { "r", "r" } },
2080 { INDEX_op_bswap16_i32, { "r", "r" } },
2081 { INDEX_op_bswap32_i32, { "r", "r" } },
2083 { INDEX_op_brcond_i32, { "r", "rWC" } },
2084 { INDEX_op_setcond_i32, { "r", "r", "rWC" } },
2086 { INDEX_op_qemu_ld8u, { "r", "L" } },
2087 { INDEX_op_qemu_ld8s, { "r", "L" } },
2088 { INDEX_op_qemu_ld16u, { "r", "L" } },
2089 { INDEX_op_qemu_ld16s, { "r", "L" } },
2090 { INDEX_op_qemu_ld32, { "r", "L" } },
2091 { INDEX_op_qemu_ld64, { "r", "L" } },
2093 { INDEX_op_qemu_st8, { "L", "L" } },
2094 { INDEX_op_qemu_st16, { "L", "L" } },
2095 { INDEX_op_qemu_st32, { "L", "L" } },
2096 { INDEX_op_qemu_st64, { "L", "L" } },
2098 #if defined(__s390x__)
2099 { INDEX_op_mov_i64, { "r", "r" } },
2100 { INDEX_op_movi_i64, { "r" } },
2102 { INDEX_op_ld8u_i64, { "r", "r" } },
2103 { INDEX_op_ld8s_i64, { "r", "r" } },
2104 { INDEX_op_ld16u_i64, { "r", "r" } },
2105 { INDEX_op_ld16s_i64, { "r", "r" } },
2106 { INDEX_op_ld32u_i64, { "r", "r" } },
2107 { INDEX_op_ld32s_i64, { "r", "r" } },
2108 { INDEX_op_ld_i64, { "r", "r" } },
2110 { INDEX_op_st8_i64, { "r", "r" } },
2111 { INDEX_op_st16_i64, { "r", "r" } },
2112 { INDEX_op_st32_i64, { "r", "r" } },
2113 { INDEX_op_st_i64, { "r", "r" } },
2115 { INDEX_op_add_i64, { "r", "0", "rI" } },
2116 { INDEX_op_sub_i64, { "r", "0", "rNI" } },
2117 { INDEX_op_mul_i64, { "r", "0", "rK" } },
2119 { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
2120 { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
2122 { INDEX_op_and_i64, { "r", "0", "rA" } },
2123 { INDEX_op_or_i64, { "r", "0", "rO" } },
2124 { INDEX_op_xor_i64, { "r", "0", "rX" } },
2126 { INDEX_op_neg_i64, { "r", "r" } },
2128 { INDEX_op_shl_i64, { "r", "r", "Ri" } },
2129 { INDEX_op_shr_i64, { "r", "r", "Ri" } },
2130 { INDEX_op_sar_i64, { "r", "r", "Ri" } },
2132 { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
2133 { INDEX_op_rotr_i64, { "r", "r", "Ri" } },
2135 { INDEX_op_ext8s_i64, { "r", "r" } },
2136 { INDEX_op_ext8u_i64, { "r", "r" } },
2137 { INDEX_op_ext16s_i64, { "r", "r" } },
2138 { INDEX_op_ext16u_i64, { "r", "r" } },
2139 { INDEX_op_ext32s_i64, { "r", "r" } },
2140 { INDEX_op_ext32u_i64, { "r", "r" } },
2142 { INDEX_op_bswap16_i64, { "r", "r" } },
2143 { INDEX_op_bswap32_i64, { "r", "r" } },
2144 { INDEX_op_bswap64_i64, { "r", "r" } },
2146 { INDEX_op_brcond_i64, { "r", "rC" } },
2147 { INDEX_op_setcond_i64, { "r", "r", "rC" } },
2149 { INDEX_op_qemu_ld32u, { "r", "L" } },
2150 { INDEX_op_qemu_ld32s, { "r", "L" } },
2156 /* ??? Linux kernels provide an AUXV entry AT_HWCAP that provides most of
2157 this information. However, getting at that entry is not easy this far
2158 away from main. Our options are: start searching from environ, but
2159 that fails as soon as someone does a setenv in between. Read the data
2160 from /proc/self/auxv. Or do the probing ourselves. The only thing
2161 extra that AT_HWCAP gives us is HWCAP_S390_HIGH_GPRS, which indicates
2162 that the kernel saves all 64-bits of the registers around traps while
2163 in 31-bit mode. But this is true of all "recent" kernels (ought to dig
2164 back and see from when this might not be true). */
2168 static volatile sig_atomic_t got_sigill;
2170 static void sigill_handler(int sig)
2175 static void query_facilities(void)
2177 struct sigaction sa_old, sa_new;
2178 register int r0 __asm__("0");
2179 register void *r1 __asm__("1");
2182 memset(&sa_new, 0, sizeof(sa_new));
2183 sa_new.sa_handler = sigill_handler;
2184 sigaction(SIGILL, &sa_new, &sa_old);
2186 /* First, try STORE FACILITY LIST EXTENDED. If this is present, then
2187 we need not do any more probing. Unfortunately, this itself is an
2188 extension and the original STORE FACILITY LIST instruction is
2189 kernel-only, storing its results at absolute address 200. */
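/* .word 0xb2b0,0x1000 is "stfle 0(%r1)"; %r0 = 0 requests a single
   doubleword of facility bits. */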
2192 asm volatile(".word 0xb2b0,0x1000"
2193 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
2196 /* STORE FACILITY LIST EXTENDED is not available. Probe for one of each
2197 kind of instruction that we're interested in. */
2198 /* ??? Possibly some of these are in practice never present unless
2199 the store-facility-extended facility is also present. But since
2200 that isn't documented it's just better to probe for each. */
2202 /* Test for z/Architecture. Required even in 31-bit mode. */
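/* .word 0xb908,0x0000 is "lgr %r0,%r0", which exists only under
   z/Architecture. */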
2205 asm volatile(".word 0xb908,0x0000" : "=r"(r0) : : "cc");
2207 facilities |= FACILITY_ZARCH_ACTIVE;
2210 /* Test for long displacement. */
2214 asm volatile(".word 0xe300,0x1000,0x0058"
2215 : "=r"(r0) : "r"(r1) : "cc");
2217 facilities |= FACILITY_LONG_DISP;
2220 /* Test for extended immediates. */
2223 asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
2225 facilities |= FACILITY_EXT_IMM;
2228 /* Test for general-instructions-extension. */
2231 asm volatile(".word 0xc201,0x0000,0x0001");
2233 facilities |= FACILITY_GEN_INST_EXT;
2237 sigaction(SIGILL, &sa_old, NULL);
2239 /* The translator currently uses these extensions unconditionally.
2240 Pruning this back to the base ESA/390 architecture doesn't seem
2241 worthwhile, since even the KVM target requires z/Arch. */
2243 if ((facilities & FACILITY_ZARCH_ACTIVE) == 0) {
2244 fprintf(stderr, "TCG: z/Arch facility is required.\n");
2245 fprintf(stderr, "TCG: Boot with a 64-bit enabled kernel.\n");
2248 if ((facilities & FACILITY_LONG_DISP) == 0) {
2249 fprintf(stderr, "TCG: long-displacement facility is required.\n");
2253 /* So far there's just enough support for 31-bit mode to let the
2254 compile succeed. This is good enough to run QEMU with KVM. */
2255 if (sizeof(void *) != 8) {
2256 fprintf(stderr, "TCG: 31-bit mode is not supported.\n");
2265 static void tcg_target_init(TCGContext *s)
2267 #if !defined(CONFIG_USER_ONLY)
2269 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) {
2276 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2277 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2279 tcg_regset_clear(tcg_target_call_clobber_regs);
2280 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2281 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2282 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2283 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2284 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2285 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
2286 /* The return register can be considered call-clobbered. */
2287 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2289 tcg_regset_clear(s->reserved_regs);
2290 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2291 /* XXX many insns can't be used with R0, so we better avoid it for now */
2292 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2293 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2295 tcg_add_target_add_op_defs(s390_op_defs);
2296 tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
2297 CPU_TEMP_BUF_NLONGS * sizeof(long));
2300 static void tcg_target_qemu_prologue(TCGContext *s)
2302 /* stmg %r6,%r15,48(%r15) (save registers) */
2303 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
2305 /* aghi %r15,-160 (stack frame) */
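/* 160 bytes is the minimum register save area / stack frame required by
   the s390x ELF ABI. */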
2306 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -160);
2308 if (GUEST_BASE >= 0x80000) {
2309 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
2310 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2313 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2314 /* br %r3 (go to TB) */
2315 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
2317 tb_ret_addr = s->code_ptr;
2319 /* lmg %r6,%r15,208(%r15) (restore registers) */
2320 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 208);
2322 /* br %r14 (return) */
2323 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);