2 * Tiny Code Generator for QEMU
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 #include "tcg-be-ldst.h"
29 /* We only support generating code for 64-bit mode. */
30 #if TCG_TARGET_REG_BITS != 64
31 #error "unsupported code generation mode"
36 /* ??? The translation blocks produced by TCG are generally small enough to
37 be entirely reachable with a 16-bit displacement. Leaving the option for
38 a 32-bit displacement here Just In Case. */
39 #define USE_LONG_BRANCHES 0
41 #define TCG_CT_CONST_MULI 0x100
42 #define TCG_CT_CONST_ORI 0x200
43 #define TCG_CT_CONST_XORI 0x400
44 #define TCG_CT_CONST_CMPI 0x800
45 #define TCG_CT_CONST_ADLI 0x1000
47 /* In several places within the instruction set, 0 means "no register"
48 rather than TCG_REG_R0. */
49 #define TCG_REG_NONE 0
51 /* A scratch register that may be used throughout the backend. */
52 #define TCG_TMP0 TCG_REG_R14
54 #ifndef CONFIG_SOFTMMU
55 #define TCG_GUEST_BASE_REG TCG_REG_R13
58 /* All of the following instructions are prefixed with their instruction
59 format, and are defined as 8- or 16-bit quantities, even when the two
60 halves of the 16-bit quantity may appear 32 bits apart in the insn.
61 This makes it easy to copy the values from the tables in Appendix B. */
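/* For example, an RSY-format entry keeps the leading opcode byte in bits
   8-15 and the trailing byte in bits 0-7 of the enum value;
   tcg_out_insn_RSY below splits those back apart when emitting the
   6-byte instruction. */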
62 typedef enum S390Opcode {
226 #ifdef CONFIG_DEBUG_TCG
227 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
228 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
229 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
233 /* Since R6 is a potential argument register, choose it last of the
234 call-saved registers. Likewise prefer the call-clobbered registers
235 in reverse order to maximize the chance of avoiding the arguments. */
236 static const int tcg_target_reg_alloc_order[] = {
237 /* Call saved registers. */
246 /* Call clobbered registers. */
250 /* Argument registers, in reverse order of allocation. */
257 static const int tcg_target_call_iarg_regs[] = {
265 static const int tcg_target_call_oarg_regs[] = {
273 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
274 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
275 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
276 #define S390_CC_NEVER 0
277 #define S390_CC_ALWAYS 15
279 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
280 static const uint8_t tcg_cond_to_s390_cond[] = {
281 [TCG_COND_EQ] = S390_CC_EQ,
282 [TCG_COND_NE] = S390_CC_NE,
283 [TCG_COND_LT] = S390_CC_LT,
284 [TCG_COND_LE] = S390_CC_LE,
285 [TCG_COND_GT] = S390_CC_GT,
286 [TCG_COND_GE] = S390_CC_GE,
287 [TCG_COND_LTU] = S390_CC_LT,
288 [TCG_COND_LEU] = S390_CC_LE,
289 [TCG_COND_GTU] = S390_CC_GT,
290 [TCG_COND_GEU] = S390_CC_GE,
293 /* Condition codes that result from a LOAD AND TEST. Here, we have no
294 unsigned instruction variant; however, since the test is against zero we
295 can re-map the outcomes appropriately. */
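/* For instance, LTU against zero can never be true and GEU is always true,
   while LEU and GTU against zero degenerate to EQ and NE; the table below
   encodes exactly those remappings. */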
296 static const uint8_t tcg_cond_to_ltr_cond[] = {
297 [TCG_COND_EQ] = S390_CC_EQ,
298 [TCG_COND_NE] = S390_CC_NE,
299 [TCG_COND_LT] = S390_CC_LT,
300 [TCG_COND_LE] = S390_CC_LE,
301 [TCG_COND_GT] = S390_CC_GT,
302 [TCG_COND_GE] = S390_CC_GE,
303 [TCG_COND_LTU] = S390_CC_NEVER,
304 [TCG_COND_LEU] = S390_CC_EQ,
305 [TCG_COND_GTU] = S390_CC_NE,
306 [TCG_COND_GEU] = S390_CC_ALWAYS,
309 #ifdef CONFIG_SOFTMMU
310 static void * const qemu_ld_helpers[16] = {
311 [MO_UB] = helper_ret_ldub_mmu,
312 [MO_SB] = helper_ret_ldsb_mmu,
313 [MO_LEUW] = helper_le_lduw_mmu,
314 [MO_LESW] = helper_le_ldsw_mmu,
315 [MO_LEUL] = helper_le_ldul_mmu,
316 [MO_LESL] = helper_le_ldsl_mmu,
317 [MO_LEQ] = helper_le_ldq_mmu,
318 [MO_BEUW] = helper_be_lduw_mmu,
319 [MO_BESW] = helper_be_ldsw_mmu,
320 [MO_BEUL] = helper_be_ldul_mmu,
321 [MO_BESL] = helper_be_ldsl_mmu,
322 [MO_BEQ] = helper_be_ldq_mmu,
325 static void * const qemu_st_helpers[16] = {
326 [MO_UB] = helper_ret_stb_mmu,
327 [MO_LEUW] = helper_le_stw_mmu,
328 [MO_LEUL] = helper_le_stl_mmu,
329 [MO_LEQ] = helper_le_stq_mmu,
330 [MO_BEUW] = helper_be_stw_mmu,
331 [MO_BEUL] = helper_be_stl_mmu,
332 [MO_BEQ] = helper_be_stq_mmu,
336 static tcg_insn_unit *tb_ret_addr;
338 /* A list of relevant facilities used by this translator. Some of these
339 are required for proper operation, and these are checked at startup. */
341 #define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
342 #define FACILITY_LONG_DISP (1ULL << (63 - 18))
343 #define FACILITY_EXT_IMM (1ULL << (63 - 21))
344 #define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
345 #define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
347 static uint64_t facilities;
349 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
350 intptr_t value, intptr_t addend)
352 intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
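/* code_ptr points at the 2-byte displacement field, one insn unit past the
   start of the instruction that PC-relative offsets are measured from, hence
   the "- 1".  The difference is already in halfwords because tcg_insn_unit
   is 16 bits on this target. */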
353 tcg_debug_assert(addend == -2);
357 tcg_debug_assert(pcrel2 == (int16_t)pcrel2);
358 tcg_patch16(code_ptr, pcrel2);
361 tcg_debug_assert(pcrel2 == (int32_t)pcrel2);
362 tcg_patch32(code_ptr, pcrel2);
370 /* parse target specific constraints */
371 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
373 const char *ct_str = *pct_str;
376 case 'r': /* all registers */
377 ct->ct |= TCG_CT_REG;
378 tcg_regset_set32(ct->u.regs, 0, 0xffff);
380 case 'R': /* not R0 */
381 ct->ct |= TCG_CT_REG;
382 tcg_regset_set32(ct->u.regs, 0, 0xffff);
383 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
385 case 'L': /* qemu_ld/st constraint */
386 ct->ct |= TCG_CT_REG;
387 tcg_regset_set32(ct->u.regs, 0, 0xffff);
388 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
389 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
390 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
392 case 'a': /* force R2 for division */
393 ct->ct |= TCG_CT_REG;
394 tcg_regset_clear(ct->u.regs);
395 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
397 case 'b': /* force R3 for division */
398 ct->ct |= TCG_CT_REG;
399 tcg_regset_clear(ct->u.regs);
400 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
403 ct->ct |= TCG_CT_CONST_ADLI;
406 ct->ct |= TCG_CT_CONST_MULI;
409 ct->ct |= TCG_CT_CONST_ORI;
412 ct->ct |= TCG_CT_CONST_XORI;
415 ct->ct |= TCG_CT_CONST_CMPI;
426 /* Immediates to be used with logical OR. This is an optimization only,
427 since a full 64-bit immediate OR can always be performed with 4 sequential
428 OI[LH][LH] instructions. What we're looking for are immediates that we
429 can load efficiently, such that the immediate load plus the reg-reg OR
430 is smaller than the sequential OI's. */
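/* As a sketch of the trade-off: a value such as -2 has all four halfwords
   non-zero and would need four OI[LH][LH] insns, but it can be loaded with
   a single LGHI and combined with one reg-reg OR, so it is rejected here. */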
432 static int tcg_match_ori(TCGType type, tcg_target_long val)
434 if (facilities & FACILITY_EXT_IMM) {
435 if (type == TCG_TYPE_I32) {
436 /* All 32-bit ORs can be performed with 1 48-bit insn. */
441 /* Look for negative values. These are best to load with LGHI. */
443 if (val == (int16_t)val) {
446 if (facilities & FACILITY_EXT_IMM) {
447 if (val == (int32_t)val) {
456 /* Immediates to be used with logical XOR. This is almost, but not quite,
457 only an optimization. XOR with immediate is only supported with the
458 extended-immediate facility. That said, there are a few patterns for
459 which it is better to load the value into a register first. */
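/* E.g. a 64-bit XOR with -1 is cheaper as LGHI -1 plus a reg-reg XOR than
   as the XILF/XIHF pair, which is why small negative values are rejected. */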
461 static int tcg_match_xori(TCGType type, tcg_target_long val)
463 if ((facilities & FACILITY_EXT_IMM) == 0) {
467 if (type == TCG_TYPE_I32) {
468 /* All 32-bit XORs can be performed with 1 48-bit insn. */
472 /* Look for negative values. These are best to load with LGHI. */
473 if (val < 0 && val == (int32_t)val) {
480 /* Immediates to be used with comparisons. */
482 static int tcg_match_cmpi(TCGType type, tcg_target_long val)
484 if (facilities & FACILITY_EXT_IMM) {
485 /* The COMPARE IMMEDIATE instruction is available. */
486 if (type == TCG_TYPE_I32) {
487 /* We have a 32-bit immediate and can compare against anything. */
490 /* ??? We have no insight here into whether the comparison is
491 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
492 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
493 a 32-bit unsigned immediate. If we were to use the (semi)
494 obvious "val == (int32_t)val" we would be enabling unsigned
495 comparisons vs very large numbers. The only solution is to
496 take the intersection of the ranges. */
497 /* ??? Another possible solution is to simply lie and allow all
498 constants here and force the out-of-range values into a temp
499 register in tgen_cmp when we have knowledge of the actual
500 comparison code in use. */
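/* The intersection of the signed range [-2^31, 2^31-1] and the unsigned
   range [0, 2^32-1] is [0, 0x7fffffff], hence the test below. */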
501 return val >= 0 && val <= 0x7fffffff;
504 /* Only the LOAD AND TEST instruction is available. */
509 /* Immediates to be used with add2/sub2. */
511 static int tcg_match_add2i(TCGType type, tcg_target_long val)
513 if (facilities & FACILITY_EXT_IMM) {
514 if (type == TCG_TYPE_I32) {
516 } else if (val >= -0xffffffffll && val <= 0xffffffffll) {
523 /* Test if a constant matches the constraint. */
524 static int tcg_target_const_match(tcg_target_long val, TCGType type,
525 const TCGArgConstraint *arg_ct)
529 if (ct & TCG_CT_CONST) {
533 if (type == TCG_TYPE_I32) {
537 /* The following are mutually exclusive. */
538 if (ct & TCG_CT_CONST_MULI) {
539 /* Immediates that may be used with multiply. If we have the
540 general-instruction-extensions, then we have MULTIPLY SINGLE
541 IMMEDIATE with a signed 32-bit, otherwise we have only
542 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
543 if (facilities & FACILITY_GEN_INST_EXT) {
544 return val == (int32_t)val;
546 return val == (int16_t)val;
548 } else if (ct & TCG_CT_CONST_ADLI) {
549 return tcg_match_add2i(type, val);
550 } else if (ct & TCG_CT_CONST_ORI) {
551 return tcg_match_ori(type, val);
552 } else if (ct & TCG_CT_CONST_XORI) {
553 return tcg_match_xori(type, val);
554 } else if (ct & TCG_CT_CONST_CMPI) {
555 return tcg_match_cmpi(type, val);
561 /* Emit instructions according to the given instruction format. */
563 static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
565 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
568 static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
569 TCGReg r1, TCGReg r2)
571 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
574 static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
575 TCGReg r1, TCGReg r2, int m3)
577 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
580 static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
582 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
585 static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
587 tcg_out16(s, op | (r1 << 4));
591 static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
592 TCGReg b2, TCGReg r3, int disp)
594 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
598 static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
599 TCGReg b2, TCGReg r3, int disp)
601 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
602 tcg_out32(s, (op & 0xff) | (b2 << 28)
603 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
606 #define tcg_out_insn_RX tcg_out_insn_RS
607 #define tcg_out_insn_RXY tcg_out_insn_RSY
609 /* Emit an opcode with "type-checking" of the format. */
610 #define tcg_out_insn(S, FMT, OP, ...) \
611 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
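/* For example, tcg_out_insn(s, RI, AGHI, r, v) expands to
   tcg_out_insn_RI(s, RI_AGHI, r, v). */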
614 /* emit 64-bit shifts */
615 static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
616 TCGReg src, TCGReg sh_reg, int sh_imm)
618 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
621 /* emit 32-bit shifts */
622 static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
623 TCGReg sh_reg, int sh_imm)
625 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
628 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
631 if (type == TCG_TYPE_I32) {
632 tcg_out_insn(s, RR, LR, dst, src);
634 tcg_out_insn(s, RRE, LGR, dst, src);
639 /* load a register with an immediate value */
640 static void tcg_out_movi(TCGContext *s, TCGType type,
641 TCGReg ret, tcg_target_long sval)
643 static const S390Opcode lli_insns[4] = {
644 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
647 tcg_target_ulong uval = sval;
650 if (type == TCG_TYPE_I32) {
651 uval = (uint32_t)sval;
652 sval = (int32_t)sval;
655 /* Try all 32-bit insns that can load it in one go. */
656 if (sval >= -0x8000 && sval < 0x8000) {
657 tcg_out_insn(s, RI, LGHI, ret, sval);
661 for (i = 0; i < 4; i++) {
662 tcg_target_long mask = 0xffffull << i*16;
663 if ((uval & mask) == uval) {
664 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
669 /* Try all 48-bit insns that can load it in one go. */
670 if (facilities & FACILITY_EXT_IMM) {
671 if (sval == (int32_t)sval) {
672 tcg_out_insn(s, RIL, LGFI, ret, sval);
675 if (uval <= 0xffffffff) {
676 tcg_out_insn(s, RIL, LLILF, ret, uval);
679 if ((uval & 0xffffffff) == 0) {
680 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
685 /* Try for PC-relative address load. */
686 if ((sval & 1) == 0) {
687 ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
688 if (off == (int32_t)off) {
689 tcg_out_insn(s, RIL, LARL, ret, off);
694 /* If extended immediates are not present, then we may have to issue
695 several instructions to load the low 32 bits. */
696 if (!(facilities & FACILITY_EXT_IMM)) {
697 /* A 32-bit unsigned value can be loaded in 2 insns. And given
698 that the lli_insns loop above did not succeed, we know that
699 both insns are required. */
700 if (uval <= 0xffffffff) {
701 tcg_out_insn(s, RI, LLILL, ret, uval);
702 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
706 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
707 We first want to make sure that all the high bits get set. With
708 luck the low 16 bits can be considered negative, performing that for
709 free; otherwise we load an explicit -1. */
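/* A worked example: for 0xffffffffdead1234 the low halfword 0x1234 is not
   negative, so we emit LGHI -1; IILL 0x1234; IILH 0xdead.  For
   0xffffffffdead8234 the LGHI of 0x8234 sign-extends to set the high bits
   for free, and only IILH 0xdead is needed afterwards. */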
710 if (sval >> 31 >> 1 == -1) {
712 tcg_out_insn(s, RI, LGHI, ret, uval);
714 tcg_out_insn(s, RI, LGHI, ret, -1);
715 tcg_out_insn(s, RI, IILL, ret, uval);
717 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
722 /* If we get here, both the high and low parts have non-zero bits. */
724 /* Recurse to load the lower 32-bits. */
725 tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
727 /* Insert data into the high 32-bits. */
728 uval = uval >> 31 >> 1;
729 if (facilities & FACILITY_EXT_IMM) {
730 if (uval < 0x10000) {
731 tcg_out_insn(s, RI, IIHL, ret, uval);
732 } else if ((uval & 0xffff) == 0) {
733 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
735 tcg_out_insn(s, RIL, IIHF, ret, uval);
739 tcg_out_insn(s, RI, IIHL, ret, uval);
741 if (uval & 0xffff0000) {
742 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
748 /* Emit a load/store type instruction. Inputs are:
749 DATA: The register to be loaded or stored.
750 BASE+OFS: The effective address.
751 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
752 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
754 static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
755 TCGReg data, TCGReg base, TCGReg index,
758 if (ofs < -0x80000 || ofs >= 0x80000) {
759 /* Combine the low 20 bits of the offset with the actual load insn;
760 the high 44 bits must come from an immediate load. */
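/* ((ofs & 0xfffff) ^ 0x80000) - 0x80000 sign-extends the low 20 bits; e.g.
   for ofs = 0x12ffff0 this gives low = -0x10, so the movi loads 0x1300000. */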
761 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
762 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
765 /* If we were already given an index register, add it in. */
766 if (index != TCG_REG_NONE) {
767 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
772 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
773 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
775 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
780 /* load data without address translation or endianness conversion */
781 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
782 TCGReg base, intptr_t ofs)
784 if (type == TCG_TYPE_I32) {
785 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
787 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
791 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
792 TCGReg base, intptr_t ofs)
794 if (type == TCG_TYPE_I32) {
795 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
797 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
801 /* load data from an absolute host address */
802 static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
804 intptr_t addr = (intptr_t)abs;
806 if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
807 ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
808 if (disp == (int32_t)disp) {
809 if (type == TCG_TYPE_I32) {
810 tcg_out_insn(s, RIL, LRL, dest, disp);
812 tcg_out_insn(s, RIL, LGRL, dest, disp);
818 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
819 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
822 static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
823 int msb, int lsb, int ofs, int z)
826 tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
827 tcg_out16(s, (msb << 8) | (z << 7) | lsb);
828 tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
831 static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
833 if (facilities & FACILITY_EXT_IMM) {
834 tcg_out_insn(s, RRE, LGBR, dest, src);
838 if (type == TCG_TYPE_I32) {
840 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
842 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
844 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
846 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
847 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
851 static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
853 if (facilities & FACILITY_EXT_IMM) {
854 tcg_out_insn(s, RRE, LLGCR, dest, src);
859 tcg_out_movi(s, type, TCG_TMP0, 0xff);
862 tcg_out_movi(s, type, dest, 0xff);
864 if (type == TCG_TYPE_I32) {
865 tcg_out_insn(s, RR, NR, dest, src);
867 tcg_out_insn(s, RRE, NGR, dest, src);
871 static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
873 if (facilities & FACILITY_EXT_IMM) {
874 tcg_out_insn(s, RRE, LGHR, dest, src);
878 if (type == TCG_TYPE_I32) {
880 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
882 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
884 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
886 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
887 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
891 static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
893 if (facilities & FACILITY_EXT_IMM) {
894 tcg_out_insn(s, RRE, LLGHR, dest, src);
899 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
902 tcg_out_movi(s, type, dest, 0xffff);
904 if (type == TCG_TYPE_I32) {
905 tcg_out_insn(s, RR, NR, dest, src);
907 tcg_out_insn(s, RRE, NGR, dest, src);
911 static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
913 tcg_out_insn(s, RRE, LGFR, dest, src);
916 static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
918 tcg_out_insn(s, RRE, LLGFR, dest, src);
921 /* Accept bit patterns like these:
926 Copied from gcc sources. */
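/* E.g. 0x00fff000 (one contiguous run) and 0xff0000ff (a run that wraps
   around bit 0, which RISBG can also express) are accepted, while
   0x00ff00ff has two separate runs and is rejected. */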
927 static inline bool risbg_mask(uint64_t c)
930 /* We don't change the number of transitions by inverting,
931 so make sure we start with the LSB zero. */
935 /* Reject all zeros or all ones. */
939 /* Find the first transition. */
941 /* Invert to look for a second transition. */
943 /* Erase the first transition. */
945 /* Find the second transition, if any. */
947 /* Match if all the bits are 1's, or if c is zero. */
951 static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
954 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
955 /* Achieve wraparound by swapping msb and lsb. */
956 msb = 64 - ctz64(~val);
957 lsb = clz64(~val) - 1;
960 lsb = 63 - ctz64(val);
962 tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
965 static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
967 static const S390Opcode ni_insns[4] = {
968 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
970 static const S390Opcode nif_insns[2] = {
973 uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
976 /* Look for the zero-extensions. */
977 if ((val & valid) == 0xffffffff) {
978 tgen_ext32u(s, dest, dest);
981 if (facilities & FACILITY_EXT_IMM) {
982 if ((val & valid) == 0xff) {
983 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
986 if ((val & valid) == 0xffff) {
987 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
992 /* Try all 32-bit insns that can perform it in one go. */
993 for (i = 0; i < 4; i++) {
994 tcg_target_ulong mask = ~(0xffffull << i*16);
995 if (((val | ~valid) & mask) == mask) {
996 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
1001 /* Try all 48-bit insns that can perform it in one go. */
1002 if (facilities & FACILITY_EXT_IMM) {
1003 for (i = 0; i < 2; i++) {
1004 tcg_target_ulong mask = ~(0xffffffffull << i*32);
1005 if (((val | ~valid) & mask) == mask) {
1006 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1011 if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
1012 tgen_andi_risbg(s, dest, dest, val);
1016 /* Fall back to loading the constant. */
1017 tcg_out_movi(s, type, TCG_TMP0, val);
1018 if (type == TCG_TYPE_I32) {
1019 tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
1021 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
1025 static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1027 static const S390Opcode oi_insns[4] = {
1028 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1030 static const S390Opcode nif_insns[2] = {
1036 /* Look for no-op. */
1041 if (facilities & FACILITY_EXT_IMM) {
1042 /* Try all 32-bit insns that can perform it in one go. */
1043 for (i = 0; i < 4; i++) {
1044 tcg_target_ulong mask = (0xffffull << i*16);
1045 if ((val & mask) != 0 && (val & ~mask) == 0) {
1046 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1051 /* Try all 48-bit insns that can perform it in one go. */
1052 for (i = 0; i < 2; i++) {
1053 tcg_target_ulong mask = (0xffffffffull << i*32);
1054 if ((val & mask) != 0 && (val & ~mask) == 0) {
1055 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1060 /* Perform the OR via sequential modifications to the high and
1061 low parts. Do this via recursion to handle 16-bit vs 32-bit
1062 masks in each half. */
1063 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1064 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1066 /* With no extended-immediate facility, we don't need to be so
1067 clever. Just iterate over the insns and mask in the constant. */
1068 for (i = 0; i < 4; i++) {
1069 tcg_target_ulong mask = (0xffffull << i*16);
1070 if ((val & mask) != 0) {
1071 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1077 static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1079 /* Perform the xor by parts. */
1080 if (val & 0xffffffff) {
1081 tcg_out_insn(s, RIL, XILF, dest, val);
1083 if (val > 0xffffffff) {
1084 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1088 static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1089 TCGArg c2, int c2const)
1091 bool is_unsigned = is_unsigned_cond(c);
1094 if (type == TCG_TYPE_I32) {
1095 tcg_out_insn(s, RR, LTR, r1, r1);
1097 tcg_out_insn(s, RRE, LTGR, r1, r1);
1099 return tcg_cond_to_ltr_cond[c];
1102 if (type == TCG_TYPE_I32) {
1103 tcg_out_insn(s, RIL, CLFI, r1, c2);
1105 tcg_out_insn(s, RIL, CLGFI, r1, c2);
1108 if (type == TCG_TYPE_I32) {
1109 tcg_out_insn(s, RIL, CFI, r1, c2);
1111 tcg_out_insn(s, RIL, CGFI, r1, c2);
1117 if (type == TCG_TYPE_I32) {
1118 tcg_out_insn(s, RR, CLR, r1, c2);
1120 tcg_out_insn(s, RRE, CLGR, r1, c2);
1123 if (type == TCG_TYPE_I32) {
1124 tcg_out_insn(s, RR, CR, r1, c2);
1126 tcg_out_insn(s, RRE, CGR, r1, c2);
1130 return tcg_cond_to_s390_cond[c];
1133 static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
1134 TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
1142 /* The result of a compare has CC=2 for GT and CC=3 unused.
1143 ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
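/* So after the compare, the ALCGR computes dest = 0 + 0 + carry, and the
   carry is set exactly when the compare left CC=2, i.e. "greater". */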
1144 tgen_cmp(s, type, cond, c1, c2, c2const);
1145 tcg_out_movi(s, type, dest, 0);
1146 tcg_out_insn(s, RRE, ALCGR, dest, dest);
1151 /* We need "real" carry semantics, so use SUBTRACT LOGICAL
1152 instead of COMPARE LOGICAL. This needs an extra move. */
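/* This computes c1 - c2 (or c1 - constant); a borrow-free subtraction
   leaves CC & 2 set, which the ALCGR below turns into the 0/1 result. */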
1153 tcg_out_mov(s, type, TCG_TMP0, c1);
1155 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1156 if (type == TCG_TYPE_I32) {
1157 tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2);
1159 tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2);
1162 if (type == TCG_TYPE_I32) {
1163 tcg_out_insn(s, RR, SLR, TCG_TMP0, c2);
1165 tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2);
1167 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1169 tcg_out_insn(s, RRE, ALCGR, dest, dest);
1175 /* Swap operands so that we can use GEU/GTU/GT. */
1177 tcg_out_movi(s, type, TCG_TMP0, c2);
1186 if (cond == TCG_COND_LEU) {
1189 cond = tcg_swap_cond(cond);
1193 /* X != 0 is X > 0. */
1194 if (c2const && c2 == 0) {
1195 cond = TCG_COND_GTU;
1201 /* X == 0 is X <= 0 is 0 >= X. */
1202 if (c2const && c2 == 0) {
1203 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0);
1215 cc = tgen_cmp(s, type, cond, c1, c2, c2const);
1216 if (facilities & FACILITY_LOAD_ON_COND) {
1217 /* Emit: d = 0, t = 1, d = (cc ? t : d). */
1218 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1219 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
1220 tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
1222 /* Emit: d = 1; if (cc) goto over; d = 0; over: */
1223 tcg_out_movi(s, type, dest, 1);
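/* The displacement skips the 4-byte BRC itself plus the 4-byte load of 0
   that follows it; RI-format branch offsets are counted in halfwords,
   hence (4 + 4) >> 1. */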
1224 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1225 tcg_out_movi(s, type, dest, 0);
1229 static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1230 TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
1233 if (facilities & FACILITY_LOAD_ON_COND) {
1234 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1235 tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
1237 c = tcg_invert_cond(c);
1238 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1240 /* Emit: if (cc) goto over; dest = r3; over: */
1241 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1242 tcg_out_insn(s, RRE, LGR, dest, r3);
1246 bool tcg_target_deposit_valid(int ofs, int len)
1248 return (facilities & FACILITY_GEN_INST_EXT) != 0;
1251 static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1254 int lsb = (63 - ofs);          /* RISBG numbers bits with 0 as the MSB */
1255 int msb = lsb - (len - 1);
1256 tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
1259 static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
1261 ptrdiff_t off = dest - s->code_ptr;
1262 if (off == (int16_t)off) {
1263 tcg_out_insn(s, RI, BRC, cc, off);
1264 } else if (off == (int32_t)off) {
1265 tcg_out_insn(s, RIL, BRCL, cc, off);
1267 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1268 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1272 static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
1275 tgen_gotoi(s, cc, l->u.value_ptr);
1276 } else if (USE_LONG_BRANCHES) {
1277 tcg_out16(s, RIL_BRCL | (cc << 4));
1278 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, -2);
1281 tcg_out16(s, RI_BRC | (cc << 4));
1282 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, -2);
1287 static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1288 TCGReg r1, TCGReg r2, TCGLabel *l)
1293 off = l->u.value_ptr - s->code_ptr;
1295 /* We need to keep the offset unchanged for retranslation. */
1296 off = s->code_ptr[1];
1297 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
1300 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1302 tcg_out16(s, cc << 12 | (opc & 0xff));
1305 static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1306 TCGReg r1, int i2, TCGLabel *l)
1308 tcg_target_long off;
1311 off = l->u.value_ptr - s->code_ptr;
1313 /* We need to keep the offset unchanged for retranslation. */
1314 off = s->code_ptr[1];
1315 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
1318 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1320 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1323 static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1324 TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
1328 if (facilities & FACILITY_GEN_INST_EXT) {
1329 bool is_unsigned = is_unsigned_cond(c);
1333 cc = tcg_cond_to_s390_cond[c];
1336 opc = (type == TCG_TYPE_I32
1337 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1338 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
1339 tgen_compare_branch(s, opc, cc, r1, c2, l);
1343 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1344 If the immediate we've been given does not fit that range, we'll
1345 fall back to separate compare and branch instructions using the
1346 larger comparison range afforded by COMPARE IMMEDIATE. */
1347 if (type == TCG_TYPE_I32) {
1350 in_range = (uint32_t)c2 == (uint8_t)c2;
1353 in_range = (int32_t)c2 == (int8_t)c2;
1358 in_range = (uint64_t)c2 == (uint8_t)c2;
1361 in_range = (int64_t)c2 == (int8_t)c2;
1365 tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
1370 cc = tgen_cmp(s, type, c, r1, c2, c2const);
1371 tgen_branch(s, cc, l);
1374 static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
1376 ptrdiff_t off = dest - s->code_ptr;
1377 if (off == (int32_t)off) {
1378 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1380 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
1381 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1385 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
1386 TCGReg base, TCGReg index, int disp)
1388 switch (opc & (MO_SSIZE | MO_BSWAP)) {
1390 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1393 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1396 case MO_UW | MO_BSWAP:
1397 /* swapped unsigned halfword load with upper bits zeroed */
1398 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1399 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1402 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1405 case MO_SW | MO_BSWAP:
1406 /* swapped sign-extended halfword load */
1407 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1408 tgen_ext16s(s, TCG_TYPE_I64, data, data);
1411 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1414 case MO_UL | MO_BSWAP:
1415 /* swapped unsigned int load with upper bits zeroed */
1416 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1417 tgen_ext32u(s, data, data);
1420 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1423 case MO_SL | MO_BSWAP:
1424 /* swapped sign-extended int load */
1425 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1426 tgen_ext32s(s, data, data);
1429 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1432 case MO_Q | MO_BSWAP:
1433 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
1436 tcg_out_insn(s, RXY, LG, data, base, index, disp);
1444 static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
1445 TCGReg base, TCGReg index, int disp)
1447 switch (opc & (MO_SIZE | MO_BSWAP)) {
1449 if (disp >= 0 && disp < 0x1000) {
1450 tcg_out_insn(s, RX, STC, data, base, index, disp);
1452 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1456 case MO_UW | MO_BSWAP:
1457 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1460 if (disp >= 0 && disp < 0x1000) {
1461 tcg_out_insn(s, RX, STH, data, base, index, disp);
1463 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1467 case MO_UL | MO_BSWAP:
1468 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1471 if (disp >= 0 && disp < 0x1000) {
1472 tcg_out_insn(s, RX, ST, data, base, index, disp);
1474 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1478 case MO_Q | MO_BSWAP:
1479 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1482 tcg_out_insn(s, RXY, STG, data, base, index, disp);
1490 #if defined(CONFIG_SOFTMMU)
1491 /* We're expecting to use a 20-bit signed offset on the tlb memory ops.
1492 Using the offset of the second entry in the last tlb table ensures
1493 that we can index all of the elements of the first entry. */
1494 QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
1497 /* Load and compare a TLB entry, leaving the flags set. Loads the TLB
1498 addend into R2. Returns a register with the sanitized guest address. */
1499 static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
1500 int mem_index, bool is_ld)
1502 int s_mask = (1 << (opc & MO_SIZE)) - 1;
1506 /* For aligned accesses, we check the first byte and include the alignment
1507 bits within the address. For unaligned access, we check that we don't
1508 cross pages using the address of the last byte of the access. */
1509 if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
1511 tlb_mask = TARGET_PAGE_MASK | s_mask;
1514 tlb_mask = TARGET_PAGE_MASK;
1517 if (facilities & FACILITY_GEN_INST_EXT) {
1518 tcg_out_risbg(s, TCG_REG_R2, addr_reg,
1519 64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
1520 63 - CPU_TLB_ENTRY_BITS,
1521 64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
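/* The single RISBG above computes
   ((addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1)) << CPU_TLB_ENTRY_BITS,
   i.e. the byte offset of the TLB entry, matching the SRLG+AND fallback
   below. */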
1523 tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
1524 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
1526 tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
1529 tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
1530 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1531 tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
1532 tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
1533 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1534 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
1538 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1540 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
1542 if (TARGET_LONG_BITS == 32) {
1543 tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
1545 tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
1548 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1549 tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);
1551 if (TARGET_LONG_BITS == 32) {
1552 tgen_ext32u(s, TCG_REG_R3, addr_reg);
1558 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1559 TCGReg data, TCGReg addr,
1560 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
1562 TCGLabelQemuLdst *label = new_ldst_label(s);
1564 label->is_ld = is_ld;
1566 label->datalo_reg = data;
1567 label->addrlo_reg = addr;
1568 label->raddr = raddr;
1569 label->label_ptr[0] = label_ptr;
1572 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1574 TCGReg addr_reg = lb->addrlo_reg;
1575 TCGReg data_reg = lb->datalo_reg;
1576 TCGMemOpIdx oi = lb->oi;
1577 TCGMemOp opc = get_memop(oi);
1579 patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);
1581 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1582 if (TARGET_LONG_BITS == 64) {
1583 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1585 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
1586 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
1587 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
1588 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1590 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1593 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1595 TCGReg addr_reg = lb->addrlo_reg;
1596 TCGReg data_reg = lb->datalo_reg;
1597 TCGMemOpIdx oi = lb->oi;
1598 TCGMemOp opc = get_memop(oi);
1600 patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);
1602 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1603 if (TARGET_LONG_BITS == 64) {
1604 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1606 switch (opc & MO_SIZE) {
1608 tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1611 tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1614 tgen_ext32u(s, TCG_REG_R4, data_reg);
1617 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1622 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
1623 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
1624 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
1626 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
1629 static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1630 TCGReg *index_reg, tcg_target_long *disp)
1632 if (TARGET_LONG_BITS == 32) {
1633 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1634 *addr_reg = TCG_TMP0;
1636 if (guest_base < 0x80000) {
1637 *index_reg = TCG_REG_NONE;
1640 *index_reg = TCG_GUEST_BASE_REG;
1644 #endif /* CONFIG_SOFTMMU */
1646 static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1649 TCGMemOp opc = get_memop(oi);
1650 #ifdef CONFIG_SOFTMMU
1651 unsigned mem_index = get_mmuidx(oi);
1652 tcg_insn_unit *label_ptr;
1655 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
1657 /* We need to keep the offset unchanged for retranslation. */
1658 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1659 label_ptr = s->code_ptr;
1662 tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
1664 add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
1667 tcg_target_long disp;
1669 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1670 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1674 static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
1677 TCGMemOp opc = get_memop(oi);
1678 #ifdef CONFIG_SOFTMMU
1679 unsigned mem_index = get_mmuidx(oi);
1680 tcg_insn_unit *label_ptr;
1683 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
1685 /* We need to keep the offset unchanged for retranslation. */
1686 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1687 label_ptr = s->code_ptr;
1690 tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
1692 add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
1695 tcg_target_long disp;
1697 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1698 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1702 # define OP_32_64(x) \
1703 case glue(glue(INDEX_op_,x),_i32): \
1704 case glue(glue(INDEX_op_,x),_i64)
1706 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1707 const TCGArg *args, const int *const_args)
1713 case INDEX_op_exit_tb:
1715 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
1716 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1719 case INDEX_op_goto_tb:
1720 if (s->tb_jmp_insn_offset) {
1721 /* branch displacement must be aligned for atomic patching;
1722 * see if we need to add an extra nop before the branch
1724 if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1727 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
1728 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
1731 /* load address stored at s->tb_jmp_target_addr + args[0] */
1732 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0,
1733 s->tb_jmp_target_addr + args[0]);
1735 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1737 s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
1741 /* ??? LLC (RXY format) is only present with the extended-immediate
1742 facility, whereas LLGC is always present. */
1743 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1747 /* ??? LB is no smaller than LGB, so there is no point in using it. */
1748 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1752 /* ??? LLH (RXY format) is only present with the extended-immediate
1753 facility, whereas LLGH is always present. */
1754 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1757 case INDEX_op_ld16s_i32:
1758 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1761 case INDEX_op_ld_i32:
1762 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1766 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1767 TCG_REG_NONE, args[2]);
1771 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1772 TCG_REG_NONE, args[2]);
1775 case INDEX_op_st_i32:
1776 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1779 case INDEX_op_add_i32:
1780 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1781 if (const_args[2]) {
1784 if (a2 == (int16_t)a2) {
1785 tcg_out_insn(s, RI, AHI, a0, a2);
1788 if (facilities & FACILITY_EXT_IMM) {
1789 tcg_out_insn(s, RIL, AFI, a0, a2);
1793 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1794 } else if (a0 == a1) {
1795 tcg_out_insn(s, RR, AR, a0, a2);
1797 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1800 case INDEX_op_sub_i32:
1801 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1802 if (const_args[2]) {
1806 tcg_out_insn(s, RR, SR, args[0], args[2]);
1809 case INDEX_op_and_i32:
1810 if (const_args[2]) {
1811 tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
1813 tcg_out_insn(s, RR, NR, args[0], args[2]);
1816 case INDEX_op_or_i32:
1817 if (const_args[2]) {
1818 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1820 tcg_out_insn(s, RR, OR, args[0], args[2]);
1823 case INDEX_op_xor_i32:
1824 if (const_args[2]) {
1825 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1827 tcg_out_insn(s, RR, XR, args[0], args[2]);
1831 case INDEX_op_neg_i32:
1832 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1835 case INDEX_op_mul_i32:
1836 if (const_args[2]) {
1837 if ((int32_t)args[2] == (int16_t)args[2]) {
1838 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1840 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1843 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1847 case INDEX_op_div2_i32:
1848 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1850 case INDEX_op_divu2_i32:
1851 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1854 case INDEX_op_shl_i32:
1857 if (const_args[2]) {
1858 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1860 tcg_out_sh32(s, op, args[0], args[2], 0);
1863 case INDEX_op_shr_i32:
1866 case INDEX_op_sar_i32:
1870 case INDEX_op_rotl_i32:
1871 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1872 if (const_args[2]) {
1873 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1875 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1878 case INDEX_op_rotr_i32:
1879 if (const_args[2]) {
1880 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1881 TCG_REG_NONE, (32 - args[2]) & 31);
1883 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1884 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1888 case INDEX_op_ext8s_i32:
1889 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1891 case INDEX_op_ext16s_i32:
1892 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1894 case INDEX_op_ext8u_i32:
1895 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1897 case INDEX_op_ext16u_i32:
1898 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1902 /* The TCG bswap definition requires bits 0-47 already be zero.
1903 Thus we don't need the G-type insns to implement bswap16_i64. */
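/* LRVR reverses all four bytes of the low 32 bits, leaving the swapped
   halfword in bits 16-31; the shift right by 16 moves it back down. */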
1904 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1905 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1908 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1911 case INDEX_op_add2_i32:
1912 if (const_args[4]) {
1913 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
1915 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1917 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1919 case INDEX_op_sub2_i32:
1920 if (const_args[4]) {
1921 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
1923 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1925 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1929 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
1932 case INDEX_op_brcond_i32:
1933 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1934 args[1], const_args[1], arg_label(args[3]));
1936 case INDEX_op_setcond_i32:
1937 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1938 args[2], const_args[2]);
1940 case INDEX_op_movcond_i32:
1941 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
1942 args[2], const_args[2], args[3]);
1945 case INDEX_op_qemu_ld_i32:
1946 /* ??? Technically we can use a non-extending instruction. */
1947 case INDEX_op_qemu_ld_i64:
1948 tcg_out_qemu_ld(s, args[0], args[1], args[2]);
1950 case INDEX_op_qemu_st_i32:
1951 case INDEX_op_qemu_st_i64:
1952 tcg_out_qemu_st(s, args[0], args[1], args[2]);
1955 case INDEX_op_ld16s_i64:
1956 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1958 case INDEX_op_ld32u_i64:
1959 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1961 case INDEX_op_ld32s_i64:
1962 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1964 case INDEX_op_ld_i64:
1965 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1968 case INDEX_op_st32_i64:
1969 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1971 case INDEX_op_st_i64:
1972 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1975 case INDEX_op_add_i64:
1976 a0 = args[0], a1 = args[1], a2 = args[2];
1977 if (const_args[2]) {
1980 if (a2 == (int16_t)a2) {
1981 tcg_out_insn(s, RI, AGHI, a0, a2);
1984 if (facilities & FACILITY_EXT_IMM) {
1985 if (a2 == (int32_t)a2) {
1986 tcg_out_insn(s, RIL, AGFI, a0, a2);
1988 } else if (a2 == (uint32_t)a2) {
1989 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1991 } else if (-a2 == (uint32_t)-a2) {
1992 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
1997 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1998 } else if (a0 == a1) {
1999 tcg_out_insn(s, RRE, AGR, a0, a2);
2001 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
2004 case INDEX_op_sub_i64:
2005 a0 = args[0], a1 = args[1], a2 = args[2];
2006 if (const_args[2]) {
2010 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
2014 case INDEX_op_and_i64:
2015 if (const_args[2]) {
2016 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
2018 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
2021 case INDEX_op_or_i64:
2022 if (const_args[2]) {
2023 tgen64_ori(s, args[0], args[2]);
2025 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
2028 case INDEX_op_xor_i64:
2029 if (const_args[2]) {
2030 tgen64_xori(s, args[0], args[2]);
2032 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
2036 case INDEX_op_neg_i64:
2037 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2039 case INDEX_op_bswap64_i64:
2040 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2043 case INDEX_op_mul_i64:
2044 if (const_args[2]) {
2045 if (args[2] == (int16_t)args[2]) {
2046 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
2048 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
2051 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
2055 case INDEX_op_div2_i64:
2056 /* ??? We get an unnecessary sign-extension of the dividend
2057 into R3 with this definition, but as we do in fact always
2058 produce both quotient and remainder, using INDEX_op_div_i64
2059 instead would require jumping through even more hoops. */
2060 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
2062 case INDEX_op_divu2_i64:
2063 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
2065 case INDEX_op_mulu2_i64:
2066 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
2069 case INDEX_op_shl_i64:
2072 if (const_args[2]) {
2073 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2075 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2078 case INDEX_op_shr_i64:
2081 case INDEX_op_sar_i64:
2085 case INDEX_op_rotl_i64:
2086 if (const_args[2]) {
2087 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2088 TCG_REG_NONE, args[2]);
2090 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2093 case INDEX_op_rotr_i64:
2094 if (const_args[2]) {
2095 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2096 TCG_REG_NONE, (64 - args[2]) & 63);
2098 /* We can use the smaller 32-bit negate because only the
2099 low 6 bits are examined for the rotate. */
2100 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2101 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2105 case INDEX_op_ext8s_i64:
2106 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2108 case INDEX_op_ext16s_i64:
2109 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2111 case INDEX_op_ext_i32_i64:
2112 case INDEX_op_ext32s_i64:
2113 tgen_ext32s(s, args[0], args[1]);
2115 case INDEX_op_ext8u_i64:
2116 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2118 case INDEX_op_ext16u_i64:
2119 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2121 case INDEX_op_extu_i32_i64:
2122 case INDEX_op_ext32u_i64:
2123 tgen_ext32u(s, args[0], args[1]);
2126 case INDEX_op_add2_i64:
2127 if (const_args[4]) {
2128 if ((int64_t)args[4] >= 0) {
2129 tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2131 tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2134 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2136 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2138 case INDEX_op_sub2_i64:
2139 if (const_args[4]) {
2140 if ((int64_t)args[4] >= 0) {
2141 tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2143 tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2146 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2148 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2151 case INDEX_op_brcond_i64:
2152 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2153 args[1], const_args[1], arg_label(args[3]));
2155 case INDEX_op_setcond_i64:
2156 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2157 args[2], const_args[2]);
2159 case INDEX_op_movcond_i64:
2160 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2161 args[2], const_args[2], args[3]);
2165 tgen_deposit(s, args[0], args[2], args[3], args[4]);
2168 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2169 case INDEX_op_mov_i64:
2170 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2171 case INDEX_op_movi_i64:
2172 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2178 static const TCGTargetOpDef s390_op_defs[] = {
2179 { INDEX_op_exit_tb, { } },
2180 { INDEX_op_goto_tb, { } },
2181 { INDEX_op_br, { } },
2183 { INDEX_op_ld8u_i32, { "r", "r" } },
2184 { INDEX_op_ld8s_i32, { "r", "r" } },
2185 { INDEX_op_ld16u_i32, { "r", "r" } },
2186 { INDEX_op_ld16s_i32, { "r", "r" } },
2187 { INDEX_op_ld_i32, { "r", "r" } },
2188 { INDEX_op_st8_i32, { "r", "r" } },
2189 { INDEX_op_st16_i32, { "r", "r" } },
2190 { INDEX_op_st_i32, { "r", "r" } },
2192 { INDEX_op_add_i32, { "r", "r", "ri" } },
2193 { INDEX_op_sub_i32, { "r", "0", "ri" } },
2194 { INDEX_op_mul_i32, { "r", "0", "rK" } },
2196 { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
2197 { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
2199 { INDEX_op_and_i32, { "r", "0", "ri" } },
2200 { INDEX_op_or_i32, { "r", "0", "rO" } },
2201 { INDEX_op_xor_i32, { "r", "0", "rX" } },
2203 { INDEX_op_neg_i32, { "r", "r" } },
2205 { INDEX_op_shl_i32, { "r", "0", "Ri" } },
2206 { INDEX_op_shr_i32, { "r", "0", "Ri" } },
2207 { INDEX_op_sar_i32, { "r", "0", "Ri" } },
2209 { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
2210 { INDEX_op_rotr_i32, { "r", "r", "Ri" } },
2212 { INDEX_op_ext8s_i32, { "r", "r" } },
2213 { INDEX_op_ext8u_i32, { "r", "r" } },
2214 { INDEX_op_ext16s_i32, { "r", "r" } },
2215 { INDEX_op_ext16u_i32, { "r", "r" } },
2217 { INDEX_op_bswap16_i32, { "r", "r" } },
2218 { INDEX_op_bswap32_i32, { "r", "r" } },
2220 { INDEX_op_add2_i32, { "r", "r", "0", "1", "rA", "r" } },
2221 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "rA", "r" } },
2223 { INDEX_op_brcond_i32, { "r", "rC" } },
2224 { INDEX_op_setcond_i32, { "r", "r", "rC" } },
2225 { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
2226 { INDEX_op_deposit_i32, { "r", "0", "r" } },
2228 { INDEX_op_qemu_ld_i32, { "r", "L" } },
2229 { INDEX_op_qemu_ld_i64, { "r", "L" } },
2230 { INDEX_op_qemu_st_i32, { "L", "L" } },
2231 { INDEX_op_qemu_st_i64, { "L", "L" } },
2233 { INDEX_op_ld8u_i64, { "r", "r" } },
2234 { INDEX_op_ld8s_i64, { "r", "r" } },
2235 { INDEX_op_ld16u_i64, { "r", "r" } },
2236 { INDEX_op_ld16s_i64, { "r", "r" } },
2237 { INDEX_op_ld32u_i64, { "r", "r" } },
2238 { INDEX_op_ld32s_i64, { "r", "r" } },
2239 { INDEX_op_ld_i64, { "r", "r" } },
2241 { INDEX_op_st8_i64, { "r", "r" } },
2242 { INDEX_op_st16_i64, { "r", "r" } },
2243 { INDEX_op_st32_i64, { "r", "r" } },
2244 { INDEX_op_st_i64, { "r", "r" } },
2246 { INDEX_op_add_i64, { "r", "r", "ri" } },
2247 { INDEX_op_sub_i64, { "r", "0", "ri" } },
2248 { INDEX_op_mul_i64, { "r", "0", "rK" } },
2250 { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
2251 { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
2252 { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },
2254 { INDEX_op_and_i64, { "r", "0", "ri" } },
2255 { INDEX_op_or_i64, { "r", "0", "rO" } },
2256 { INDEX_op_xor_i64, { "r", "0", "rX" } },
2258 { INDEX_op_neg_i64, { "r", "r" } },
2260 { INDEX_op_shl_i64, { "r", "r", "Ri" } },
2261 { INDEX_op_shr_i64, { "r", "r", "Ri" } },
2262 { INDEX_op_sar_i64, { "r", "r", "Ri" } },
2264 { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
2265 { INDEX_op_rotr_i64, { "r", "r", "Ri" } },
2267 { INDEX_op_ext8s_i64, { "r", "r" } },
2268 { INDEX_op_ext8u_i64, { "r", "r" } },
2269 { INDEX_op_ext16s_i64, { "r", "r" } },
2270 { INDEX_op_ext16u_i64, { "r", "r" } },
2271 { INDEX_op_ext32s_i64, { "r", "r" } },
2272 { INDEX_op_ext32u_i64, { "r", "r" } },
2274 { INDEX_op_ext_i32_i64, { "r", "r" } },
2275 { INDEX_op_extu_i32_i64, { "r", "r" } },
2277 { INDEX_op_bswap16_i64, { "r", "r" } },
2278 { INDEX_op_bswap32_i64, { "r", "r" } },
2279 { INDEX_op_bswap64_i64, { "r", "r" } },
2281 { INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
2282 { INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },
2284 { INDEX_op_brcond_i64, { "r", "rC" } },
2285 { INDEX_op_setcond_i64, { "r", "r", "rC" } },
2286 { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
2287 { INDEX_op_deposit_i64, { "r", "0", "r" } },
2292 static void query_facilities(void)
2294 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2296 /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this
2297 is present on all 64-bit systems, but let's check for it anyway. */
2298 if (hwcap & HWCAP_S390_STFLE) {
2299 register int r0 __asm__("0");
2300 register void *r1 __asm__("1");
2304 asm volatile(".word 0xb2b0,0x1000"
2305 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
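/* 0xb2b0 is the STFLE opcode; the second halfword 0x1000 encodes base
   register %r1 with displacement 0, so the facility doublewords are stored
   at the address in r1, with the count taken from (and updated in) r0. */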
2309 static void tcg_target_init(TCGContext *s)
2313 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2314 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2316 tcg_regset_clear(tcg_target_call_clobber_regs);
2317 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2318 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2319 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2320 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2321 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2322 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
2323 /* The r6 register is technically call-saved, but it's also a parameter
2324 register, so it can get killed by setup for the qemu_st helper. */
2325 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
2326 /* The return register can be considered call-clobbered. */
2327 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2329 tcg_regset_clear(s->reserved_regs);
2330 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2331 /* XXX many insns can't be used with R0, so it's better to avoid it for now */
2332 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2333 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2335 tcg_add_target_add_op_defs(s390_op_defs);
2338 #define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \
2339 + TCG_STATIC_CALL_ARGS_SIZE \
2340 + CPU_TEMP_BUF_NLONGS * sizeof(long)))
2342 static void tcg_target_qemu_prologue(TCGContext *s)
2344 /* stmg %r6,%r15,48(%r15) (save registers) */
2345 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
2347 /* aghi %r15,-frame_size */
2348 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);
2350 tcg_set_frame(s, TCG_REG_CALL_STACK,
2351 TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
2352 CPU_TEMP_BUF_NLONGS * sizeof(long));
2354 #ifndef CONFIG_SOFTMMU
2355 if (guest_base >= 0x80000) {
2356 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
2357 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2361 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2362 /* br %r3 (go to TB) */
2363 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
2365 tb_ret_addr = s->code_ptr;
2367 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2368 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
2371 /* br %r14 (return) */
2372 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
2377 uint8_t fde_def_cfa[4];
2378 uint8_t fde_reg_ofs[18];
2381 /* We're expecting a 2-byte uleb128 encoded value. */
2382 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
2384 #define ELF_HOST_MACHINE EM_S390
2386 static const DebugFrame debug_frame = {
2387 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2390 .h.cie.code_align = 1,
2391 .h.cie.data_align = 8, /* sleb128 8 */
2392 .h.cie.return_column = TCG_REG_R14,
2394 /* Total FDE size does not include the "len" member. */
2395 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2398 12, TCG_REG_CALL_STACK, /* DW_CFA_def_cfa %r15, ... */
2399 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2403 0x86, 6, /* DW_CFA_offset, %r6, 48 */
2404 0x87, 7, /* DW_CFA_offset, %r7, 56 */
2405 0x88, 8, /* DW_CFA_offset, %r8, 64 */
2406 0x89, 9, /* DW_CFA_offset, %r9, 72 */
2407 0x8a, 10, /* DW_CFA_offset, %r10, 80 */
2408 0x8b, 11, /* DW_CFA_offset, %r11, 88 */
2409 0x8c, 12, /* DW_CFA_offset, %r12, 96 */
2410 0x8d, 13, /* DW_CFA_offset, %r13, 104 */
2411 0x8e, 14, /* DW_CFA_offset, %r14, 112 */
2415 void tcg_register_jit(void *buf, size_t buf_size)
2417 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));