]> Git Repo - qemu.git/blame - tcg/s390/tcg-target.inc.c
tcg: Rearrange ldst label tracking
[qemu.git] / tcg / s390 / tcg-target.inc.c
CommitLineData
2827822e
AG
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <[email protected]>
48bb3750
RH
5 * Copyright (c) 2009 Alexander Graf <[email protected]>
6 * Copyright (c) 2010 Richard Henderson <[email protected]>
2827822e
AG
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
a01fc30d
RH
27/* We only support generating code for 64-bit mode. */
28#if TCG_TARGET_REG_BITS != 64
29#error "unsupported code generation mode"
30#endif
31
c9baa30f
RH
32#include "elf.h"
33
48bb3750
RH
34/* ??? The translation blocks produced by TCG are generally small enough to
35 be entirely reachable with a 16-bit displacement. Leaving the option for
36 a 32-bit displacement here Just In Case. */
37#define USE_LONG_BRANCHES 0
38
a8f0269e
RH
39#define TCG_CT_CONST_S16 0x100
40#define TCG_CT_CONST_S32 0x200
e42349cb
RH
41#define TCG_CT_CONST_NN16 0x400
42#define TCG_CT_CONST_NN32 0x800
a8f0269e 43#define TCG_CT_CONST_U31 0x1000
ba18b07d 44#define TCG_CT_CONST_S33 0x2000
a8f0269e 45#define TCG_CT_CONST_ZERO 0x4000
48bb3750
RH
46
47/* Several places within the instruction set 0 means "no register"
48 rather than TCG_REG_R0. */
49#define TCG_REG_NONE 0
50
51/* A scratch register that may be be used throughout the backend. */
ce411066 52#define TCG_TMP0 TCG_REG_R1
48bb3750 53
4cbea598 54#ifndef CONFIG_SOFTMMU
48bb3750 55#define TCG_GUEST_BASE_REG TCG_REG_R13
48bb3750
RH
56#endif
57
48bb3750
RH
/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
typedef enum S390Opcode {
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,

    RIE_CGIJ    = 0xec7c,
    RIE_CGRJ    = 0xec64,
    RIE_CIJ     = 0xec7e,
    RIE_CLGRJ   = 0xec65,
    RIE_CLIJ    = 0xec7f,
    RIE_CLGIJ   = 0xec7d,
    RIE_CLRJ    = 0xec77,
    RIE_CRJ     = 0xec76,
    RIE_LOCGHI  = 0xec46,
    RIE_RISBG   = 0xec55,

    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_FLOGR   = 0xb983,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    RRF_LOCR    = 0xb9f2,
    RRF_LOCGR   = 0xb9e2,
    RRF_NRK     = 0xb9f4,
    RRF_NGRK    = 0xb9e4,
    RRF_ORK     = 0xb9f6,
    RRF_OGRK    = 0xb9e6,
    RRF_SRK     = 0xb9f9,
    RRF_SGRK    = 0xb9e9,
    RRF_SLRK    = 0xb9fb,
    RRF_SLGRK   = 0xb9eb,
    RRF_XRK     = 0xb9f7,
    RRF_XGRK    = 0xb9e7,

    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SLLK    = 0xebdf,
    RSY_SRAG    = 0xeb0a,
    RSY_SRAK    = 0xebdc,
    RSY_SRLG    = 0xeb0c,
    RSY_SRLK    = 0xebde,

    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,

    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,

    NOP         = 0x0707,
} S390Opcode;
240
#ifdef CONFIG_DEBUG_TCG
/* Register names used by TCG debug dumps.
   BUG FIX: the second row originally read
       "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
   i.e. without commas between the last six literals.  C string-literal
   concatenation silently fused them into a single element
   "%r10%r11%r12%r13%r14%r15", so entries 10..15 were wrong (10 got the
   fused string, 11..15 were NULL).  Commas restore one name per register. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
};
#endif
247
248/* Since R6 is a potential argument register, choose it last of the
249 call-saved registers. Likewise prefer the call-clobbered registers
250 in reverse order to maximize the chance of avoiding the arguments. */
2827822e 251static const int tcg_target_reg_alloc_order[] = {
f24efee4 252 /* Call saved registers. */
48bb3750
RH
253 TCG_REG_R13,
254 TCG_REG_R12,
255 TCG_REG_R11,
256 TCG_REG_R10,
257 TCG_REG_R9,
258 TCG_REG_R8,
259 TCG_REG_R7,
260 TCG_REG_R6,
f24efee4 261 /* Call clobbered registers. */
48bb3750
RH
262 TCG_REG_R14,
263 TCG_REG_R0,
264 TCG_REG_R1,
f24efee4 265 /* Argument registers, in reverse order of allocation. */
48bb3750
RH
266 TCG_REG_R5,
267 TCG_REG_R4,
268 TCG_REG_R3,
269 TCG_REG_R2,
2827822e
AG
270};
271
272static const int tcg_target_call_iarg_regs[] = {
48bb3750
RH
273 TCG_REG_R2,
274 TCG_REG_R3,
275 TCG_REG_R4,
276 TCG_REG_R5,
277 TCG_REG_R6,
2827822e
AG
278};
279
280static const int tcg_target_call_oarg_regs[] = {
48bb3750 281 TCG_REG_R2,
48bb3750
RH
282};
283
284#define S390_CC_EQ 8
285#define S390_CC_LT 4
286#define S390_CC_GT 2
287#define S390_CC_OV 1
288#define S390_CC_NE (S390_CC_LT | S390_CC_GT)
289#define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
290#define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
291#define S390_CC_NEVER 0
292#define S390_CC_ALWAYS 15
293
294/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
0aed257f 295static const uint8_t tcg_cond_to_s390_cond[] = {
48bb3750
RH
296 [TCG_COND_EQ] = S390_CC_EQ,
297 [TCG_COND_NE] = S390_CC_NE,
298 [TCG_COND_LT] = S390_CC_LT,
299 [TCG_COND_LE] = S390_CC_LE,
300 [TCG_COND_GT] = S390_CC_GT,
301 [TCG_COND_GE] = S390_CC_GE,
302 [TCG_COND_LTU] = S390_CC_LT,
303 [TCG_COND_LEU] = S390_CC_LE,
304 [TCG_COND_GTU] = S390_CC_GT,
305 [TCG_COND_GEU] = S390_CC_GE,
306};
307
308/* Condition codes that result from a LOAD AND TEST. Here, we have no
309 unsigned instruction variation, however since the test is vs zero we
310 can re-map the outcomes appropriately. */
0aed257f 311static const uint8_t tcg_cond_to_ltr_cond[] = {
48bb3750
RH
312 [TCG_COND_EQ] = S390_CC_EQ,
313 [TCG_COND_NE] = S390_CC_NE,
314 [TCG_COND_LT] = S390_CC_LT,
315 [TCG_COND_LE] = S390_CC_LE,
316 [TCG_COND_GT] = S390_CC_GT,
317 [TCG_COND_GE] = S390_CC_GE,
318 [TCG_COND_LTU] = S390_CC_NEVER,
319 [TCG_COND_LEU] = S390_CC_EQ,
320 [TCG_COND_GTU] = S390_CC_NE,
321 [TCG_COND_GEU] = S390_CC_ALWAYS,
322};
323
#ifdef CONFIG_SOFTMMU
/* Softmmu slow-path load helpers, indexed by TCGMemOp.  */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,
};

/* Softmmu slow-path store helpers, indexed by TCGMemOp.
   Stores need no sign-extended variants.  */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
#endif
48bb3750 350
8c081b18 351static tcg_insn_unit *tb_ret_addr;
b2c98d9d 352uint64_t s390_facilities;
2827822e 353
8c081b18 354static void patch_reloc(tcg_insn_unit *code_ptr, int type,
2ba7fae2 355 intptr_t value, intptr_t addend)
2827822e 356{
8c081b18 357 intptr_t pcrel2 = (tcg_insn_unit *)value - (code_ptr - 1);
eabb7b91 358 tcg_debug_assert(addend == -2);
48bb3750
RH
359
360 switch (type) {
361 case R_390_PC16DBL:
eabb7b91 362 tcg_debug_assert(pcrel2 == (int16_t)pcrel2);
8c081b18 363 tcg_patch16(code_ptr, pcrel2);
48bb3750
RH
364 break;
365 case R_390_PC32DBL:
eabb7b91 366 tcg_debug_assert(pcrel2 == (int32_t)pcrel2);
8c081b18 367 tcg_patch32(code_ptr, pcrel2);
48bb3750
RH
368 break;
369 default:
370 tcg_abort();
371 break;
372 }
2827822e
AG
373}
374
2827822e 375/* parse target specific constraints */
069ea736
RH
376static const char *target_parse_constraint(TCGArgConstraint *ct,
377 const char *ct_str, TCGType type)
2827822e 378{
069ea736 379 switch (*ct_str++) {
48bb3750
RH
380 case 'r': /* all registers */
381 ct->ct |= TCG_CT_REG;
382 tcg_regset_set32(ct->u.regs, 0, 0xffff);
383 break;
48bb3750
RH
384 case 'L': /* qemu_ld/st constraint */
385 ct->ct |= TCG_CT_REG;
386 tcg_regset_set32(ct->u.regs, 0, 0xffff);
387 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
388 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
65a62a75 389 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
48bb3750
RH
390 break;
391 case 'a': /* force R2 for division */
392 ct->ct |= TCG_CT_REG;
393 tcg_regset_clear(ct->u.regs);
394 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
395 break;
396 case 'b': /* force R3 for division */
397 ct->ct |= TCG_CT_REG;
398 tcg_regset_clear(ct->u.regs);
399 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
400 break;
ad19b358 401 case 'A':
ba18b07d 402 ct->ct |= TCG_CT_CONST_S33;
ad19b358 403 break;
a8f0269e
RH
404 case 'I':
405 ct->ct |= TCG_CT_CONST_S16;
406 break;
407 case 'J':
408 ct->ct |= TCG_CT_CONST_S32;
48bb3750 409 break;
e42349cb
RH
410 case 'N':
411 ct->ct |= TCG_CT_CONST_NN16;
48bb3750 412 break;
e42349cb
RH
413 case 'M':
414 ct->ct |= TCG_CT_CONST_NN32;
48bb3750
RH
415 break;
416 case 'C':
07952d95
RH
417 /* ??? We have no insight here into whether the comparison is
418 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
419 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
420 a 32-bit unsigned immediate. If we were to use the (semi)
421 obvious "val == (int32_t)val" we would be enabling unsigned
422 comparisons vs very large numbers. The only solution is to
423 take the intersection of the ranges. */
424 /* ??? Another possible solution is to simply lie and allow all
425 constants here and force the out-of-range values into a temp
426 register in tgen_cmp when we have knowledge of the actual
427 comparison code in use. */
428 ct->ct |= TCG_CT_CONST_U31;
48bb3750 429 break;
752b1be9
RH
430 case 'Z':
431 ct->ct |= TCG_CT_CONST_ZERO;
432 break;
48bb3750 433 default:
069ea736 434 return NULL;
48bb3750 435 }
069ea736 436 return ct_str;
2827822e
AG
437}
438
439/* Test if a constant matches the constraint. */
f6c6afc1 440static int tcg_target_const_match(tcg_target_long val, TCGType type,
48bb3750 441 const TCGArgConstraint *arg_ct)
2827822e 442{
48bb3750
RH
443 int ct = arg_ct->ct;
444
445 if (ct & TCG_CT_CONST) {
446 return 1;
447 }
448
671c835b 449 if (type == TCG_TYPE_I32) {
48bb3750
RH
450 val = (int32_t)val;
451 }
452
453 /* The following are mutually exclusive. */
a8f0269e
RH
454 if (ct & TCG_CT_CONST_S16) {
455 return val == (int16_t)val;
456 } else if (ct & TCG_CT_CONST_S32) {
457 return val == (int32_t)val;
ba18b07d
RH
458 } else if (ct & TCG_CT_CONST_S33) {
459 return val >= -0xffffffffll && val <= 0xffffffffll;
e42349cb
RH
460 } else if (ct & TCG_CT_CONST_NN16) {
461 return !(val < 0 && val == (int16_t)val);
462 } else if (ct & TCG_CT_CONST_NN32) {
463 return !(val < 0 && val == (int32_t)val);
07952d95
RH
464 } else if (ct & TCG_CT_CONST_U31) {
465 return val >= 0 && val <= 0x7fffffff;
752b1be9
RH
466 } else if (ct & TCG_CT_CONST_ZERO) {
467 return val == 0;
48bb3750
RH
468 }
469
2827822e
AG
470 return 0;
471}
472
48bb3750
RH
473/* Emit instructions according to the given instruction format. */
474
475static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
476{
477 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
478}
479
480static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
481 TCGReg r1, TCGReg r2)
482{
483 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
484}
485
96a9f093
RH
486static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
487 TCGReg r1, TCGReg r2, int m3)
488{
489 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
490}
491
48bb3750
RH
492static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
493{
494 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
495}
496
7af525af
RH
497static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1,
498 int i2, int m3)
499{
500 tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
501 tcg_out32(s, (i2 << 16) | (op & 0xff));
502}
503
48bb3750
RH
504static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
505{
506 tcg_out16(s, op | (r1 << 4));
507 tcg_out32(s, i2);
508}
509
510static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
511 TCGReg b2, TCGReg r3, int disp)
512{
513 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
514 | (disp & 0xfff));
515}
516
517static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
518 TCGReg b2, TCGReg r3, int disp)
519{
520 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
521 tcg_out32(s, (op & 0xff) | (b2 << 28)
522 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
523}
524
525#define tcg_out_insn_RX tcg_out_insn_RS
526#define tcg_out_insn_RXY tcg_out_insn_RSY
527
528/* Emit an opcode with "type-checking" of the format. */
529#define tcg_out_insn(S, FMT, OP, ...) \
530 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
531
532
533/* emit 64-bit shifts */
534static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
535 TCGReg src, TCGReg sh_reg, int sh_imm)
536{
537 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
538}
539
540/* emit 32-bit shifts */
541static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
542 TCGReg sh_reg, int sh_imm)
543{
544 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
545}
546
547static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
548{
549 if (src != dst) {
550 if (type == TCG_TYPE_I32) {
551 tcg_out_insn(s, RR, LR, dst, src);
552 } else {
553 tcg_out_insn(s, RRE, LGR, dst, src);
554 }
555 }
556}
557
2827822e 558/* load a register with an immediate value */
48bb3750
RH
559static void tcg_out_movi(TCGContext *s, TCGType type,
560 TCGReg ret, tcg_target_long sval)
2827822e 561{
48bb3750
RH
562 static const S390Opcode lli_insns[4] = {
563 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
564 };
565
566 tcg_target_ulong uval = sval;
567 int i;
568
569 if (type == TCG_TYPE_I32) {
570 uval = (uint32_t)sval;
571 sval = (int32_t)sval;
572 }
573
574 /* Try all 32-bit insns that can load it in one go. */
575 if (sval >= -0x8000 && sval < 0x8000) {
576 tcg_out_insn(s, RI, LGHI, ret, sval);
577 return;
578 }
579
580 for (i = 0; i < 4; i++) {
581 tcg_target_long mask = 0xffffull << i*16;
582 if ((uval & mask) == uval) {
583 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
584 return;
585 }
586 }
587
588 /* Try all 48-bit insns that can load it in one go. */
b2c98d9d 589 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
590 if (sval == (int32_t)sval) {
591 tcg_out_insn(s, RIL, LGFI, ret, sval);
592 return;
593 }
594 if (uval <= 0xffffffff) {
595 tcg_out_insn(s, RIL, LLILF, ret, uval);
596 return;
597 }
598 if ((uval & 0xffffffff) == 0) {
599 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
600 return;
601 }
602 }
603
604 /* Try for PC-relative address load. */
605 if ((sval & 1) == 0) {
8c081b18 606 ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
48bb3750
RH
607 if (off == (int32_t)off) {
608 tcg_out_insn(s, RIL, LARL, ret, off);
609 return;
610 }
611 }
612
613 /* If extended immediates are not present, then we may have to issue
614 several instructions to load the low 32 bits. */
b2c98d9d 615 if (!(s390_facilities & FACILITY_EXT_IMM)) {
48bb3750
RH
616 /* A 32-bit unsigned value can be loaded in 2 insns. And given
617 that the lli_insns loop above did not succeed, we know that
618 both insns are required. */
619 if (uval <= 0xffffffff) {
620 tcg_out_insn(s, RI, LLILL, ret, uval);
621 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
622 return;
623 }
624
625 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
626 We first want to make sure that all the high bits get set. With
627 luck the low 16-bits can be considered negative to perform that for
628 free, otherwise we load an explicit -1. */
629 if (sval >> 31 >> 1 == -1) {
630 if (uval & 0x8000) {
631 tcg_out_insn(s, RI, LGHI, ret, uval);
632 } else {
633 tcg_out_insn(s, RI, LGHI, ret, -1);
634 tcg_out_insn(s, RI, IILL, ret, uval);
635 }
636 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
637 return;
638 }
639 }
640
641 /* If we get here, both the high and low parts have non-zero bits. */
642
643 /* Recurse to load the lower 32-bits. */
a22971f9 644 tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
48bb3750
RH
645
646 /* Insert data into the high 32-bits. */
647 uval = uval >> 31 >> 1;
b2c98d9d 648 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
649 if (uval < 0x10000) {
650 tcg_out_insn(s, RI, IIHL, ret, uval);
651 } else if ((uval & 0xffff) == 0) {
652 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
653 } else {
654 tcg_out_insn(s, RIL, IIHF, ret, uval);
655 }
656 } else {
657 if (uval & 0xffff) {
658 tcg_out_insn(s, RI, IIHL, ret, uval);
659 }
660 if (uval & 0xffff0000) {
661 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
662 }
663 }
664}
665
666
667/* Emit a load/store type instruction. Inputs are:
668 DATA: The register to be loaded or stored.
669 BASE+OFS: The effective address.
670 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
671 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
672
673static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
674 TCGReg data, TCGReg base, TCGReg index,
675 tcg_target_long ofs)
676{
677 if (ofs < -0x80000 || ofs >= 0x80000) {
78c9f7c5
RH
678 /* Combine the low 20 bits of the offset with the actual load insn;
679 the high 44 bits must come from an immediate load. */
680 tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
681 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
682 ofs = low;
48bb3750
RH
683
684 /* If we were already given an index register, add it in. */
685 if (index != TCG_REG_NONE) {
686 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
687 }
688 index = TCG_TMP0;
689 }
690
691 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
692 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
693 } else {
694 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
695 }
2827822e
AG
696}
697
48bb3750 698
2827822e 699/* load data without address translation or endianness conversion */
48bb3750 700static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 701 TCGReg base, intptr_t ofs)
2827822e 702{
48bb3750
RH
703 if (type == TCG_TYPE_I32) {
704 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
705 } else {
706 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
707 }
2827822e
AG
708}
709
48bb3750 710static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
a05b5b9b 711 TCGReg base, intptr_t ofs)
2827822e 712{
48bb3750
RH
713 if (type == TCG_TYPE_I32) {
714 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
715 } else {
716 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
717 }
718}
719
59d7c14e
RH
720static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
721 TCGReg base, intptr_t ofs)
722{
723 return false;
724}
725
48bb3750
RH
726/* load data from an absolute host address */
727static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
728{
8c081b18 729 intptr_t addr = (intptr_t)abs;
48bb3750 730
b2c98d9d 731 if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
8c081b18 732 ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
48bb3750
RH
733 if (disp == (int32_t)disp) {
734 if (type == TCG_TYPE_I32) {
735 tcg_out_insn(s, RIL, LRL, dest, disp);
736 } else {
737 tcg_out_insn(s, RIL, LGRL, dest, disp);
738 }
739 return;
740 }
741 }
742
743 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
744 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
745}
746
f0bffc27
RH
747static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
748 int msb, int lsb, int ofs, int z)
749{
750 /* Format RIE-f */
751 tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
752 tcg_out16(s, (msb << 8) | (z << 7) | lsb);
753 tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
754}
755
48bb3750
RH
756static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
757{
b2c98d9d 758 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
759 tcg_out_insn(s, RRE, LGBR, dest, src);
760 return;
761 }
762
763 if (type == TCG_TYPE_I32) {
764 if (dest == src) {
765 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
766 } else {
767 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
768 }
769 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
770 } else {
771 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
772 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
773 }
774}
775
776static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
777{
b2c98d9d 778 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
779 tcg_out_insn(s, RRE, LLGCR, dest, src);
780 return;
781 }
782
783 if (dest == src) {
784 tcg_out_movi(s, type, TCG_TMP0, 0xff);
785 src = TCG_TMP0;
786 } else {
787 tcg_out_movi(s, type, dest, 0xff);
788 }
789 if (type == TCG_TYPE_I32) {
790 tcg_out_insn(s, RR, NR, dest, src);
791 } else {
792 tcg_out_insn(s, RRE, NGR, dest, src);
793 }
794}
795
796static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
797{
b2c98d9d 798 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
799 tcg_out_insn(s, RRE, LGHR, dest, src);
800 return;
801 }
802
803 if (type == TCG_TYPE_I32) {
804 if (dest == src) {
805 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
806 } else {
807 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
808 }
809 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
810 } else {
811 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
812 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
813 }
814}
815
816static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
817{
b2c98d9d 818 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
819 tcg_out_insn(s, RRE, LLGHR, dest, src);
820 return;
821 }
822
823 if (dest == src) {
824 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
825 src = TCG_TMP0;
826 } else {
827 tcg_out_movi(s, type, dest, 0xffff);
828 }
829 if (type == TCG_TYPE_I32) {
830 tcg_out_insn(s, RR, NR, dest, src);
831 } else {
832 tcg_out_insn(s, RRE, NGR, dest, src);
833 }
834}
835
836static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
837{
838 tcg_out_insn(s, RRE, LGFR, dest, src);
839}
840
841static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
842{
843 tcg_out_insn(s, RRE, LLGFR, dest, src);
844}
845
f0bffc27
RH
/* Accept bit patterns like these:
      0....01....1
      1....10....0
      1..10..01..1
      0..01..10..0
   i.e. a single contiguous run of ones, possibly wrapping around the
   word boundary — exactly what RISBG can produce with a zeroing insert.
   Copied from gcc sources.  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t lsb;

    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}
875
547ec121
RH
876static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
877{
878 int msb, lsb;
879 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
880 /* Achieve wraparound by swapping msb and lsb. */
881 msb = 64 - ctz64(~val);
882 lsb = clz64(~val) - 1;
883 } else {
884 msb = clz64(val);
885 lsb = 63 - ctz64(val);
886 }
887 tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
888}
889
07ff7983 890static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
48bb3750
RH
891{
892 static const S390Opcode ni_insns[4] = {
893 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
894 };
895 static const S390Opcode nif_insns[2] = {
896 RIL_NILF, RIL_NIHF
897 };
07ff7983 898 uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
48bb3750
RH
899 int i;
900
48bb3750 901 /* Look for the zero-extensions. */
07ff7983 902 if ((val & valid) == 0xffffffff) {
48bb3750
RH
903 tgen_ext32u(s, dest, dest);
904 return;
905 }
b2c98d9d 906 if (s390_facilities & FACILITY_EXT_IMM) {
07ff7983 907 if ((val & valid) == 0xff) {
48bb3750
RH
908 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
909 return;
910 }
07ff7983 911 if ((val & valid) == 0xffff) {
48bb3750
RH
912 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
913 return;
914 }
07ff7983 915 }
48bb3750 916
07ff7983
RH
917 /* Try all 32-bit insns that can perform it in one go. */
918 for (i = 0; i < 4; i++) {
919 tcg_target_ulong mask = ~(0xffffull << i*16);
920 if (((val | ~valid) & mask) == mask) {
921 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
922 return;
48bb3750 923 }
07ff7983 924 }
48bb3750 925
07ff7983 926 /* Try all 48-bit insns that can perform it in one go. */
b2c98d9d 927 if (s390_facilities & FACILITY_EXT_IMM) {
07ff7983
RH
928 for (i = 0; i < 2; i++) {
929 tcg_target_ulong mask = ~(0xffffffffull << i*32);
930 if (((val | ~valid) & mask) == mask) {
931 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
932 return;
48bb3750
RH
933 }
934 }
07ff7983 935 }
b2c98d9d 936 if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
547ec121 937 tgen_andi_risbg(s, dest, dest, val);
f0bffc27
RH
938 return;
939 }
48bb3750 940
07ff7983
RH
941 /* Fall back to loading the constant. */
942 tcg_out_movi(s, type, TCG_TMP0, val);
943 if (type == TCG_TYPE_I32) {
944 tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
48bb3750 945 } else {
07ff7983 946 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
48bb3750
RH
947 }
948}
949
950static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
951{
952 static const S390Opcode oi_insns[4] = {
953 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
954 };
955 static const S390Opcode nif_insns[2] = {
956 RIL_OILF, RIL_OIHF
957 };
958
959 int i;
960
961 /* Look for no-op. */
962 if (val == 0) {
963 return;
964 }
965
b2c98d9d 966 if (s390_facilities & FACILITY_EXT_IMM) {
48bb3750
RH
967 /* Try all 32-bit insns that can perform it in one go. */
968 for (i = 0; i < 4; i++) {
969 tcg_target_ulong mask = (0xffffull << i*16);
970 if ((val & mask) != 0 && (val & ~mask) == 0) {
971 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
972 return;
973 }
974 }
975
976 /* Try all 48-bit insns that can perform it in one go. */
977 for (i = 0; i < 2; i++) {
978 tcg_target_ulong mask = (0xffffffffull << i*32);
979 if ((val & mask) != 0 && (val & ~mask) == 0) {
980 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
981 return;
982 }
983 }
984
985 /* Perform the OR via sequential modifications to the high and
986 low parts. Do this via recursion to handle 16-bit vs 32-bit
987 masks in each half. */
988 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
989 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
990 } else {
991 /* With no extended-immediate facility, we don't need to be so
992 clever. Just iterate over the insns and mask in the constant. */
993 for (i = 0; i < 4; i++) {
994 tcg_target_ulong mask = (0xffffull << i*16);
995 if ((val & mask) != 0) {
996 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
997 }
998 }
999 }
1000}
1001
1002static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1003{
1004 /* Perform the xor by parts. */
1005 if (val & 0xffffffff) {
1006 tcg_out_insn(s, RIL, XILF, dest, val);
1007 }
1008 if (val > 0xffffffff) {
1009 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1010 }
1011}
1012
1013static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
65839b56 1014 TCGArg c2, bool c2const, bool need_carry)
48bb3750 1015{
bcc66562 1016 bool is_unsigned = is_unsigned_cond(c);
48bb3750
RH
1017 if (c2const) {
1018 if (c2 == 0) {
65839b56
RH
1019 if (!(is_unsigned && need_carry)) {
1020 if (type == TCG_TYPE_I32) {
1021 tcg_out_insn(s, RR, LTR, r1, r1);
1022 } else {
1023 tcg_out_insn(s, RRE, LTGR, r1, r1);
1024 }
1025 return tcg_cond_to_ltr_cond[c];
1026 }
1027 /* If we only got here because of load-and-test,
1028 and we couldn't use that, then we need to load
1029 the constant into a register. */
a32b6ae8 1030 if (!(s390_facilities & FACILITY_EXT_IMM)) {
65839b56
RH
1031 c2 = TCG_TMP0;
1032 tcg_out_movi(s, type, c2, 0);
1033 goto do_reg;
1034 }
1035 }
1036 if (is_unsigned) {
48bb3750 1037 if (type == TCG_TYPE_I32) {
65839b56 1038 tcg_out_insn(s, RIL, CLFI, r1, c2);
48bb3750 1039 } else {
65839b56 1040 tcg_out_insn(s, RIL, CLGFI, r1, c2);
48bb3750 1041 }
48bb3750 1042 } else {
65839b56
RH
1043 if (type == TCG_TYPE_I32) {
1044 tcg_out_insn(s, RIL, CFI, r1, c2);
48bb3750 1045 } else {
65839b56 1046 tcg_out_insn(s, RIL, CGFI, r1, c2);
48bb3750
RH
1047 }
1048 }
1049 } else {
65839b56 1050 do_reg:
48bb3750
RH
1051 if (is_unsigned) {
1052 if (type == TCG_TYPE_I32) {
1053 tcg_out_insn(s, RR, CLR, r1, c2);
1054 } else {
1055 tcg_out_insn(s, RRE, CLGR, r1, c2);
1056 }
1057 } else {
1058 if (type == TCG_TYPE_I32) {
1059 tcg_out_insn(s, RR, CR, r1, c2);
1060 } else {
1061 tcg_out_insn(s, RRE, CGR, r1, c2);
1062 }
1063 }
1064 }
1065 return tcg_cond_to_s390_cond[c];
1066}
1067
7b7066b1 1068static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
96a9f093 1069 TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
48bb3750 1070{
7b7066b1 1071 int cc;
7af525af 1072 bool have_loc;
7b7066b1 1073
7af525af
RH
1074 /* With LOC2, we can always emit the minimum 3 insns. */
1075 if (s390_facilities & FACILITY_LOAD_ON_COND2) {
1076 /* Emit: d = 0, d = (cc ? 1 : d). */
1077 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
1078 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1079 tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc);
1080 return;
1081 }
1082
1083 have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0;
1084
4609190b
RH
1085 /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */
1086 restart:
7b7066b1 1087 switch (cond) {
4609190b
RH
1088 case TCG_COND_NE:
1089 /* X != 0 is X > 0. */
1090 if (c2const && c2 == 0) {
1091 cond = TCG_COND_GTU;
1092 } else {
1093 break;
1094 }
1095 /* fallthru */
1096
7b7066b1
RH
1097 case TCG_COND_GTU:
1098 case TCG_COND_GT:
7b7066b1
RH
1099 /* The result of a compare has CC=2 for GT and CC=3 unused.
1100 ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
65839b56 1101 tgen_cmp(s, type, cond, c1, c2, c2const, true);
7b7066b1
RH
1102 tcg_out_movi(s, type, dest, 0);
1103 tcg_out_insn(s, RRE, ALCGR, dest, dest);
1104 return;
1105
4609190b
RH
1106 case TCG_COND_EQ:
1107 /* X == 0 is X <= 0. */
1108 if (c2const && c2 == 0) {
1109 cond = TCG_COND_LEU;
7b7066b1 1110 } else {
4609190b 1111 break;
7b7066b1 1112 }
4609190b 1113 /* fallthru */
7b7066b1
RH
1114
1115 case TCG_COND_LEU:
4609190b
RH
1116 case TCG_COND_LE:
1117 /* As above, but we're looking for borrow, or !carry.
1118 The second insn computes d - d - borrow, or -1 for true
1119 and 0 for false. So we must mask to 1 bit afterward. */
1120 tgen_cmp(s, type, cond, c1, c2, c2const, true);
1121 tcg_out_insn(s, RRE, SLBGR, dest, dest);
1122 tgen_andi(s, type, dest, 1);
1123 return;
1124
1125 case TCG_COND_GEU:
7b7066b1
RH
1126 case TCG_COND_LTU:
1127 case TCG_COND_LT:
4609190b
RH
1128 case TCG_COND_GE:
1129 /* Swap operands so that we can use LEU/GTU/GT/LE. */
7b7066b1 1130 if (c2const) {
7af525af 1131 if (have_loc) {
4609190b 1132 break;
7af525af 1133 }
7b7066b1
RH
1134 tcg_out_movi(s, type, TCG_TMP0, c2);
1135 c2 = c1;
1136 c2const = 0;
1137 c1 = TCG_TMP0;
1138 } else {
1139 TCGReg t = c1;
1140 c1 = c2;
1141 c2 = t;
1142 }
7b7066b1 1143 cond = tcg_swap_cond(cond);
4609190b 1144 goto restart;
48bb3750 1145
7b7066b1 1146 default:
4609190b 1147 g_assert_not_reached();
7b7066b1
RH
1148 }
1149
65839b56 1150 cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
4609190b
RH
1151 if (have_loc) {
1152 /* Emit: d = 0, t = 1, d = (cc ? t : d). */
1153 tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
1154 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
1155 tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
1156 } else {
1157 /* Emit: d = 1; if (cc) goto over; d = 0; over: */
1158 tcg_out_movi(s, type, dest, 1);
1159 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1160 tcg_out_movi(s, type, dest, 0);
1161 }
48bb3750
RH
1162}
1163
96a9f093 1164static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
7af525af
RH
1165 TCGReg c1, TCGArg c2, int c2const,
1166 TCGArg v3, int v3const)
96a9f093
RH
1167{
1168 int cc;
b2c98d9d 1169 if (s390_facilities & FACILITY_LOAD_ON_COND) {
65839b56 1170 cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
7af525af
RH
1171 if (v3const) {
1172 tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
1173 } else {
1174 tcg_out_insn(s, RRF, LOCGR, dest, v3, cc);
1175 }
96a9f093
RH
1176 } else {
1177 c = tcg_invert_cond(c);
65839b56 1178 cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
96a9f093
RH
1179
1180 /* Emit: if (cc) goto over; dest = r3; over: */
1181 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
7af525af 1182 tcg_out_insn(s, RRE, LGR, dest, v3);
96a9f093
RH
1183 }
1184}
1185
ce411066
RH
1186static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
1187 TCGArg a2, int a2const)
1188{
1189 /* Since this sets both R and R+1, we have no choice but to store the
1190 result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
1191 QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
1192 tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
1193
1194 if (a2const && a2 == 64) {
1195 tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
1196 } else {
1197 if (a2const) {
1198 tcg_out_movi(s, TCG_TYPE_I64, dest, a2);
1199 } else {
1200 tcg_out_mov(s, TCG_TYPE_I64, dest, a2);
1201 }
1202 if (s390_facilities & FACILITY_LOAD_ON_COND) {
1203 /* Emit: if (one bit found) dest = r0. */
1204 tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
1205 } else {
1206 /* Emit: if (no one bit found) goto over; dest = r0; over: */
1207 tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1);
1208 tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0);
1209 }
1210 }
1211}
1212
d5690ea4 1213static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
752b1be9 1214 int ofs, int len, int z)
d5690ea4
RH
1215{
1216 int lsb = (63 - ofs);
1217 int msb = lsb - (len - 1);
752b1be9 1218 tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
d5690ea4
RH
1219}
1220
b0bf5fe8
RH
1221static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
1222 int ofs, int len)
1223{
1224 tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
1225}
1226
8c081b18 1227static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
48bb3750 1228{
8c081b18
RH
1229 ptrdiff_t off = dest - s->code_ptr;
1230 if (off == (int16_t)off) {
48bb3750
RH
1231 tcg_out_insn(s, RI, BRC, cc, off);
1232 } else if (off == (int32_t)off) {
1233 tcg_out_insn(s, RIL, BRCL, cc, off);
1234 } else {
8c081b18 1235 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
48bb3750
RH
1236 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1237 }
1238}
1239
bec16311 1240static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
48bb3750 1241{
48bb3750 1242 if (l->has_value) {
8c081b18 1243 tgen_gotoi(s, cc, l->u.value_ptr);
48bb3750
RH
1244 } else if (USE_LONG_BRANCHES) {
1245 tcg_out16(s, RIL_BRCL | (cc << 4));
bec16311 1246 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, -2);
8c081b18 1247 s->code_ptr += 2;
48bb3750
RH
1248 } else {
1249 tcg_out16(s, RI_BRC | (cc << 4));
bec16311 1250 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, -2);
8c081b18 1251 s->code_ptr += 1;
48bb3750
RH
1252 }
1253}
1254
1255static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
bec16311 1256 TCGReg r1, TCGReg r2, TCGLabel *l)
48bb3750 1257{
8c081b18 1258 intptr_t off;
48bb3750
RH
1259
1260 if (l->has_value) {
8c081b18 1261 off = l->u.value_ptr - s->code_ptr;
48bb3750
RH
1262 } else {
1263 /* We need to keep the offset unchanged for retranslation. */
8c081b18 1264 off = s->code_ptr[1];
bec16311 1265 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
48bb3750
RH
1266 }
1267
1268 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1269 tcg_out16(s, off);
1270 tcg_out16(s, cc << 12 | (opc & 0xff));
1271}
1272
1273static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
bec16311 1274 TCGReg r1, int i2, TCGLabel *l)
48bb3750 1275{
48bb3750
RH
1276 tcg_target_long off;
1277
1278 if (l->has_value) {
8c081b18 1279 off = l->u.value_ptr - s->code_ptr;
48bb3750
RH
1280 } else {
1281 /* We need to keep the offset unchanged for retranslation. */
8c081b18 1282 off = s->code_ptr[1];
bec16311 1283 tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, -2);
48bb3750
RH
1284 }
1285
1286 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1287 tcg_out16(s, off);
1288 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1289}
1290
1291static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
bec16311 1292 TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
48bb3750
RH
1293{
1294 int cc;
1295
b2c98d9d 1296 if (s390_facilities & FACILITY_GEN_INST_EXT) {
b879f308 1297 bool is_unsigned = is_unsigned_cond(c);
48bb3750
RH
1298 bool in_range;
1299 S390Opcode opc;
1300
1301 cc = tcg_cond_to_s390_cond[c];
1302
1303 if (!c2const) {
1304 opc = (type == TCG_TYPE_I32
1305 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1306 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
bec16311 1307 tgen_compare_branch(s, opc, cc, r1, c2, l);
48bb3750
RH
1308 return;
1309 }
1310
1311 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1312 If the immediate we've been given does not fit that range, we'll
1313 fall back to separate compare and branch instructions using the
1314 larger comparison range afforded by COMPARE IMMEDIATE. */
1315 if (type == TCG_TYPE_I32) {
1316 if (is_unsigned) {
1317 opc = RIE_CLIJ;
1318 in_range = (uint32_t)c2 == (uint8_t)c2;
1319 } else {
1320 opc = RIE_CIJ;
1321 in_range = (int32_t)c2 == (int8_t)c2;
1322 }
1323 } else {
1324 if (is_unsigned) {
1325 opc = RIE_CLGIJ;
1326 in_range = (uint64_t)c2 == (uint8_t)c2;
1327 } else {
1328 opc = RIE_CGIJ;
1329 in_range = (int64_t)c2 == (int8_t)c2;
1330 }
1331 }
1332 if (in_range) {
bec16311 1333 tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
48bb3750
RH
1334 return;
1335 }
1336 }
1337
65839b56 1338 cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
bec16311 1339 tgen_branch(s, cc, l);
48bb3750
RH
1340}
1341
a8111212 1342static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
48bb3750 1343{
8c081b18 1344 ptrdiff_t off = dest - s->code_ptr;
48bb3750
RH
1345 if (off == (int32_t)off) {
1346 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1347 } else {
8c081b18 1348 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
48bb3750
RH
1349 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1350 }
1351}
1352
a5a04f28 1353static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
48bb3750
RH
1354 TCGReg base, TCGReg index, int disp)
1355{
3c8691f5 1356 switch (opc & (MO_SSIZE | MO_BSWAP)) {
a5a04f28 1357 case MO_UB:
48bb3750
RH
1358 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1359 break;
a5a04f28 1360 case MO_SB:
48bb3750
RH
1361 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1362 break;
b8dd88b8
RH
1363
1364 case MO_UW | MO_BSWAP:
1365 /* swapped unsigned halfword load with upper bits zeroed */
1366 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1367 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1368 break;
a5a04f28 1369 case MO_UW:
b8dd88b8
RH
1370 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1371 break;
1372
1373 case MO_SW | MO_BSWAP:
1374 /* swapped sign-extended halfword load */
1375 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1376 tgen_ext16s(s, TCG_TYPE_I64, data, data);
48bb3750 1377 break;
a5a04f28 1378 case MO_SW:
b8dd88b8
RH
1379 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1380 break;
1381
1382 case MO_UL | MO_BSWAP:
1383 /* swapped unsigned int load with upper bits zeroed */
1384 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1385 tgen_ext32u(s, data, data);
48bb3750 1386 break;
a5a04f28 1387 case MO_UL:
b8dd88b8
RH
1388 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1389 break;
1390
1391 case MO_SL | MO_BSWAP:
1392 /* swapped sign-extended int load */
1393 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1394 tgen_ext32s(s, data, data);
48bb3750 1395 break;
a5a04f28 1396 case MO_SL:
b8dd88b8
RH
1397 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1398 break;
1399
1400 case MO_Q | MO_BSWAP:
1401 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
48bb3750 1402 break;
a5a04f28 1403 case MO_Q:
b8dd88b8 1404 tcg_out_insn(s, RXY, LG, data, base, index, disp);
48bb3750 1405 break;
b8dd88b8 1406
48bb3750
RH
1407 default:
1408 tcg_abort();
1409 }
1410}
1411
a5a04f28 1412static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc, TCGReg data,
48bb3750
RH
1413 TCGReg base, TCGReg index, int disp)
1414{
3c8691f5 1415 switch (opc & (MO_SIZE | MO_BSWAP)) {
a5a04f28 1416 case MO_UB:
48bb3750
RH
1417 if (disp >= 0 && disp < 0x1000) {
1418 tcg_out_insn(s, RX, STC, data, base, index, disp);
1419 } else {
1420 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1421 }
1422 break;
b8dd88b8
RH
1423
1424 case MO_UW | MO_BSWAP:
1425 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1426 break;
a5a04f28 1427 case MO_UW:
b8dd88b8 1428 if (disp >= 0 && disp < 0x1000) {
48bb3750
RH
1429 tcg_out_insn(s, RX, STH, data, base, index, disp);
1430 } else {
1431 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1432 }
1433 break;
b8dd88b8
RH
1434
1435 case MO_UL | MO_BSWAP:
1436 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1437 break;
a5a04f28 1438 case MO_UL:
b8dd88b8 1439 if (disp >= 0 && disp < 0x1000) {
48bb3750
RH
1440 tcg_out_insn(s, RX, ST, data, base, index, disp);
1441 } else {
1442 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1443 }
1444 break;
b8dd88b8
RH
1445
1446 case MO_Q | MO_BSWAP:
1447 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1448 break;
a5a04f28 1449 case MO_Q:
b8dd88b8 1450 tcg_out_insn(s, RXY, STG, data, base, index, disp);
48bb3750 1451 break;
b8dd88b8 1452
48bb3750
RH
1453 default:
1454 tcg_abort();
1455 }
1456}
1457
1458#if defined(CONFIG_SOFTMMU)
659ef5cb
RH
1459#include "tcg-ldst.inc.c"
1460
fb596415
RH
1461/* We're expecting to use a 20-bit signed offset on the tlb memory ops.
1462 Using the offset of the second entry in the last tlb table ensures
1463 that we can index all of the elements of the first entry. */
1464QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
1465 > 0x7ffff);
1466
1467/* Load and compare a TLB entry, leaving the flags set. Loads the TLB
1468 addend into R2. Returns a register with the santitized guest address. */
1469static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
1470 int mem_index, bool is_ld)
48bb3750 1471{
85aa8081
RH
1472 unsigned s_bits = opc & MO_SIZE;
1473 unsigned a_bits = get_alignment_bits(opc);
1474 unsigned s_mask = (1 << s_bits) - 1;
1475 unsigned a_mask = (1 << a_bits) - 1;
a5e39810
RH
1476 int ofs, a_off;
1477 uint64_t tlb_mask;
1478
1479 /* For aligned accesses, we check the first byte and include the alignment
1480 bits within the address. For unaligned access, we check that we don't
1481 cross pages using the address of the last byte of the access. */
85aa8081
RH
1482 a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
1483 tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
fb596415 1484
b2c98d9d 1485 if (s390_facilities & FACILITY_GEN_INST_EXT) {
547ec121
RH
1486 tcg_out_risbg(s, TCG_REG_R2, addr_reg,
1487 64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
1488 63 - CPU_TLB_ENTRY_BITS,
1489 64 + CPU_TLB_ENTRY_BITS - TARGET_PAGE_BITS, 1);
a5e39810
RH
1490 if (a_off) {
1491 tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
1492 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
1493 } else {
1494 tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
1495 }
48bb3750 1496 } else {
547ec121
RH
1497 tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
1498 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
a5e39810 1499 tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
547ec121
RH
1500 tgen_andi(s, TCG_TYPE_I64, TCG_REG_R2,
1501 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1502 tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
48bb3750
RH
1503 }
1504
fb596415 1505 if (is_ld) {
9349b4f9 1506 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
fb596415
RH
1507 } else {
1508 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
48bb3750 1509 }
48bb3750 1510 if (TARGET_LONG_BITS == 32) {
fb596415 1511 tcg_out_mem(s, RX_C, RXY_CY, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
48bb3750 1512 } else {
fb596415 1513 tcg_out_mem(s, 0, RXY_CG, TCG_REG_R3, TCG_REG_R2, TCG_AREG0, ofs);
48bb3750
RH
1514 }
1515
fb596415
RH
1516 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1517 tcg_out_mem(s, 0, RXY_LG, TCG_REG_R2, TCG_REG_R2, TCG_AREG0, ofs);
1518
48bb3750 1519 if (TARGET_LONG_BITS == 32) {
fb596415
RH
1520 tgen_ext32u(s, TCG_REG_R3, addr_reg);
1521 return TCG_REG_R3;
48bb3750 1522 }
fb596415
RH
1523 return addr_reg;
1524}
48bb3750 1525
3972ef6f
RH
1526static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
1527 TCGReg data, TCGReg addr,
fb596415
RH
1528 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
1529{
1530 TCGLabelQemuLdst *label = new_ldst_label(s);
1531
1532 label->is_ld = is_ld;
3972ef6f 1533 label->oi = oi;
fb596415
RH
1534 label->datalo_reg = data;
1535 label->addrlo_reg = addr;
fb596415
RH
1536 label->raddr = raddr;
1537 label->label_ptr[0] = label_ptr;
1538}
48bb3750 1539
fb596415
RH
1540static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1541{
1542 TCGReg addr_reg = lb->addrlo_reg;
1543 TCGReg data_reg = lb->datalo_reg;
3972ef6f
RH
1544 TCGMemOpIdx oi = lb->oi;
1545 TCGMemOp opc = get_memop(oi);
48bb3750 1546
fb596415 1547 patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);
48bb3750 1548
fb596415
RH
1549 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1550 if (TARGET_LONG_BITS == 64) {
1551 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1552 }
3972ef6f 1553 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
fb596415 1554 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
2b7ec66f 1555 tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
fb596415 1556 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
65a62a75 1557
fb596415 1558 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
48bb3750
RH
1559}
1560
fb596415 1561static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
48bb3750 1562{
fb596415
RH
1563 TCGReg addr_reg = lb->addrlo_reg;
1564 TCGReg data_reg = lb->datalo_reg;
3972ef6f
RH
1565 TCGMemOpIdx oi = lb->oi;
1566 TCGMemOp opc = get_memop(oi);
fb596415
RH
1567
1568 patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);
1569
1570 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
1571 if (TARGET_LONG_BITS == 64) {
1572 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
1573 }
1574 switch (opc & MO_SIZE) {
1575 case MO_UB:
1576 tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1577 break;
1578 case MO_UW:
1579 tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1580 break;
1581 case MO_UL:
1582 tgen_ext32u(s, TCG_REG_R4, data_reg);
1583 break;
1584 case MO_Q:
1585 tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
1586 break;
1587 default:
1588 tcg_abort();
1589 }
3972ef6f 1590 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
fb596415 1591 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
2b7ec66f 1592 tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
fb596415
RH
1593
1594 tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
48bb3750
RH
1595}
1596#else
1597static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1598 TCGReg *index_reg, tcg_target_long *disp)
1599{
1600 if (TARGET_LONG_BITS == 32) {
1601 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1602 *addr_reg = TCG_TMP0;
1603 }
b76f21a7 1604 if (guest_base < 0x80000) {
48bb3750 1605 *index_reg = TCG_REG_NONE;
b76f21a7 1606 *disp = guest_base;
48bb3750
RH
1607 } else {
1608 *index_reg = TCG_GUEST_BASE_REG;
1609 *disp = 0;
1610 }
1611}
1612#endif /* CONFIG_SOFTMMU */
1613
f24efee4 1614static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
59227d5d 1615 TCGMemOpIdx oi)
48bb3750 1616{
59227d5d 1617 TCGMemOp opc = get_memop(oi);
fb596415 1618#ifdef CONFIG_SOFTMMU
59227d5d 1619 unsigned mem_index = get_mmuidx(oi);
fb596415
RH
1620 tcg_insn_unit *label_ptr;
1621 TCGReg base_reg;
1622
1623 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);
1624
cd3b29b7
AJ
1625 /* We need to keep the offset unchanged for retranslation. */
1626 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1627 label_ptr = s->code_ptr;
1628 s->code_ptr += 1;
fb596415
RH
1629
1630 tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
48bb3750 1631
3972ef6f 1632 add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
48bb3750 1633#else
f24efee4
RH
1634 TCGReg index_reg;
1635 tcg_target_long disp;
1636
48bb3750
RH
1637 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1638 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1639#endif
1640}
1641
f24efee4 1642static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
59227d5d 1643 TCGMemOpIdx oi)
48bb3750 1644{
59227d5d 1645 TCGMemOp opc = get_memop(oi);
fb596415 1646#ifdef CONFIG_SOFTMMU
59227d5d 1647 unsigned mem_index = get_mmuidx(oi);
fb596415
RH
1648 tcg_insn_unit *label_ptr;
1649 TCGReg base_reg;
1650
1651 base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);
1652
cd3b29b7
AJ
1653 /* We need to keep the offset unchanged for retranslation. */
1654 tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
1655 label_ptr = s->code_ptr;
1656 s->code_ptr += 1;
fb596415
RH
1657
1658 tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);
48bb3750 1659
3972ef6f 1660 add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
48bb3750 1661#else
f24efee4
RH
1662 TCGReg index_reg;
1663 tcg_target_long disp;
1664
48bb3750
RH
1665 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1666 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1667#endif
2827822e
AG
1668}
1669
48bb3750
RH
1670# define OP_32_64(x) \
1671 case glue(glue(INDEX_op_,x),_i32): \
1672 case glue(glue(INDEX_op_,x),_i64)
48bb3750 1673
a9751609 1674static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
2827822e
AG
1675 const TCGArg *args, const int *const_args)
1676{
c2097136 1677 S390Opcode op, op2;
0db921e6 1678 TCGArg a0, a1, a2;
48bb3750
RH
1679
1680 switch (opc) {
1681 case INDEX_op_exit_tb:
46644483
RH
1682 /* Reuse the zeroing that exists for goto_ptr. */
1683 a0 = args[0];
1684 if (a0 == 0) {
1685 tgen_gotoi(s, S390_CC_ALWAYS, s->code_gen_epilogue);
1686 } else {
1687 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
1688 tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
1689 }
48bb3750
RH
1690 break;
1691
1692 case INDEX_op_goto_tb:
f309101c 1693 if (s->tb_jmp_insn_offset) {
ed3d51ec
SF
1694 /* branch displacement must be aligned for atomic patching;
1695 * see if we need to add extra nop before branch
1696 */
1697 if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
1698 tcg_out16(s, NOP);
1699 }
a10c64e0 1700 tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
f309101c 1701 s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
a10c64e0 1702 s->code_ptr += 2;
48bb3750 1703 } else {
f309101c
SF
1704 /* load address stored at s->tb_jmp_target_addr + args[0] */
1705 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0,
1706 s->tb_jmp_target_addr + args[0]);
48bb3750
RH
1707 /* and go there */
1708 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1709 }
f309101c 1710 s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
48bb3750
RH
1711 break;
1712
46644483
RH
1713 case INDEX_op_goto_ptr:
1714 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, args[0]);
1715 break;
1716
48bb3750
RH
1717 OP_32_64(ld8u):
1718 /* ??? LLC (RXY format) is only present with the extended-immediate
1719 facility, whereas LLGC is always present. */
1720 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1721 break;
1722
1723 OP_32_64(ld8s):
1724 /* ??? LB is no smaller than LGB, so no point to using it. */
1725 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1726 break;
1727
1728 OP_32_64(ld16u):
1729 /* ??? LLH (RXY format) is only present with the extended-immediate
1730 facility, whereas LLGH is always present. */
1731 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1732 break;
1733
1734 case INDEX_op_ld16s_i32:
1735 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1736 break;
1737
1738 case INDEX_op_ld_i32:
1739 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1740 break;
1741
1742 OP_32_64(st8):
1743 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1744 TCG_REG_NONE, args[2]);
1745 break;
1746
1747 OP_32_64(st16):
1748 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1749 TCG_REG_NONE, args[2]);
1750 break;
1751
1752 case INDEX_op_st_i32:
1753 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1754 break;
1755
1756 case INDEX_op_add_i32:
0db921e6 1757 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1758 if (const_args[2]) {
0db921e6
RH
1759 do_addi_32:
1760 if (a0 == a1) {
1761 if (a2 == (int16_t)a2) {
1762 tcg_out_insn(s, RI, AHI, a0, a2);
1763 break;
1764 }
b2c98d9d 1765 if (s390_facilities & FACILITY_EXT_IMM) {
0db921e6
RH
1766 tcg_out_insn(s, RIL, AFI, a0, a2);
1767 break;
1768 }
1769 }
1770 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1771 } else if (a0 == a1) {
1772 tcg_out_insn(s, RR, AR, a0, a2);
48bb3750 1773 } else {
0db921e6 1774 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
1775 }
1776 break;
1777 case INDEX_op_sub_i32:
0db921e6 1778 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
48bb3750 1779 if (const_args[2]) {
0db921e6
RH
1780 a2 = -a2;
1781 goto do_addi_32;
c2097136
RH
1782 } else if (a0 == a1) {
1783 tcg_out_insn(s, RR, SR, a0, a2);
1784 } else {
1785 tcg_out_insn(s, RRF, SRK, a0, a1, a2);
48bb3750
RH
1786 }
1787 break;
1788
1789 case INDEX_op_and_i32:
c2097136 1790 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1791 if (const_args[2]) {
c2097136
RH
1792 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1793 tgen_andi(s, TCG_TYPE_I32, a0, a2);
1794 } else if (a0 == a1) {
1795 tcg_out_insn(s, RR, NR, a0, a2);
48bb3750 1796 } else {
c2097136 1797 tcg_out_insn(s, RRF, NRK, a0, a1, a2);
48bb3750
RH
1798 }
1799 break;
1800 case INDEX_op_or_i32:
c2097136 1801 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1802 if (const_args[2]) {
c2097136
RH
1803 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1804 tgen64_ori(s, a0, a2);
1805 } else if (a0 == a1) {
1806 tcg_out_insn(s, RR, OR, a0, a2);
48bb3750 1807 } else {
c2097136 1808 tcg_out_insn(s, RRF, ORK, a0, a1, a2);
48bb3750
RH
1809 }
1810 break;
1811 case INDEX_op_xor_i32:
c2097136 1812 a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
48bb3750 1813 if (const_args[2]) {
c2097136
RH
1814 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1815 tgen64_xori(s, a0, a2);
1816 } else if (a0 == a1) {
48bb3750 1817 tcg_out_insn(s, RR, XR, args[0], args[2]);
c2097136
RH
1818 } else {
1819 tcg_out_insn(s, RRF, XRK, a0, a1, a2);
48bb3750
RH
1820 }
1821 break;
1822
1823 case INDEX_op_neg_i32:
1824 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1825 break;
1826
1827 case INDEX_op_mul_i32:
1828 if (const_args[2]) {
1829 if ((int32_t)args[2] == (int16_t)args[2]) {
1830 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1831 } else {
1832 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1833 }
1834 } else {
1835 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1836 }
1837 break;
1838
1839 case INDEX_op_div2_i32:
1840 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1841 break;
1842 case INDEX_op_divu2_i32:
1843 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1844 break;
1845
1846 case INDEX_op_shl_i32:
1847 op = RS_SLL;
c2097136 1848 op2 = RSY_SLLK;
48bb3750 1849 do_shift32:
c2097136
RH
1850 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1851 if (a0 == a1) {
1852 if (const_args[2]) {
1853 tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
1854 } else {
1855 tcg_out_sh32(s, op, a0, a2, 0);
1856 }
48bb3750 1857 } else {
c2097136
RH
1858 /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
1859 if (const_args[2]) {
1860 tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
1861 } else {
1862 tcg_out_sh64(s, op2, a0, a1, a2, 0);
1863 }
48bb3750
RH
1864 }
1865 break;
1866 case INDEX_op_shr_i32:
1867 op = RS_SRL;
c2097136 1868 op2 = RSY_SRLK;
48bb3750
RH
1869 goto do_shift32;
1870 case INDEX_op_sar_i32:
1871 op = RS_SRA;
c2097136 1872 op2 = RSY_SRAK;
48bb3750
RH
1873 goto do_shift32;
1874
1875 case INDEX_op_rotl_i32:
1876 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1877 if (const_args[2]) {
1878 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1879 } else {
1880 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1881 }
1882 break;
1883 case INDEX_op_rotr_i32:
1884 if (const_args[2]) {
1885 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1886 TCG_REG_NONE, (32 - args[2]) & 31);
1887 } else {
1888 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1889 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1890 }
1891 break;
1892
1893 case INDEX_op_ext8s_i32:
1894 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1895 break;
1896 case INDEX_op_ext16s_i32:
1897 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1898 break;
1899 case INDEX_op_ext8u_i32:
1900 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1901 break;
1902 case INDEX_op_ext16u_i32:
1903 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1904 break;
1905
1906 OP_32_64(bswap16):
1907 /* The TCG bswap definition requires bits 0-47 already be zero.
1908 Thus we don't need the G-type insns to implement bswap16_i64. */
1909 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1910 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1911 break;
1912 OP_32_64(bswap32):
1913 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1914 break;
1915
3790b918 1916 case INDEX_op_add2_i32:
ad19b358
RH
1917 if (const_args[4]) {
1918 tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
1919 } else {
1920 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1921 }
3790b918
RH
1922 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1923 break;
1924 case INDEX_op_sub2_i32:
ad19b358
RH
1925 if (const_args[4]) {
1926 tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
1927 } else {
1928 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1929 }
3790b918
RH
1930 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1931 break;
1932
48bb3750 1933 case INDEX_op_br:
bec16311 1934 tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
48bb3750
RH
1935 break;
1936
1937 case INDEX_op_brcond_i32:
1938 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
bec16311 1939 args[1], const_args[1], arg_label(args[3]));
48bb3750
RH
1940 break;
1941 case INDEX_op_setcond_i32:
1942 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1943 args[2], const_args[2]);
1944 break;
96a9f093
RH
1945 case INDEX_op_movcond_i32:
1946 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
7af525af 1947 args[2], const_args[2], args[3], const_args[3]);
96a9f093 1948 break;
48bb3750 1949
f24efee4 1950 case INDEX_op_qemu_ld_i32:
48bb3750 1951 /* ??? Technically we can use a non-extending instruction. */
f24efee4 1952 case INDEX_op_qemu_ld_i64:
59227d5d 1953 tcg_out_qemu_ld(s, args[0], args[1], args[2]);
48bb3750 1954 break;
f24efee4
RH
1955 case INDEX_op_qemu_st_i32:
1956 case INDEX_op_qemu_st_i64:
59227d5d 1957 tcg_out_qemu_st(s, args[0], args[1], args[2]);
48bb3750
RH
1958 break;
1959
48bb3750
RH
1960 case INDEX_op_ld16s_i64:
1961 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1962 break;
1963 case INDEX_op_ld32u_i64:
1964 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1965 break;
1966 case INDEX_op_ld32s_i64:
1967 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1968 break;
1969 case INDEX_op_ld_i64:
1970 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1971 break;
1972
1973 case INDEX_op_st32_i64:
1974 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1975 break;
1976 case INDEX_op_st_i64:
1977 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1978 break;
1979
1980 case INDEX_op_add_i64:
0db921e6 1981 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 1982 if (const_args[2]) {
0db921e6
RH
1983 do_addi_64:
1984 if (a0 == a1) {
1985 if (a2 == (int16_t)a2) {
1986 tcg_out_insn(s, RI, AGHI, a0, a2);
1987 break;
1988 }
b2c98d9d 1989 if (s390_facilities & FACILITY_EXT_IMM) {
0db921e6
RH
1990 if (a2 == (int32_t)a2) {
1991 tcg_out_insn(s, RIL, AGFI, a0, a2);
1992 break;
1993 } else if (a2 == (uint32_t)a2) {
1994 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1995 break;
1996 } else if (-a2 == (uint32_t)-a2) {
1997 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
1998 break;
1999 }
2000 }
2001 }
2002 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
2003 } else if (a0 == a1) {
2004 tcg_out_insn(s, RRE, AGR, a0, a2);
48bb3750 2005 } else {
0db921e6 2006 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
2007 }
2008 break;
2009 case INDEX_op_sub_i64:
0db921e6 2010 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2011 if (const_args[2]) {
0db921e6
RH
2012 a2 = -a2;
2013 goto do_addi_64;
c2097136
RH
2014 } else if (a0 == a1) {
2015 tcg_out_insn(s, RRE, SGR, a0, a2);
48bb3750 2016 } else {
c2097136 2017 tcg_out_insn(s, RRF, SGRK, a0, a1, a2);
48bb3750
RH
2018 }
2019 break;
2020
2021 case INDEX_op_and_i64:
c2097136 2022 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2023 if (const_args[2]) {
c2097136 2024 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
07ff7983 2025 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
c2097136 2026 } else if (a0 == a1) {
48bb3750 2027 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
c2097136
RH
2028 } else {
2029 tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
48bb3750
RH
2030 }
2031 break;
2032 case INDEX_op_or_i64:
c2097136 2033 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2034 if (const_args[2]) {
c2097136
RH
2035 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2036 tgen64_ori(s, a0, a2);
2037 } else if (a0 == a1) {
2038 tcg_out_insn(s, RRE, OGR, a0, a2);
48bb3750 2039 } else {
c2097136 2040 tcg_out_insn(s, RRF, OGRK, a0, a1, a2);
48bb3750
RH
2041 }
2042 break;
2043 case INDEX_op_xor_i64:
c2097136 2044 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 2045 if (const_args[2]) {
c2097136
RH
2046 tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
2047 tgen64_xori(s, a0, a2);
2048 } else if (a0 == a1) {
2049 tcg_out_insn(s, RRE, XGR, a0, a2);
48bb3750 2050 } else {
c2097136 2051 tcg_out_insn(s, RRF, XGRK, a0, a1, a2);
48bb3750
RH
2052 }
2053 break;
2054
2055 case INDEX_op_neg_i64:
2056 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
2057 break;
2058 case INDEX_op_bswap64_i64:
2059 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
2060 break;
2061
2062 case INDEX_op_mul_i64:
2063 if (const_args[2]) {
2064 if (args[2] == (int16_t)args[2]) {
2065 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
2066 } else {
2067 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
2068 }
2069 } else {
2070 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
2071 }
2072 break;
2073
2074 case INDEX_op_div2_i64:
2075 /* ??? We get an unnecessary sign-extension of the dividend
2076 into R3 with this definition, but as we do in fact always
2077 produce both quotient and remainder using INDEX_op_div_i64
2078 instead requires jumping through even more hoops. */
2079 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
2080 break;
2081 case INDEX_op_divu2_i64:
2082 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
2083 break;
36017dc6
RH
2084 case INDEX_op_mulu2_i64:
2085 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
2086 break;
48bb3750
RH
2087
2088 case INDEX_op_shl_i64:
2089 op = RSY_SLLG;
2090 do_shift64:
2091 if (const_args[2]) {
2092 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
2093 } else {
2094 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
2095 }
2096 break;
2097 case INDEX_op_shr_i64:
2098 op = RSY_SRLG;
2099 goto do_shift64;
2100 case INDEX_op_sar_i64:
2101 op = RSY_SRAG;
2102 goto do_shift64;
2103
2104 case INDEX_op_rotl_i64:
2105 if (const_args[2]) {
2106 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2107 TCG_REG_NONE, args[2]);
2108 } else {
2109 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2110 }
2111 break;
2112 case INDEX_op_rotr_i64:
2113 if (const_args[2]) {
2114 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2115 TCG_REG_NONE, (64 - args[2]) & 63);
2116 } else {
2117 /* We can use the smaller 32-bit negate because only the
2118 low 6 bits are examined for the rotate. */
2119 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2120 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2121 }
2122 break;
2123
2124 case INDEX_op_ext8s_i64:
2125 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2126 break;
2127 case INDEX_op_ext16s_i64:
2128 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2129 break;
4f2331e5 2130 case INDEX_op_ext_i32_i64:
48bb3750
RH
2131 case INDEX_op_ext32s_i64:
2132 tgen_ext32s(s, args[0], args[1]);
2133 break;
2134 case INDEX_op_ext8u_i64:
2135 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2136 break;
2137 case INDEX_op_ext16u_i64:
2138 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2139 break;
4f2331e5 2140 case INDEX_op_extu_i32_i64:
48bb3750
RH
2141 case INDEX_op_ext32u_i64:
2142 tgen_ext32u(s, args[0], args[1]);
2143 break;
2144
3790b918 2145 case INDEX_op_add2_i64:
ad19b358
RH
2146 if (const_args[4]) {
2147 if ((int64_t)args[4] >= 0) {
2148 tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
2149 } else {
2150 tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
2151 }
2152 } else {
2153 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2154 }
3790b918
RH
2155 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2156 break;
2157 case INDEX_op_sub2_i64:
ad19b358
RH
2158 if (const_args[4]) {
2159 if ((int64_t)args[4] >= 0) {
2160 tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
2161 } else {
2162 tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
2163 }
2164 } else {
2165 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2166 }
3790b918
RH
2167 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2168 break;
2169
48bb3750
RH
2170 case INDEX_op_brcond_i64:
2171 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
bec16311 2172 args[1], const_args[1], arg_label(args[3]));
48bb3750
RH
2173 break;
2174 case INDEX_op_setcond_i64:
2175 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2176 args[2], const_args[2]);
2177 break;
96a9f093
RH
2178 case INDEX_op_movcond_i64:
2179 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
7af525af 2180 args[2], const_args[2], args[3], const_args[3]);
96a9f093 2181 break;
48bb3750 2182
d5690ea4 2183 OP_32_64(deposit):
752b1be9
RH
2184 a0 = args[0], a1 = args[1], a2 = args[2];
2185 if (const_args[1]) {
2186 tgen_deposit(s, a0, a2, args[3], args[4], 1);
2187 } else {
2188 /* Since we can't support "0Z" as a constraint, we allow a1 in
2189 any register. Fix things up as if a matching constraint. */
2190 if (a0 != a1) {
2191 TCGType type = (opc == INDEX_op_deposit_i64);
2192 if (a0 == a2) {
2193 tcg_out_mov(s, type, TCG_TMP0, a2);
2194 a2 = TCG_TMP0;
2195 }
2196 tcg_out_mov(s, type, a0, a1);
2197 }
2198 tgen_deposit(s, a0, a2, args[3], args[4], 0);
2199 }
d5690ea4 2200 break;
752b1be9 2201
b0bf5fe8
RH
2202 OP_32_64(extract):
2203 tgen_extract(s, args[0], args[1], args[2], args[3]);
2204 break;
d5690ea4 2205
ce411066
RH
2206 case INDEX_op_clz_i64:
2207 tgen_clz(s, args[0], args[1], args[2], const_args[2]);
2208 break;
2209
c9314d61
PK
2210 case INDEX_op_mb:
2211 /* The host memory model is quite strong, we simply need to
2212 serialize the instruction stream. */
2213 if (args[0] & TCG_MO_ST_LD) {
2214 tcg_out_insn(s, RR, BCR,
b2c98d9d 2215 s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0);
c9314d61
PK
2216 }
2217 break;
2218
96d0ee7f
RH
2219 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2220 case INDEX_op_mov_i64:
2221 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
2222 case INDEX_op_movi_i64:
2223 case INDEX_op_call: /* Always emitted via tcg_out_call. */
48bb3750 2224 default:
48bb3750
RH
2225 tcg_abort();
2226 }
2827822e
AG
2227}
2228
f69d277e
RH
/* Return the register/constant constraint set for opcode OP, or NULL if
   the opcode is not implemented by this backend.  The constraint strings
   use the letters defined by target_parse_constraint: "r" any gpr, "L"
   a gpr usable in a qemu_ld/st address, "a"/"b" the fixed R2/R3 pair,
   "0"/"1" matching an output operand, and the immediate classes
   I/J/C/M/N/Z/A checked in tcg_target_const_match.  Many choices depend
   on runtime facility bits (s390_facilities) probed at startup.  */
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
    static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
    static const TCGTargetOpDef r_rC = { .args_ct_str = { "r", "rC" } };
    static const TCGTargetOpDef r_rZ = { .args_ct_str = { "r", "rZ" } };
    static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rM = { .args_ct_str = { "r", "r", "rM" } };
    static const TCGTargetOpDef r_0_r = { .args_ct_str = { "r", "0", "r" } };
    static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } };
    static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } };
    static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } };
    static const TCGTargetOpDef r_0_rN = { .args_ct_str = { "r", "0", "rN" } };
    static const TCGTargetOpDef r_0_rM = { .args_ct_str = { "r", "0", "rM" } };
    /* Double-word add/sub: two outputs, two matched inputs, low/high
       second operand.  */
    static const TCGTargetOpDef a2_r
        = { .args_ct_str = { "r", "r", "0", "1", "r", "r" } };
    static const TCGTargetOpDef a2_ri
        = { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } };
    static const TCGTargetOpDef a2_rA
        = { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return &r_r;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
        return &r_r_ri;
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        /* With distinct-operands facility, a 3-address form (SGRK etc.)
           is available; otherwise the output must match input 1.  */
        return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);

    case INDEX_op_mul_i32:
        /* If we have the general-instruction-extensions, then we have
           MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we
           have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit.  */
        return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI);
    case INDEX_op_mul_i64:
        return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI);

    case INDEX_op_or_i32:
        /* The use of [iNM] constraints are optimization only, since a full
           64-bit immediate OR can always be performed with 4 sequential
           OI[LH][LH] instructions.  By rejecting certain negative ranges,
           the immediate load plus the reg-reg OR is smaller.  */
        return (s390_facilities & FACILITY_EXT_IMM
                ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri)
                : &r_0_rN);
    case INDEX_op_or_i64:
        return (s390_facilities & FACILITY_EXT_IMM
                ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_rM : &r_0_rM)
                : &r_0_rN);

    case INDEX_op_xor_i32:
        /* Without EXT_IMM, no immediates are supported.  Otherwise,
           rejecting certain negative ranges leads to smaller code.  */
        return (s390_facilities & FACILITY_EXT_IMM
                ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri)
                : &r_0_r);
    case INDEX_op_xor_i64:
        return (s390_facilities & FACILITY_EXT_IMM
                ? (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_rM : &r_0_rM)
                : &r_0_r);

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);

    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return &r_r_ri;

    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
        /* Without EXT_IMM, only the LOAD AND TEST insn is available.  */
        return (s390_facilities & FACILITY_EXT_IMM ? &r_ri : &r_rZ);
    case INDEX_op_brcond_i64:
        return (s390_facilities & FACILITY_EXT_IMM ? &r_rC : &r_rZ);

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
        return &r_r;

    case INDEX_op_clz_i64:
        return &r_r_ri;

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return &r_L;
    case INDEX_op_qemu_st_i64:
    case INDEX_op_qemu_st_i32:
        return &L_L;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        {
            static const TCGTargetOpDef dep
                = { .args_ct_str = { "r", "rZ", "r" } };
            return &dep;
        }
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        {
            /* Without EXT_IMM, only the LOAD AND TEST insn is available.  */
            static const TCGTargetOpDef setc_z
                = { .args_ct_str = { "r", "r", "rZ" } };
            static const TCGTargetOpDef setc_c
                = { .args_ct_str = { "r", "r", "rC" } };
            return (s390_facilities & FACILITY_EXT_IMM ? &setc_c : &setc_z);
        }
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        {
            /* Without EXT_IMM, only the LOAD AND TEST insn is available.  */
            static const TCGTargetOpDef movc_z
                = { .args_ct_str = { "r", "r", "rZ", "r", "0" } };
            static const TCGTargetOpDef movc_c
                = { .args_ct_str = { "r", "r", "rC", "r", "0" } };
            /* With load-on-condition-2, the "false" operand may also be
               an immediate.  */
            static const TCGTargetOpDef movc_l
                = { .args_ct_str = { "r", "r", "rC", "rI", "0" } };
            return (s390_facilities & FACILITY_EXT_IMM
                    ? (s390_facilities & FACILITY_LOAD_ON_COND2
                       ? &movc_l : &movc_c)
                    : &movc_z);
        }
    case INDEX_op_div2_i32:
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i32:
    case INDEX_op_divu2_i64:
        {
            /* Divide uses the fixed even/odd register pair R2/R3.  */
            static const TCGTargetOpDef div2
                = { .args_ct_str = { "b", "a", "0", "1", "r" } };
            return &div2;
        }
    case INDEX_op_mulu2_i64:
        {
            static const TCGTargetOpDef mul2
                = { .args_ct_str = { "b", "a", "0", "r" } };
            return &mul2;
        }

    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r);
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r);

    default:
        break;
    }
    return NULL;
}
2436
/* Probe the CPU's facility bits into the global s390_facilities by
   executing STORE FACILITY LIST EXTENDED, when the kernel advertises
   it via the AT_HWCAP auxv entry.  If STFLE is unavailable,
   s390_facilities is left as-is (no optional facilities assumed).  */
static void query_s390_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway.  */
    if (hwcap & HWCAP_S390_STFLE) {
        /* STFLE takes its double-word count in r0 and the store address
           in the base register of its operand; bind them explicitly.  */
        register int r0 __asm__("0");
        register void *r1 __asm__("1");

        /* stfle 0(%r1) -- emitted as a raw opcode so the file assembles
           even with toolchains whose assembler predates the insn.  */
        r1 = &s390_facilities;
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
    }
}
2453
2454static void tcg_target_init(TCGContext *s)
2827822e 2455{
b2c98d9d 2456 query_s390_facilities();
48bb3750
RH
2457
2458 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2459 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2460
2461 tcg_regset_clear(tcg_target_call_clobber_regs);
2462 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2463 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2464 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2465 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2466 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2467 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
f24efee4
RH
2468 /* The r6 register is technically call-saved, but it's also a parameter
2469 register, so it can get killed by setup for the qemu_st helper. */
2470 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
48bb3750
RH
2471 /* The return register can be considered call-clobbered. */
2472 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2473
2474 tcg_regset_clear(s->reserved_regs);
2475 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2476 /* XXX many insns can't be used with R0, so we better avoid it for now */
2477 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2478 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2827822e
AG
2479}
2480
/* Total stack frame size: ABI register-save/backchain area, outgoing
   call-argument area, and the TCG temporary buffer.  */
#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                           + TCG_STATIC_CALL_ARGS_SIZE           \
                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))
/* Emit the prologue executed on entry to generated code (save the
   call-saved registers, allocate the frame, load env, jump to the TB)
   and the shared epilogue reached via exit_tb/goto_ptr.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifndef CONFIG_SOFTMMU
    /* Keep the guest base in a register when it won't fit in the
       displacement of a memory operand.  */
    if (guest_base >= 0x80000) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* First call argument is the CPU env pointer, second the TB entry.  */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);

    /* TB epilogue */
    tb_ret_addr = s->code_ptr;

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}
f167dc37
RH
2525
2526typedef struct {
d2e16f2c 2527 DebugFrameHeader h;
f167dc37
RH
2528 uint8_t fde_def_cfa[4];
2529 uint8_t fde_reg_ofs[18];
2530} DebugFrame;
2531
2532/* We're expecting a 2 byte uleb128 encoded value. */
2533QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
2534
2535#define ELF_HOST_MACHINE EM_S390
2536
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    /* Offsets of the saved registers within the caller's frame, matching
       the "stmg %r6,%r15,48(%r15)" in the prologue; with data_align 8,
       each factored offset N encodes CFA+8*N.  */
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};
2565
/* Register the code buffer with the in-process GDB JIT interface,
   attaching the unwind info above so debuggers can step through TBs.  */
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
This page took 0.953077 seconds and 4 git commands to generate.