tcg-s390: Use load-address for addition
[qemu.git] / tcg / s390 / tcg-target.c
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009 Ulrich Hecht <[email protected]>
5 * Copyright (c) 2009 Alexander Graf <[email protected]>
6 * Copyright (c) 2010 Richard Henderson <[email protected]>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 * THE SOFTWARE.
25 */
26
27/* We only support generating code for 64-bit mode. */
28#if TCG_TARGET_REG_BITS != 64
29#error "unsupported code generation mode"
30#endif
31
32/* ??? The translation blocks produced by TCG are generally small enough to
33 be entirely reachable with a 16-bit displacement. Leaving the option for
34 a 32-bit displacement here Just In Case. */
35#define USE_LONG_BRANCHES 0
36
37#define TCG_CT_CONST_32 0x0100
38#define TCG_CT_CONST_MULI 0x0800
39#define TCG_CT_CONST_ORI 0x2000
40#define TCG_CT_CONST_XORI 0x4000
41#define TCG_CT_CONST_CMPI 0x8000
42
43/* In several places within the instruction set, 0 means "no register"
44 rather than TCG_REG_R0. */
45#define TCG_REG_NONE 0
46
47/* A scratch register that may be used throughout the backend. */
48#define TCG_TMP0 TCG_REG_R14
49
50#ifdef CONFIG_USE_GUEST_BASE
51#define TCG_GUEST_BASE_REG TCG_REG_R13
52#else
53#define TCG_GUEST_BASE_REG TCG_REG_R0
54#endif
55
56#ifndef GUEST_BASE
57#define GUEST_BASE 0
58#endif
59
60
61/* All of the following instructions are prefixed with their instruction
62 format, and are defined as 8- or 16-bit quantities, even when the two
63 halves of the 16-bit quantity may appear 32 bits apart in the insn.
64 This makes it easy to copy the values from the tables in Appendix B. */
65typedef enum S390Opcode {
66 RIL_AFI = 0xc209,
67 RIL_AGFI = 0xc208,
68 RIL_ALFI = 0xc20b,
69 RIL_ALGFI = 0xc20a,
70 RIL_BRASL = 0xc005,
71 RIL_BRCL = 0xc004,
72 RIL_CFI = 0xc20d,
73 RIL_CGFI = 0xc20c,
74 RIL_CLFI = 0xc20f,
75 RIL_CLGFI = 0xc20e,
76 RIL_IIHF = 0xc008,
77 RIL_IILF = 0xc009,
78 RIL_LARL = 0xc000,
79 RIL_LGFI = 0xc001,
80 RIL_LGRL = 0xc408,
81 RIL_LLIHF = 0xc00e,
82 RIL_LLILF = 0xc00f,
83 RIL_LRL = 0xc40d,
84 RIL_MSFI = 0xc201,
85 RIL_MSGFI = 0xc200,
86 RIL_NIHF = 0xc00a,
87 RIL_NILF = 0xc00b,
88 RIL_OIHF = 0xc00c,
89 RIL_OILF = 0xc00d,
90 RIL_SLFI = 0xc205,
91 RIL_SLGFI = 0xc204,
92 RIL_XIHF = 0xc006,
93 RIL_XILF = 0xc007,
94
95 RI_AGHI = 0xa70b,
96 RI_AHI = 0xa70a,
97 RI_BRC = 0xa704,
98 RI_IIHH = 0xa500,
99 RI_IIHL = 0xa501,
100 RI_IILH = 0xa502,
101 RI_IILL = 0xa503,
102 RI_LGHI = 0xa709,
103 RI_LLIHH = 0xa50c,
104 RI_LLIHL = 0xa50d,
105 RI_LLILH = 0xa50e,
106 RI_LLILL = 0xa50f,
107 RI_MGHI = 0xa70d,
108 RI_MHI = 0xa70c,
109 RI_NIHH = 0xa504,
110 RI_NIHL = 0xa505,
111 RI_NILH = 0xa506,
112 RI_NILL = 0xa507,
113 RI_OIHH = 0xa508,
114 RI_OIHL = 0xa509,
115 RI_OILH = 0xa50a,
116 RI_OILL = 0xa50b,
117
118 RIE_CGIJ = 0xec7c,
119 RIE_CGRJ = 0xec64,
120 RIE_CIJ = 0xec7e,
121 RIE_CLGRJ = 0xec65,
122 RIE_CLIJ = 0xec7f,
123 RIE_CLGIJ = 0xec7d,
124 RIE_CLRJ = 0xec77,
125 RIE_CRJ = 0xec76,
126 RIE_RISBG = 0xec55,
127
128 RRE_AGR = 0xb908,
129 RRE_ALGR = 0xb90a,
130 RRE_ALCR = 0xb998,
131 RRE_ALCGR = 0xb988,
132 RRE_CGR = 0xb920,
133 RRE_CLGR = 0xb921,
134 RRE_DLGR = 0xb987,
135 RRE_DLR = 0xb997,
136 RRE_DSGFR = 0xb91d,
137 RRE_DSGR = 0xb90d,
138 RRE_LGBR = 0xb906,
139 RRE_LCGR = 0xb903,
140 RRE_LGFR = 0xb914,
141 RRE_LGHR = 0xb907,
142 RRE_LGR = 0xb904,
143 RRE_LLGCR = 0xb984,
144 RRE_LLGFR = 0xb916,
145 RRE_LLGHR = 0xb985,
146 RRE_LRVR = 0xb91f,
147 RRE_LRVGR = 0xb90f,
148 RRE_LTGR = 0xb902,
149 RRE_MLGR = 0xb986,
150 RRE_MSGR = 0xb90c,
151 RRE_MSR = 0xb252,
152 RRE_NGR = 0xb980,
153 RRE_OGR = 0xb981,
154 RRE_SGR = 0xb909,
155 RRE_SLGR = 0xb90b,
156 RRE_SLBR = 0xb999,
157 RRE_SLBGR = 0xb989,
158 RRE_XGR = 0xb982,
159
160 RRF_LOCR = 0xb9f2,
161 RRF_LOCGR = 0xb9e2,
162
163 RR_AR = 0x1a,
164 RR_ALR = 0x1e,
165 RR_BASR = 0x0d,
166 RR_BCR = 0x07,
167 RR_CLR = 0x15,
168 RR_CR = 0x19,
169 RR_DR = 0x1d,
170 RR_LCR = 0x13,
171 RR_LR = 0x18,
172 RR_LTR = 0x12,
173 RR_NR = 0x14,
174 RR_OR = 0x16,
175 RR_SR = 0x1b,
176 RR_SLR = 0x1f,
177 RR_XR = 0x17,
178
179 RSY_RLL = 0xeb1d,
180 RSY_RLLG = 0xeb1c,
181 RSY_SLLG = 0xeb0d,
182 RSY_SRAG = 0xeb0a,
183 RSY_SRLG = 0xeb0c,
184
185 RS_SLL = 0x89,
186 RS_SRA = 0x8a,
187 RS_SRL = 0x88,
188
189 RXY_AG = 0xe308,
190 RXY_AY = 0xe35a,
191 RXY_CG = 0xe320,
192 RXY_CY = 0xe359,
193 RXY_LAY = 0xe371,
194 RXY_LB = 0xe376,
195 RXY_LG = 0xe304,
196 RXY_LGB = 0xe377,
197 RXY_LGF = 0xe314,
198 RXY_LGH = 0xe315,
199 RXY_LHY = 0xe378,
200 RXY_LLGC = 0xe390,
201 RXY_LLGF = 0xe316,
202 RXY_LLGH = 0xe391,
203 RXY_LMG = 0xeb04,
204 RXY_LRV = 0xe31e,
205 RXY_LRVG = 0xe30f,
206 RXY_LRVH = 0xe31f,
207 RXY_LY = 0xe358,
208 RXY_STCY = 0xe372,
209 RXY_STG = 0xe324,
210 RXY_STHY = 0xe370,
211 RXY_STMG = 0xeb24,
212 RXY_STRV = 0xe33e,
213 RXY_STRVG = 0xe32f,
214 RXY_STRVH = 0xe33f,
215 RXY_STY = 0xe350,
216
217 RX_A = 0x5a,
218 RX_C = 0x59,
219 RX_L = 0x58,
220 RX_LA = 0x41,
221 RX_LH = 0x48,
222 RX_ST = 0x50,
223 RX_STC = 0x42,
224 RX_STH = 0x40,
225} S390Opcode;
226
227#define LD_SIGNED 0x04
228#define LD_UINT8 0x00
229#define LD_INT8 (LD_UINT8 | LD_SIGNED)
230#define LD_UINT16 0x01
231#define LD_INT16 (LD_UINT16 | LD_SIGNED)
232#define LD_UINT32 0x02
233#define LD_INT32 (LD_UINT32 | LD_SIGNED)
234#define LD_UINT64 0x03
235#define LD_INT64 (LD_UINT64 | LD_SIGNED)
236
237#ifndef NDEBUG
238static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
239 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
240 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
241};
242#endif
243
244/* Since R6 is a potential argument register, choose it last of the
245 call-saved registers. Likewise prefer the call-clobbered registers
246 in reverse order to maximize the chance of avoiding the arguments. */
247static const int tcg_target_reg_alloc_order[] = {
248 TCG_REG_R13,
249 TCG_REG_R12,
250 TCG_REG_R11,
251 TCG_REG_R10,
252 TCG_REG_R9,
253 TCG_REG_R8,
254 TCG_REG_R7,
255 TCG_REG_R6,
256 TCG_REG_R14,
257 TCG_REG_R0,
258 TCG_REG_R1,
259 TCG_REG_R5,
260 TCG_REG_R4,
261 TCG_REG_R3,
262 TCG_REG_R2,
263};
264
265static const int tcg_target_call_iarg_regs[] = {
266 TCG_REG_R2,
267 TCG_REG_R3,
268 TCG_REG_R4,
269 TCG_REG_R5,
270 TCG_REG_R6,
271};
272
273static const int tcg_target_call_oarg_regs[] = {
274 TCG_REG_R2,
275};
276
277#define S390_CC_EQ 8
278#define S390_CC_LT 4
279#define S390_CC_GT 2
280#define S390_CC_OV 1
281#define S390_CC_NE (S390_CC_LT | S390_CC_GT)
282#define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
283#define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
284#define S390_CC_NEVER 0
285#define S390_CC_ALWAYS 15
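/* These values are branch-mask bits as used by BRC/BRCL/BCR: 8, 4, 2 and 1
   select condition codes 0, 1, 2 and 3 respectively, so composite
   conditions are simply OR-ed masks (e.g. NE = LT | GT). */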
286
287/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
288static const uint8_t tcg_cond_to_s390_cond[] = {
289 [TCG_COND_EQ] = S390_CC_EQ,
290 [TCG_COND_NE] = S390_CC_NE,
291 [TCG_COND_LT] = S390_CC_LT,
292 [TCG_COND_LE] = S390_CC_LE,
293 [TCG_COND_GT] = S390_CC_GT,
294 [TCG_COND_GE] = S390_CC_GE,
295 [TCG_COND_LTU] = S390_CC_LT,
296 [TCG_COND_LEU] = S390_CC_LE,
297 [TCG_COND_GTU] = S390_CC_GT,
298 [TCG_COND_GEU] = S390_CC_GE,
299};
300
301/* Condition codes that result from a LOAD AND TEST. Here, we have no
302 unsigned instruction variation, however since the test is vs zero we
303 can re-map the outcomes appropriately. */
304static const uint8_t tcg_cond_to_ltr_cond[] = {
305 [TCG_COND_EQ] = S390_CC_EQ,
306 [TCG_COND_NE] = S390_CC_NE,
307 [TCG_COND_LT] = S390_CC_LT,
308 [TCG_COND_LE] = S390_CC_LE,
309 [TCG_COND_GT] = S390_CC_GT,
310 [TCG_COND_GE] = S390_CC_GE,
311 [TCG_COND_LTU] = S390_CC_NEVER,
312 [TCG_COND_LEU] = S390_CC_EQ,
313 [TCG_COND_GTU] = S390_CC_NE,
314 [TCG_COND_GEU] = S390_CC_ALWAYS,
315};
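/* For illustration: against zero, LTU can never be true and GEU is always
   true, while LEU degenerates to EQ and GTU to NE -- hence the remapping
   in the table above. */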
316
317#ifdef CONFIG_SOFTMMU
318
319#include "exec/softmmu_defs.h"
320
321/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
322 int mmu_idx) */
323static const void * const qemu_ld_helpers[4] = {
324 helper_ldb_mmu,
325 helper_ldw_mmu,
326 helper_ldl_mmu,
327 helper_ldq_mmu,
328};
329
330/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
331 uintxx_t val, int mmu_idx) */
332static const void * const qemu_st_helpers[4] = {
333 helper_stb_mmu,
334 helper_stw_mmu,
335 helper_stl_mmu,
336 helper_stq_mmu,
337};
338#endif
339
340static uint8_t *tb_ret_addr;
341
342/* A list of relevant facilities used by this translator. Some of these
343 are required for proper operation, and these are checked at startup. */
344
345#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
346#define FACILITY_LONG_DISP (1ULL << (63 - 18))
347#define FACILITY_EXT_IMM (1ULL << (63 - 21))
348#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
349#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
350
351static uint64_t facilities;
352
353static void patch_reloc(uint8_t *code_ptr, int type,
354 tcg_target_long value, tcg_target_long addend)
355{
356 tcg_target_long code_ptr_tl = (tcg_target_long)code_ptr;
357 tcg_target_long pcrel2;
358
359 /* ??? Not the usual definition of "addend". */
360 pcrel2 = (value - (code_ptr_tl + addend)) >> 1;
361
362 switch (type) {
363 case R_390_PC16DBL:
364 assert(pcrel2 == (int16_t)pcrel2);
365 *(int16_t *)code_ptr = pcrel2;
366 break;
367 case R_390_PC32DBL:
368 assert(pcrel2 == (int32_t)pcrel2);
369 *(int32_t *)code_ptr = pcrel2;
370 break;
371 default:
372 tcg_abort();
373 break;
374 }
375}
376
377/* parse target specific constraints */
378static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
379{
380 const char *ct_str = *pct_str;
381
382 switch (ct_str[0]) {
383 case 'r': /* all registers */
384 ct->ct |= TCG_CT_REG;
385 tcg_regset_set32(ct->u.regs, 0, 0xffff);
386 break;
387 case 'R': /* not R0 */
388 ct->ct |= TCG_CT_REG;
389 tcg_regset_set32(ct->u.regs, 0, 0xffff);
390 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
391 break;
392 case 'L': /* qemu_ld/st constraint */
393 ct->ct |= TCG_CT_REG;
394 tcg_regset_set32(ct->u.regs, 0, 0xffff);
395 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
396 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
397 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
398 break;
399 case 'a': /* force R2 for division */
400 ct->ct |= TCG_CT_REG;
401 tcg_regset_clear(ct->u.regs);
402 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
403 break;
404 case 'b': /* force R3 for division */
405 ct->ct |= TCG_CT_REG;
406 tcg_regset_clear(ct->u.regs);
407 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
408 break;
409 case 'W': /* force 32-bit ("word") immediate */
410 ct->ct |= TCG_CT_CONST_32;
411 break;
412 case 'K':
413 ct->ct |= TCG_CT_CONST_MULI;
414 break;
415 case 'O':
416 ct->ct |= TCG_CT_CONST_ORI;
417 break;
418 case 'X':
419 ct->ct |= TCG_CT_CONST_XORI;
420 break;
421 case 'C':
422 ct->ct |= TCG_CT_CONST_CMPI;
423 break;
424 default:
425 return -1;
426 }
427 ct_str++;
428 *pct_str = ct_str;
429
430 return 0;
431}
432
433/* Immediates to be used with logical OR. This is an optimization only,
434 since a full 64-bit immediate OR can always be performed with 4 sequential
435 OI[LH][LH] instructions. What we're looking for is immediates that we
436 can load efficiently, and the immediate load plus the reg-reg OR is
437 smaller than the sequential OI's. */
438
439static int tcg_match_ori(int ct, tcg_target_long val)
440{
441 if (facilities & FACILITY_EXT_IMM) {
442 if (ct & TCG_CT_CONST_32) {
443 /* All 32-bit ORs can be performed with 1 48-bit insn. */
444 return 1;
445 }
446 }
447
448 /* Look for negative values. These are best to load with LGHI. */
449 if (val < 0) {
450 if (val == (int16_t)val) {
451 return 0;
452 }
453 if (facilities & FACILITY_EXT_IMM) {
454 if (val == (int32_t)val) {
455 return 0;
456 }
457 }
458 }
459
460 return 1;
461}
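/* Example (illustration only): val = -2 is rejected above, since loading it
   with LGHI plus a reg-reg OR costs 8 bytes, whereas OR-ing it in as
   immediates would need OILF+OIHF or all four OI[LH][LH] insns. */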
462
463/* Immediates to be used with logical XOR. This is almost, but not quite,
464 only an optimization. XOR with immediate is only supported with the
465 extended-immediate facility. That said, there are a few patterns for
466 which it is better to load the value into a register first. */
467
468static int tcg_match_xori(int ct, tcg_target_long val)
469{
470 if ((facilities & FACILITY_EXT_IMM) == 0) {
471 return 0;
472 }
473
474 if (ct & TCG_CT_CONST_32) {
475 /* All 32-bit XORs can be performed with 1 48-bit insn. */
476 return 1;
477 }
478
479 /* Look for negative values. These are best to load with LGHI. */
480 if (val < 0 && val == (int32_t)val) {
481 return 0;
482 }
483
484 return 1;
485}
486
487/* Immediates to be used with comparisons. */
488
489static int tcg_match_cmpi(int ct, tcg_target_long val)
490{
491 if (facilities & FACILITY_EXT_IMM) {
492 /* The COMPARE IMMEDIATE instruction is available. */
493 if (ct & TCG_CT_CONST_32) {
494 /* We have a 32-bit immediate and can compare against anything. */
495 return 1;
496 } else {
497 /* ??? We have no insight here into whether the comparison is
498 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
499 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
500 a 32-bit unsigned immediate. If we were to use the (semi)
501 obvious "val == (int32_t)val" we would be enabling unsigned
502 comparisons vs very large numbers. The only solution is to
503 take the intersection of the ranges. */
504 /* ??? Another possible solution is to simply lie and allow all
505 constants here and force the out-of-range values into a temp
506 register in tgen_cmp when we have knowledge of the actual
507 comparison code in use. */
508 return val >= 0 && val <= 0x7fffffff;
509 }
510 } else {
511 /* Only the LOAD AND TEST instruction is available. */
512 return val == 0;
513 }
514}
515
516/* Test if a constant matches the constraint. */
517static int tcg_target_const_match(tcg_target_long val,
518 const TCGArgConstraint *arg_ct)
519{
520 int ct = arg_ct->ct;
521
522 if (ct & TCG_CT_CONST) {
523 return 1;
524 }
525
526 /* Handle the modifiers. */
527 if (ct & TCG_CT_CONST_32) {
528 val = (int32_t)val;
529 }
530
531 /* The following are mutually exclusive. */
532 if (ct & TCG_CT_CONST_MULI) {
533 /* Immediates that may be used with multiply. If we have the
534 general-instruction-extensions, then we have MULTIPLY SINGLE
535 IMMEDIATE with a signed 32-bit, otherwise we have only
536 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
537 if (facilities & FACILITY_GEN_INST_EXT) {
538 return val == (int32_t)val;
539 } else {
540 return val == (int16_t)val;
541 }
542 } else if (ct & TCG_CT_CONST_ORI) {
543 return tcg_match_ori(ct, val);
544 } else if (ct & TCG_CT_CONST_XORI) {
545 return tcg_match_xori(ct, val);
546 } else if (ct & TCG_CT_CONST_CMPI) {
547 return tcg_match_cmpi(ct, val);
548 }
549
550 return 0;
551}
552
553/* Emit instructions according to the given instruction format. */
554
555static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
556{
557 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
558}
559
560static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
561 TCGReg r1, TCGReg r2)
562{
563 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
564}
565
566static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
567 TCGReg r1, TCGReg r2, int m3)
568{
569 tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
570}
571
572static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
573{
574 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
575}
576
577static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
578{
579 tcg_out16(s, op | (r1 << 4));
580 tcg_out32(s, i2);
581}
582
583static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
584 TCGReg b2, TCGReg r3, int disp)
585{
586 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
587 | (disp & 0xfff));
588}
589
590static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
591 TCGReg b2, TCGReg r3, int disp)
592{
593 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
594 tcg_out32(s, (op & 0xff) | (b2 << 28)
595 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
596}
597
598#define tcg_out_insn_RX tcg_out_insn_RS
599#define tcg_out_insn_RXY tcg_out_insn_RSY
600
601/* Emit an opcode with "type-checking" of the format. */
602#define tcg_out_insn(S, FMT, OP, ...) \
603 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
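/* For example (illustration only), tcg_out_insn(s, RI, LGHI, ret, val)
   expands to tcg_out_insn_RI(s, RI_LGHI, ret, val). */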
604
605
606/* emit 64-bit shifts */
607static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
608 TCGReg src, TCGReg sh_reg, int sh_imm)
609{
610 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
611}
612
613/* emit 32-bit shifts */
614static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
615 TCGReg sh_reg, int sh_imm)
616{
617 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
618}
619
620static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
621{
622 if (src != dst) {
623 if (type == TCG_TYPE_I32) {
624 tcg_out_insn(s, RR, LR, dst, src);
625 } else {
626 tcg_out_insn(s, RRE, LGR, dst, src);
627 }
628 }
629}
630
631/* load a register with an immediate value */
632static void tcg_out_movi(TCGContext *s, TCGType type,
633 TCGReg ret, tcg_target_long sval)
634{
635 static const S390Opcode lli_insns[4] = {
636 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
637 };
638
639 tcg_target_ulong uval = sval;
640 int i;
641
642 if (type == TCG_TYPE_I32) {
643 uval = (uint32_t)sval;
644 sval = (int32_t)sval;
645 }
646
647 /* Try all 32-bit insns that can load it in one go. */
648 if (sval >= -0x8000 && sval < 0x8000) {
649 tcg_out_insn(s, RI, LGHI, ret, sval);
650 return;
651 }
652
653 for (i = 0; i < 4; i++) {
654 tcg_target_long mask = 0xffffull << i*16;
655 if ((uval & mask) == uval) {
656 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
657 return;
658 }
659 }
660
661 /* Try all 48-bit insns that can load it in one go. */
662 if (facilities & FACILITY_EXT_IMM) {
663 if (sval == (int32_t)sval) {
664 tcg_out_insn(s, RIL, LGFI, ret, sval);
665 return;
666 }
667 if (uval <= 0xffffffff) {
668 tcg_out_insn(s, RIL, LLILF, ret, uval);
669 return;
670 }
671 if ((uval & 0xffffffff) == 0) {
672 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
673 return;
674 }
675 }
676
677 /* Try for PC-relative address load. */
678 if ((sval & 1) == 0) {
679 intptr_t off = (sval - (intptr_t)s->code_ptr) >> 1;
680 if (off == (int32_t)off) {
681 tcg_out_insn(s, RIL, LARL, ret, off);
682 return;
683 }
684 }
685
686 /* If extended immediates are not present, then we may have to issue
687 several instructions to load the low 32 bits. */
688 if (!(facilities & FACILITY_EXT_IMM)) {
689 /* A 32-bit unsigned value can be loaded in 2 insns. And given
690 that the lli_insns loop above did not succeed, we know that
691 both insns are required. */
692 if (uval <= 0xffffffff) {
693 tcg_out_insn(s, RI, LLILL, ret, uval);
694 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
695 return;
696 }
697
698 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
699 We first want to make sure that all the high bits get set. With
700 luck the low 16-bits can be considered negative to perform that for
701 free, otherwise we load an explicit -1. */
702 if (sval >> 31 >> 1 == -1) {
703 if (uval & 0x8000) {
704 tcg_out_insn(s, RI, LGHI, ret, uval);
705 } else {
706 tcg_out_insn(s, RI, LGHI, ret, -1);
707 tcg_out_insn(s, RI, IILL, ret, uval);
708 }
709 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
710 return;
711 }
712 }
713
714 /* If we get here, both the high and low parts have non-zero bits. */
715
716 /* Recurse to load the lower 32-bits. */
717 tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
718
719 /* Insert data into the high 32-bits. */
720 uval = uval >> 31 >> 1;
721 if (facilities & FACILITY_EXT_IMM) {
722 if (uval < 0x10000) {
723 tcg_out_insn(s, RI, IIHL, ret, uval);
724 } else if ((uval & 0xffff) == 0) {
725 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
726 } else {
727 tcg_out_insn(s, RIL, IIHF, ret, uval);
728 }
729 } else {
730 if (uval & 0xffff) {
731 tcg_out_insn(s, RI, IIHL, ret, uval);
732 }
733 if (uval & 0xffff0000) {
734 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
735 }
736 }
737}
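/* Worked example (illustration only): loading 0x0000000100001234, assuming
   the PC-relative LARL case does not apply, recurses to emit LGHI for the
   low half (0x1234) and then inserts the high half with IIHL 0x0001. */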
738
739
740/* Emit a load/store type instruction. Inputs are:
741 DATA: The register to be loaded or stored.
742 BASE+OFS: The effective address.
743 OPC_RX: If the operation has an RX format opcode (e.g. STC), otherwise 0.
744 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
745
746static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
747 TCGReg data, TCGReg base, TCGReg index,
748 tcg_target_long ofs)
749{
750 if (ofs < -0x80000 || ofs >= 0x80000) {
751 /* Combine the low 16 bits of the offset with the actual load insn;
752 the high 48 bits must come from an immediate load. */
753 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs & ~0xffff);
754 ofs &= 0xffff;
755
756 /* If we were already given an index register, add it in. */
757 if (index != TCG_REG_NONE) {
758 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
759 }
760 index = TCG_TMP0;
761 }
762
763 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
764 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
765 } else {
766 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
767 }
768}
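/* Example (illustration only): ofs = 0x123456 does not fit the signed 20-bit
   RXY displacement, so the code above loads 0x120000 into TCG_TMP0 as the
   index and keeps 0x3456 as the displacement of the actual access. */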
769
770
771/* load data without address translation or endianness conversion */
772static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
773 TCGReg base, tcg_target_long ofs)
774{
775 if (type == TCG_TYPE_I32) {
776 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
777 } else {
778 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
779 }
780}
781
782static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
783 TCGReg base, tcg_target_long ofs)
784{
785 if (type == TCG_TYPE_I32) {
786 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
787 } else {
788 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
789 }
790}
791
792/* load data from an absolute host address */
793static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
794{
795 tcg_target_long addr = (tcg_target_long)abs;
796
797 if (facilities & FACILITY_GEN_INST_EXT) {
798 tcg_target_long disp = (addr - (tcg_target_long)s->code_ptr) >> 1;
799 if (disp == (int32_t)disp) {
800 if (type == TCG_TYPE_I32) {
801 tcg_out_insn(s, RIL, LRL, dest, disp);
802 } else {
803 tcg_out_insn(s, RIL, LGRL, dest, disp);
804 }
805 return;
806 }
807 }
808
809 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
810 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
811}
812
813static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
814 int msb, int lsb, int ofs, int z)
815{
816 /* Format RIE-f */
817 tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
818 tcg_out16(s, (msb << 8) | (z << 7) | lsb);
819 tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
820}
821
822static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
823{
824 if (facilities & FACILITY_EXT_IMM) {
825 tcg_out_insn(s, RRE, LGBR, dest, src);
826 return;
827 }
828
829 if (type == TCG_TYPE_I32) {
830 if (dest == src) {
831 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
832 } else {
833 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
834 }
835 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
836 } else {
837 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
838 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
839 }
840}
841
842static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
843{
844 if (facilities & FACILITY_EXT_IMM) {
845 tcg_out_insn(s, RRE, LLGCR, dest, src);
846 return;
847 }
848
849 if (dest == src) {
850 tcg_out_movi(s, type, TCG_TMP0, 0xff);
851 src = TCG_TMP0;
852 } else {
853 tcg_out_movi(s, type, dest, 0xff);
854 }
855 if (type == TCG_TYPE_I32) {
856 tcg_out_insn(s, RR, NR, dest, src);
857 } else {
858 tcg_out_insn(s, RRE, NGR, dest, src);
859 }
860}
861
862static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
863{
864 if (facilities & FACILITY_EXT_IMM) {
865 tcg_out_insn(s, RRE, LGHR, dest, src);
866 return;
867 }
868
869 if (type == TCG_TYPE_I32) {
870 if (dest == src) {
871 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
872 } else {
873 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
874 }
875 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
876 } else {
877 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
878 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
879 }
880}
881
882static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
883{
884 if (facilities & FACILITY_EXT_IMM) {
885 tcg_out_insn(s, RRE, LLGHR, dest, src);
886 return;
887 }
888
889 if (dest == src) {
890 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
891 src = TCG_TMP0;
892 } else {
893 tcg_out_movi(s, type, dest, 0xffff);
894 }
895 if (type == TCG_TYPE_I32) {
896 tcg_out_insn(s, RR, NR, dest, src);
897 } else {
898 tcg_out_insn(s, RRE, NGR, dest, src);
899 }
900}
901
902static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
903{
904 tcg_out_insn(s, RRE, LGFR, dest, src);
905}
906
907static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
908{
909 tcg_out_insn(s, RRE, LLGFR, dest, src);
910}
911
912/* Accept bit patterns like these:
913 0....01....1
914 1....10....0
915 1..10..01..1
916 0..01..10..0
917 Copied from gcc sources. */
918static inline bool risbg_mask(uint64_t c)
919{
920 uint64_t lsb;
921 /* We don't change the number of transitions by inverting,
922 so make sure we start with the LSB zero. */
923 if (c & 1) {
924 c = ~c;
925 }
926 /* Reject all zeros or all ones. */
927 if (c == 0) {
928 return false;
929 }
930 /* Find the first transition. */
931 lsb = c & -c;
932 /* Invert to look for a second transition. */
933 c = ~c;
934 /* Erase the first transition. */
935 c &= -lsb;
936 /* Find the second transition, if any. */
937 lsb = c & -c;
938 /* Match if all the bits are 1's, or if c is zero. */
939 return c == -lsb;
940}
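/* For illustration: 0x00000000000ffff0 (0..01..10..0) and 0xffff00000000ffff
   (a run wrapping around the word boundary) are accepted, while
   0x0000ff00ff000000 has more than two transitions and is rejected. */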
941
942static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
943{
944 static const S390Opcode ni_insns[4] = {
945 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
946 };
947 static const S390Opcode nif_insns[2] = {
948 RIL_NILF, RIL_NIHF
949 };
950 uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
951 int i;
952
953 /* Look for the zero-extensions. */
954 if ((val & valid) == 0xffffffff) {
955 tgen_ext32u(s, dest, dest);
956 return;
957 }
958 if (facilities & FACILITY_EXT_IMM) {
959 if ((val & valid) == 0xff) {
960 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
961 return;
962 }
963 if ((val & valid) == 0xffff) {
964 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
965 return;
966 }
967 }
968
969 /* Try all 32-bit insns that can perform it in one go. */
970 for (i = 0; i < 4; i++) {
971 tcg_target_ulong mask = ~(0xffffull << i*16);
972 if (((val | ~valid) & mask) == mask) {
973 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
974 return;
975 }
976 }
977
978 /* Try all 48-bit insns that can perform it in one go. */
979 if (facilities & FACILITY_EXT_IMM) {
980 for (i = 0; i < 2; i++) {
981 tcg_target_ulong mask = ~(0xffffffffull << i*32);
982 if (((val | ~valid) & mask) == mask) {
983 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
984 return;
985 }
986 }
987 }
988 if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
989 int msb, lsb;
990 if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
991 /* Achieve wraparound by swapping msb and lsb. */
992 msb = 63 - ctz64(~val);
993 lsb = clz64(~val) + 1;
994 } else {
995 msb = clz64(val);
996 lsb = 63 - ctz64(val);
997 }
998 tcg_out_risbg(s, dest, dest, msb, lsb, 0, 1);
999 return;
1000 }
1001
1002 /* Fall back to loading the constant. */
1003 tcg_out_movi(s, type, TCG_TMP0, val);
1004 if (type == TCG_TYPE_I32) {
1005 tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
1006 } else {
1007 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
1008 }
1009}
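/* Example (illustration only): a 64-bit AND with 0x00000000ffffff00 on a
   host with the general-instruction-extensions facility matches none of the
   immediate forms above but passes risbg_mask(), so it is emitted as a
   single RISBG selecting bits 32-55 (IBM numbering) with the zero flag set. */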
1010
1011static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1012{
1013 static const S390Opcode oi_insns[4] = {
1014 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1015 };
1016 static const S390Opcode nif_insns[2] = {
1017 RIL_OILF, RIL_OIHF
1018 };
1019
1020 int i;
1021
1022 /* Look for no-op. */
1023 if (val == 0) {
1024 return;
1025 }
1026
1027 if (facilities & FACILITY_EXT_IMM) {
1028 /* Try all 32-bit insns that can perform it in one go. */
1029 for (i = 0; i < 4; i++) {
1030 tcg_target_ulong mask = (0xffffull << i*16);
1031 if ((val & mask) != 0 && (val & ~mask) == 0) {
1032 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1033 return;
1034 }
1035 }
1036
1037 /* Try all 48-bit insns that can perform it in one go. */
1038 for (i = 0; i < 2; i++) {
1039 tcg_target_ulong mask = (0xffffffffull << i*32);
1040 if ((val & mask) != 0 && (val & ~mask) == 0) {
1041 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1042 return;
1043 }
1044 }
1045
1046 /* Perform the OR via sequential modifications to the high and
1047 low parts. Do this via recursion to handle 16-bit vs 32-bit
1048 masks in each half. */
1049 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1050 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1051 } else {
1052 /* With no extended-immediate facility, we don't need to be so
1053 clever. Just iterate over the insns and mask in the constant. */
1054 for (i = 0; i < 4; i++) {
1055 tcg_target_ulong mask = (0xffffull << i*16);
1056 if ((val & mask) != 0) {
1057 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1058 }
1059 }
1060 }
1061}
1062
1063static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1064{
1065 /* Perform the xor by parts. */
1066 if (val & 0xffffffff) {
1067 tcg_out_insn(s, RIL, XILF, dest, val);
1068 }
1069 if (val > 0xffffffff) {
1070 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1071 }
1072}
1073
1074static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1075 TCGArg c2, int c2const)
1076{
1077 bool is_unsigned = is_unsigned_cond(c);
1078 if (c2const) {
1079 if (c2 == 0) {
1080 if (type == TCG_TYPE_I32) {
1081 tcg_out_insn(s, RR, LTR, r1, r1);
1082 } else {
1083 tcg_out_insn(s, RRE, LTGR, r1, r1);
1084 }
1085 return tcg_cond_to_ltr_cond[c];
1086 } else {
1087 if (is_unsigned) {
1088 if (type == TCG_TYPE_I32) {
1089 tcg_out_insn(s, RIL, CLFI, r1, c2);
1090 } else {
1091 tcg_out_insn(s, RIL, CLGFI, r1, c2);
1092 }
1093 } else {
1094 if (type == TCG_TYPE_I32) {
1095 tcg_out_insn(s, RIL, CFI, r1, c2);
1096 } else {
1097 tcg_out_insn(s, RIL, CGFI, r1, c2);
1098 }
1099 }
1100 }
1101 } else {
1102 if (is_unsigned) {
1103 if (type == TCG_TYPE_I32) {
1104 tcg_out_insn(s, RR, CLR, r1, c2);
1105 } else {
1106 tcg_out_insn(s, RRE, CLGR, r1, c2);
1107 }
1108 } else {
1109 if (type == TCG_TYPE_I32) {
1110 tcg_out_insn(s, RR, CR, r1, c2);
1111 } else {
1112 tcg_out_insn(s, RRE, CGR, r1, c2);
1113 }
1114 }
1115 }
1116 return tcg_cond_to_s390_cond[c];
1117}
1118
1119static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
1120 TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
1121{
1122 int cc = tgen_cmp(s, type, c, c1, c2, c2const);
1123
1124 /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over: */
1125 tcg_out_movi(s, type, dest, 1);
1126 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1127 tcg_out_movi(s, type, dest, 0);
1128}
1129
1130static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
1131 TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
1132{
1133 int cc;
1134 if (facilities & FACILITY_LOAD_ON_COND) {
1135 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1136 tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
1137 } else {
1138 c = tcg_invert_cond(c);
1139 cc = tgen_cmp(s, type, c, c1, c2, c2const);
1140
1141 /* Emit: if (cc) goto over; dest = r3; over: */
1142 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1143 tcg_out_insn(s, RRE, LGR, dest, r3);
1144 }
1145}
1146
1147bool tcg_target_deposit_valid(int ofs, int len)
1148{
1149 return (facilities & FACILITY_GEN_INST_EXT) != 0;
1150}
1151
1152static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
1153 int ofs, int len)
1154{
1155 int lsb = (63 - ofs);
1156 int msb = lsb - (len - 1);
1157 tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
1158}
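/* Example (illustration only): ofs = 8, len = 16 gives msb = 40, lsb = 55;
   RISBG rotates src left by 8 and inserts its low 16 bits into bits 8..23
   of dest, leaving the remaining bits of dest untouched. */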
1159
1160static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
1161{
1162 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1163 if (off > -0x8000 && off < 0x7fff) {
1164 tcg_out_insn(s, RI, BRC, cc, off);
1165 } else if (off == (int32_t)off) {
1166 tcg_out_insn(s, RIL, BRCL, cc, off);
1167 } else {
1168 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1169 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1170 }
1171}
1172
1173static void tgen_branch(TCGContext *s, int cc, int labelno)
1174{
1175 TCGLabel* l = &s->labels[labelno];
1176 if (l->has_value) {
1177 tgen_gotoi(s, cc, l->u.value);
1178 } else if (USE_LONG_BRANCHES) {
1179 tcg_out16(s, RIL_BRCL | (cc << 4));
1180 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
1181 s->code_ptr += 4;
1182 } else {
1183 tcg_out16(s, RI_BRC | (cc << 4));
1184 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
1185 s->code_ptr += 2;
1186 }
1187}
1188
1189static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1190 TCGReg r1, TCGReg r2, int labelno)
1191{
1192 TCGLabel* l = &s->labels[labelno];
1193 tcg_target_long off;
1194
1195 if (l->has_value) {
1196 off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
1197 } else {
1198 /* We need to keep the offset unchanged for retranslation. */
1199 off = ((int16_t *)s->code_ptr)[1];
1200 tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
1201 }
1202
1203 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1204 tcg_out16(s, off);
1205 tcg_out16(s, cc << 12 | (opc & 0xff));
1206}
1207
1208static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1209 TCGReg r1, int i2, int labelno)
1210{
1211 TCGLabel* l = &s->labels[labelno];
1212 tcg_target_long off;
1213
1214 if (l->has_value) {
1215 off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
1216 } else {
1217 /* We need to keep the offset unchanged for retranslation. */
1218 off = ((int16_t *)s->code_ptr)[1];
1219 tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
1220 }
1221
1222 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1223 tcg_out16(s, off);
1224 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1225}
1226
1227static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1228 TCGReg r1, TCGArg c2, int c2const, int labelno)
1229{
1230 int cc;
1231
1232 if (facilities & FACILITY_GEN_INST_EXT) {
1233 bool is_unsigned = (c > TCG_COND_GT);
1234 bool in_range;
1235 S390Opcode opc;
1236
1237 cc = tcg_cond_to_s390_cond[c];
1238
1239 if (!c2const) {
1240 opc = (type == TCG_TYPE_I32
1241 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1242 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
1243 tgen_compare_branch(s, opc, cc, r1, c2, labelno);
1244 return;
1245 }
1246
1247 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1248 If the immediate we've been given does not fit that range, we'll
1249 fall back to separate compare and branch instructions using the
1250 larger comparison range afforded by COMPARE IMMEDIATE. */
1251 if (type == TCG_TYPE_I32) {
1252 if (is_unsigned) {
1253 opc = RIE_CLIJ;
1254 in_range = (uint32_t)c2 == (uint8_t)c2;
1255 } else {
1256 opc = RIE_CIJ;
1257 in_range = (int32_t)c2 == (int8_t)c2;
1258 }
1259 } else {
1260 if (is_unsigned) {
1261 opc = RIE_CLGIJ;
1262 in_range = (uint64_t)c2 == (uint8_t)c2;
1263 } else {
1264 opc = RIE_CGIJ;
1265 in_range = (int64_t)c2 == (int8_t)c2;
1266 }
1267 }
1268 if (in_range) {
1269 tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
1270 return;
1271 }
1272 }
1273
1274 cc = tgen_cmp(s, type, c, r1, c2, c2const);
1275 tgen_branch(s, cc, labelno);
1276}
1277
1278static void tgen_calli(TCGContext *s, tcg_target_long dest)
1279{
1280 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1281 if (off == (int32_t)off) {
1282 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1283 } else {
1284 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1285 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1286 }
1287}
1288
1289static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
1290 TCGReg base, TCGReg index, int disp)
1291{
1292#ifdef TARGET_WORDS_BIGENDIAN
1293 const int bswap = 0;
1294#else
1295 const int bswap = 1;
1296#endif
1297 switch (opc) {
1298 case LD_UINT8:
1299 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1300 break;
1301 case LD_INT8:
1302 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1303 break;
1304 case LD_UINT16:
1305 if (bswap) {
1306 /* swapped unsigned halfword load with upper bits zeroed */
1307 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1308 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1309 } else {
1310 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1311 }
1312 break;
1313 case LD_INT16:
1314 if (bswap) {
1315 /* swapped sign-extended halfword load */
1316 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1317 tgen_ext16s(s, TCG_TYPE_I64, data, data);
1318 } else {
1319 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1320 }
1321 break;
1322 case LD_UINT32:
1323 if (bswap) {
1324 /* swapped unsigned int load with upper bits zeroed */
1325 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1326 tgen_ext32u(s, data, data);
1327 } else {
1328 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1329 }
1330 break;
1331 case LD_INT32:
1332 if (bswap) {
1333 /* swapped sign-extended int load */
1334 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1335 tgen_ext32s(s, data, data);
1336 } else {
1337 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1338 }
1339 break;
1340 case LD_UINT64:
1341 if (bswap) {
1342 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
1343 } else {
1344 tcg_out_insn(s, RXY, LG, data, base, index, disp);
1345 }
1346 break;
1347 default:
1348 tcg_abort();
1349 }
1350}
1351
1352static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
1353 TCGReg base, TCGReg index, int disp)
1354{
1355#ifdef TARGET_WORDS_BIGENDIAN
1356 const int bswap = 0;
1357#else
1358 const int bswap = 1;
1359#endif
1360 switch (opc) {
1361 case LD_UINT8:
1362 if (disp >= 0 && disp < 0x1000) {
1363 tcg_out_insn(s, RX, STC, data, base, index, disp);
1364 } else {
1365 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1366 }
1367 break;
1368 case LD_UINT16:
1369 if (bswap) {
1370 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1371 } else if (disp >= 0 && disp < 0x1000) {
1372 tcg_out_insn(s, RX, STH, data, base, index, disp);
1373 } else {
1374 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1375 }
1376 break;
1377 case LD_UINT32:
1378 if (bswap) {
1379 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1380 } else if (disp >= 0 && disp < 0x1000) {
1381 tcg_out_insn(s, RX, ST, data, base, index, disp);
1382 } else {
1383 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1384 }
1385 break;
1386 case LD_UINT64:
1387 if (bswap) {
1388 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1389 } else {
1390 tcg_out_insn(s, RXY, STG, data, base, index, disp);
1391 }
1392 break;
1393 default:
1394 tcg_abort();
1395 }
1396}
1397
1398#if defined(CONFIG_SOFTMMU)
1399static TCGReg tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
1400 TCGReg addr_reg, int mem_index, int opc,
1401 uint16_t **label2_ptr_p, int is_store)
1402{
1403 const TCGReg arg0 = tcg_target_call_iarg_regs[0];
1404 const TCGReg arg1 = tcg_target_call_iarg_regs[1];
1405 const TCGReg arg2 = tcg_target_call_iarg_regs[2];
1406 const TCGReg arg3 = tcg_target_call_iarg_regs[3];
1407 int s_bits = opc & 3;
1408 uint16_t *label1_ptr;
1409 tcg_target_long ofs;
1410
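    /* Fast-path TLB lookup (descriptive summary): arg2 becomes the byte
       offset of the TLB entry for this address, arg1 is masked down to the
       page while keeping the low s_bits (so unaligned accesses miss), and
       the result is compared against the entry's addr_read/addr_write.  On
       a mismatch we fall through to the slow-path helper call below; on a
       match we branch past it and add the entry's addend to form the host
       address returned in arg1. */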
1411 if (TARGET_LONG_BITS == 32) {
1412 tgen_ext32u(s, arg1, addr_reg);
1413 } else {
1414 tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
1415 }
1416
1417 tcg_out_sh64(s, RSY_SRLG, arg2, addr_reg, TCG_REG_NONE,
1418 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1419
1420 tgen_andi(s, TCG_TYPE_I64, arg1, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
1421 tgen_andi(s, TCG_TYPE_I64, arg2, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1422
1423 if (is_store) {
1424 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
1425 } else {
1426 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1427 }
1428 assert(ofs < 0x80000);
1429
1430 if (TARGET_LONG_BITS == 32) {
1431 tcg_out_mem(s, RX_C, RXY_CY, arg1, arg2, TCG_AREG0, ofs);
1432 } else {
1433 tcg_out_mem(s, 0, RXY_CG, arg1, arg2, TCG_AREG0, ofs);
1434 }
1435
1436 if (TARGET_LONG_BITS == 32) {
1437 tgen_ext32u(s, arg1, addr_reg);
1438 } else {
1439 tcg_out_mov(s, TCG_TYPE_I64, arg1, addr_reg);
1440 }
1441
1442 label1_ptr = (uint16_t*)s->code_ptr;
1443
1444 /* je label1 (offset will be patched in later) */
1445 tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);
1446
1447 /* call load/store helper */
1448 if (is_store) {
1449 /* Make sure to zero-extend the value to the full register
1450 for the calling convention. */
1451 switch (opc) {
1452 case LD_UINT8:
1453 tgen_ext8u(s, TCG_TYPE_I64, arg2, data_reg);
1454 break;
1455 case LD_UINT16:
1456 tgen_ext16u(s, TCG_TYPE_I64, arg2, data_reg);
1457 break;
1458 case LD_UINT32:
1459 tgen_ext32u(s, arg2, data_reg);
1460 break;
1461 case LD_UINT64:
1462 tcg_out_mov(s, TCG_TYPE_I64, arg2, data_reg);
1463 break;
1464 default:
1465 tcg_abort();
1466 }
1467 tcg_out_movi(s, TCG_TYPE_I32, arg3, mem_index);
1468 tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
1469 tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
1470 } else {
1471 tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
1472 tcg_out_mov(s, TCG_TYPE_I64, arg0, TCG_AREG0);
1473 tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);
1474
1475 /* sign extension */
1476 switch (opc) {
1477 case LD_INT8:
1478 tgen_ext8s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1479 break;
1480 case LD_INT16:
1481 tgen_ext16s(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1482 break;
1483 case LD_INT32:
1484 tgen_ext32s(s, data_reg, TCG_REG_R2);
1485 break;
1486 default:
1487 /* unsigned -> just copy */
1488 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
1489 break;
1490 }
1491 }
1492
1493 /* jump to label2 (end) */
1494 *label2_ptr_p = (uint16_t*)s->code_ptr;
1495
1496 tcg_out_insn(s, RI, BRC, S390_CC_ALWAYS, 0);
1497
1498 /* this is label1, patch branch */
1499 *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
1500 (unsigned long)label1_ptr) >> 1;
1501
1502 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1503 assert(ofs < 0x80000);
1504
1505 tcg_out_mem(s, 0, RXY_AG, arg1, arg2, TCG_AREG0, ofs);
1506
1507 return arg1;
1508}
1509
1510static void tcg_finish_qemu_ldst(TCGContext* s, uint16_t *label2_ptr)
1511{
1512 /* patch branch */
1513 *(label2_ptr + 1) = ((unsigned long)s->code_ptr -
1514 (unsigned long)label2_ptr) >> 1;
1515}
1516#else
1517static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1518 TCGReg *index_reg, tcg_target_long *disp)
1519{
1520 if (TARGET_LONG_BITS == 32) {
1521 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1522 *addr_reg = TCG_TMP0;
1523 }
1524 if (GUEST_BASE < 0x80000) {
1525 *index_reg = TCG_REG_NONE;
1526 *disp = GUEST_BASE;
1527 } else {
1528 *index_reg = TCG_GUEST_BASE_REG;
1529 *disp = 0;
1530 }
1531}
1532#endif /* CONFIG_SOFTMMU */
1533
1534/* load data with address translation (if applicable)
1535 and endianness conversion */
1536static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
1537{
1538 TCGReg addr_reg, data_reg;
1539#if defined(CONFIG_SOFTMMU)
1540 int mem_index;
1541 uint16_t *label2_ptr;
1542#else
1543 TCGReg index_reg;
1544 tcg_target_long disp;
1545#endif
1546
1547 data_reg = *args++;
1548 addr_reg = *args++;
1549
1550#if defined(CONFIG_SOFTMMU)
1551 mem_index = *args;
1552
1553 addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1554 opc, &label2_ptr, 0);
1555
1556 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);
1557
1558 tcg_finish_qemu_ldst(s, label2_ptr);
1559#else
1560 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1561 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1562#endif
1563}
1564
1565static void tcg_out_qemu_st(TCGContext* s, const TCGArg* args, int opc)
1566{
1567 TCGReg addr_reg, data_reg;
1568#if defined(CONFIG_SOFTMMU)
1569 int mem_index;
1570 uint16_t *label2_ptr;
1571#else
1572 TCGReg index_reg;
1573 tcg_target_long disp;
1574#endif
1575
1576 data_reg = *args++;
1577 addr_reg = *args++;
1578
1579#if defined(CONFIG_SOFTMMU)
1580 mem_index = *args;
1581
1582 addr_reg = tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1583 opc, &label2_ptr, 1);
1584
1585 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, TCG_REG_NONE, 0);
1586
1587 tcg_finish_qemu_ldst(s, label2_ptr);
1588#else
1589 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1590 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1591#endif
1592}
1593
1594# define OP_32_64(x) \
1595 case glue(glue(INDEX_op_,x),_i32): \
1596 case glue(glue(INDEX_op_,x),_i64)
1597
1598static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1599 const TCGArg *args, const int *const_args)
1600{
1601 S390Opcode op;
1602 TCGArg a0, a1, a2;
1603
1604 switch (opc) {
1605 case INDEX_op_exit_tb:
1606 /* return value */
1607 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
1608 tgen_gotoi(s, S390_CC_ALWAYS, (unsigned long)tb_ret_addr);
1609 break;
1610
1611 case INDEX_op_goto_tb:
1612 if (s->tb_jmp_offset) {
1613 tcg_abort();
1614 } else {
1615 /* load address stored at s->tb_next + args[0] */
1616 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
1617 /* and go there */
1618 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1619 }
1620 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1621 break;
1622
1623 case INDEX_op_call:
1624 if (const_args[0]) {
1625 tgen_calli(s, args[0]);
1626 } else {
1627 tcg_out_insn(s, RR, BASR, TCG_REG_R14, args[0]);
1628 }
1629 break;
1630
1631 case INDEX_op_mov_i32:
1632 tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]);
1633 break;
1634 case INDEX_op_movi_i32:
1635 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1636 break;
1637
1638 OP_32_64(ld8u):
1639 /* ??? LLC (RXY format) is only present with the extended-immediate
1640 facility, whereas LLGC is always present. */
1641 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1642 break;
1643
1644 OP_32_64(ld8s):
1645 /* ??? LB is no smaller than LGB, so no point to using it. */
1646 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1647 break;
1648
1649 OP_32_64(ld16u):
1650 /* ??? LLH (RXY format) is only present with the extended-immediate
1651 facility, whereas LLGH is always present. */
1652 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1653 break;
1654
1655 case INDEX_op_ld16s_i32:
1656 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1657 break;
1658
1659 case INDEX_op_ld_i32:
1660 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1661 break;
1662
1663 OP_32_64(st8):
1664 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1665 TCG_REG_NONE, args[2]);
1666 break;
1667
1668 OP_32_64(st16):
1669 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1670 TCG_REG_NONE, args[2]);
1671 break;
1672
1673 case INDEX_op_st_i32:
1674 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1675 break;
1676
1677 case INDEX_op_add_i32:
1678 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1679 if (const_args[2]) {
1680 do_addi_32:
1681 if (a0 == a1) {
1682 if (a2 == (int16_t)a2) {
1683 tcg_out_insn(s, RI, AHI, a0, a2);
1684 break;
1685 }
1686 if (facilities & FACILITY_EXT_IMM) {
1687 tcg_out_insn(s, RIL, AFI, a0, a2);
1688 break;
1689 }
1690 }
1691 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1692 } else if (a0 == a1) {
1693 tcg_out_insn(s, RR, AR, a0, a2);
1694 } else {
1695 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
1696 }
1697 break;
1698 case INDEX_op_sub_i32:
1699 a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
1700 if (const_args[2]) {
1701 a2 = -a2;
1702 goto do_addi_32;
1703 }
1704 tcg_out_insn(s, RR, SR, args[0], args[2]);
1705 break;
1706
1707 case INDEX_op_and_i32:
1708 if (const_args[2]) {
1709 tgen_andi(s, TCG_TYPE_I32, args[0], args[2]);
1710 } else {
1711 tcg_out_insn(s, RR, NR, args[0], args[2]);
1712 }
1713 break;
1714 case INDEX_op_or_i32:
1715 if (const_args[2]) {
1716 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1717 } else {
1718 tcg_out_insn(s, RR, OR, args[0], args[2]);
1719 }
1720 break;
1721 case INDEX_op_xor_i32:
1722 if (const_args[2]) {
1723 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1724 } else {
1725 tcg_out_insn(s, RR, XR, args[0], args[2]);
1726 }
1727 break;
1728
1729 case INDEX_op_neg_i32:
1730 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1731 break;
1732
1733 case INDEX_op_mul_i32:
1734 if (const_args[2]) {
1735 if ((int32_t)args[2] == (int16_t)args[2]) {
1736 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1737 } else {
1738 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1739 }
1740 } else {
1741 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1742 }
1743 break;
1744
1745 case INDEX_op_div2_i32:
1746 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1747 break;
1748 case INDEX_op_divu2_i32:
1749 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1750 break;
1751
1752 case INDEX_op_shl_i32:
1753 op = RS_SLL;
1754 do_shift32:
1755 if (const_args[2]) {
1756 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1757 } else {
1758 tcg_out_sh32(s, op, args[0], args[2], 0);
1759 }
1760 break;
1761 case INDEX_op_shr_i32:
1762 op = RS_SRL;
1763 goto do_shift32;
1764 case INDEX_op_sar_i32:
1765 op = RS_SRA;
1766 goto do_shift32;
1767
1768 case INDEX_op_rotl_i32:
1769 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1770 if (const_args[2]) {
1771 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1772 } else {
1773 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1774 }
1775 break;
1776 case INDEX_op_rotr_i32:
1777 if (const_args[2]) {
1778 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1779 TCG_REG_NONE, (32 - args[2]) & 31);
1780 } else {
1781 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1782 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1783 }
1784 break;
1785
1786 case INDEX_op_ext8s_i32:
1787 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1788 break;
1789 case INDEX_op_ext16s_i32:
1790 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1791 break;
1792 case INDEX_op_ext8u_i32:
1793 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1794 break;
1795 case INDEX_op_ext16u_i32:
1796 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1797 break;
1798
1799 OP_32_64(bswap16):
1800 /* The TCG bswap definition requires bits 0-47 already be zero.
1801 Thus we don't need the G-type insns to implement bswap16_i64. */
1802 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1803 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
1804 break;
1805 OP_32_64(bswap32):
1806 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1807 break;
1808
1809 case INDEX_op_add2_i32:
1810 /* ??? Make use of ALFI. */
1811 tcg_out_insn(s, RR, ALR, args[0], args[4]);
1812 tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
1813 break;
1814 case INDEX_op_sub2_i32:
1815 /* ??? Make use of SLFI. */
1816 tcg_out_insn(s, RR, SLR, args[0], args[4]);
1817 tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
1818 break;
1819
1820 case INDEX_op_br:
1821 tgen_branch(s, S390_CC_ALWAYS, args[0]);
1822 break;
1823
1824 case INDEX_op_brcond_i32:
1825 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1826 args[1], const_args[1], args[3]);
1827 break;
1828 case INDEX_op_setcond_i32:
1829 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1830 args[2], const_args[2]);
1831 break;
1832 case INDEX_op_movcond_i32:
1833 tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
1834 args[2], const_args[2], args[3]);
1835 break;
1836
1837 case INDEX_op_qemu_ld8u:
1838 tcg_out_qemu_ld(s, args, LD_UINT8);
1839 break;
1840 case INDEX_op_qemu_ld8s:
1841 tcg_out_qemu_ld(s, args, LD_INT8);
1842 break;
1843 case INDEX_op_qemu_ld16u:
1844 tcg_out_qemu_ld(s, args, LD_UINT16);
1845 break;
1846 case INDEX_op_qemu_ld16s:
1847 tcg_out_qemu_ld(s, args, LD_INT16);
1848 break;
1849 case INDEX_op_qemu_ld32:
1850 /* ??? Technically we can use a non-extending instruction. */
1851 tcg_out_qemu_ld(s, args, LD_UINT32);
1852 break;
1853 case INDEX_op_qemu_ld64:
1854 tcg_out_qemu_ld(s, args, LD_UINT64);
1855 break;
1856
1857 case INDEX_op_qemu_st8:
1858 tcg_out_qemu_st(s, args, LD_UINT8);
1859 break;
1860 case INDEX_op_qemu_st16:
1861 tcg_out_qemu_st(s, args, LD_UINT16);
1862 break;
1863 case INDEX_op_qemu_st32:
1864 tcg_out_qemu_st(s, args, LD_UINT32);
1865 break;
1866 case INDEX_op_qemu_st64:
1867 tcg_out_qemu_st(s, args, LD_UINT64);
1868 break;
1869
1870 case INDEX_op_mov_i64:
1871 tcg_out_mov(s, TCG_TYPE_I64, args[0], args[1]);
1872 break;
1873 case INDEX_op_movi_i64:
1874 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1875 break;
1876
1877 case INDEX_op_ld16s_i64:
1878 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1879 break;
1880 case INDEX_op_ld32u_i64:
1881 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1882 break;
1883 case INDEX_op_ld32s_i64:
1884 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1885 break;
1886 case INDEX_op_ld_i64:
1887 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1888 break;
1889
1890 case INDEX_op_st32_i64:
1891 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1892 break;
1893 case INDEX_op_st_i64:
1894 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1895 break;
1896
1897 case INDEX_op_add_i64:
1898 a0 = args[0], a1 = args[1], a2 = args[2];
1899 if (const_args[2]) {
1900 do_addi_64:
1901 if (a0 == a1) {
1902 if (a2 == (int16_t)a2) {
1903 tcg_out_insn(s, RI, AGHI, a0, a2);
1904 break;
1905 }
1906 if (facilities & FACILITY_EXT_IMM) {
1907 if (a2 == (int32_t)a2) {
1908 tcg_out_insn(s, RIL, AGFI, a0, a2);
1909 break;
1910 } else if (a2 == (uint32_t)a2) {
1911 tcg_out_insn(s, RIL, ALGFI, a0, a2);
1912 break;
1913 } else if (-a2 == (uint32_t)-a2) {
1914 tcg_out_insn(s, RIL, SLGFI, a0, -a2);
1915 break;
1916 }
1917 }
1918 }
1919 tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
1920 } else if (a0 == a1) {
1921 tcg_out_insn(s, RRE, AGR, a0, a2);
48bb3750 1922 } else {
0db921e6 1923 tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
48bb3750
RH
1924 }
1925 break;
1926 case INDEX_op_sub_i64:
0db921e6 1927 a0 = args[0], a1 = args[1], a2 = args[2];
48bb3750 1928 if (const_args[2]) {
0db921e6
RH
1929 a2 = -a2;
1930 goto do_addi_64;
48bb3750
RH
1931 } else {
1932 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
1933 }
1934 break;
1935
1936 case INDEX_op_and_i64:
1937 if (const_args[2]) {
07ff7983 1938 tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
48bb3750
RH
1939 } else {
1940 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
1941 }
1942 break;
1943 case INDEX_op_or_i64:
1944 if (const_args[2]) {
1945 tgen64_ori(s, args[0], args[2]);
1946 } else {
1947 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
1948 }
1949 break;
1950 case INDEX_op_xor_i64:
1951 if (const_args[2]) {
1952 tgen64_xori(s, args[0], args[2]);
1953 } else {
1954 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
1955 }
1956 break;
1957
1958 case INDEX_op_neg_i64:
1959 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
1960 break;
1961 case INDEX_op_bswap64_i64:
1962 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
1963 break;
1964
1965 case INDEX_op_mul_i64:
1966 if (const_args[2]) {
1967 if (args[2] == (int16_t)args[2]) {
1968 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
1969 } else {
1970 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
1971 }
1972 } else {
1973 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
1974 }
1975 break;
1976
1977 case INDEX_op_div2_i64:
1978 /* ??? We get an unnecessary sign-extension of the dividend
1979 into R3 with this definition, but since we do in fact always
1980 produce both quotient and remainder, using INDEX_op_div_i64
1981 instead would require jumping through even more hoops. */
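/* Editor's note: DSGR operates on the even/odd register pair named by its
   first operand, here R2/R3: the 64-bit dividend is taken from the odd
   register (R3), the remainder lands in R2 and the quotient in R3, which
   is what the "a"/"b" constraints on div2 in the op-def table encode. */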
1982 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
1983 break;
1984 case INDEX_op_divu2_i64:
1985 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
1986 break;
36017dc6
RH
1987 case INDEX_op_mulu2_i64:
1988 tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
1989 break;
48bb3750
RH
1990
1991 case INDEX_op_shl_i64:
1992 op = RSY_SLLG;
1993 do_shift64:
1994 if (const_args[2]) {
1995 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
1996 } else {
1997 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
1998 }
1999 break;
2000 case INDEX_op_shr_i64:
2001 op = RSY_SRLG;
2002 goto do_shift64;
2003 case INDEX_op_sar_i64:
2004 op = RSY_SRAG;
2005 goto do_shift64;
2006
2007 case INDEX_op_rotl_i64:
2008 if (const_args[2]) {
2009 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2010 TCG_REG_NONE, args[2]);
2011 } else {
2012 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
2013 }
2014 break;
2015 case INDEX_op_rotr_i64:
2016 if (const_args[2]) {
2017 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
2018 TCG_REG_NONE, (64 - args[2]) & 63);
2019 } else {
2020 /* We can use the smaller 32-bit negate because only the
2021 low 6 bits are examined for the rotate. */
2022 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2023 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2024 }
2025 break;
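/* Editor's note: there is no rotate-right instruction, so both paths above
   rotate left by the complement -- (64 - count) & 63 for constants, the
   negated count register otherwise -- which is equivalent because RLLG
   only examines the low six bits of the rotate amount. */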
2026
2027 case INDEX_op_ext8s_i64:
2028 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2029 break;
2030 case INDEX_op_ext16s_i64:
2031 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2032 break;
2033 case INDEX_op_ext32s_i64:
2034 tgen_ext32s(s, args[0], args[1]);
2035 break;
2036 case INDEX_op_ext8u_i64:
2037 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2038 break;
2039 case INDEX_op_ext16u_i64:
2040 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2041 break;
2042 case INDEX_op_ext32u_i64:
2043 tgen_ext32u(s, args[0], args[1]);
2044 break;
2045
3790b918
RH
2046 case INDEX_op_add2_i64:
2047 /* ??? Make use of ALGFI and SLGFI. */
2048 tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
2049 tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
2050 break;
2051 case INDEX_op_sub2_i64:
2052 /* ??? Make use of ALGFI and SLGFI. */
2053 tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
2054 tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
2055 break;
2056
48bb3750
RH
2057 case INDEX_op_brcond_i64:
2058 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2059 args[1], const_args[1], args[3]);
2060 break;
2061 case INDEX_op_setcond_i64:
2062 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2063 args[2], const_args[2]);
2064 break;
96a9f093
RH
2065 case INDEX_op_movcond_i64:
2066 tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
2067 args[2], const_args[2], args[3]);
2068 break;
48bb3750
RH
2069
2070 case INDEX_op_qemu_ld32u:
2071 tcg_out_qemu_ld(s, args, LD_UINT32);
2072 break;
2073 case INDEX_op_qemu_ld32s:
2074 tcg_out_qemu_ld(s, args, LD_INT32);
2075 break;
48bb3750 2076
d5690ea4
RH
2077 OP_32_64(deposit):
2078 tgen_deposit(s, args[0], args[2], args[3], args[4]);
2079 break;
2080
48bb3750
RH
2081 default:
2082 fprintf(stderr, "unimplemented opc 0x%x\n", opc);
2083 tcg_abort();
2084 }
2827822e
AG
2085}
2086
48bb3750
RH
2087static const TCGTargetOpDef s390_op_defs[] = {
2088 { INDEX_op_exit_tb, { } },
2089 { INDEX_op_goto_tb, { } },
2090 { INDEX_op_call, { "ri" } },
48bb3750
RH
2091 { INDEX_op_br, { } },
2092
2093 { INDEX_op_mov_i32, { "r", "r" } },
2094 { INDEX_op_movi_i32, { "r" } },
2095
2096 { INDEX_op_ld8u_i32, { "r", "r" } },
2097 { INDEX_op_ld8s_i32, { "r", "r" } },
2098 { INDEX_op_ld16u_i32, { "r", "r" } },
2099 { INDEX_op_ld16s_i32, { "r", "r" } },
2100 { INDEX_op_ld_i32, { "r", "r" } },
2101 { INDEX_op_st8_i32, { "r", "r" } },
2102 { INDEX_op_st16_i32, { "r", "r" } },
2103 { INDEX_op_st_i32, { "r", "r" } },
2104
0db921e6
RH
2105 { INDEX_op_add_i32, { "r", "r", "ri" } },
2106 { INDEX_op_sub_i32, { "r", "0", "ri" } },
48bb3750
RH
2107 { INDEX_op_mul_i32, { "r", "0", "rK" } },
2108
2109 { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
2110 { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
2111
07ff7983 2112 { INDEX_op_and_i32, { "r", "0", "ri" } },
48bb3750
RH
2113 { INDEX_op_or_i32, { "r", "0", "rWO" } },
2114 { INDEX_op_xor_i32, { "r", "0", "rWX" } },
2115
2116 { INDEX_op_neg_i32, { "r", "r" } },
2117
2118 { INDEX_op_shl_i32, { "r", "0", "Ri" } },
2119 { INDEX_op_shr_i32, { "r", "0", "Ri" } },
2120 { INDEX_op_sar_i32, { "r", "0", "Ri" } },
2121
2122 { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
2123 { INDEX_op_rotr_i32, { "r", "r", "Ri" } },
2124
2125 { INDEX_op_ext8s_i32, { "r", "r" } },
2126 { INDEX_op_ext8u_i32, { "r", "r" } },
2127 { INDEX_op_ext16s_i32, { "r", "r" } },
2128 { INDEX_op_ext16u_i32, { "r", "r" } },
2129
2130 { INDEX_op_bswap16_i32, { "r", "r" } },
2131 { INDEX_op_bswap32_i32, { "r", "r" } },
2132
3790b918
RH
2133 { INDEX_op_add2_i32, { "r", "r", "0", "1", "r", "r" } },
2134 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "r", "r" } },
2135
48bb3750
RH
2136 { INDEX_op_brcond_i32, { "r", "rWC" } },
2137 { INDEX_op_setcond_i32, { "r", "r", "rWC" } },
96a9f093 2138 { INDEX_op_movcond_i32, { "r", "r", "rWC", "r", "0" } },
d5690ea4 2139 { INDEX_op_deposit_i32, { "r", "0", "r" } },
48bb3750
RH
2140
2141 { INDEX_op_qemu_ld8u, { "r", "L" } },
2142 { INDEX_op_qemu_ld8s, { "r", "L" } },
2143 { INDEX_op_qemu_ld16u, { "r", "L" } },
2144 { INDEX_op_qemu_ld16s, { "r", "L" } },
2145 { INDEX_op_qemu_ld32, { "r", "L" } },
2146 { INDEX_op_qemu_ld64, { "r", "L" } },
2147
2148 { INDEX_op_qemu_st8, { "L", "L" } },
2149 { INDEX_op_qemu_st16, { "L", "L" } },
2150 { INDEX_op_qemu_st32, { "L", "L" } },
2151 { INDEX_op_qemu_st64, { "L", "L" } },
2152
48bb3750
RH
2153 { INDEX_op_mov_i64, { "r", "r" } },
2154 { INDEX_op_movi_i64, { "r" } },
2155
2156 { INDEX_op_ld8u_i64, { "r", "r" } },
2157 { INDEX_op_ld8s_i64, { "r", "r" } },
2158 { INDEX_op_ld16u_i64, { "r", "r" } },
2159 { INDEX_op_ld16s_i64, { "r", "r" } },
2160 { INDEX_op_ld32u_i64, { "r", "r" } },
2161 { INDEX_op_ld32s_i64, { "r", "r" } },
2162 { INDEX_op_ld_i64, { "r", "r" } },
2163
2164 { INDEX_op_st8_i64, { "r", "r" } },
2165 { INDEX_op_st16_i64, { "r", "r" } },
2166 { INDEX_op_st32_i64, { "r", "r" } },
2167 { INDEX_op_st_i64, { "r", "r" } },
2168
0db921e6
RH
2169 { INDEX_op_add_i64, { "r", "r", "ri" } },
2170 { INDEX_op_sub_i64, { "r", "0", "ri" } },
48bb3750
RH
2171 { INDEX_op_mul_i64, { "r", "0", "rK" } },
2172
2173 { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
2174 { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
36017dc6 2175 { INDEX_op_mulu2_i64, { "b", "a", "0", "r" } },
48bb3750 2176
07ff7983 2177 { INDEX_op_and_i64, { "r", "0", "ri" } },
48bb3750
RH
2178 { INDEX_op_or_i64, { "r", "0", "rO" } },
2179 { INDEX_op_xor_i64, { "r", "0", "rX" } },
2180
2181 { INDEX_op_neg_i64, { "r", "r" } },
2182
2183 { INDEX_op_shl_i64, { "r", "r", "Ri" } },
2184 { INDEX_op_shr_i64, { "r", "r", "Ri" } },
2185 { INDEX_op_sar_i64, { "r", "r", "Ri" } },
2186
2187 { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
2188 { INDEX_op_rotr_i64, { "r", "r", "Ri" } },
2189
2190 { INDEX_op_ext8s_i64, { "r", "r" } },
2191 { INDEX_op_ext8u_i64, { "r", "r" } },
2192 { INDEX_op_ext16s_i64, { "r", "r" } },
2193 { INDEX_op_ext16u_i64, { "r", "r" } },
2194 { INDEX_op_ext32s_i64, { "r", "r" } },
2195 { INDEX_op_ext32u_i64, { "r", "r" } },
2196
2197 { INDEX_op_bswap16_i64, { "r", "r" } },
2198 { INDEX_op_bswap32_i64, { "r", "r" } },
2199 { INDEX_op_bswap64_i64, { "r", "r" } },
2200
3790b918
RH
2201 { INDEX_op_add2_i64, { "r", "r", "0", "1", "r", "r" } },
2202 { INDEX_op_sub2_i64, { "r", "r", "0", "1", "r", "r" } },
2203
48bb3750
RH
2204 { INDEX_op_brcond_i64, { "r", "rC" } },
2205 { INDEX_op_setcond_i64, { "r", "r", "rC" } },
96a9f093 2206 { INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
d5690ea4 2207 { INDEX_op_deposit_i64, { "r", "0", "r" } },
48bb3750
RH
2208
2209 { INDEX_op_qemu_ld32u, { "r", "L" } },
2210 { INDEX_op_qemu_ld32s, { "r", "L" } },
48bb3750
RH
2211
2212 { -1 },
2213};
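/* Editor's note on reading the constraint strings above (the backend's
   constraint parser is the authoritative decoding): "r" is any general
   register, "i" any immediate, and a digit such as "0" or "1" ties an
   input to the same register as that output operand.  "L" is the softmmu
   load/store constraint that keeps addresses and data clear of the
   registers clobbered by the helper call, while "a" and "b" pin the
   div2/mulu2 outputs to the even/odd register pair the hardware expects.
   The remaining letters (K, O, X, C, W, R) are backend-specific
   refinements -- constant classes such as TCG_CT_CONST_MULI/ORI/XORI/CMPI
   and small register-set restrictions -- checked when matching operands. */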
2214
2215 /* ??? Linux kernels provide an AUXV entry, AT_HWCAP, that supplies most
2216 of this information. However, getting at that entry is not easy this far
2217 away from main. Our options are: start searching from environ, but
2218 that fails as soon as someone does a setenv in between; read the data
2219 from /proc/self/auxv; or do the probing ourselves. The only thing
2220 extra that AT_HWCAP gives us is HWCAP_S390_HIGH_GPRS, which indicates
2221 that the kernel saves all 64 bits of the registers around traps while
2222 in 31-bit mode. But this is true of all "recent" kernels (we ought to
2223 dig back and see since when this has been true). */
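/* Editor's sketch (hypothetical, not part of the backend, hence disabled):
   if the AT_HWCAP route mentioned above were taken, reading
   /proc/self/auxv on a 64-bit Linux host could look roughly like this. */
#if 0
#include <elf.h>
#include <stdio.h>

static unsigned long read_at_hwcap(void)
{
    Elf64_auxv_t entry;
    unsigned long hwcap = 0;
    FILE *f = fopen("/proc/self/auxv", "rb");

    if (f == NULL) {
        return 0;
    }
    /* The aux vector is a flat array of (type, value) pairs terminated by
       AT_NULL; scan it for AT_HWCAP. */
    while (fread(&entry, sizeof(entry), 1, f) == 1) {
        if (entry.a_type == AT_HWCAP) {
            hwcap = entry.a_un.a_val;
            break;
        }
    }
    fclose(f);
    return hwcap;
}
#endif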
2224
2225#include <signal.h>
2226
2227static volatile sig_atomic_t got_sigill;
2228
2229static void sigill_handler(int sig)
2827822e 2230{
48bb3750 2231 got_sigill = 1;
2827822e
AG
2232}
2233
48bb3750
RH
2234static void query_facilities(void)
2235{
2236 struct sigaction sa_old, sa_new;
2237 register int r0 __asm__("0");
2238 register void *r1 __asm__("1");
2239 int fail;
2240
2241 memset(&sa_new, 0, sizeof(sa_new));
2242 sa_new.sa_handler = sigill_handler;
2243 sigaction(SIGILL, &sa_new, &sa_old);
2244
2245 /* First, try STORE FACILITY LIST EXTENDED. If this is present, then
2246 we need not do any more probing. Unfortunately, this itself is an
2247 extension and the original STORE FACILITY LIST instruction is
2248 kernel-only, storing its results at absolute address 200. */
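/* For STFLE, r0 (bits 56-63) holds one less than the number of doublewords
   the caller provides -- so the 0 passed below requests a single
   doubleword -- and r1 addresses the (doubleword-aligned) result buffer;
   on completion the field in r0 is updated to one less than the number of
   doublewords the machine would need. */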
2249 /* stfle 0(%r1) */
2250 r1 = &facilities;
2251 asm volatile(".word 0xb2b0,0x1000"
2252 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
2253
2254 if (got_sigill) {
2255 /* STORE FACILITY LIST EXTENDED is not available. Probe for one of each
2256 kind of instruction that we're interested in. */
2257 /* ??? Possibly some of these are in practice never present unless
2258 the store-facility-extended facility is also present. But since
2259 that isn't documented it's just better to probe for each. */
2260
2261 /* Test for z/Architecture. Required even in 31-bit mode. */
2262 got_sigill = 0;
2263 /* agr %r0,%r0 */
2264 asm volatile(".word 0xb908,0x0000" : "=r"(r0) : : "cc");
2265 if (!got_sigill) {
2266 facilities |= FACILITY_ZARCH_ACTIVE;
2267 }
2268
2269 /* Test for long displacement. */
2270 got_sigill = 0;
2271 /* ly %r0,0(%r1) */
2272 r1 = &facilities;
2273 asm volatile(".word 0xe300,0x1000,0x0058"
2274 : "=r"(r0) : "r"(r1) : "cc");
2275 if (!got_sigill) {
2276 facilities |= FACILITY_LONG_DISP;
2277 }
2278
2279 /* Test for extended immediates. */
2280 got_sigill = 0;
2281 /* afi %r0,0 */
2282 asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
2283 if (!got_sigill) {
2284 facilities |= FACILITY_EXT_IMM;
2285 }
2286
2287 /* Test for general-instructions-extension. */
2288 got_sigill = 0;
2289 /* msfi %r0,1 */
2290 asm volatile(".word 0xc201,0x0000,0x0001");
2291 if (!got_sigill) {
2292 facilities |= FACILITY_GEN_INST_EXT;
2293 }
2294 }
2295
2296 sigaction(SIGILL, &sa_old, NULL);
2297
2298 /* The translator currently uses these extensions unconditionally.
2299 Pruning this back to the base ESA/390 architecture doesn't seem
2300 worthwhile, since even the KVM target requires z/Arch. */
2301 fail = 0;
2302 if ((facilities & FACILITY_ZARCH_ACTIVE) == 0) {
2303 fprintf(stderr, "TCG: z/Arch facility is required.\n");
2304 fprintf(stderr, "TCG: Boot with a 64-bit enabled kernel.\n");
2305 fail = 1;
2306 }
2307 if ((facilities & FACILITY_LONG_DISP) == 0) {
2308 fprintf(stderr, "TCG: long-displacement facility is required.\n");
2309 fail = 1;
2310 }
2311
2312 /* So far there's just enough support for 31-bit mode to let the
2313 compile succeed. This is good enough to run QEMU with KVM. */
2314 if (sizeof(void *) != 8) {
2315 fprintf(stderr, "TCG: 31-bit mode is not supported.\n");
2316 fail = 1;
2317 }
2318
2319 if (fail) {
2320 exit(-1);
2321 }
2322}
2323
2324static void tcg_target_init(TCGContext *s)
2827822e 2325{
48bb3750
RH
2326#if !defined(CONFIG_USER_ONLY)
2327 /* fail safe */
2328 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) {
2329 tcg_abort();
2330 }
2331#endif
2332
2333 query_facilities();
2334
2335 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2336 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2337
2338 tcg_regset_clear(tcg_target_call_clobber_regs);
2339 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2340 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2341 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2342 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2343 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2344 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
2345 /* The return register can be considered call-clobbered. */
2346 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2347
2348 tcg_regset_clear(s->reserved_regs);
2349 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2350 /* XXX many insns can't be used with R0, so we had better avoid it for now */
2351 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2352 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2353
2354 tcg_add_target_add_op_defs(s390_op_defs);
2827822e
AG
2355}
2356
48bb3750 2357static void tcg_target_qemu_prologue(TCGContext *s)
2827822e 2358{
a4924e8b
RH
2359 tcg_target_long frame_size;
2360
48bb3750
RH
2361 /* stmg %r6,%r15,48(%r15) (save registers) */
2362 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
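/* Editor's note: per the s390x ELF ABI the caller's frame includes a
   register save area, and callee-saved %r6-%r15 are stored there at
   offsets 48-127 relative to the incoming %r15, which is exactly what the
   STMG above does. */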
2363
a4924e8b
RH
2364 /* aghi %r15,-frame_size */
2365 frame_size = TCG_TARGET_CALL_STACK_OFFSET;
2366 frame_size += TCG_STATIC_CALL_ARGS_SIZE;
2367 frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
2368 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -frame_size);
2369
2370 tcg_set_frame(s, TCG_REG_CALL_STACK,
2371 TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
2372 CPU_TEMP_BUF_NLONGS * sizeof(long));
48bb3750
RH
2373
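/* Editor's note: a guest base below 0x80000 still fits in the signed
   20-bit displacement of the long-displacement memory instructions, so
   only larger bases need a dedicated (reserved) base register. */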
2374 if (GUEST_BASE >= 0x80000) {
2375 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
2376 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2377 }
2378
cea5f9a2
BS
2379 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2380 /* br %r3 (go to TB) */
2381 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
48bb3750
RH
2382
2383 tb_ret_addr = s->code_ptr;
2384
a4924e8b
RH
2385 /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
2386 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
2387 frame_size + 48);
48bb3750
RH
2388
2389 /* br %r14 (return) */
2390 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
2827822e 2391}