 * HPPA emulation cpu translation for qemu.
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include "qemu/osdep.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"
/* Since we have a distinction between register size and address size,
   we need to redefine all of these. */
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#define tcg_temp_free_tl tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_temp_free_tl tcg_temp_free_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64
#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64
#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg tcg_const_i64
#define tcg_const_local_reg tcg_const_local_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32
#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg tcg_const_i32
#define tcg_const_local_reg tcg_const_local_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */
typedef struct DisasCond {
typedef struct DisasContext {
    DisasContextBase base;
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(int val)
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
/* An inverted space register field: a value of 0 selects sr0 directly,
   rather than a space inferred from the base register. */
static int expand_sr3x(int val)
/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M. */
static int ma_to_m(int val)
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
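    /* Derived from the expression above: m=0 yields 0 (no base register
       update), m=1,a=1 yields -1 (pre-modify), and m=1,a=0 yields +1
       (post-modify), matching the < 0 / > 0 / = 0 convention documented
       at the memory load/store emitters below. */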
/* Used for branch targets. */
static int expand_shl2(int val)
/* Include the auto-generated decoder. */
#include "decode.inc.c"
/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit. */
#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed. */
#define DISAS_IAQ_N_STALE DISAS_TARGET_1
/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts. */
#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
typedef struct DisasInsn {
    bool (*trans)(DisasContext *ctx, uint32_t insn,
                  const struct DisasInsn *f);
        void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
        void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;
#include "exec/gen-icount.h"
void hppa_translate_init(void)
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    /* SR[4-7] are not global registers so that we can index them. */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
static DisasCond cond_make_f(void)
static DisasCond cond_make_n(void)
static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
    DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
    DisasCond r = { .c = c };
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);
static void cond_prep(DisasCond *cond)
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
static void cond_free(DisasCond *cond)
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
static TCGv_reg get_temp(DisasContext *ctx)
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
        tcg_gen_mov_reg(dest, t);
static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
        save_or_nullify(ctx, cpu_gr[reg], t);
#ifdef HOST_WORDS_BIGENDIAN
static TCGv_i32 load_frw_i32(unsigned rt)
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
static TCGv_i32 load_frw0_i32(unsigned rt)
        return tcg_const_i32(0);
        return load_frw_i32(rt);
static TCGv_i64 load_frw0_i64(unsigned rt)
        return tcg_const_i64(0);
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
static void save_frw_i32(unsigned rt, TCGv_i32 val)
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
static TCGv_i64 load_frd(unsigned rt)
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
static TCGv_i64 load_frd0(unsigned rt)
        return tcg_const_i64(0);
static void save_frd(unsigned rt, TCGv_i64 val)
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);
        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);
        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid. */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
    ctx->psw_n_nonzero = true;
    cond_free(&ctx->null_cond);
/* Set a PSW[N] to X. The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB. Therefore we do not update psw_n_nonzero. */
static void nullify_set(DisasContext *ctx, bool x)
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over. Always returns true so that
   it may be tail-called from a translate function. */
static bool nullify_end(DisasContext *ctx)
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;
    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path. */
    assert(status != DISAS_IAQ_N_UPDATED);
    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn. */
    ctx->null_lab = NULL;
    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that. */
        gen_set_label(null_lab);
        /* The insn that we just executed is itself nullifying the next
           instruction. Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place. */
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
        tcg_gen_movi_reg(dest, ival);
static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
    return ctx->iaoq_f + disp + 8;
static void gen_excp_1(int exception)
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
static void gen_excp(DisasContext *ctx, int exception)
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
static bool gen_excp_iir(DisasContext *ctx, int exc)
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    return nullify_end(ctx);
static bool gen_illegal(DisasContext *ctx)
    return gen_excp_iir(ctx, EXCP_ILL);
#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#define CHECK_MOST_PRIVILEGED(EXCP) \
    if (ctx->privilege != 0) { \
        return gen_excp_iir(ctx, EXCP); \
static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
    /* Suppress goto_tb in the case of single-stepping and IO. */
    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
        || ctx->base.singlestep_enabled) {
/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn. This avoids creating and
   executing a TB that merely branches to the next TB. */
static bool use_nullify_skip(DisasContext *ctx)
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
            tcg_gen_lookup_and_goto_ptr();
/* PA has a habit of taking the LSB of a field and using that as the sign,
   with the rest of the field becoming the least significant bits. */
static target_sreg low_sextract(uint32_t val, int pos, int len)
    target_ureg x = -(target_ureg)extract32(val, pos, 1);
    x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
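    /* Worked example of the two lines above: with len = 14, a field of
       0b1 (only the sign bit set) decodes to -8192, and a field of 0b11
       decodes to (-1 << 13) | 1 = -8191. */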
static unsigned assemble_rt64(uint32_t insn)
    unsigned r1 = extract32(insn, 6, 1);
    unsigned r0 = extract32(insn, 0, 5);
static unsigned assemble_ra64(uint32_t insn)
    unsigned r1 = extract32(insn, 7, 1);
    unsigned r0 = extract32(insn, 21, 5);
static unsigned assemble_rb64(uint32_t insn)
    unsigned r1 = extract32(insn, 12, 1);
    unsigned r0 = extract32(insn, 16, 5);
static unsigned assemble_rc64(uint32_t insn)
    unsigned r2 = extract32(insn, 8, 1);
    unsigned r1 = extract32(insn, 13, 3);
    unsigned r0 = extract32(insn, 9, 2);
    return r2 * 32 + r1 * 4 + r0;
static inline unsigned assemble_sr3(uint32_t insn)
    unsigned s2 = extract32(insn, 13, 1);
    unsigned s0 = extract32(insn, 14, 2);
static target_sreg assemble_16(uint32_t insn)
    /* Take the name from PA2.0, which produces a 16-bit number
       only with wide mode; otherwise a 14-bit number. Since we don't
       implement wide mode, this is always the 14-bit number. */
    return low_sextract(insn, 0, 14);
static target_sreg assemble_16a(uint32_t insn)
    /* Take the name from PA2.0, which produces a 14-bit shifted number
       only with wide mode; otherwise a 12-bit shifted number. Since we
       don't implement wide mode, this is always the 12-bit number. */
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 2, 11);
static target_sreg assemble_21(uint32_t insn)
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 1, 11);
    x = (x << 2) | extract32(insn, 14, 2);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 2) | extract32(insn, 12, 2);
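    /* The five extracts above undo the permuted bit layout of the 21-bit
       immediate (as used by LDIL/ADDIL): the architecture scatters the
       immediate across the instruction word, with the sign in bit 0. */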
/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation. The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce. */
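/* In the do_*_cond helpers below, CF is the 4-bit condition/negation
   field from the instruction: bits 3..1 select the condition (hence the
   "cf >> 1" seen in callers) and bit 0 inverts it, via the
   tcg_invert_cond at the end of each helper. */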
static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
    case 0: /* Never / TR */
        cond = cond_make_f();
    case 1: /* = / <> (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
    case 2: /* < / >= (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
    case 3: /* <= / > (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
    case 4: /* NUV / UV (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
    case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
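        /* Here tmp = -cb_msb & res, which is RES when the carry is set
           and 0 when it is clear; comparing it against 0 therefore
           implements !C | Z (and the inversion below gives C & !Z). */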
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
    case 6: /* SV / NSV (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        g_assert_not_reached();
        cond.c = tcg_invert_cond(cond.c);
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly. This can allow other computation to be
   deleted as unused. */
static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
        cond = cond_make(TCG_COND_EQ, in1, in2);
        cond = cond_make(TCG_COND_LT, in1, in2);
        cond = cond_make(TCG_COND_LE, in1, in2);
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        return do_cond(cf, res, sv, sv);
        cond.c = tcg_invert_cond(cond.c);
/* Similar, but for logicals, where the carry and overflow bits are not
   computed, and use of them is undefined. */
static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
    case 4: case 5: case 6:
    return do_cond(cf, res, res, res);
/* Similar, but for shift/extract/deposit conditions. */
static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3. */
    return do_log_cond(c * 2 + f, res);
/* Similar, but for unit conditions. */
static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
    TCGv_reg tmp, cb = NULL;
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
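        /* CB now holds the carry-out of every bit position of in1 + in2:
           ((in1 | in2) & ~res) | (in1 & in2), recovering each bit's
           carry-in implicitly from res = in1 ^ in2 ^ carry-in. */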
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
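        /* (res - 0x01010101) & ~res & 0x80808080 is nonzero exactly when
           some byte of RES is zero: only a 0x00 byte both borrows into
           bit 7 of its lane and has that bit clear in RES itself.
           E.g. res = 0x12003456 yields 0x00800000. */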
    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        g_assert_not_reached();
        cond.c = tcg_invert_cond(cond.c);
/* Compute signed overflow for addition. */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
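    /* SV = (res ^ in1) & ~(in1 ^ in2): its MSB is set exactly when the
       operands have the same sign but the result's sign differs, i.e.
       on signed overflow. */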
/* Compute signed overflow for subtraction. */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
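    /* SV = (res ^ in1) & (in1 ^ in2): for subtraction, overflow can only
       occur when the operands have different signs, and then only if the
       result's sign differs from IN1's. */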
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    dest = tcg_temp_new();
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
    if (!is_l || c == 4 || c == 5) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        tcg_temp_free(zero);
        tcg_gen_xor_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
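        /* CB = in1 ^ in2 ^ dest is the per-bit carry-in vector, since
           dest = in1 + in2 + carry-in at every bit position; the carry
           out of the top bit is tracked separately in CB_MSB. */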
        tcg_gen_add_reg(dest, in1, in2);
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
    /* Compute signed overflow if required. */
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
            /* ??? Need to include overflow from shift. */
            gen_helper_tsv(cpu_env, sv);
    /* Emit any conditional trap before any writeback. */
    cond = do_cond(cf, dest, cb_msb, sv);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
    /* Write back the result. */
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
    TCGv_reg tcg_r1, tcg_r2;
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();
    zero = tcg_const_reg(0);
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
        /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting. */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
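        /* As in the carry-in path above, CB ends up holding the per-bit
           carries: here ~(in1 ^ in2) ^ dest, since the effective second
           operand of the addition is ~IN2. */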
    tcg_temp_free(zero);
    /* Compute signed overflow if required. */
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
            gen_helper_tsv(cpu_env, sv);
    /* Compute the condition. We cannot use the special case for borrow. */
        cond = do_sub_cond(cf, dest, in1, in2, sv);
        cond = do_cond(cf, dest, cb_msb, sv);
    /* Emit any conditional trap before any writeback. */
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
    /* Write back the result. */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
    TCGv_reg tcg_r1, tcg_r2;
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);
    /* Compute signed overflow if required. */
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    /* Form the condition for the compare. */
    cond = do_sub_cond(cf, dest, in1, in2, sv);
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
    TCGv_reg dest = dest_gpr(ctx, rt);
    /* Perform the operation, and writeback. */
    save_gpr(ctx, rt, dest);
    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
        ctx->null_cond = do_log_cond(cf, dest);
static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
    TCGv_reg tcg_r1, tcg_r2;
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
        dest = dest_gpr(ctx, rt);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
        dest = tcg_temp_new();
        cond = do_unit_cond(cf, dest, in1, in2);
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register. There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special. To handle this, pass ~SP. */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);
    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
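    /* The shift leaves the top five bits of the base in TMP; masking
       with octal 030 (0x18) keeps just the top two, pre-scaled by 8,
       so TMP becomes the byte offset of one of sr[4..7] relative to
       sr[4] in the load below. */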
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
    TCGv_reg base = load_gpr(ctx, rb);
    /* Note that RX is mutually exclusive with DISP. */
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
/* Emit a memory load. The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);
    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
        save_gpr(ctx, rb, ofs);
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);
    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
        save_gpr(ctx, rb, ofs);
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);
    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
        save_gpr(ctx, rb, ofs);
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);
    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
        save_gpr(ctx, rb, ofs);
#if TARGET_REGISTER_BITS == 64
#define do_load_reg do_load_64
#define do_store_reg do_store_64
#define do_load_reg do_load_32
#define do_store_reg do_store_32
static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, TCGMemOp mop)
        /* No base register update. */
        dest = dest_gpr(ctx, rt);
        /* Make sure if RT == RB, we see the result of the load. */
        dest = get_temp(ctx);
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);
    return nullify_end(ctx);
static void do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
        gen_helper_loaded_fr0(cpu_env);
static void do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);
        gen_helper_loaded_fr0(cpu_env);
static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, TCGMemOp mop)
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
static void do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);
static void do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);
static void do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
    tmp = load_frw0_i32(ra);
    func(tmp, cpu_env, tmp);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
static void do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
    dst = tcg_temp_new_i32();
    func(dst, cpu_env, src);
    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
static void do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
    tmp = load_frd0(ra);
    func(tmp, cpu_env, tmp);
    tcg_temp_free_i64(tmp);
static void do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();
    func(dst, cpu_env, src);
    tcg_temp_free_i32(src);
    tcg_temp_free_i64(dst);
static void do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);
    func(a, cpu_env, a, b);
    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
static void do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
    func(a, cpu_env, a, b);
    tcg_temp_free_i64(b);
    tcg_temp_free_i64(a);
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled. */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
            ctx->null_cond.c = TCG_COND_ALWAYS;
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
/* Emit a conditional branch to a direct target. If the branch itself
   is nullified, we should have already used nullify_over. */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    assert(ctx->null_cond.c == TCG_COND_NEVER);
    /* Handle TRUE and NEVER as direct branches. */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it. */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    gen_set_label(taken);
    /* Taken: Condition satisfied; nullify on forward branches. */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    /* Not taken: the branch itself was nullified. */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        ctx->base.is_jmp = DISAS_NORETURN;
/* Emit an unconditional branch to an indirect target. This handles
   nullification of the branch itself. */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
    TCGv_reg a0, a1, next, tmp;
    assert(ctx->null_lab == NULL);
    if (ctx->null_cond.c == TCG_COND_NEVER) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (use_nullify_skip(ctx)) {
            tcg_gen_mov_reg(cpu_iaoq_f, next);
            tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
            nullify_set(ctx, 0);
            ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect). Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution. */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);
        /* We do have to handle the non-local temporary, DEST, before
           branching. Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically. Similarly with IAOQ_B. */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;
        tmp = tcg_temp_new();
        next = get_temp(ctx);
        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n_var = next;
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
            cond_free(&ctx->null_cond);
 * if (IAOQ_Front{30..31} < GR[b]{30..31})
 *     IAOQ_Next{30..31} ← GR[b]{30..31};
 *     IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
    switch (ctx->privilege) {
        /* Privilege 0 is maximum and is allowed to decrease. */
        /* Privilege 3 is minimum and is never allowed to increase. */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        dest = tcg_temp_new();
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
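        /* The unsigned max via movcond keeps whichever address has the
           numerically larger low 2 bits, i.e. the lower privilege, so an
           indirect branch can never raise the privilege level (see the
           pseudo-code in the comment above). */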
        tcg_temp_free(dest);
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls. Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE. */
static void do_page_zero(DisasContext *ctx)
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page. */
1985 switch (ctx->null_cond.c) {
1986 case TCG_COND_NEVER:
1988 case TCG_COND_ALWAYS:
1989 tcg_gen_movi_reg(cpu_psw_n, 0);
1992 /* Since this is always the first (and only) insn within the
1993 TB, we should know the state of PSW[N] from TB->FLAGS. */
1994 g_assert_not_reached();
1997 /* Check that we didn't arrive here via some means that allowed
1998 non-sequential instruction execution. Normally the PSW[B] bit
1999 detects this by disallowing the B,GATE instruction to execute
2000 under such conditions. */
2001 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2005 switch (ctx->iaoq_f & -4) {
2006 case 0x00: /* Null pointer call */
2007 gen_excp_1(EXCP_IMP);
2008 ctx->base.is_jmp = DISAS_NORETURN;
2011 case 0xb0: /* LWS */
2012 gen_excp_1(EXCP_SYSCALL_LWS);
2013 ctx->base.is_jmp = DISAS_NORETURN;
2016 case 0xe0: /* SET_THREAD_POINTER */
2017 tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
2018 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
2019 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
2020 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2023 case 0x100: /* SYSCALL */
2024 gen_excp_1(EXCP_SYSCALL);
2025 ctx->base.is_jmp = DISAS_NORETURN;
2030 gen_excp_1(EXCP_ILL);
2031 ctx->base.is_jmp = DISAS_NORETURN;
2037 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2039 cond_free(&ctx->null_cond);
2043 static bool trans_break(DisasContext *ctx, arg_break *a)
2045 return gen_excp_iir(ctx, EXCP_BREAK);
2048 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2050 /* No point in nullifying the memory barrier. */
2051 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2053 cond_free(&ctx->null_cond);
2057 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2060 TCGv_reg tmp = dest_gpr(ctx, rt);
2061 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
2062 save_gpr(ctx, rt, tmp);
2064 cond_free(&ctx->null_cond);
2068 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2071 unsigned rs = a->sp;
2072 TCGv_i64 t0 = tcg_temp_new_i64();
2073 TCGv_reg t1 = tcg_temp_new();
2075 load_spr(ctx, t0, rs);
2076 tcg_gen_shri_i64(t0, t0, 32);
2077 tcg_gen_trunc_i64_reg(t1, t0);
2079 save_gpr(ctx, rt, t1);
2081 tcg_temp_free_i64(t0);
2083 cond_free(&ctx->null_cond);
2087 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2090 unsigned ctl = a->r;
2095 #ifdef TARGET_HPPA64
2097 /* MFSAR without ,W masks low 5 bits. */
2098 tmp = dest_gpr(ctx, rt);
2099 tcg_gen_andi_reg(tmp, cpu_sar, 31);
2100 save_gpr(ctx, rt, tmp);
2104 save_gpr(ctx, rt, cpu_sar);
2106 case CR_IT: /* Interval Timer */
2107 /* FIXME: Respect PSW_S bit. */
2109 tmp = dest_gpr(ctx, rt);
2110 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2112 gen_helper_read_interval_timer(tmp);
2114 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2116 gen_helper_read_interval_timer(tmp);
2118 save_gpr(ctx, rt, tmp);
2119 return nullify_end(ctx);
2124 /* All other control registers are privileged. */
2125 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2129 tmp = get_temp(ctx);
2130 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2131 save_gpr(ctx, rt, tmp);
2134 cond_free(&ctx->null_cond);
2138 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2141 unsigned rs = a->sp;
2145 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2149 t64 = tcg_temp_new_i64();
2150 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2151 tcg_gen_shli_i64(t64, t64, 32);
2154 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
2155 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2157 tcg_gen_mov_i64(cpu_sr[rs], t64);
2159 tcg_temp_free_i64(t64);
2161 return nullify_end(ctx);
2164 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2166 unsigned ctl = a->t;
2167 TCGv_reg reg = load_gpr(ctx, a->r);
2170 if (ctl == CR_SAR) {
2171 tmp = tcg_temp_new();
2172 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
2173 save_or_nullify(ctx, cpu_sar, tmp);
2176 cond_free(&ctx->null_cond);
2180 /* All other control registers are privileged or read-only. */
2181 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2183 #ifndef CONFIG_USER_ONLY
2187 gen_helper_write_interval_timer(cpu_env, reg);
2190 gen_helper_write_eirr(cpu_env, reg);
2193 gen_helper_write_eiem(cpu_env, reg);
2194 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2199 /* FIXME: Respect PSW_Q bit */
2200 /* The write advances the queue and stores to the back element. */
2201 tmp = get_temp(ctx);
2202 tcg_gen_ld_reg(tmp, cpu_env,
2203 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2204 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2205 tcg_gen_st_reg(reg, cpu_env,
2206 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2210 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2213 return nullify_end(ctx);
2217 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2219 TCGv_reg tmp = tcg_temp_new();
2221 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
2222 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
2223 save_or_nullify(ctx, cpu_sar, tmp);
2226 cond_free(&ctx->null_cond);
2230 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2232 TCGv_reg dest = dest_gpr(ctx, a->t);
2234 #ifdef CONFIG_USER_ONLY
2235 /* We don't implement space registers in user mode. */
2236 tcg_gen_movi_reg(dest, 0);
2238 TCGv_i64 t0 = tcg_temp_new_i64();
2240 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2241 tcg_gen_shri_i64(t0, t0, 32);
2242 tcg_gen_trunc_i64_reg(dest, t0);
2244 tcg_temp_free_i64(t0);
2246 save_gpr(ctx, a->t, dest);
2248 cond_free(&ctx->null_cond);
2252 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2254 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2255 #ifndef CONFIG_USER_ONLY
2260 tmp = get_temp(ctx);
2261 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2262 tcg_gen_andi_reg(tmp, tmp, ~a->i);
2263 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2264 save_gpr(ctx, a->t, tmp);
2266 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2267 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2268 return nullify_end(ctx);
2272 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2274 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2275 #ifndef CONFIG_USER_ONLY
2280 tmp = get_temp(ctx);
2281 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
2282 tcg_gen_ori_reg(tmp, tmp, a->i);
2283 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
2284 save_gpr(ctx, a->t, tmp);
2286 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2287 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2288 return nullify_end(ctx);
2292 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2294 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2295 #ifndef CONFIG_USER_ONLY
2299 reg = load_gpr(ctx, a->r);
2300 tmp = get_temp(ctx);
2301 gen_helper_swap_system_mask(tmp, cpu_env, reg);
2303 /* Exit the TB to recognize new interrupts. */
2304 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2305 return nullify_end(ctx);
2309 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2311 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2312 #ifndef CONFIG_USER_ONLY
2316 gen_helper_rfi_r(cpu_env);
2318 gen_helper_rfi(cpu_env);
2320 /* Exit the TB to recognize new interrupts. */
2321 if (ctx->base.singlestep_enabled) {
2322 gen_excp_1(EXCP_DEBUG);
2324 tcg_gen_exit_tb(NULL, 0);
2326 ctx->base.is_jmp = DISAS_NORETURN;
2328 return nullify_end(ctx);
2332 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2334 return do_rfi(ctx, false);
2337 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2339 return do_rfi(ctx, true);
2342 #ifndef CONFIG_USER_ONLY
2343 static bool gen_hlt(DisasContext *ctx, int reset)
2345 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2348 gen_helper_reset(cpu_env);
2350 gen_helper_halt(cpu_env);
2352 ctx->base.is_jmp = DISAS_NORETURN;
2353 return nullify_end(ctx);
2355 #endif /* !CONFIG_USER_ONLY */
2357 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2360 TCGv_reg dest = dest_gpr(ctx, a->b);
2361 TCGv_reg src1 = load_gpr(ctx, a->b);
2362 TCGv_reg src2 = load_gpr(ctx, a->x);
2364 /* The only thing we need to do is the base register modification. */
2365 tcg_gen_add_reg(dest, src1, src2);
2366 save_gpr(ctx, a->b, dest);
2368 cond_free(&ctx->null_cond);
2372 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2375 TCGv_i32 level, want;
2380 dest = dest_gpr(ctx, a->t);
2381 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2384 level = tcg_const_i32(a->ri);
2386 level = tcg_temp_new_i32();
2387 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
2388 tcg_gen_andi_i32(level, level, 3);
2390 want = tcg_const_i32(a->write ? PAGE_WRITE : PAGE_READ);
2392 gen_helper_probe(dest, cpu_env, addr, level, want);
2394 tcg_temp_free_i32(want);
2395 tcg_temp_free_i32(level);
2397 save_gpr(ctx, a->t, dest);
2398 return nullify_end(ctx);
2401 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2403 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2404 #ifndef CONFIG_USER_ONLY
2410 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2411 reg = load_gpr(ctx, a->r);
2413 gen_helper_itlba(cpu_env, addr, reg);
2415 gen_helper_itlbp(cpu_env, addr, reg);
2418 /* Exit TB for ITLB change if mmu is enabled. This *should* not be
2419 the case, since the OS TLB fill handler runs with mmu disabled. */
2420 if (!a->data && (ctx->tb_flags & PSW_C)) {
2421 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2423 return nullify_end(ctx);
2427 static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
2429 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2430 #ifndef CONFIG_USER_ONLY
2436 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2438 save_gpr(ctx, a->b, ofs);
2441 gen_helper_ptlbe(cpu_env);
2443 gen_helper_ptlb(cpu_env, addr);
2446 /* Exit TB for TLB change if mmu is enabled. */
2447 if (!a->data && (ctx->tb_flags & PSW_C)) {
2448 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2450 return nullify_end(ctx);
2454 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2456 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2457 #ifndef CONFIG_USER_ONLY
2459 TCGv_reg ofs, paddr;
2463 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2465 paddr = tcg_temp_new();
2466 gen_helper_lpa(paddr, cpu_env, vaddr);
2468 /* Note that physical address result overrides base modification. */
2470 save_gpr(ctx, a->b, ofs);
2472 save_gpr(ctx, a->t, paddr);
2473 tcg_temp_free(paddr);
2475 return nullify_end(ctx);
2479 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2483 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2485 /* The Coherence Index is an implementation-defined function of the
2486 physical address. Two addresses with the same CI have a coherent
2487 view of the cache. Our implementation is to return 0 for all,
2488 since the entire address space is coherent. */
2489 ci = tcg_const_reg(0);
2490 save_gpr(ctx, a->t, ci);
2493 cond_free(&ctx->null_cond);
2497 static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
2499 return do_add_reg(ctx, a, false, false, false, false);
2502 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2504 return do_add_reg(ctx, a, true, false, false, false);
2507 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2509 return do_add_reg(ctx, a, false, true, false, false);
2512 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
2514 return do_add_reg(ctx, a, false, false, false, true);
2517 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2519 return do_add_reg(ctx, a, false, true, false, true);
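/* As the five variants above suggest, the boolean arguments to
   do_add_reg are (is_l, is_tsv, is_tc, is_c): ADD,L computes no
   carry/borrow, TSV traps on signed overflow, TC (presumably) traps on
   condition, and C adds in the carry bit -- so ADD,C,TSV passes
   (false, true, false, true).  */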
2522 static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2524 return do_sub_reg(ctx, a, false, false, false);
2527 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
2529 return do_sub_reg(ctx, a, true, false, false);
2532 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2534 return do_sub_reg(ctx, a, false, false, true);
2537 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
2539 return do_sub_reg(ctx, a, true, false, true);
2542 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2544 return do_sub_reg(ctx, a, false, true, false);
2547 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2549 return do_sub_reg(ctx, a, true, true, false);
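/* Likewise the do_sub_reg flags read as (is_tsv, is_b, is_tc):
   TSV traps on signed overflow, B subtracts with borrow, TC traps on
   condition; e.g. SUB,B,TSV passes (true, true, false).  */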
2552 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2554 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2557 static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2559 return do_log_reg(ctx, a, tcg_gen_and_reg);
2562 static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2565 unsigned r2 = a->r2;
2566 unsigned r1 = a->r1;
2569 if (rt == 0) { /* NOP */
2570 cond_free(&ctx->null_cond);
2573 if (r2 == 0) { /* COPY */
2575 TCGv_reg dest = dest_gpr(ctx, rt);
2576 tcg_gen_movi_reg(dest, 0);
2577 save_gpr(ctx, rt, dest);
2579 save_gpr(ctx, rt, cpu_gr[r1]);
2581 cond_free(&ctx->null_cond);
2584 #ifndef CONFIG_USER_ONLY
2585 /* These are QEMU extensions and are nops in the real architecture:
2587 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2588 * or %r31,%r31,%r31 -- death loop; offline cpu
2589 * currently implemented as idle.
2591 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2594 /* No need to check for supervisor, as userland can only pause
2595 until the next timer interrupt. */
2598 /* Advance the instruction queue. */
2599 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2600 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2601 nullify_set(ctx, 0);
2603 /* Tell the qemu main loop to halt until this cpu has work. */
2604 tmp = tcg_const_i32(1);
2605 tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2606 offsetof(CPUState, halted));
2607 tcg_temp_free_i32(tmp);
2608 gen_excp_1(EXCP_HALTED);
2609 ctx->base.is_jmp = DISAS_NORETURN;
2611 return nullify_end(ctx);
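/* The store above reaches CPUState.halted via cpu_env: cpu_env points
   at the CPUHPPAState embedded within HPPACPU, so rebasing by
   -offsetof(HPPACPU, env) gives the start of the CPU object, from
   which offsetof(CPUState, halted) locates the field.  */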
2615 return do_log_reg(ctx, a, tcg_gen_or_reg);
2618 static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2620 return do_log_reg(ctx, a, tcg_gen_xor_reg);
2623 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
2625 TCGv_reg tcg_r1, tcg_r2;
2630 tcg_r1 = load_gpr(ctx, a->r1);
2631 tcg_r2 = load_gpr(ctx, a->r2);
2632 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
2633 return nullify_end(ctx);
2636 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
2638 TCGv_reg tcg_r1, tcg_r2;
2643 tcg_r1 = load_gpr(ctx, a->r1);
2644 tcg_r2 = load_gpr(ctx, a->r2);
2645 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
2646 return nullify_end(ctx);
2649 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
2651 TCGv_reg tcg_r1, tcg_r2, tmp;
2656 tcg_r1 = load_gpr(ctx, a->r1);
2657 tcg_r2 = load_gpr(ctx, a->r2);
2658 tmp = get_temp(ctx);
2659 tcg_gen_not_reg(tmp, tcg_r2);
2660 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
2661 return nullify_end(ctx);
2664 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2666 return do_uaddcm(ctx, a, false);
2669 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2671 return do_uaddcm(ctx, a, true);
2674 static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
2680 tmp = get_temp(ctx);
2681 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2683 tcg_gen_not_reg(tmp, tmp);
2685 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2686 tcg_gen_muli_reg(tmp, tmp, 6);
2687 do_unit(ctx, a->t, tmp, load_gpr(ctx, a->r), a->cf, false,
2688 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2689 return nullify_end(ctx);
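/* Worked example for the usual BCD add idiom (bias one operand by
   0x666...6, ADD, then DCOR): digit 2 + 3 becomes 0x2 + 0x9 = 0xb with
   no nibble carry, so 6 is subtracted giving 0x5; digit 5 + 7 becomes
   0x5 + 0xd = 0x12 with a carry, so that digit is left alone.  The
   0x11111111 mask keeps one carry bit per nibble and the * 6 turns it
   into the per-digit adjustment.  */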
2692 static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2694 return do_dcor(ctx, a, false);
2697 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2699 return do_dcor(ctx, a, true);
2702 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2704 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2708 in1 = load_gpr(ctx, a->r1);
2709 in2 = load_gpr(ctx, a->r2);
2711 add1 = tcg_temp_new();
2712 add2 = tcg_temp_new();
2713 addc = tcg_temp_new();
2714 dest = tcg_temp_new();
2715 zero = tcg_const_reg(0);
2717 /* Form R1 << 1 | PSW[CB]{8}. */
2718 tcg_gen_add_reg(add1, in1, in1);
2719 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2721 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2722 carry{8} requires that we subtract via + ~R2 + 1, as described in
2723 the manual. By extracting and masking V, we can produce the
2724 proper inputs to the addition without movcond. */
2725 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2726 tcg_gen_xor_reg(add2, in2, addc);
2727 tcg_gen_andi_reg(addc, addc, 1);
2728 /* ??? This is only correct for 32-bit. */
2729 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2730 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2732 tcg_temp_free(addc);
2733 tcg_temp_free(zero);
2735 /* Write back the result register. */
2736 save_gpr(ctx, a->t, dest);
2738 /* Write back PSW[CB]. */
2739 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2740 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2742 /* Write back PSW[V] for the division step. */
2743 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2744 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2746 /* Install the new nullification. */
2749 if (a->cf >> 1 == 6) {
2750 /* ??? The lshift is supposed to contribute to overflow. */
2751 sv = do_add_sv(ctx, dest, add1, add2);
2753 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
2756 tcg_temp_free(add1);
2757 tcg_temp_free(add2);
2758 tcg_temp_free(dest);
2760 return nullify_end(ctx);
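/* DS is the primitive behind millicode division: after PSW[V] is
   primed, 32 consecutive DS instructions each perform one bit of a
   non-restoring divide, conditionally adding or subtracting GR[r2]
   from the shifted partial remainder -- hence the care above to keep
   PSW[CB] and PSW[V] exact between steps.  */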
2763 static bool trans_addi(DisasContext *ctx, uint32_t insn)
2765 target_sreg im = low_sextract(insn, 0, 11);
2766 unsigned e1 = extract32(insn, 11, 1);
2767 unsigned cf = extract32(insn, 12, 4);
2768 unsigned rt = extract32(insn, 16, 5);
2769 unsigned r2 = extract32(insn, 21, 5);
2770 unsigned o1 = extract32(insn, 26, 1);
2771 TCGv_reg tcg_im, tcg_r2;
2777 tcg_im = load_const(ctx, im);
2778 tcg_r2 = load_gpr(ctx, r2);
2779 do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2781 return nullify_end(ctx);
2784 static bool trans_subi(DisasContext *ctx, uint32_t insn)
2786 target_sreg im = low_sextract(insn, 0, 11);
2787 unsigned e1 = extract32(insn, 11, 1);
2788 unsigned cf = extract32(insn, 12, 4);
2789 unsigned rt = extract32(insn, 16, 5);
2790 unsigned r2 = extract32(insn, 21, 5);
2791 TCGv_reg tcg_im, tcg_r2;
2797 tcg_im = load_const(ctx, im);
2798 tcg_r2 = load_gpr(ctx, r2);
2799 do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2801 return nullify_end(ctx);
2804 static bool trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2806 target_sreg im = low_sextract(insn, 0, 11);
2807 unsigned cf = extract32(insn, 12, 4);
2808 unsigned rt = extract32(insn, 16, 5);
2809 unsigned r2 = extract32(insn, 21, 5);
2810 TCGv_reg tcg_im, tcg_r2;
2816 tcg_im = load_const(ctx, im);
2817 tcg_r2 = load_gpr(ctx, r2);
2818 do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2820 return nullify_end(ctx);
2823 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
2825 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2826 a->disp, a->sp, a->m, a->size | MO_TE);
2829 static bool trans_st(DisasContext *ctx, arg_ldst *a)
2831 assert(a->x == 0 && a->scale == 0);
2832 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2835 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
2837 TCGMemOp mop = MO_TE | MO_ALIGN_16 | a->size;
2838 TCGv_reg zero, dest, ofs;
2844 /* Base register modification. Make sure if RT == RB,
2845 we see the result of the load. */
2846 dest = get_temp(ctx);
2848 dest = dest_gpr(ctx, a->t);
2851 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2852 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
2853 zero = tcg_const_reg(0);
2854 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2856 save_gpr(ctx, a->b, ofs);
2858 save_gpr(ctx, a->t, dest);
2860 return nullify_end(ctx);
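/* LDCW is modeled as an atomic exchange with zero: the old word is
   returned and the memory cleared, and MO_ALIGN_16 enforces the
   architected 16-byte alignment of the operand.  A spinlock acquire of
   the form "ldcw 0(addr),t" therefore succeeds when t comes back
   non-zero, the lock having been released by storing a non-zero word.  */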
2863 static bool trans_stby(DisasContext *ctx, arg_stby *a)
2870 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
2871 ctx->mmu_idx == MMU_PHYS_IDX);
2872 val = load_gpr(ctx, a->r);
2874 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2875 gen_helper_stby_e_parallel(cpu_env, addr, val);
2877 gen_helper_stby_e(cpu_env, addr, val);
2880 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2881 gen_helper_stby_b_parallel(cpu_env, addr, val);
2883 gen_helper_stby_b(cpu_env, addr, val);
2887 tcg_gen_andi_reg(ofs, ofs, ~3);
2888 save_gpr(ctx, a->b, ofs);
2891 return nullify_end(ctx);
2894 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
2896 int hold_mmu_idx = ctx->mmu_idx;
2898 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2899 ctx->mmu_idx = MMU_PHYS_IDX;
2901 ctx->mmu_idx = hold_mmu_idx;
2905 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
2907 int hold_mmu_idx = ctx->mmu_idx;
2909 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2910 ctx->mmu_idx = MMU_PHYS_IDX;
2912 ctx->mmu_idx = hold_mmu_idx;
2916 static bool trans_ldil(DisasContext *ctx, uint32_t insn)
2918 unsigned rt = extract32(insn, 21, 5);
2919 target_sreg i = assemble_21(insn);
2920 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
2922 tcg_gen_movi_reg(tcg_rt, i);
2923 save_gpr(ctx, rt, tcg_rt);
2924 cond_free(&ctx->null_cond);
2928 static bool trans_addil(DisasContext *ctx, uint32_t insn)
2930 unsigned rt = extract32(insn, 21, 5);
2931 target_sreg i = assemble_21(insn);
2932 TCGv_reg tcg_rt = load_gpr(ctx, rt);
2933 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
2935 tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
2936 save_gpr(ctx, 1, tcg_r1);
2937 cond_free(&ctx->null_cond);
2941 static bool trans_ldo(DisasContext *ctx, uint32_t insn)
2943 unsigned rb = extract32(insn, 21, 5);
2944 unsigned rt = extract32(insn, 16, 5);
2945 target_sreg i = assemble_16(insn);
2946 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
2948 /* Special case rb == 0, for the LDI pseudo-op.
2949 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2951 tcg_gen_movi_reg(tcg_rt, i);
2953 tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
2955 save_gpr(ctx, rt, tcg_rt);
2956 cond_free(&ctx->null_cond);
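/* E.g. the assembler's "ldi 5,%r3" is encoded as "ldo 5(%r0),%r3" and
   takes the movi path above, since GR0 always reads as zero.  */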
2960 static bool trans_load(DisasContext *ctx, uint32_t insn,
2961 bool is_mod, TCGMemOp mop)
2963 unsigned rb = extract32(insn, 21, 5);
2964 unsigned rt = extract32(insn, 16, 5);
2965 unsigned sp = extract32(insn, 14, 2);
2966 target_sreg i = assemble_16(insn);
2968 do_load(ctx, rt, rb, 0, 0, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
2972 static bool trans_load_w(DisasContext *ctx, uint32_t insn)
2974 unsigned rb = extract32(insn, 21, 5);
2975 unsigned rt = extract32(insn, 16, 5);
2976 unsigned sp = extract32(insn, 14, 2);
2977 target_sreg i = assemble_16a(insn);
2978 unsigned ext2 = extract32(insn, 1, 2);
2983 /* FLDW without modification. */
2984 do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
2987 /* LDW with modification. Note that the sign of I selects
2988 post-dec vs pre-inc. */
2989 do_load(ctx, rt, rb, 0, 0, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
2992 return gen_illegal(ctx);
2997 static bool trans_fload_mod(DisasContext *ctx, uint32_t insn)
2999 target_sreg i = assemble_16a(insn);
3000 unsigned t1 = extract32(insn, 1, 1);
3001 unsigned a = extract32(insn, 2, 1);
3002 unsigned sp = extract32(insn, 14, 2);
3003 unsigned t0 = extract32(insn, 16, 5);
3004 unsigned rb = extract32(insn, 21, 5);
3006 /* FLDW with modification. */
3007 do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3011 static bool trans_store(DisasContext *ctx, uint32_t insn,
3012 bool is_mod, TCGMemOp mop)
3014 unsigned rb = extract32(insn, 21, 5);
3015 unsigned rt = extract32(insn, 16, 5);
3016 unsigned sp = extract32(insn, 14, 2);
3017 target_sreg i = assemble_16(insn);
3019 do_store(ctx, rt, rb, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3023 static bool trans_store_w(DisasContext *ctx, uint32_t insn)
3025 unsigned rb = extract32(insn, 21, 5);
3026 unsigned rt = extract32(insn, 16, 5);
3027 unsigned sp = extract32(insn, 14, 2);
3028 target_sreg i = assemble_16a(insn);
3029 unsigned ext2 = extract32(insn, 1, 2);
3034 /* FSTW without modification. */
3035 do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3038 /* STW with modification. */
3039 do_store(ctx, rt, rb, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3042 return gen_illegal(ctx);
3047 static bool trans_fstore_mod(DisasContext *ctx, uint32_t insn)
3049 target_sreg i = assemble_16a(insn);
3050 unsigned t1 = extract32(insn, 1, 1);
3051 unsigned a = extract32(insn, 2, 1);
3052 unsigned sp = extract32(insn, 14, 2);
3053 unsigned t0 = extract32(insn, 16, 5);
3054 unsigned rb = extract32(insn, 21, 5);
3056 /* FSTW with modification. */
3057 do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3061 static bool trans_copr_w(DisasContext *ctx, uint32_t insn)
3063 unsigned t0 = extract32(insn, 0, 5);
3064 unsigned m = extract32(insn, 5, 1);
3065 unsigned t1 = extract32(insn, 6, 1);
3066 unsigned ext3 = extract32(insn, 7, 3);
3067 /* unsigned cc = extract32(insn, 10, 2); */
3068 unsigned i = extract32(insn, 12, 1);
3069 unsigned ua = extract32(insn, 13, 1);
3070 unsigned sp = extract32(insn, 14, 2);
3071 unsigned rx = extract32(insn, 16, 5);
3072 unsigned rb = extract32(insn, 21, 5);
3073 unsigned rt = t1 * 32 + t0;
3074 int modify = (m ? (ua ? -1 : 1) : 0);
3078 scale = (ua ? 2 : 0);
3082 disp = low_sextract(rx, 0, 5);
3085 modify = (m ? (ua ? -1 : 1) : 0);
3090 do_floadw(ctx, rt, rb, rx, scale, disp, sp, modify);
3093 do_fstorew(ctx, rt, rb, rx, scale, disp, sp, modify);
3096 return gen_illegal(ctx);
3101 static bool trans_copr_dw(DisasContext *ctx, uint32_t insn)
3103 unsigned rt = extract32(insn, 0, 5);
3104 unsigned m = extract32(insn, 5, 1);
3105 unsigned ext4 = extract32(insn, 6, 4);
3106 /* unsigned cc = extract32(insn, 10, 2); */
3107 unsigned i = extract32(insn, 12, 1);
3108 unsigned ua = extract32(insn, 13, 1);
3109 unsigned sp = extract32(insn, 14, 2);
3110 unsigned rx = extract32(insn, 16, 5);
3111 unsigned rb = extract32(insn, 21, 5);
3112 int modify = (m ? (ua ? -1 : 1) : 0);
3116 scale = (ua ? 3 : 0);
3120 disp = low_sextract(rx, 0, 5);
3123 modify = (m ? (ua ? -1 : 1) : 0);
3128 do_floadd(ctx, rt, rb, rx, scale, disp, sp, modify);
3131 do_fstored(ctx, rt, rb, rx, scale, disp, sp, modify);
3134 return gen_illegal(ctx);
3139 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3140 unsigned c, unsigned f, unsigned n, int disp)
3142 TCGv_reg dest, in2, sv;
3145 in2 = load_gpr(ctx, r);
3146 dest = get_temp(ctx);
3148 tcg_gen_sub_reg(dest, in1, in2);
3152 sv = do_sub_sv(ctx, dest, in1, in2);
3155 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3156 return do_cbranch(ctx, disp, n, &cond);
3159 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3162 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3165 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3168 return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3171 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3172 unsigned c, unsigned f, unsigned n, int disp)
3174 TCGv_reg dest, in2, sv, cb_msb;
3177 in2 = load_gpr(ctx, r);
3178 dest = dest_gpr(ctx, r);
3184 tcg_gen_add_reg(dest, in1, in2);
3187 cb_msb = get_temp(ctx);
3188 tcg_gen_movi_reg(cb_msb, 0);
3189 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3192 tcg_gen_add_reg(dest, in1, in2);
3193 sv = do_add_sv(ctx, dest, in1, in2);
3197 cond = do_cond(c * 2 + f, dest, cb_msb, sv);
3198 return do_cbranch(ctx, disp, n, &cond);
3201 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3204 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3207 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3210 return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
3213 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3215 TCGv_reg tmp, tcg_r;
3220 tmp = tcg_temp_new();
3221 tcg_r = load_gpr(ctx, a->r);
3222 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3224 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3226 return do_cbranch(ctx, a->disp, a->n, &cond);
3229 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3231 TCGv_reg tmp, tcg_r;
3236 tmp = tcg_temp_new();
3237 tcg_r = load_gpr(ctx, a->r);
3238 tcg_gen_shli_reg(tmp, tcg_r, a->p);
3240 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3242 return do_cbranch(ctx, a->disp, a->n, &cond);
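/* PA numbers bits big-endian, bit 0 being the MSB, so shifting left by
   the bit number (SAR above, a->p here) moves the selected bit into the
   host sign bit, where LT tests "bit set" and GE "bit clear"; e.g. with
   a->p = 31 the shift exposes PA bit 31, the LSB.  */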
3245 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3252 dest = dest_gpr(ctx, a->r2);
3254 tcg_gen_movi_reg(dest, 0);
3256 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
3259 cond = do_sed_cond(a->c, dest);
3260 return do_cbranch(ctx, a->disp, a->n, &cond);
3263 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3270 dest = dest_gpr(ctx, a->r);
3271 tcg_gen_movi_reg(dest, a->i);
3273 cond = do_sed_cond(a->c, dest);
3274 return do_cbranch(ctx, a->disp, a->n, &cond);
3277 static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
3285 dest = dest_gpr(ctx, a->t);
3287 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
3288 tcg_gen_shr_reg(dest, dest, cpu_sar);
3289 } else if (a->r1 == a->r2) {
3290 TCGv_i32 t32 = tcg_temp_new_i32();
3291 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
3292 tcg_gen_rotr_i32(t32, t32, cpu_sar);
3293 tcg_gen_extu_i32_reg(dest, t32);
3294 tcg_temp_free_i32(t32);
3296 TCGv_i64 t = tcg_temp_new_i64();
3297 TCGv_i64 s = tcg_temp_new_i64();
3299 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
3300 tcg_gen_extu_reg_i64(s, cpu_sar);
3301 tcg_gen_shr_i64(t, t, s);
3302 tcg_gen_trunc_i64_reg(dest, t);
3304 tcg_temp_free_i64(t);
3305 tcg_temp_free_i64(s);
3307 save_gpr(ctx, a->t, dest);
3309 /* Install the new nullification. */
3310 cond_free(&ctx->null_cond);
3312 ctx->null_cond = do_sed_cond(a->c, dest);
3314 return nullify_end(ctx);
3317 static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
3319 unsigned sa = 31 - a->cpos;
3326 dest = dest_gpr(ctx, a->t);
3327 t2 = load_gpr(ctx, a->r2);
3328 if (a->r1 == a->r2) {
3329 TCGv_i32 t32 = tcg_temp_new_i32();
3330 tcg_gen_trunc_reg_i32(t32, t2);
3331 tcg_gen_rotri_i32(t32, t32, sa);
3332 tcg_gen_extu_i32_reg(dest, t32);
3333 tcg_temp_free_i32(t32);
3334 } else if (a->r1 == 0) {
3335 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3337 TCGv_reg t0 = tcg_temp_new();
3338 tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3339 tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa);
3342 save_gpr(ctx, a->t, dest);
3344 /* Install the new nullification. */
3345 cond_free(&ctx->null_cond);
3347 ctx->null_cond = do_sed_cond(a->c, dest);
3349 return nullify_end(ctx);
3352 static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
3354 unsigned len = 32 - a->clen;
3355 TCGv_reg dest, src, tmp;
3361 dest = dest_gpr(ctx, a->t);
3362 src = load_gpr(ctx, a->r);
3363 tmp = tcg_temp_new();
3365 /* Recall that SAR is using big-endian bit numbering. */
3366 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3368 tcg_gen_sar_reg(dest, src, tmp);
3369 tcg_gen_sextract_reg(dest, dest, 0, len);
3371 tcg_gen_shr_reg(dest, src, tmp);
3372 tcg_gen_extract_reg(dest, dest, 0, len);
3375 save_gpr(ctx, a->t, dest);
3377 /* Install the new nullification. */
3378 cond_free(&ctx->null_cond);
3380 ctx->null_cond = do_sed_cond(a->c, dest);
3382 return nullify_end(ctx);
3385 static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
3387 unsigned len = 32 - a->clen;
3388 unsigned cpos = 31 - a->pos;
3395 dest = dest_gpr(ctx, a->t);
3396 src = load_gpr(ctx, a->r);
3398 tcg_gen_sextract_reg(dest, src, cpos, len);
3400 tcg_gen_extract_reg(dest, src, cpos, len);
3402 save_gpr(ctx, a->t, dest);
3404 /* Install the new nullification. */
3405 cond_free(&ctx->null_cond);
3407 ctx->null_cond = do_sed_cond(a->c, dest);
3409 return nullify_end(ctx);
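/* The conversions above undo the big-endian field encoding: pos names
   the field's rightmost bit in PA numbering, so cpos = 31 - pos is the
   little-endian start, and the instruction's clen field holds 32 - len.
   E.g. an 8-bit field with pos = 23 yields cpos = 8, extracting host
   bits [8..15].  */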
3412 static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
3414 unsigned len = 32 - a->clen;
3415 target_sreg mask0, mask1;
3421 if (a->cpos + len > 32) {
3425 dest = dest_gpr(ctx, a->t);
3426 mask0 = deposit64(0, a->cpos, len, a->i);
3427 mask1 = deposit64(-1, a->cpos, len, a->i);
3430 TCGv_reg src = load_gpr(ctx, a->t);
3432 tcg_gen_andi_reg(dest, src, mask1);
3435 tcg_gen_ori_reg(dest, src, mask0);
3437 tcg_gen_movi_reg(dest, mask0);
3439 save_gpr(ctx, a->t, dest);
3441 /* Install the new nullification. */
3442 cond_free(&ctx->null_cond);
3444 ctx->null_cond = do_sed_cond(a->c, dest);
3446 return nullify_end(ctx);
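/* Depositing an immediate needs at most two ops: mask1 places the
   immediate in a field of ones, mask0 in a field of zeros, and
   (src & mask1) | mask0 is the deposit.  E.g. i = 5, cpos = 8, len = 4
   gives mask0 = 0x00000500 and mask1 = 0xfffff5ff, so the field becomes
   exactly 5 whatever src held.  */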
3449 static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
3451 unsigned rs = a->nz ? a->t : 0;
3452 unsigned len = 32 - a->clen;
3458 if (a->cpos + len > 32) {
3462 dest = dest_gpr(ctx, a->t);
3463 val = load_gpr(ctx, a->r);
3465 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
3467 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
3469 save_gpr(ctx, a->t, dest);
3471 /* Install the new nullification. */
3472 cond_free(&ctx->null_cond);
3474 ctx->null_cond = do_sed_cond(a->c, dest);
3476 return nullify_end(ctx);
3479 static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3480 unsigned nz, unsigned clen, TCGv_reg val)
3482 unsigned rs = nz ? rt : 0;
3483 unsigned len = 32 - clen;
3484 TCGv_reg mask, tmp, shift, dest;
3485 unsigned msb = 1U << (len - 1);
3491 dest = dest_gpr(ctx, rt);
3492 shift = tcg_temp_new();
3493 tmp = tcg_temp_new();
3495 /* Convert big-endian bit numbering in SAR to left-shift. */
3496 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3498 mask = tcg_const_reg(msb + (msb - 1));
3499 tcg_gen_and_reg(tmp, val, mask);
3501 tcg_gen_shl_reg(mask, mask, shift);
3502 tcg_gen_shl_reg(tmp, tmp, shift);
3503 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3504 tcg_gen_or_reg(dest, dest, tmp);
3506 tcg_gen_shl_reg(dest, tmp, shift);
3508 tcg_temp_free(shift);
3509 tcg_temp_free(mask);
3511 save_gpr(ctx, rt, dest);
3513 /* Install the new nullification. */
3514 cond_free(&ctx->null_cond);
3516 ctx->null_cond = do_sed_cond(c, dest);
3518 return nullify_end(ctx);
3521 static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3523 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3526 static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3528 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
3531 static bool trans_be(DisasContext *ctx, arg_be *a)
3535 #ifdef CONFIG_USER_ONLY
3536 /* ??? It seems like there should be a good way of using
3537 "be disp(sr2, r0)", the canonical gateway entry mechanism
3538 to our advantage. But that appears to be inconvenient to
3539 manage alongside branch delay slots. Therefore we handle
3540 entry into the gateway page via absolute address. */
3541 /* Since we don't implement spaces, just branch. Do notice the special
3542 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3543 goto_tb to the TB containing the syscall. */
3545 return do_dbranch(ctx, a->disp, a->l, a->n);
3551 tmp = get_temp(ctx);
3552 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
3553 tmp = do_ibranch_priv(ctx, tmp);
3555 #ifdef CONFIG_USER_ONLY
3556 return do_ibranch(ctx, tmp, a->l, a->n);
3558 TCGv_i64 new_spc = tcg_temp_new_i64();
3560 load_spr(ctx, new_spc, a->sp);
3562 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3563 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3565 if (a->n && use_nullify_skip(ctx)) {
3566 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3567 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3568 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3569 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3571 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3572 if (ctx->iaoq_b == -1) {
3573 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3575 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3576 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3577 nullify_set(ctx, a->n);
3579 tcg_temp_free_i64(new_spc);
3580 tcg_gen_lookup_and_goto_ptr();
3581 ctx->base.is_jmp = DISAS_NORETURN;
3582 return nullify_end(ctx);
3586 static bool trans_bl(DisasContext *ctx, arg_bl *a)
3588 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3591 static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3593 target_ureg dest = iaoq_dest(ctx, a->disp);
3595 /* Make sure the caller hasn't done something weird with the queue.
3596 * ??? This is not quite the same as the PSW[B] bit, which would be
3597 * expensive to track. Real hardware will trap for
* b gateway
3599 * b gateway+4 (in delay slot of first branch)
3600 * However, checking for a non-sequential instruction queue *will*
3601 * diagnose the security hole
* b gateway
* b evil
3604 * in which instructions at evil would run with increased privs.
3606 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3607 return gen_illegal(ctx);
3610 #ifndef CONFIG_USER_ONLY
3611 if (ctx->tb_flags & PSW_C) {
3612 CPUHPPAState *env = ctx->cs->env_ptr;
3613 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3614 /* If we could not find a TLB entry, then we need to generate an
3615 ITLB miss exception so the kernel will provide it.
3616 The resulting TLB fill operation will invalidate this TB and
3617 we will re-translate, at which point we *will* be able to find
3618 the TLB entry and determine if this is in fact a gateway page. */
3620 gen_excp(ctx, EXCP_ITLB_MISS);
3623 /* No change for non-gateway pages or for priv decrease. */
3624 if (type >= 4 && type - 4 < ctx->privilege) {
3625 dest = deposit32(dest, 0, 2, type - 4);
3628 dest &= -4; /* priv = 0 */
3632 return do_dbranch(ctx, dest, a->l, a->n);
3635 static bool trans_blr(DisasContext *ctx, arg_blr *a)
3637 TCGv_reg tmp = get_temp(ctx);
3639 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3640 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3641 /* The computation here never changes privilege level. */
3642 return do_ibranch(ctx, tmp, a->l, a->n);
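/* I.e. the BLR target is IAOQ_Front + 8 + GR[x] * 8; the index is
   scaled by 8 so that each jump-table slot can hold a branch plus its
   delay slot.  */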
3645 static bool trans_bv(DisasContext *ctx, arg_bv *a)
3650 dest = load_gpr(ctx, a->b);
3652 dest = get_temp(ctx);
3653 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3654 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
3656 dest = do_ibranch_priv(ctx, dest);
3657 return do_ibranch(ctx, dest, 0, a->n);
3660 static bool trans_bve(DisasContext *ctx, arg_bve *a)
3664 #ifdef CONFIG_USER_ONLY
3665 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3666 return do_ibranch(ctx, dest, a->l, a->n);
3669 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3671 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3672 if (ctx->iaoq_b == -1) {
3673 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3675 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3676 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3678 copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3680 nullify_set(ctx, a->n);
3681 tcg_gen_lookup_and_goto_ptr();
3682 ctx->base.is_jmp = DISAS_NORETURN;
3683 return nullify_end(ctx);
3687 static bool trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3688 const DisasInsn *di)
3690 unsigned rt = extract32(insn, 0, 5);
3691 unsigned ra = extract32(insn, 21, 5);
3692 do_fop_wew(ctx, rt, ra, di->f.wew);
3696 static bool trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3697 const DisasInsn *di)
3699 unsigned rt = assemble_rt64(insn);
3700 unsigned ra = assemble_ra64(insn);
3701 do_fop_wew(ctx, rt, ra, di->f.wew);
3705 static bool trans_fop_ded(DisasContext *ctx, uint32_t insn,
3706 const DisasInsn *di)
3708 unsigned rt = extract32(insn, 0, 5);
3709 unsigned ra = extract32(insn, 21, 5);
3710 do_fop_ded(ctx, rt, ra, di->f.ded);
3714 static bool trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3715 const DisasInsn *di)
3717 unsigned rt = extract32(insn, 0, 5);
3718 unsigned ra = extract32(insn, 21, 5);
3719 do_fop_wed(ctx, rt, ra, di->f.wed);
3723 static bool trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3724 const DisasInsn *di)
3726 unsigned rt = assemble_rt64(insn);
3727 unsigned ra = extract32(insn, 21, 5);
3728 do_fop_wed(ctx, rt, ra, di->f.wed);
3732 static bool trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
3733 const DisasInsn *di)
3735 unsigned rt = extract32(insn, 0, 5);
3736 unsigned ra = extract32(insn, 21, 5);
3737 do_fop_dew(ctx, rt, ra, di->f.dew);
3741 static bool trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
3742 const DisasInsn *di)
3744 unsigned rt = extract32(insn, 0, 5);
3745 unsigned ra = assemble_ra64(insn);
3746 do_fop_dew(ctx, rt, ra, di->f.dew);
3750 static bool trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
3751 const DisasInsn *di)
3753 unsigned rt = extract32(insn, 0, 5);
3754 unsigned rb = extract32(insn, 16, 5);
3755 unsigned ra = extract32(insn, 21, 5);
3756 do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3760 static bool trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
3761 const DisasInsn *di)
3763 unsigned rt = assemble_rt64(insn);
3764 unsigned rb = assemble_rb64(insn);
3765 unsigned ra = assemble_ra64(insn);
3766 do_fop_weww(ctx, rt, ra, rb, di->f.weww);
3770 static bool trans_fop_dedd(DisasContext *ctx, uint32_t insn,
3771 const DisasInsn *di)
3773 unsigned rt = extract32(insn, 0, 5);
3774 unsigned rb = extract32(insn, 16, 5);
3775 unsigned ra = extract32(insn, 21, 5);
3776 do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
3780 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3782 tcg_gen_mov_i32(dst, src);
3785 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3787 tcg_gen_mov_i64(dst, src);
3790 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3792 tcg_gen_andi_i32(dst, src, INT32_MAX);
3795 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3797 tcg_gen_andi_i64(dst, src, INT64_MAX);
3800 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3802 tcg_gen_xori_i32(dst, src, INT32_MIN);
3805 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3807 tcg_gen_xori_i64(dst, src, INT64_MIN);
3810 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3812 tcg_gen_ori_i32(dst, src, INT32_MIN);
3815 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3817 tcg_gen_ori_i64(dst, src, INT64_MIN);
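/* These class-zero sign operations touch only the IEEE sign bit, so
   integer masking suffices and no FP status flags can be raised; the
   unused env argument merely keeps the signature uniform with the real
   helpers entered in the tables below.  */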
3820 static void do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
3821 unsigned y, unsigned c)
3823 TCGv_i32 ta, tb, tc, ty;
3827 ta = load_frw0_i32(ra);
3828 tb = load_frw0_i32(rb);
3829 ty = tcg_const_i32(y);
3830 tc = tcg_const_i32(c);
3832 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
3834 tcg_temp_free_i32(ta);
3835 tcg_temp_free_i32(tb);
3836 tcg_temp_free_i32(ty);
3837 tcg_temp_free_i32(tc);
3842 static bool trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
3843 const DisasInsn *di)
3845 unsigned c = extract32(insn, 0, 5);
3846 unsigned y = extract32(insn, 13, 3);
3847 unsigned rb = extract32(insn, 16, 5);
3848 unsigned ra = extract32(insn, 21, 5);
3849 do_fcmp_s(ctx, ra, rb, y, c);
3853 static bool trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
3854 const DisasInsn *di)
3856 unsigned c = extract32(insn, 0, 5);
3857 unsigned y = extract32(insn, 13, 3);
3858 unsigned rb = assemble_rb64(insn);
3859 unsigned ra = assemble_ra64(insn);
3860 do_fcmp_s(ctx, ra, rb, y, c);
3864 static bool trans_fcmp_d(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3866 unsigned c = extract32(insn, 0, 5);
3867 unsigned y = extract32(insn, 13, 3);
3868 unsigned rb = extract32(insn, 16, 5);
3869 unsigned ra = extract32(insn, 21, 5);
3877 ty = tcg_const_i32(y);
3878 tc = tcg_const_i32(c);
3880 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3882 tcg_temp_free_i64(ta);
3883 tcg_temp_free_i64(tb);
3884 tcg_temp_free_i32(ty);
3885 tcg_temp_free_i32(tc);
3887 return nullify_end(ctx);
3890 static bool trans_ftest_t(DisasContext *ctx, uint32_t insn,
3891 const DisasInsn *di)
3893 unsigned y = extract32(insn, 13, 3);
3894 unsigned cbit = (y ^ 1) - 1;
3900 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3901 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3902 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3905 return nullify_end(ctx);
3908 static bool trans_ftest_q(DisasContext *ctx, uint32_t insn,
3909 const DisasInsn *di)
3911 unsigned c = extract32(insn, 0, 5);
3919 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
3922 case 0: /* simple */
3923 tcg_gen_andi_reg(t, t, 0x4000000);
3924 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3948 return gen_illegal(ctx);
3951 TCGv_reg c = load_const(ctx, mask);
3952 tcg_gen_or_reg(t, t, c);
3953 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3955 tcg_gen_andi_reg(t, t, mask);
3956 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3959 return nullify_end(ctx);
3962 static bool trans_xmpyu(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3964 unsigned rt = extract32(insn, 0, 5);
3965 unsigned rb = assemble_rb64(insn);
3966 unsigned ra = assemble_ra64(insn);
3971 a = load_frw0_i64(ra);
3972 b = load_frw0_i64(rb);
3973 tcg_gen_mul_i64(a, a, b);
3975 tcg_temp_free_i64(a);
3976 tcg_temp_free_i64(b);
3978 return nullify_end(ctx);
3981 #define FOP_DED trans_fop_ded, .f.ded
3982 #define FOP_DEDD trans_fop_dedd, .f.dedd
3984 #define FOP_WEW trans_fop_wew_0c, .f.wew
3985 #define FOP_DEW trans_fop_dew_0c, .f.dew
3986 #define FOP_WED trans_fop_wed_0c, .f.wed
3987 #define FOP_WEWW trans_fop_weww_0c, .f.weww
3989 static const DisasInsn table_float_0c[] = {
3990 /* floating point class zero */
3991 { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
3992 { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
3993 { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
3994 { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
3995 { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
3996 { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
3998 { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
3999 { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
4000 { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
4001 { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
4002 { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
4003 { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
4005 /* floating point class three */
4006 { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
4007 { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
4008 { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
4009 { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
4011 { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
4012 { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
4013 { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
4014 { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
4016 /* floating point class one */
4018 { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
4019 { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
4021 { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
4022 { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
4023 { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
4024 { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
4026 { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
4027 { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
4028 { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
4029 { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
4030 /* float/int truncate */
4031 { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
4032 { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
4033 { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
4034 { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
4036 { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
4037 { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
4038 { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
4039 { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
4041 { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
4042 { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
4043 { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
4044 { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
4045 /* float/uint truncate */
4046 { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
4047 { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
4048 { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
4049 { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
4051 /* floating point class two */
4052 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
4053 { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
4054 { 0x30002420, 0xffffffe0, trans_ftest_q },
4055 { 0x30000420, 0xffff1fff, trans_ftest_t },
4057 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
4058 This is machine/revision == 0, which is reserved for simulator. */
4059 { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
4066 #define FOP_WEW trans_fop_wew_0e, .f.wew
4067 #define FOP_DEW trans_fop_dew_0e, .f.dew
4068 #define FOP_WED trans_fop_wed_0e, .f.wed
4069 #define FOP_WEWW trans_fop_weww_0e, .f.weww
4071 static const DisasInsn table_float_0e[] = {
4072 /* floating point class zero */
4073 { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
4074 { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
4075 { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
4076 { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
4077 { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
4078 { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
4080 { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
4081 { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
4082 { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
4083 { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
4084 { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
4085 { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
4087 /* floating point class three */
4088 { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
4089 { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
4090 { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
4091 { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
4093 { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
4094 { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
4095 { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
4096 { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
4098 { 0x38004700, 0xfc00ef60, trans_xmpyu },
4100 /* floating point class one */
4102 { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
4103 { 0x38002200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_d },
4105 { 0x38008200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_w_s },
4106 { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
4107 { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
4108 { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
4110 { 0x38010200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_w },
4111 { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
4112 { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
4113 { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
4114 /* float/int truncate */
4115 { 0x38018200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_w },
4116 { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
4117 { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
4118 { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
4120 { 0x38028200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_uw_s },
4121 { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
4122 { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
4123 { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
4125 { 0x38030200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_uw },
4126 { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
4127 { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
4128 { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
4129 /* float/uint truncate */
4130 { 0x38038200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_uw },
4131 { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
4132 { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
4133 { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
4135 /* floating point class two */
4136 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
4137 { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
4147 /* Convert the fmpyadd single-precision register encodings to standard. */
4148 static inline int fmpyadd_s_reg(unsigned r)
4150 return (r & 16) * 2 + 16 + (r & 15);
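/* E.g. fmpyadd_s_reg(1) == 17 and fmpyadd_s_reg(17) == 49: encodings
   0..15 name fr16..fr31 and encodings 16..31 their right halves
   (register number + 32), matching the rt64-style numbering used
   elsewhere for the 0e format.  */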
4153 static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4155 int tm = fmpyadd_s_reg(a->tm);
4156 int ra = fmpyadd_s_reg(a->ra);
4157 int ta = fmpyadd_s_reg(a->ta);
4158 int rm2 = fmpyadd_s_reg(a->rm2);
4159 int rm1 = fmpyadd_s_reg(a->rm1);
4163 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4164 do_fop_weww(ctx, ta, ta, ra,
4165 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4167 return nullify_end(ctx);
4170 static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4172 return do_fmpyadd_s(ctx, a, false);
4175 static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4177 return do_fmpyadd_s(ctx, a, true);
4180 static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4184 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4185 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4186 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4188 return nullify_end(ctx);
4191 static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4193 return do_fmpyadd_d(ctx, a, false);
4196 static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4198 return do_fmpyadd_d(ctx, a, true);
4201 static bool trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
4202 const DisasInsn *di)
4204 unsigned rt = assemble_rt64(insn);
4205 unsigned neg = extract32(insn, 5, 1);
4206 unsigned rm1 = assemble_ra64(insn);
4207 unsigned rm2 = assemble_rb64(insn);
4208 unsigned ra3 = assemble_rc64(insn);
4212 a = load_frw0_i32(rm1);
4213 b = load_frw0_i32(rm2);
4214 c = load_frw0_i32(ra3);
4217 gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
4219 gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
4222 tcg_temp_free_i32(b);
4223 tcg_temp_free_i32(c);
4224 save_frw_i32(rt, a);
4225 tcg_temp_free_i32(a);
4226 return nullify_end(ctx);
4229 static bool trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
4230 const DisasInsn *di)
4232 unsigned rt = extract32(insn, 0, 5);
4233 unsigned neg = extract32(insn, 5, 1);
4234 unsigned rm1 = extract32(insn, 21, 5);
4235 unsigned rm2 = extract32(insn, 16, 5);
4236 unsigned ra3 = assemble_rc64(insn);
4245 gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
4247 gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
4250 tcg_temp_free_i64(b);
4251 tcg_temp_free_i64(c);
4253 tcg_temp_free_i64(a);
4254 return nullify_end(ctx);
4257 static const DisasInsn table_fp_fused[] = {
4258 { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
4259 { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
4262 static void translate_table_int(DisasContext *ctx, uint32_t insn,
4263 const DisasInsn table[], size_t n)
4266 for (i = 0; i < n; ++i) {
4267 if ((insn & table[i].mask) == table[i].insn) {
4268 table[i].trans(ctx, insn, &table[i]);
4272 qemu_log_mask(LOG_UNIMP, "UNIMP insn %08x @ " TARGET_FMT_lx "\n",
4273 insn, ctx->base.pc_next);
4277 #define translate_table(ctx, insn, table) \
4278 translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
4280 static void translate_one(DisasContext *ctx, uint32_t insn)
4284 /* Transition to the auto-generated decoder. */
4285 if (decode(ctx, insn)) {
4289 opc = extract32(insn, 26, 6);
4292 trans_ldil(ctx, insn);
4295 trans_copr_w(ctx, insn);
4298 trans_addil(ctx, insn);
4301 trans_copr_dw(ctx, insn);
4304 translate_table(ctx, insn, table_float_0c);
4307 trans_ldo(ctx, insn);
4310 translate_table(ctx, insn, table_float_0e);
4314 trans_load(ctx, insn, false, MO_UB);
4317 trans_load(ctx, insn, false, MO_TEUW);
4320 trans_load(ctx, insn, false, MO_TEUL);
4323 trans_load(ctx, insn, true, MO_TEUL);
4326 trans_fload_mod(ctx, insn);
4329 trans_load_w(ctx, insn);
4332 trans_store(ctx, insn, false, MO_UB);
4335 trans_store(ctx, insn, false, MO_TEUW);
4338 trans_store(ctx, insn, false, MO_TEUL);
4341 trans_store(ctx, insn, true, MO_TEUL);
4344 trans_fstore_mod(ctx, insn);
4347 trans_store_w(ctx, insn);
4351 trans_cmpiclr(ctx, insn);
4354 trans_subi(ctx, insn);
4358 trans_addi(ctx, insn);
4361 translate_table(ctx, insn, table_fp_fused);
4364 case 0x04: /* spopn */
4365 case 0x05: /* diag */
4366 case 0x0F: /* product specific */
4369 case 0x07: /* unassigned */
4370 case 0x15: /* unassigned */
4371 case 0x1D: /* unassigned */
4372 case 0x37: /* unassigned */
4375 #ifndef CONFIG_USER_ONLY
4376 /* Unassigned, but used as system-halt. */
4377 if (insn == 0xfffdead0) {
4378 gen_hlt(ctx, 0); /* halt system */
4381 if (insn == 0xfffdead1) {
4382 gen_hlt(ctx, 1); /* reset system */
4393 static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4395 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4399 ctx->tb_flags = ctx->base.tb->flags;
4401 #ifdef CONFIG_USER_ONLY
4402 ctx->privilege = MMU_USER_IDX;
4403 ctx->mmu_idx = MMU_USER_IDX;
4404 ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
4405 ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
4407 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4408 ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
4410 /* Recover the IAOQ values from the GVA + PRIV. */
4411 uint64_t cs_base = ctx->base.tb->cs_base;
4412 uint64_t iasq_f = cs_base & ~0xffffffffull;
4413 int32_t diff = cs_base;
4415 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4416 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4419 ctx->iaoq_n_var = NULL;
4421 /* Bound the number of instructions by those left on the page. */
4422 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4423 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
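/* TARGET_PAGE_MASK is negative, so pc_first | TARGET_PAGE_MASK is the
   page offset minus the page size; negating gives the bytes remaining
   on the page and the /4 the instructions.  E.g. with 4 KiB pages and
   pc_first ending in 0xff0, bound = 16 / 4 = 4.  */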
4427 memset(ctx->tempr, 0, sizeof(ctx->tempr));
4428 memset(ctx->templ, 0, sizeof(ctx->templ));
4431 static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4433 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4435 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4436 ctx->null_cond = cond_make_f();
4437 ctx->psw_n_nonzero = false;
4438 if (ctx->tb_flags & PSW_N) {
4439 ctx->null_cond.c = TCG_COND_ALWAYS;
4440 ctx->psw_n_nonzero = true;
4442 ctx->null_lab = NULL;
4445 static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4447 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4449 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4452 static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
4453 const CPUBreakpoint *bp)
4455 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4457 gen_excp(ctx, EXCP_DEBUG);
4458 ctx->base.pc_next += 4;
4462 static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4464 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4465 CPUHPPAState *env = cs->env_ptr;
4469 /* Execute one insn. */
4470 #ifdef CONFIG_USER_ONLY
4471 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
4473 ret = ctx->base.is_jmp;
4474 assert(ret != DISAS_NEXT);
4478 /* Always fetch the insn, even if nullified, so that we check
4479 the page permissions for execute. */
4480 uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
4482 /* Set up the IA queue for the next insn.
4483 This will be overwritten by a branch. */
4484 if (ctx->iaoq_b == -1) {
4486 ctx->iaoq_n_var = get_temp(ctx);
4487 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4489 ctx->iaoq_n = ctx->iaoq_b + 4;
4490 ctx->iaoq_n_var = NULL;
4493 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4494 ctx->null_cond.c = TCG_COND_NEVER;
4498 translate_one(ctx, insn);
4499 ret = ctx->base.is_jmp;
4500 assert(ctx->null_lab == NULL);
4504 /* Free any temporaries allocated. */
4505 for (i = 0, n = ctx->ntempr; i < n; ++i) {
4506 tcg_temp_free(ctx->tempr[i]);
4507 ctx->tempr[i] = NULL;
4509 for (i = 0, n = ctx->ntempl; i < n; ++i) {
4510 tcg_temp_free_tl(ctx->templ[i]);
4511 ctx->templ[i] = NULL;
4516 /* Advance the insn queue. Note that this check also detects
4517 a priority change within the instruction queue. */
4518 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
4519 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4520 && use_goto_tb(ctx, ctx->iaoq_b)
4521 && (ctx->null_cond.c == TCG_COND_NEVER
4522 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4523 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4524 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4525 ctx->base.is_jmp = ret = DISAS_NORETURN;
4527 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4530 ctx->iaoq_f = ctx->iaoq_b;
4531 ctx->iaoq_b = ctx->iaoq_n;
4532 ctx->base.pc_next += 4;
4534 if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
4537 if (ctx->iaoq_f == -1) {
4538 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
4539 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4540 #ifndef CONFIG_USER_ONLY
4541 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4544 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
4545 } else if (ctx->iaoq_b == -1) {
4546 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4550 static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4552 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4553 DisasJumpType is_jmp = ctx->base.is_jmp;
4556 case DISAS_NORETURN:
4558 case DISAS_TOO_MANY:
4559 case DISAS_IAQ_N_STALE:
4560 case DISAS_IAQ_N_STALE_EXIT:
4561 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4562 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4565 case DISAS_IAQ_N_UPDATED:
4566 if (ctx->base.singlestep_enabled) {
4567 gen_excp_1(EXCP_DEBUG);
4568 } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
4569 tcg_gen_exit_tb(NULL, 0);
4571 tcg_gen_lookup_and_goto_ptr();
4575 g_assert_not_reached();
4579 static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
4581 target_ulong pc = dcbase->pc_first;
4583 #ifdef CONFIG_USER_ONLY
4586 qemu_log("IN:\n0x00000000: (null)\n");
4589 qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
4592 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
4595 qemu_log("IN:\n0x00000100: syscall\n");
4600 qemu_log("IN: %s\n", lookup_symbol(pc));
4601 log_target_disas(cs, pc, dcbase->tb->size);
4604 static const TranslatorOps hppa_tr_ops = {
4605 .init_disas_context = hppa_tr_init_disas_context,
4606 .tb_start = hppa_tr_tb_start,
4607 .insn_start = hppa_tr_insn_start,
4608 .breakpoint_check = hppa_tr_breakpoint_check,
4609 .translate_insn = hppa_tr_translate_insn,
4610 .tb_stop = hppa_tr_tb_stop,
4611 .disas_log = hppa_tr_disas_log,
4614 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4618 translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
4621 void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4624 env->iaoq_f = data[0];
4625 if (data[1] != (target_ureg)-1) {
4626 env->iaoq_b = data[1];
4628 /* Since we were executing the instruction at IAOQ_F, and took some
4629 sort of action that provoked the cpu_restore_state, we can infer
4630 that the instruction was not nullified. */
env->psw_n = 0;