/*
 * HPPA emulation cpu translation for qemu.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */
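/* For example, in the system-emulation configuration where
   TARGET_REGISTER_BITS == 32 but TARGET_LONG_BITS == 64, the
   tcg_gen_extu_reg_tl defined below expands to tcg_gen_extu_i32_i64,
   zero-extending a 32-bit register value into a 64-bit address.  */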

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#define tcg_temp_free_tl tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_temp_free_tl tcg_temp_free_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg tcg_const_i64
#define tcg_const_local_reg tcg_const_local_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg tcg_const_i32
#define tcg_const_local_reg tcg_const_local_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;

/* Include the auto-generated decoder.  */
#include "decode.inc.c"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT  DISAS_TARGET_2

typedef struct DisasInsn {
    uint32_t insn, mask;
    bool (*trans)(DisasContext *ctx, uint32_t insn,
                  const struct DisasInsn *f);
    union {
        void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
        void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
    } f;
} DisasInsn;

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };
#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true,
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);

    return r;
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    }
    return load_frw_i32(rt);
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
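/* Throughout this file, an instruction-address queue value of -1 means
   "not known until run time".  copy_iaoq_entry below picks between the
   translate-time constant IVAL and the run-time value VVAL on that
   basis.  */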
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}
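/* PA-RISC branch displacements are relative to the branch address
   plus 8, i.e. the instruction following the delay slot; hence the
   constant above.  */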

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp;

    nullify_over(ctx);
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
        || ctx->base.singlestep_enabled) {
        return false;
    }
    return true;
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}

/* PA has a habit of taking the LSB of a field and using that as the sign,
   with the rest of the field becoming the least significant bits.  */
static target_sreg low_sextract(uint32_t val, int pos, int len)
{
    target_ureg x = -(target_ureg)extract32(val, pos, 1);
    x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
    return x;
}
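/* A worked example: with pos = 0 and len = 3, val = 0b101 has a sign
   bit of 1, so x starts as all-ones; shifting left by 2 and OR-ing in
   bits 2..1 of val (0b10) yields ...11110, i.e. -2.  */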

static unsigned assemble_rt64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 6, 1);
    unsigned r0 = extract32(insn, 0, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_ra64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 7, 1);
    unsigned r0 = extract32(insn, 21, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rb64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 12, 1);
    unsigned r0 = extract32(insn, 16, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rc64(uint32_t insn)
{
    unsigned r2 = extract32(insn, 8, 1);
    unsigned r1 = extract32(insn, 13, 3);
    unsigned r0 = extract32(insn, 9, 2);
    return r2 * 32 + r1 * 4 + r0;
}

static inline unsigned assemble_sr3(uint32_t insn)
{
    unsigned s2 = extract32(insn, 13, 1);
    unsigned s0 = extract32(insn, 14, 2);
    return s2 * 4 + s0;
}

static target_sreg assemble_12(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}

static target_sreg assemble_16(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 16-bit number
       only with wide mode; otherwise a 14-bit number.  Since we don't
       implement wide mode, this is always the 14-bit number.  */
    return low_sextract(insn, 0, 14);
}

static target_sreg assemble_16a(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 14-bit shifted number
       only with wide mode; otherwise a 12-bit shifted number.  Since we
       don't implement wide mode, this is always the 12-bit number.  */
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 2, 11);
    return x << 2;
}

static target_sreg assemble_17(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}

static target_sreg assemble_21(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 1, 11);
    x = (x << 2) | extract32(insn, 14, 2);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 2) | extract32(insn, 12, 2);
    return x << 11;
}

static target_sreg assemble_22(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 10) | extract32(insn, 16, 10);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x << 2;
}

/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation.  The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce.  */
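/* A note on the encoding used below: CF is a 4-bit field in which bit 0
   negates the sense (tested as "cf & 1") and cf >> 1 selects the base
   relation.  E.g. cf == 2 tests "res == 0" while cf == 3 tests
   "res != 0".  */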

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for logicals, where the carry and overflow bits are not
   computed, and use of them is undefined.  */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf >> 1) {
    case 4: case 5: case 6:
        cf &= 1;
        break;
    }
    return do_cond(cf, res, res, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}
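/* For example, orig == 5 gives c = 1 and f = 1, i.e. the inverse of
   "<": the condition is satisfied when the result is >= 0.  */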

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
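        /* A worked example: for res = 0x12003456,
           res - 0x01010101 = 0x10FF3355; AND with ~res = 0xEDFFCBA9
           gives 0x00FF0301; masking with 0x80808080 leaves 0x00800000,
           nonzero precisely because byte 2 of res is zero.  */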
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
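/* The expression computed above is sv = (res ^ in1) & ~(in1 ^ in2):
   its sign bit is set exactly when the operands have equal signs but
   the result's sign differs.  E.g. 0x7fffffff + 1 yields
   res = 0x80000000 and sv = 0x80000001; do_cond then tests SV with
   TCG_COND_LT, i.e. sv < 0.  */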

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
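/* For subtraction, sv = (res ^ in1) & (in1 ^ in2): overflow requires
   the operands to have opposite signs.  E.g. 0x80000000 - 1 yields
   res = 0x7fffffff and sv = 0x80000001, again flagging overflow in
   the sign bit.  */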

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || c == 4 || c == 5) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
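        /* Full-adder identity: the carry-in to bit i of a + b is
           (a ^ b ^ sum) at bit i, so CB as computed above holds the
           per-bit carries of the addition just performed.  */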
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
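        /* The same identity applies here: for IN1 + ~IN2 + 1 the
           per-bit carry-in is in1 ^ ~in2 ^ dest, and since
           x ^ ~y == ~(x ^ y), that is eqv(in1, in2) ^ dest as
           computed above.  */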
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
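    /* The top two bits of the base register select SR4-SR7.  After the
       shift above they sit at bit positions 4..3 (the mask 030 is
       octal), i.e. already multiplied by sizeof(uint64_t) == 8, so the
       value can be used directly as a byte offset from sr[4] below.  */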
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
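/* As an illustration (a reading of the PA-RISC ",MB"/",MA" completers
   against the convention above, not a new code path): LDW,MB updates
   the base first and addresses through the updated base (pre-modify,
   modify < 0), while LDW,MA addresses through the unmodified base and
   updates it afterward (post-modify, modify > 0).  */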
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static void do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    nullify_end(ctx);
}

static void do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    nullify_end(ctx);
}

static void do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    nullify_end(ctx);
}

static void do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, TCGMemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    nullify_end(ctx);
}

static void do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    nullify_end(ctx);
}

static void do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    nullify_end(ctx);
}

static void do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    nullify_end(ctx);
}

static void do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd0(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    nullify_end(ctx);
}

static void do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    nullify_end(ctx);
}

static void do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    nullify_end(ctx);
}

static void do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    nullify_end(ctx);
}

static void do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static void do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static void do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        do_dbranch(ctx, dest, 0, is_n && disp >= 0);
        return;
    }
    if (c == TCG_COND_NEVER) {
        do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
        return;
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}

/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static void do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip executing N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        nullify_end(ctx);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *        IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *        IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;

    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
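/* A worked example for the default case: at privilege 1, a target of
   0x1000 is rewritten to (0x1000 & -4) | 1 = 0x1001; since 0x1001 is
   unsigned-greater-than 0x1000, the movcond keeps 0x1001 and the
   callee cannot gain privilege 0.  A target of 0x1003 is left alone
   (0x1001 is not above 0x1003); dropping to the less-privileged
   level 3 is allowed.  */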

#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif

static bool trans_nop(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    unsigned rt = a->t;
    TCGv_reg tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);
    tcg_temp_free(t1);
    tcg_temp_free_i64(t0);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_reg tmp;

    switch (ctl) {
    case CR_SAR:
#ifdef TARGET_HPPA64
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            gen_helper_read_interval_timer(tmp);
            gen_io_end();
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }
    tcg_temp_free_i64(t64);

    return nullify_end(ctx);
}

static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_reg reg = load_gpr(ctx, a->r);
    TCGv_reg tmp;

    if (ctl == CR_SAR) {
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);
        tcg_temp_free(tmp);

        cond_free(&ctx->null_cond);
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(cpu_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(cpu_env, reg);
        break;
    case CR_EIEM:
        gen_helper_write_eiem(cpu_env, reg);
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = get_temp(ctx);
        tcg_gen_ld_reg(tmp, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_reg(reg, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    default:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}

static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);
    tcg_temp_free(tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_ldsid(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg dest = dest_gpr(ctx, rt);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode.  */
    tcg_gen_movi_reg(dest, 0);
#else
    unsigned rb = extract32(insn, 21, 5);
    unsigned sp = extract32(insn, 14, 2);
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, space_select(ctx, sp, load_gpr(ctx, rb)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);

    tcg_temp_free_i64(t0);
#endif
    save_gpr(ctx, rt, dest);

    cond_free(&ctx->null_cond);
    return true;
}

#ifndef CONFIG_USER_ONLY
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static target_ureg extract_sm_imm(uint32_t insn)
{
    target_ureg val = extract32(insn, 16, 10);

    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

static bool trans_rsm(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    target_ureg sm = extract_sm_imm(insn);
    TCGv_reg tmp;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~sm);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, rt, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
}

static bool trans_ssm(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    target_ureg sm = extract_sm_imm(insn);
    TCGv_reg tmp;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, sm);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, rt, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
}
#endif /* !CONFIG_USER_ONLY */

static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = get_temp(ctx);
    gen_helper_swap_system_mask(tmp, cpu_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

#ifndef CONFIG_USER_ONLY
static bool trans_rfi(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned comp = extract32(insn, 5, 4);

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    if (comp == 5) {
        gen_helper_rfi_r(cpu_env);
    } else {
        gen_helper_rfi(cpu_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    if (ctx->base.singlestep_enabled) {
        gen_excp_1(EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
}

static bool gen_hlt(DisasContext *ctx, int reset)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    if (reset) {
        gen_helper_reset(cpu_env);
    } else {
        gen_helper_halt(cpu_env);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
}
#endif /* !CONFIG_USER_ONLY */

static const DisasInsn table_system[] = {
    { 0x00000400u, 0xffffffffu, trans_sync },  /* sync */
    { 0x00100400u, 0xffffffffu, trans_sync },  /* syncdma */
    { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
#ifndef CONFIG_USER_ONLY
    { 0x00000e60u, 0xfc00ffe0u, trans_rsm },
    { 0x00000d60u, 0xfc00ffe0u, trans_ssm },
    { 0x00000c00u, 0xfffffe1fu, trans_rfi },
#endif
};
2328 static bool trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
2329 const DisasInsn *di)
2331 unsigned rb = extract32(insn, 21, 5);
2332 unsigned rx = extract32(insn, 16, 5);
2333 TCGv_reg dest = dest_gpr(ctx, rb);
2334 TCGv_reg src1 = load_gpr(ctx, rb);
2335 TCGv_reg src2 = load_gpr(ctx, rx);
2337 /* The only thing we need to do is the base register modification. */
2338 tcg_gen_add_reg(dest, src1, src2);
2339 save_gpr(ctx, rb, dest);
2341 cond_free(&ctx->null_cond);
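/* PROBE/PROBEI: test whether a read or write access would be permitted
   at the given privilege level, depositing 1 (allowed) or 0 in rt
   instead of faulting. */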
2345 static bool trans_probe(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2347 unsigned rt = extract32(insn, 0, 5);
2348 unsigned sp = extract32(insn, 14, 2);
2349 unsigned rr = extract32(insn, 16, 5);
2350 unsigned rb = extract32(insn, 21, 5);
2351 unsigned is_write = extract32(insn, 6, 1);
2352 unsigned is_imm = extract32(insn, 13, 1);
2354 TCGv_i32 level, want;
2359 dest = dest_gpr(ctx, rt);
2360 form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
2363 level = tcg_const_i32(extract32(insn, 16, 2));
2365 level = tcg_temp_new_i32();
2366 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, rr));
2367 tcg_gen_andi_i32(level, level, 3);
2369 want = tcg_const_i32(is_write ? PAGE_WRITE : PAGE_READ);
2371 gen_helper_probe(dest, cpu_env, addr, level, want);
2373 tcg_temp_free_i32(want);
2374 tcg_temp_free_i32(level);
2376 save_gpr(ctx, rt, dest);
2377 return nullify_end(ctx);
2380 #ifndef CONFIG_USER_ONLY
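/* Insert a TLB entry: iitlba/iitlbp for the ITLB, idtlba/idtlbp for the
   DTLB.  Per the architecture, the 'a' forms supply the address portion
   of the entry and the 'p' forms the protection portion. */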
2381 static bool trans_ixtlbx(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2384 unsigned rr = extract32(insn, 16, 5);
2385 unsigned rb = extract32(insn, 21, 5);
2386 unsigned is_data = insn & 0x1000;
2387 unsigned is_addr = insn & 0x40;
2392 sp = extract32(insn, 14, 2);
2394 sp = ~assemble_sr3(insn);
2397 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2400 form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
2401 reg = load_gpr(ctx, rr);
2403 gen_helper_itlba(cpu_env, addr, reg);
2405 gen_helper_itlbp(cpu_env, addr, reg);
2408 /* Exit TB for ITLB change if mmu is enabled. This *should* not be
2409 the case, since the OS TLB fill handler runs with mmu disabled. */
2410 if (!is_data && (ctx->tb_flags & PSW_C)) {
2411 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2413 return nullify_end(ctx);
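/* Purge TLB entries: pitlb/pdtlb match on the formed address, while the
   'e' variants dispatch to gen_helper_ptlbe without an address and are
   modelled as a full TLB flush. */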
2416 static bool trans_pxtlbx(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2418 unsigned m = extract32(insn, 5, 1);
2420 unsigned rx = extract32(insn, 16, 5);
2421 unsigned rb = extract32(insn, 21, 5);
2422 unsigned is_data = insn & 0x1000;
2423 unsigned is_local = insn & 0x40;
2428 sp = extract32(insn, 14, 2);
2430 sp = ~assemble_sr3(insn);
2433 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2436 form_gva(ctx, &addr, &ofs, rb, rx, 0, 0, sp, m, false);
2438 save_gpr(ctx, rb, ofs);
2441 gen_helper_ptlbe(cpu_env);
2443 gen_helper_ptlb(cpu_env, addr);
2446 /* Exit TB for TLB change if mmu is enabled. */
2447 if (!is_data && (ctx->tb_flags & PSW_C)) {
2448 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2450 return nullify_end(ctx);
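/* LPA: load physical address.  The helper performs the translation and
   returns the physical address corresponding to the formed virtual
   address. */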
2453 static bool trans_lpa(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2455 unsigned rt = extract32(insn, 0, 5);
2456 unsigned m = extract32(insn, 5, 1);
2457 unsigned sp = extract32(insn, 14, 2);
2458 unsigned rx = extract32(insn, 16, 5);
2459 unsigned rb = extract32(insn, 21, 5);
2461 TCGv_reg ofs, paddr;
2463 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2466 form_gva(ctx, &vaddr, &ofs, rb, rx, 0, 0, sp, m, false);
2468 paddr = tcg_temp_new();
2469 gen_helper_lpa(paddr, cpu_env, vaddr);
2471 /* Note that physical address result overrides base modification. */
2473 save_gpr(ctx, rb, ofs);
2475 save_gpr(ctx, rt, paddr);
2476 tcg_temp_free(paddr);
2478 return nullify_end(ctx);
2481 static bool trans_lci(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2483 unsigned rt = extract32(insn, 0, 5);
2486 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2488 /* The Coherence Index is an implementation-defined function of the
2489 physical address. Two addresses with the same CI have a coherent
2490 view of the cache. Our implementation is to return 0 for all,
2491 since the entire address space is coherent. */
2492 ci = tcg_const_reg(0);
2493 save_gpr(ctx, rt, ci);
2496 cond_free(&ctx->null_cond);
2499 #endif /* !CONFIG_USER_ONLY */
2501 static const DisasInsn table_mem_mgmt[] = {
2502 { 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
2503 { 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
2504 { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
2505 { 0x040012c0u, 0xfc003fffu, trans_nop }, /* fdce */
2506 { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
2507 { 0x04000280u, 0xfc001fffu, trans_nop }, /* fic 0a */
2508 { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
2509 { 0x040013c0u, 0xfc003fffu, trans_nop }, /* fic 4f */
2510 { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
2511 { 0x040002c0u, 0xfc001fffu, trans_nop }, /* fice */
2512 { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
2513 { 0x04002700u, 0xfc003fffu, trans_nop }, /* pdc */
2514 { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
2515 { 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
2516 { 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
2517 #ifndef CONFIG_USER_ONLY
2518 { 0x04000000u, 0xfc001fffu, trans_ixtlbx }, /* iitlbp */
2519 { 0x04000040u, 0xfc001fffu, trans_ixtlbx }, /* iitlba */
2520 { 0x04001000u, 0xfc001fffu, trans_ixtlbx }, /* idtlbp */
2521 { 0x04001040u, 0xfc001fffu, trans_ixtlbx }, /* idtlba */
2522 { 0x04000200u, 0xfc001fdfu, trans_pxtlbx }, /* pitlb */
2523 { 0x04000240u, 0xfc001fdfu, trans_pxtlbx }, /* pitlbe */
2524 { 0x04001200u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlb */
2525 { 0x04001240u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlbe */
2526 { 0x04001340u, 0xfc003fc0u, trans_lpa },
2527 { 0x04001300u, 0xfc003fe0u, trans_lci },
2531 static bool trans_add(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2533 unsigned r2 = extract32(insn, 21, 5);
2534 unsigned r1 = extract32(insn, 16, 5);
2535 unsigned cf = extract32(insn, 12, 4);
2536 unsigned ext = extract32(insn, 8, 4);
2537 unsigned shift = extract32(insn, 6, 2);
2538 unsigned rt = extract32(insn, 0, 5);
2539 TCGv_reg tcg_r1, tcg_r2;
2543 bool is_tsv = false;
2546 case 0x6: /* ADD, SHLADD */
2548 case 0xa: /* ADD,L, SHLADD,L */
2551 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
2554 case 0x7: /* ADD,C */
2557 case 0xf: /* ADD,C,TSV */
2558 is_c = is_tsv = true;
2561 return gen_illegal(ctx);
2567 tcg_r1 = load_gpr(ctx, r1);
2568 tcg_r2 = load_gpr(ctx, r2);
2569 do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
2570 return nullify_end(ctx);
2573 static bool trans_sub(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2575 unsigned r2 = extract32(insn, 21, 5);
2576 unsigned r1 = extract32(insn, 16, 5);
2577 unsigned cf = extract32(insn, 12, 4);
2578 unsigned ext = extract32(insn, 6, 6);
2579 unsigned rt = extract32(insn, 0, 5);
2580 TCGv_reg tcg_r1, tcg_r2;
2583 bool is_tsv = false;
2586 case 0x10: /* SUB */
2588 case 0x30: /* SUB,TSV */
2591 case 0x14: /* SUB,B */
2594 case 0x34: /* SUB,B,TSV */
2595 is_b = is_tsv = true;
2597 case 0x13: /* SUB,TC */
2600 case 0x33: /* SUB,TSV,TC */
2601 is_tc = is_tsv = true;
2604 return gen_illegal(ctx);
2610 tcg_r1 = load_gpr(ctx, r1);
2611 tcg_r2 = load_gpr(ctx, r2);
2612 do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
2613 return nullify_end(ctx);
2616 static bool trans_log(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2618 unsigned r2 = extract32(insn, 21, 5);
2619 unsigned r1 = extract32(insn, 16, 5);
2620 unsigned cf = extract32(insn, 12, 4);
2621 unsigned rt = extract32(insn, 0, 5);
2622 TCGv_reg tcg_r1, tcg_r2;
2627 tcg_r1 = load_gpr(ctx, r1);
2628 tcg_r2 = load_gpr(ctx, r2);
2629 do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
2630 return nullify_end(ctx);
2633 /* OR r,0,t -> COPY (according to gas) */
2634 static bool trans_copy(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2636 unsigned r1 = extract32(insn, 16, 5);
2637 unsigned rt = extract32(insn, 0, 5);
2640 TCGv_reg dest = dest_gpr(ctx, rt);
2641 tcg_gen_movi_reg(dest, 0);
2642 save_gpr(ctx, rt, dest);
2644 save_gpr(ctx, rt, cpu_gr[r1]);
2646 cond_free(&ctx->null_cond);
2650 static bool trans_cmpclr(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2652 unsigned r2 = extract32(insn, 21, 5);
2653 unsigned r1 = extract32(insn, 16, 5);
2654 unsigned cf = extract32(insn, 12, 4);
2655 unsigned rt = extract32(insn, 0, 5);
2656 TCGv_reg tcg_r1, tcg_r2;
2661 tcg_r1 = load_gpr(ctx, r1);
2662 tcg_r2 = load_gpr(ctx, r2);
2663 do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
2664 return nullify_end(ctx);
2667 static bool trans_uxor(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2669 unsigned r2 = extract32(insn, 21, 5);
2670 unsigned r1 = extract32(insn, 16, 5);
2671 unsigned cf = extract32(insn, 12, 4);
2672 unsigned rt = extract32(insn, 0, 5);
2673 TCGv_reg tcg_r1, tcg_r2;
2678 tcg_r1 = load_gpr(ctx, r1);
2679 tcg_r2 = load_gpr(ctx, r2);
2680 do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_reg);
2681 return nullify_end(ctx);
2684 static bool trans_uaddcm(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2686 unsigned r2 = extract32(insn, 21, 5);
2687 unsigned r1 = extract32(insn, 16, 5);
2688 unsigned cf = extract32(insn, 12, 4);
2689 unsigned is_tc = extract32(insn, 6, 1);
2690 unsigned rt = extract32(insn, 0, 5);
2691 TCGv_reg tcg_r1, tcg_r2, tmp;
2696 tcg_r1 = load_gpr(ctx, r1);
2697 tcg_r2 = load_gpr(ctx, r2);
2698 tmp = get_temp(ctx);
2699 tcg_gen_not_reg(tmp, tcg_r2);
2700 do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_reg);
2701 return nullify_end(ctx);
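/* DCOR/IDCOR: decimal correct.  PSW[CB] carries one bit per 4-bit BCD
   digit; after the shift below, masking with 0x11111111 and multiplying
   by 6 places a 6 in each selected digit, so that dcor subtracts 6 from
   the digits that produced no carry while idcor adds 6 to those that
   did. */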
2704 static bool trans_dcor(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2706 unsigned r2 = extract32(insn, 21, 5);
2707 unsigned cf = extract32(insn, 12, 4);
2708 unsigned is_i = extract32(insn, 6, 1);
2709 unsigned rt = extract32(insn, 0, 5);
2714 tmp = get_temp(ctx);
2715 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2717 tcg_gen_not_reg(tmp, tmp);
2719 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2720 tcg_gen_muli_reg(tmp, tmp, 6);
2721 do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2722 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2724 return nullify_end(ctx);
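/* DS: division step, one iteration of a non-restoring divide.  Forms
   2*R1 + CB, then adds or subtracts R2 according to PSW[V], leaving the
   partial remainder in rt and the next add/subtract decision in
   PSW[V]. */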
2727 static bool trans_ds(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2729 unsigned r2 = extract32(insn, 21, 5);
2730 unsigned r1 = extract32(insn, 16, 5);
2731 unsigned cf = extract32(insn, 12, 4);
2732 unsigned rt = extract32(insn, 0, 5);
2733 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2737 in1 = load_gpr(ctx, r1);
2738 in2 = load_gpr(ctx, r2);
2740 add1 = tcg_temp_new();
2741 add2 = tcg_temp_new();
2742 addc = tcg_temp_new();
2743 dest = tcg_temp_new();
2744 zero = tcg_const_reg(0);
2746 /* Form R1 << 1 | PSW[CB]{8}. */
2747 tcg_gen_add_reg(add1, in1, in1);
2748 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2750 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2751 carry{8} requires that we subtract via + ~R2 + 1, as described in
2752 the manual. By extracting and masking V, we can produce the
2753 proper inputs to the addition without movcond. */
2754 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2755 tcg_gen_xor_reg(add2, in2, addc);
2756 tcg_gen_andi_reg(addc, addc, 1);
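    /* addc was sign-extended to 0 or -1 above, so the xor yields either
       R2 or ~R2; masking addc down to a single bit supplies the matching
       +0 or +1 that completes the two's-complement negation. */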
2757 /* ??? This is only correct for 32-bit. */
2758 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2759 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2761 tcg_temp_free(addc);
2762 tcg_temp_free(zero);
2764 /* Write back the result register. */
2765 save_gpr(ctx, rt, dest);
2767 /* Write back PSW[CB]. */
2768 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2769 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2771 /* Write back PSW[V] for the division step. */
2772 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2773 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2775 /* Install the new nullification. */
2779 /* ??? The lshift is supposed to contribute to overflow. */
2780 sv = do_add_sv(ctx, dest, add1, add2);
2782 ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2785 tcg_temp_free(add1);
2786 tcg_temp_free(add2);
2787 tcg_temp_free(dest);
2789 return nullify_end(ctx);
2792 #ifndef CONFIG_USER_ONLY
2793 /* These are QEMU extensions and are nops in the real architecture:
2795 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2796 * or %r31,%r31,%r31 -- death loop; offline cpu
2797 * currently implemented as idle.
2799 static bool trans_pause(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2803 /* No need to check for supervisor, as userland can only pause
2804 until the next timer interrupt. */
2807 /* Advance the instruction queue. */
2808 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2809 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2810 nullify_set(ctx, 0);
2812 /* Tell the qemu main loop to halt until this cpu has work. */
2813 tmp = tcg_const_i32(1);
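    /* cpu_env points at the env member inside HPPACPU, so the negative
       offset backs up to the containing CPUState to reach 'halted'. */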
2814 tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2815 offsetof(CPUState, halted));
2816 tcg_temp_free_i32(tmp);
2817 gen_excp_1(EXCP_HALTED);
2818 ctx->base.is_jmp = DISAS_NORETURN;
2820 return nullify_end(ctx);
2824 static const DisasInsn table_arith_log[] = {
2825 { 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
2826 { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
2827 #ifndef CONFIG_USER_ONLY
2828 { 0x094a024au, 0xffffffffu, trans_pause }, /* or r10,r10,r10 */
2829 { 0x0bff025fu, 0xffffffffu, trans_pause }, /* or r31,r31,r31 */
2831 { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_reg },
2832 { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_reg },
2833 { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_reg },
2834 { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_reg },
2835 { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2836 { 0x08000380u, 0xfc000fe0u, trans_uxor },
2837 { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2838 { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2839 { 0x08000440u, 0xfc000fe0u, trans_ds },
2840 { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2841 { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2842 { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2843 { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
2846 static bool trans_addi(DisasContext *ctx, uint32_t insn)
2848 target_sreg im = low_sextract(insn, 0, 11);
2849 unsigned e1 = extract32(insn, 11, 1);
2850 unsigned cf = extract32(insn, 12, 4);
2851 unsigned rt = extract32(insn, 16, 5);
2852 unsigned r2 = extract32(insn, 21, 5);
2853 unsigned o1 = extract32(insn, 26, 1);
2854 TCGv_reg tcg_im, tcg_r2;
2860 tcg_im = load_const(ctx, im);
2861 tcg_r2 = load_gpr(ctx, r2);
2862 do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2864 return nullify_end(ctx);
2867 static bool trans_subi(DisasContext *ctx, uint32_t insn)
2869 target_sreg im = low_sextract(insn, 0, 11);
2870 unsigned e1 = extract32(insn, 11, 1);
2871 unsigned cf = extract32(insn, 12, 4);
2872 unsigned rt = extract32(insn, 16, 5);
2873 unsigned r2 = extract32(insn, 21, 5);
2874 TCGv_reg tcg_im, tcg_r2;
2880 tcg_im = load_const(ctx, im);
2881 tcg_r2 = load_gpr(ctx, r2);
2882 do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2884 return nullify_end(ctx);
2887 static bool trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2889 target_sreg im = low_sextract(insn, 0, 11);
2890 unsigned cf = extract32(insn, 12, 4);
2891 unsigned rt = extract32(insn, 16, 5);
2892 unsigned r2 = extract32(insn, 21, 5);
2893 TCGv_reg tcg_im, tcg_r2;
2899 tcg_im = load_const(ctx, im);
2900 tcg_r2 = load_gpr(ctx, r2);
2901 do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2903 return nullify_end(ctx);
2906 static bool trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2907 const DisasInsn *di)
2909 unsigned rt = extract32(insn, 0, 5);
2910 unsigned m = extract32(insn, 5, 1);
2911 unsigned sz = extract32(insn, 6, 2);
2912 unsigned a = extract32(insn, 13, 1);
2913 unsigned sp = extract32(insn, 14, 2);
2914 int disp = low_sextract(insn, 16, 5);
2915 unsigned rb = extract32(insn, 21, 5);
2916 int modify = (m ? (a ? -1 : 1) : 0);
2917 TCGMemOp mop = MO_TE | sz;
2919 do_load(ctx, rt, rb, 0, 0, disp, sp, modify, mop);
2923 static bool trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2924 const DisasInsn *di)
2926 unsigned rt = extract32(insn, 0, 5);
2927 unsigned m = extract32(insn, 5, 1);
2928 unsigned sz = extract32(insn, 6, 2);
2929 unsigned u = extract32(insn, 13, 1);
2930 unsigned sp = extract32(insn, 14, 2);
2931 unsigned rx = extract32(insn, 16, 5);
2932 unsigned rb = extract32(insn, 21, 5);
2933 TCGMemOp mop = MO_TE | sz;
2935 do_load(ctx, rt, rb, rx, u ? sz : 0, 0, sp, m, mop);
2939 static bool trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2940 const DisasInsn *di)
2942 int disp = low_sextract(insn, 0, 5);
2943 unsigned m = extract32(insn, 5, 1);
2944 unsigned sz = extract32(insn, 6, 2);
2945 unsigned a = extract32(insn, 13, 1);
2946 unsigned sp = extract32(insn, 14, 2);
2947 unsigned rr = extract32(insn, 16, 5);
2948 unsigned rb = extract32(insn, 21, 5);
2949 int modify = (m ? (a ? -1 : 1) : 0);
2950 TCGMemOp mop = MO_TE | sz;
2952 do_store(ctx, rr, rb, disp, sp, modify, mop);
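/* LDCW: load and clear word, the PA-RISC atomic primitive, implemented
   as an atomic exchange of the memory word with zero.  MO_ALIGN_16
   enforces the architectural 16-byte alignment requirement. */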
2956 static bool trans_ldcw(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2958 unsigned rt = extract32(insn, 0, 5);
2959 unsigned m = extract32(insn, 5, 1);
2960 unsigned i = extract32(insn, 12, 1);
2961 unsigned au = extract32(insn, 13, 1);
2962 unsigned sp = extract32(insn, 14, 2);
2963 unsigned rx = extract32(insn, 16, 5);
2964 unsigned rb = extract32(insn, 21, 5);
2965 TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
2966 TCGv_reg zero, dest, ofs;
2968 int modify, disp = 0, scale = 0;
2973 modify = (m ? (au ? -1 : 1) : 0);
2974 disp = low_sextract(rx, 0, 5);
2979 scale = mop & MO_SIZE;
2983 /* Base register modification. Make sure if RT == RB,
2984 we see the result of the load. */
2985 dest = get_temp(ctx);
2987 dest = dest_gpr(ctx, rt);
2990 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
2991 ctx->mmu_idx == MMU_PHYS_IDX);
2992 zero = tcg_const_reg(0);
2993 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2995 save_gpr(ctx, rb, ofs);
2997 save_gpr(ctx, rt, dest);
2999 return nullify_end(ctx);
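/* STBY: store bytes, used for the unaligned ends of a block copy.  The
   'b' (beginning) and 'e' (ending) helpers each store only the bytes of
   rt that fall within the addressed word, with separate variants for
   parallel (multi-threaded) TBs. */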
3002 static bool trans_stby(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3004 target_sreg disp = low_sextract(insn, 0, 5);
3005 unsigned m = extract32(insn, 5, 1);
3006 unsigned a = extract32(insn, 13, 1);
3007 unsigned sp = extract32(insn, 14, 2);
3008 unsigned rt = extract32(insn, 16, 5);
3009 unsigned rb = extract32(insn, 21, 5);
3015 form_gva(ctx, &addr, &ofs, rb, 0, 0, disp, sp, m,
3016 ctx->mmu_idx == MMU_PHYS_IDX);
3017 val = load_gpr(ctx, rt);
3019 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3020 gen_helper_stby_e_parallel(cpu_env, addr, val);
3022 gen_helper_stby_e(cpu_env, addr, val);
3025 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3026 gen_helper_stby_b_parallel(cpu_env, addr, val);
3028 gen_helper_stby_b(cpu_env, addr, val);
3033 tcg_gen_andi_reg(ofs, ofs, ~3);
3034 save_gpr(ctx, rb, ofs);
3037 return nullify_end(ctx);
3040 #ifndef CONFIG_USER_ONLY
3041 static bool trans_ldwa_idx_i(DisasContext *ctx, uint32_t insn,
3042 const DisasInsn *di)
3044 int hold_mmu_idx = ctx->mmu_idx;
3046 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3048 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3049 format wrt the sub-opcode in bits 6:9. */
3050 ctx->mmu_idx = MMU_PHYS_IDX;
3051 trans_ld_idx_i(ctx, insn, di);
3052 ctx->mmu_idx = hold_mmu_idx;
3056 static bool trans_ldwa_idx_x(DisasContext *ctx, uint32_t insn,
3057 const DisasInsn *di)
3059 int hold_mmu_idx = ctx->mmu_idx;
3061 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3063 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3064 format wrt the sub-opcode in bits 6:9. */
3065 ctx->mmu_idx = MMU_PHYS_IDX;
3066 trans_ld_idx_x(ctx, insn, di);
3067 ctx->mmu_idx = hold_mmu_idx;
3071 static bool trans_stwa_idx_i(DisasContext *ctx, uint32_t insn,
3072 const DisasInsn *di)
3074 int hold_mmu_idx = ctx->mmu_idx;
3076 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3078 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3079 format wrt the sub-opcode in bits 6:9. */
3080 ctx->mmu_idx = MMU_PHYS_IDX;
3081 trans_st_idx_i(ctx, insn, di);
3082 ctx->mmu_idx = hold_mmu_idx;
3087 static const DisasInsn table_index_mem[] = {
3088 { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
3089 { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
3090 { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
3091 { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
3092 { 0x0c001300u, 0xfc0013c0, trans_stby },
3093 #ifndef CONFIG_USER_ONLY
3094 { 0x0c000180u, 0xfc00d3c0, trans_ldwa_idx_x }, /* LDWA, rx */
3095 { 0x0c001180u, 0xfc00d3c0, trans_ldwa_idx_i }, /* LDWA, im */
3096 { 0x0c001380u, 0xfc00d3c0, trans_stwa_idx_i }, /* STWA, im */
3100 static bool trans_ldil(DisasContext *ctx, uint32_t insn)
3102 unsigned rt = extract32(insn, 21, 5);
3103 target_sreg i = assemble_21(insn);
3104 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3106 tcg_gen_movi_reg(tcg_rt, i);
3107 save_gpr(ctx, rt, tcg_rt);
3108 cond_free(&ctx->null_cond);
3112 static bool trans_addil(DisasContext *ctx, uint32_t insn)
3114 unsigned rt = extract32(insn, 21, 5);
3115 target_sreg i = assemble_21(insn);
3116 TCGv_reg tcg_rt = load_gpr(ctx, rt);
3117 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3119 tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
3120 save_gpr(ctx, 1, tcg_r1);
3121 cond_free(&ctx->null_cond);
3125 static bool trans_ldo(DisasContext *ctx, uint32_t insn)
3127 unsigned rb = extract32(insn, 21, 5);
3128 unsigned rt = extract32(insn, 16, 5);
3129 target_sreg i = assemble_16(insn);
3130 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3132 /* Special case rb == 0, for the LDI pseudo-op.
3133 The COPY pseudo-op is handled for free within tcg_gen_addi_reg. */
3135 tcg_gen_movi_reg(tcg_rt, i);
3137 tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
3139 save_gpr(ctx, rt, tcg_rt);
3140 cond_free(&ctx->null_cond);
3144 static bool trans_load(DisasContext *ctx, uint32_t insn,
3145 bool is_mod, TCGMemOp mop)
3147 unsigned rb = extract32(insn, 21, 5);
3148 unsigned rt = extract32(insn, 16, 5);
3149 unsigned sp = extract32(insn, 14, 2);
3150 target_sreg i = assemble_16(insn);
3152 do_load(ctx, rt, rb, 0, 0, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3156 static bool trans_load_w(DisasContext *ctx, uint32_t insn)
3158 unsigned rb = extract32(insn, 21, 5);
3159 unsigned rt = extract32(insn, 16, 5);
3160 unsigned sp = extract32(insn, 14, 2);
3161 target_sreg i = assemble_16a(insn);
3162 unsigned ext2 = extract32(insn, 1, 2);
3167 /* FLDW without modification. */
3168 do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3171 /* LDW with modification. Note that the sign of I selects
3172 post-dec vs pre-inc. */
3173 do_load(ctx, rt, rb, 0, 0, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3176 return gen_illegal(ctx);
3181 static bool trans_fload_mod(DisasContext *ctx, uint32_t insn)
3183 target_sreg i = assemble_16a(insn);
3184 unsigned t1 = extract32(insn, 1, 1);
3185 unsigned a = extract32(insn, 2, 1);
3186 unsigned sp = extract32(insn, 14, 2);
3187 unsigned t0 = extract32(insn, 16, 5);
3188 unsigned rb = extract32(insn, 21, 5);
3190 /* FLDW with modification. */
3191 do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3195 static bool trans_store(DisasContext *ctx, uint32_t insn,
3196 bool is_mod, TCGMemOp mop)
3198 unsigned rb = extract32(insn, 21, 5);
3199 unsigned rt = extract32(insn, 16, 5);
3200 unsigned sp = extract32(insn, 14, 2);
3201 target_sreg i = assemble_16(insn);
3203 do_store(ctx, rt, rb, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3207 static bool trans_store_w(DisasContext *ctx, uint32_t insn)
3209 unsigned rb = extract32(insn, 21, 5);
3210 unsigned rt = extract32(insn, 16, 5);
3211 unsigned sp = extract32(insn, 14, 2);
3212 target_sreg i = assemble_16a(insn);
3213 unsigned ext2 = extract32(insn, 1, 2);
3218 /* FSTW without modification. */
3219 do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3222 /* STW with modification. */
3223 do_store(ctx, rt, rb, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3226 return gen_illegal(ctx);
3231 static bool trans_fstore_mod(DisasContext *ctx, uint32_t insn)
3233 target_sreg i = assemble_16a(insn);
3234 unsigned t1 = extract32(insn, 1, 1);
3235 unsigned a = extract32(insn, 2, 1);
3236 unsigned sp = extract32(insn, 14, 2);
3237 unsigned t0 = extract32(insn, 16, 5);
3238 unsigned rb = extract32(insn, 21, 5);
3240 /* FSTW with modification. */
3241 do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3245 static bool trans_copr_w(DisasContext *ctx, uint32_t insn)
3247 unsigned t0 = extract32(insn, 0, 5);
3248 unsigned m = extract32(insn, 5, 1);
3249 unsigned t1 = extract32(insn, 6, 1);
3250 unsigned ext3 = extract32(insn, 7, 3);
3251 /* unsigned cc = extract32(insn, 10, 2); */
3252 unsigned i = extract32(insn, 12, 1);
3253 unsigned ua = extract32(insn, 13, 1);
3254 unsigned sp = extract32(insn, 14, 2);
3255 unsigned rx = extract32(insn, 16, 5);
3256 unsigned rb = extract32(insn, 21, 5);
3257 unsigned rt = t1 * 32 + t0;
3258 int modify = (m ? (ua ? -1 : 1) : 0);
3262 scale = (ua ? 2 : 0);
3266 disp = low_sextract(rx, 0, 5);
3269 modify = (m ? (ua ? -1 : 1) : 0);
3274 do_floadw(ctx, rt, rb, rx, scale, disp, sp, modify);
3277 do_fstorew(ctx, rt, rb, rx, scale, disp, sp, modify);
3280 return gen_illegal(ctx);
3285 static bool trans_copr_dw(DisasContext *ctx, uint32_t insn)
3287 unsigned rt = extract32(insn, 0, 5);
3288 unsigned m = extract32(insn, 5, 1);
3289 unsigned ext4 = extract32(insn, 6, 4);
3290 /* unsigned cc = extract32(insn, 10, 2); */
3291 unsigned i = extract32(insn, 12, 1);
3292 unsigned ua = extract32(insn, 13, 1);
3293 unsigned sp = extract32(insn, 14, 2);
3294 unsigned rx = extract32(insn, 16, 5);
3295 unsigned rb = extract32(insn, 21, 5);
3296 int modify = (m ? (ua ? -1 : 1) : 0);
3300 scale = (ua ? 3 : 0);
3304 disp = low_sextract(rx, 0, 5);
3307 modify = (m ? (ua ? -1 : 1) : 0);
3312 do_floadd(ctx, rt, rb, rx, scale, disp, sp, modify);
3315 do_fstored(ctx, rt, rb, rx, scale, disp, sp, modify);
3318 return gen_illegal(ctx);
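/* CMPB: compare and branch.  The comparison is a subtract whose
   condition (with a signed-overflow input where needed) feeds the
   conditional branch; is_true selects the sense of the test and is_imm
   selects an immediate first operand. */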
3323 static bool trans_cmpb(DisasContext *ctx, uint32_t insn,
3324 bool is_true, bool is_imm, bool is_dw)
3326 target_sreg disp = assemble_12(insn) * 4;
3327 unsigned n = extract32(insn, 1, 1);
3328 unsigned c = extract32(insn, 13, 3);
3329 unsigned r = extract32(insn, 21, 5);
3330 unsigned cf = c * 2 + !is_true;
3331 TCGv_reg dest, in1, in2, sv;
3337 in1 = load_const(ctx, low_sextract(insn, 16, 5));
3339 in1 = load_gpr(ctx, extract32(insn, 16, 5));
3341 in2 = load_gpr(ctx, r);
3342 dest = get_temp(ctx);
3344 tcg_gen_sub_reg(dest, in1, in2);
3348 sv = do_sub_sv(ctx, dest, in1, in2);
3351 cond = do_sub_cond(cf, dest, in1, in2, sv);
3352 do_cbranch(ctx, disp, n, &cond);
3356 static bool trans_addb(DisasContext *ctx, uint32_t insn,
3357 bool is_true, bool is_imm)
3359 target_sreg disp = assemble_12(insn) * 4;
3360 unsigned n = extract32(insn, 1, 1);
3361 unsigned c = extract32(insn, 13, 3);
3362 unsigned r = extract32(insn, 21, 5);
3363 unsigned cf = c * 2 + !is_true;
3364 TCGv_reg dest, in1, in2, sv, cb_msb;
3370 in1 = load_const(ctx, low_sextract(insn, 16, 5));
3372 in1 = load_gpr(ctx, extract32(insn, 16, 5));
3374 in2 = load_gpr(ctx, r);
3375 dest = dest_gpr(ctx, r);
3381 tcg_gen_add_reg(dest, in1, in2);
3384 cb_msb = get_temp(ctx);
3385 tcg_gen_movi_reg(cb_msb, 0);
3386 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3389 tcg_gen_add_reg(dest, in1, in2);
3390 sv = do_add_sv(ctx, dest, in1, in2);
3394 cond = do_cond(cf, dest, cb_msb, sv);
3395 do_cbranch(ctx, disp, n, &cond);
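/* BB: branch on bit.  The selected bit (fixed position p, or variable
   via %sar) is shifted up to the sign position, so a signed compare
   against zero tests it: GE branches on a clear bit, LT on a set bit. */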
3399 static bool trans_bb(DisasContext *ctx, uint32_t insn)
3401 target_sreg disp = assemble_12(insn) * 4;
3402 unsigned n = extract32(insn, 1, 1);
3403 unsigned c = extract32(insn, 15, 1);
3404 unsigned r = extract32(insn, 16, 5);
3405 unsigned p = extract32(insn, 21, 5);
3406 unsigned i = extract32(insn, 26, 1);
3407 TCGv_reg tmp, tcg_r;
3412 tmp = tcg_temp_new();
3413 tcg_r = load_gpr(ctx, r);
3415 tcg_gen_shli_reg(tmp, tcg_r, p);
3417 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3420 cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
3422 do_cbranch(ctx, disp, n, &cond);
3426 static bool trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
3428 target_sreg disp = assemble_12(insn) * 4;
3429 unsigned n = extract32(insn, 1, 1);
3430 unsigned c = extract32(insn, 13, 3);
3431 unsigned t = extract32(insn, 16, 5);
3432 unsigned r = extract32(insn, 21, 5);
3438 dest = dest_gpr(ctx, r);
3440 tcg_gen_movi_reg(dest, low_sextract(t, 0, 5));
3441 } else if (t == 0) {
3442 tcg_gen_movi_reg(dest, 0);
3444 tcg_gen_mov_reg(dest, cpu_gr[t]);
3447 cond = do_sed_cond(c, dest);
3448 do_cbranch(ctx, disp, n, &cond);
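/* SHRPW, variable count: with r1 == 0 this reduces to a plain 32-bit
   right shift; with r1 == r2 it degenerates to a rotate; otherwise the
   two registers are concatenated to 64 bits and shifted as a pair. */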
3452 static bool trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
3453 const DisasInsn *di)
3455 unsigned rt = extract32(insn, 0, 5);
3456 unsigned c = extract32(insn, 13, 3);
3457 unsigned r1 = extract32(insn, 16, 5);
3458 unsigned r2 = extract32(insn, 21, 5);
3465 dest = dest_gpr(ctx, rt);
3467 tcg_gen_ext32u_reg(dest, load_gpr(ctx, r2));
3468 tcg_gen_shr_reg(dest, dest, cpu_sar);
3469 } else if (r1 == r2) {
3470 TCGv_i32 t32 = tcg_temp_new_i32();
3471 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, r2));
3472 tcg_gen_rotr_i32(t32, t32, cpu_sar);
3473 tcg_gen_extu_i32_reg(dest, t32);
3474 tcg_temp_free_i32(t32);
3476 TCGv_i64 t = tcg_temp_new_i64();
3477 TCGv_i64 s = tcg_temp_new_i64();
3479 tcg_gen_concat_reg_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
3480 tcg_gen_extu_reg_i64(s, cpu_sar);
3481 tcg_gen_shr_i64(t, t, s);
3482 tcg_gen_trunc_i64_reg(dest, t);
3484 tcg_temp_free_i64(t);
3485 tcg_temp_free_i64(s);
3487 save_gpr(ctx, rt, dest);
3489 /* Install the new nullification. */
3490 cond_free(&ctx->null_cond);
3492 ctx->null_cond = do_sed_cond(c, dest);
3494 return nullify_end(ctx);
3497 static bool trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
3498 const DisasInsn *di)
3500 unsigned rt = extract32(insn, 0, 5);
3501 unsigned cpos = extract32(insn, 5, 5);
3502 unsigned c = extract32(insn, 13, 3);
3503 unsigned r1 = extract32(insn, 16, 5);
3504 unsigned r2 = extract32(insn, 21, 5);
3505 unsigned sa = 31 - cpos;
3512 dest = dest_gpr(ctx, rt);
3513 t2 = load_gpr(ctx, r2);
3515 TCGv_i32 t32 = tcg_temp_new_i32();
3516 tcg_gen_trunc_reg_i32(t32, t2);
3517 tcg_gen_rotri_i32(t32, t32, sa);
3518 tcg_gen_extu_i32_reg(dest, t32);
3519 tcg_temp_free_i32(t32);
3520 } else if (r1 == 0) {
3521 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3523 TCGv_reg t0 = tcg_temp_new();
3524 tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3525 tcg_gen_deposit_reg(dest, t0, cpu_gr[r1], 32 - sa, sa);
3528 save_gpr(ctx, rt, dest);
3530 /* Install the new nullification. */
3531 cond_free(&ctx->null_cond);
3533 ctx->null_cond = do_sed_cond(c, dest);
3535 return nullify_end(ctx);
3538 static bool trans_extrw_sar(DisasContext *ctx, uint32_t insn,
3539 const DisasInsn *di)
3541 unsigned clen = extract32(insn, 0, 5);
3542 unsigned is_se = extract32(insn, 10, 1);
3543 unsigned c = extract32(insn, 13, 3);
3544 unsigned rt = extract32(insn, 16, 5);
3545 unsigned rr = extract32(insn, 21, 5);
3546 unsigned len = 32 - clen;
3547 TCGv_reg dest, src, tmp;
3553 dest = dest_gpr(ctx, rt);
3554 src = load_gpr(ctx, rr);
3555 tmp = tcg_temp_new();
3557 /* Recall that SAR uses big-endian bit numbering. */
3558 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3560 tcg_gen_sar_reg(dest, src, tmp);
3561 tcg_gen_sextract_reg(dest, dest, 0, len);
3563 tcg_gen_shr_reg(dest, src, tmp);
3564 tcg_gen_extract_reg(dest, dest, 0, len);
3567 save_gpr(ctx, rt, dest);
3569 /* Install the new nullification. */
3570 cond_free(&ctx->null_cond);
3572 ctx->null_cond = do_sed_cond(c, dest);
3574 return nullify_end(ctx);
3577 static bool trans_extrw_imm(DisasContext *ctx, uint32_t insn,
3578 const DisasInsn *di)
3580 unsigned clen = extract32(insn, 0, 5);
3581 unsigned pos = extract32(insn, 5, 5);
3582 unsigned is_se = extract32(insn, 10, 1);
3583 unsigned c = extract32(insn, 13, 3);
3584 unsigned rt = extract32(insn, 16, 5);
3585 unsigned rr = extract32(insn, 21, 5);
3586 unsigned len = 32 - clen;
3587 unsigned cpos = 31 - pos;
3594 dest = dest_gpr(ctx, rt);
3595 src = load_gpr(ctx, rr);
3597 tcg_gen_sextract_reg(dest, src, cpos, len);
3599 tcg_gen_extract_reg(dest, src, cpos, len);
3601 save_gpr(ctx, rt, dest);
3603 /* Install the new nullification. */
3604 cond_free(&ctx->null_cond);
3606 ctx->null_cond = do_sed_cond(c, dest);
3608 return nullify_end(ctx);
3611 static const DisasInsn table_sh_ex[] = {
3612 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
3613 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
3614 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
3615 { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
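/* DEPWI: deposit a 5-bit signed immediate into rt.  Both masks are
   precomputed at translation time, so the runtime work is at most one
   and plus one or. */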
3618 static bool trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
3619 const DisasInsn *di)
3621 unsigned clen = extract32(insn, 0, 5);
3622 unsigned cpos = extract32(insn, 5, 5);
3623 unsigned nz = extract32(insn, 10, 1);
3624 unsigned c = extract32(insn, 13, 3);
3625 target_sreg val = low_sextract(insn, 16, 5);
3626 unsigned rt = extract32(insn, 21, 5);
3627 unsigned len = 32 - clen;
3628 target_sreg mask0, mask1;
3634 if (cpos + len > 32) {
3638 dest = dest_gpr(ctx, rt);
3639 mask0 = deposit64(0, cpos, len, val);
3640 mask1 = deposit64(-1, cpos, len, val);
3643 TCGv_reg src = load_gpr(ctx, rt);
3645 tcg_gen_andi_reg(dest, src, mask1);
3648 tcg_gen_ori_reg(dest, src, mask0);
3650 tcg_gen_movi_reg(dest, mask0);
3652 save_gpr(ctx, rt, dest);
3654 /* Install the new nullification. */
3655 cond_free(&ctx->null_cond);
3657 ctx->null_cond = do_sed_cond(c, dest);
3659 return nullify_end(ctx);
3662 static bool trans_depw_imm(DisasContext *ctx, uint32_t insn,
3663 const DisasInsn *di)
3665 unsigned clen = extract32(insn, 0, 5);
3666 unsigned cpos = extract32(insn, 5, 5);
3667 unsigned nz = extract32(insn, 10, 1);
3668 unsigned c = extract32(insn, 13, 3);
3669 unsigned rr = extract32(insn, 16, 5);
3670 unsigned rt = extract32(insn, 21, 5);
3671 unsigned rs = nz ? rt : 0;
3672 unsigned len = 32 - clen;
3678 if (cpos + len > 32) {
3682 dest = dest_gpr(ctx, rt);
3683 val = load_gpr(ctx, rr);
3685 tcg_gen_deposit_z_reg(dest, val, cpos, len);
3687 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, cpos, len);
3689 save_gpr(ctx, rt, dest);
3691 /* Install the new nullification. */
3692 cond_free(&ctx->null_cond);
3694 ctx->null_cond = do_sed_cond(c, dest);
3696 return nullify_end(ctx);
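/* DEPW with a variable position from %sar: the deposit is synthesized
   from explicit shift/mask TCG ops, since the target bit position is
   only known at run time. */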
3699 static bool trans_depw_sar(DisasContext *ctx, uint32_t insn,
3700 const DisasInsn *di)
3702 unsigned clen = extract32(insn, 0, 5);
3703 unsigned nz = extract32(insn, 10, 1);
3704 unsigned i = extract32(insn, 12, 1);
3705 unsigned c = extract32(insn, 13, 3);
3706 unsigned rt = extract32(insn, 21, 5);
3707 unsigned rs = nz ? rt : 0;
3708 unsigned len = 32 - clen;
3709 TCGv_reg val, mask, tmp, shift, dest;
3710 unsigned msb = 1U << (len - 1);
3717 val = load_const(ctx, low_sextract(insn, 16, 5));
3719 val = load_gpr(ctx, extract32(insn, 16, 5));
3721 dest = dest_gpr(ctx, rt);
3722 shift = tcg_temp_new();
3723 tmp = tcg_temp_new();
3725 /* Convert big-endian bit numbering in SAR to left-shift. */
3726 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3728 mask = tcg_const_reg(msb + (msb - 1));
3729 tcg_gen_and_reg(tmp, val, mask);
3731 tcg_gen_shl_reg(mask, mask, shift);
3732 tcg_gen_shl_reg(tmp, tmp, shift);
3733 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3734 tcg_gen_or_reg(dest, dest, tmp);
3736 tcg_gen_shl_reg(dest, tmp, shift);
3738 tcg_temp_free(shift);
3739 tcg_temp_free(mask);
3741 save_gpr(ctx, rt, dest);
3743 /* Install the new nullification. */
3744 cond_free(&ctx->null_cond);
3746 ctx->null_cond = do_sed_cond(c, dest);
3748 return nullify_end(ctx);
3751 static const DisasInsn table_depw[] = {
3752 { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
3753 { 0xd4000800u, 0xfc001800u, trans_depw_imm },
3754 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
3757 static bool trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
3759 unsigned n = extract32(insn, 1, 1);
3760 unsigned b = extract32(insn, 21, 5);
3761 target_sreg disp = assemble_17(insn);
3764 #ifdef CONFIG_USER_ONLY
3765 /* ??? It seems like there should be a good way of using
3766 "be disp(sr2, r0)", the canonical gateway entry mechanism
3767 to our advantage. But that appears to be inconvenient to
3768 manage along side branch delay slots. Therefore we handle
3769 entry into the gateway page via absolute address. */
3770 /* Since we don't implement spaces, just branch. Do notice the special
3771 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3772 goto_tb to the TB containing the syscall. */
3774 do_dbranch(ctx, disp, is_l ? 31 : 0, n);
3778 int sp = assemble_sr3(insn);
3782 tmp = get_temp(ctx);
3783 tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
3784 tmp = do_ibranch_priv(ctx, tmp);
3786 #ifdef CONFIG_USER_ONLY
3787 do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
3789 TCGv_i64 new_spc = tcg_temp_new_i64();
3791 load_spr(ctx, new_spc, sp);
3793 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3794 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3796 if (n && use_nullify_skip(ctx)) {
3797 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3798 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3799 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3800 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3802 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3803 if (ctx->iaoq_b == -1) {
3804 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3806 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3807 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3808 nullify_set(ctx, n);
3810 tcg_temp_free_i64(new_spc);
3811 tcg_gen_lookup_and_goto_ptr();
3812 ctx->base.is_jmp = DISAS_NORETURN;
3813 return nullify_end(ctx);
3818 static bool trans_bl(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3820 unsigned n = extract32(insn, 1, 1);
3821 unsigned link = extract32(insn, 21, 5);
3822 target_sreg disp = assemble_17(insn);
3824 do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
3828 static bool trans_b_gate(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3830 unsigned n = extract32(insn, 1, 1);
3831 unsigned link = extract32(insn, 21, 5);
3832 target_sreg disp = assemble_17(insn);
3833 target_ureg dest = iaoq_dest(ctx, disp);
3835 /* Make sure the caller hasn't done something weird with the queue.
3836 * ??? This is not quite the same as the PSW[B] bit, which would be
3837 * expensive to track. Real hardware will trap for
3838 * b gateway
3839 * b gateway+4 (in delay slot of first branch)
3840 * However, checking for a non-sequential instruction queue *will*
3841 * diagnose the security hole
3842 * b gateway
3843 * b evil, n
3844 * in which instructions at evil would run with increased privs.
3846 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3847 return gen_illegal(ctx);
3850 #ifndef CONFIG_USER_ONLY
3851 if (ctx->tb_flags & PSW_C) {
3852 CPUHPPAState *env = ctx->cs->env_ptr;
3853 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3854 /* If we could not find a TLB entry, then we need to generate an
3855 ITLB miss exception so the kernel will provide it.
3856 The resulting TLB fill operation will invalidate this TB and
3857 we will re-translate, at which point we *will* be able to find
3858 the TLB entry and determine if this is in fact a gateway page. */
3860 gen_excp(ctx, EXCP_ITLB_MISS);
3863 /* No change for non-gateway pages or for priv decrease. */
3864 if (type >= 4 && type - 4 < ctx->privilege) {
3865 dest = deposit32(dest, 0, 2, type - 4);
3868 dest &= -4; /* priv = 0 */
3872 do_dbranch(ctx, dest, link, n);
3876 static bool trans_bl_long(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3878 unsigned n = extract32(insn, 1, 1);
3879 target_sreg disp = assemble_22(insn);
3881 do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
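/* BLR: branch and link register.  The target, iaoq_f + 8 + rx * 8,
   indexes two-instruction slots following the branch. */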
3885 static bool trans_blr(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3887 unsigned n = extract32(insn, 1, 1);
3888 unsigned rx = extract32(insn, 16, 5);
3889 unsigned link = extract32(insn, 21, 5);
3890 TCGv_reg tmp = get_temp(ctx);
3892 tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
3893 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3894 /* The computation here never changes privilege level. */
3895 do_ibranch(ctx, tmp, link, n);
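/* BV: branch vectored, target rb + rx * 8; the rx == 0 form is a plain
   indirect branch through rb. */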
3899 static bool trans_bv(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3901 unsigned n = extract32(insn, 1, 1);
3902 unsigned rx = extract32(insn, 16, 5);
3903 unsigned rb = extract32(insn, 21, 5);
3907 dest = load_gpr(ctx, rb);
3909 dest = get_temp(ctx);
3910 tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
3911 tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
3913 dest = do_ibranch_priv(ctx, dest);
3914 do_ibranch(ctx, dest, 0, n);
3918 static bool trans_bve(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3920 unsigned n = extract32(insn, 1, 1);
3921 unsigned rb = extract32(insn, 21, 5);
3922 unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3925 #ifdef CONFIG_USER_ONLY
3926 dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
3927 do_ibranch(ctx, dest, link, n);
3930 dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
3932 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3933 if (ctx->iaoq_b == -1) {
3934 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3936 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3937 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3939 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
3941 nullify_set(ctx, n);
3942 tcg_gen_lookup_and_goto_ptr();
3943 ctx->base.is_jmp = DISAS_NORETURN;
3944 return nullify_end(ctx);
3949 static const DisasInsn table_branch[] = {
3950 { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3951 { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3952 { 0xe8004000u, 0xfc00fffdu, trans_blr },
3953 { 0xe800c000u, 0xfc00fffdu, trans_bv },
3954 { 0xe800d000u, 0xfc00dffcu, trans_bve },
3955 { 0xe8002000u, 0xfc00e000u, trans_b_gate },
3958 static bool trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3959 const DisasInsn *di)
3961 unsigned rt = extract32(insn, 0, 5);
3962 unsigned ra = extract32(insn, 21, 5);
3963 do_fop_wew(ctx, rt, ra, di->f.wew);
3967 static bool trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3968 const DisasInsn *di)
3970 unsigned rt = assemble_rt64(insn);
3971 unsigned ra = assemble_ra64(insn);
3972 do_fop_wew(ctx, rt, ra, di->f.wew);
3976 static bool trans_fop_ded(DisasContext *ctx, uint32_t insn,
3977 const DisasInsn *di)
3979 unsigned rt = extract32(insn, 0, 5);
3980 unsigned ra = extract32(insn, 21, 5);
3981 do_fop_ded(ctx, rt, ra, di->f.ded);
3985 static bool trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3986 const DisasInsn *di)
3988 unsigned rt = extract32(insn, 0, 5);
3989 unsigned ra = extract32(insn, 21, 5);
3990 do_fop_wed(ctx, rt, ra, di->f.wed);
3994 static bool trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3995 const DisasInsn *di)
3997 unsigned rt = assemble_rt64(insn);
3998 unsigned ra = extract32(insn, 21, 5);
3999 do_fop_wed(ctx, rt, ra, di->f.wed);
4003 static bool trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
4004 const DisasInsn *di)
4006 unsigned rt = extract32(insn, 0, 5);
4007 unsigned ra = extract32(insn, 21, 5);
4008 do_fop_dew(ctx, rt, ra, di->f.dew);
4012 static bool trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
4013 const DisasInsn *di)
4015 unsigned rt = extract32(insn, 0, 5);
4016 unsigned ra = assemble_ra64(insn);
4017 do_fop_dew(ctx, rt, ra, di->f.dew);
4021 static bool trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
4022 const DisasInsn *di)
4024 unsigned rt = extract32(insn, 0, 5);
4025 unsigned rb = extract32(insn, 16, 5);
4026 unsigned ra = extract32(insn, 21, 5);
4027 do_fop_weww(ctx, rt, ra, rb, di->f.weww);
4031 static bool trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
4032 const DisasInsn *di)
4034 unsigned rt = assemble_rt64(insn);
4035 unsigned rb = assemble_rb64(insn);
4036 unsigned ra = assemble_ra64(insn);
4037 do_fop_weww(ctx, rt, ra, rb, di->f.weww);
4041 static bool trans_fop_dedd(DisasContext *ctx, uint32_t insn,
4042 const DisasInsn *di)
4044 unsigned rt = extract32(insn, 0, 5);
4045 unsigned rb = extract32(insn, 16, 5);
4046 unsigned ra = extract32(insn, 21, 5);
4047 do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
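/* Class-zero operations simple enough to implement inline: copy, plus
   abs/neg/negabs done by clearing, flipping, or setting the IEEE sign
   bit with and/xor/or. */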
4051 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4053 tcg_gen_mov_i32(dst, src);
4056 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4058 tcg_gen_mov_i64(dst, src);
4061 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4063 tcg_gen_andi_i32(dst, src, INT32_MAX);
4066 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4068 tcg_gen_andi_i64(dst, src, INT64_MAX);
4071 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4073 tcg_gen_xori_i32(dst, src, INT32_MIN);
4076 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4078 tcg_gen_xori_i64(dst, src, INT64_MIN);
4081 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4083 tcg_gen_ori_i32(dst, src, INT32_MIN);
4086 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4088 tcg_gen_ori_i64(dst, src, INT64_MIN);
4091 static void do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
4092 unsigned y, unsigned c)
4094 TCGv_i32 ta, tb, tc, ty;
4098 ta = load_frw0_i32(ra);
4099 tb = load_frw0_i32(rb);
4100 ty = tcg_const_i32(y);
4101 tc = tcg_const_i32(c);
4103 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
4105 tcg_temp_free_i32(ta);
4106 tcg_temp_free_i32(tb);
4107 tcg_temp_free_i32(ty);
4108 tcg_temp_free_i32(tc);
4113 static bool trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
4114 const DisasInsn *di)
4116 unsigned c = extract32(insn, 0, 5);
4117 unsigned y = extract32(insn, 13, 3);
4118 unsigned rb = extract32(insn, 16, 5);
4119 unsigned ra = extract32(insn, 21, 5);
4120 do_fcmp_s(ctx, ra, rb, y, c);
4124 static bool trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
4125 const DisasInsn *di)
4127 unsigned c = extract32(insn, 0, 5);
4128 unsigned y = extract32(insn, 13, 3);
4129 unsigned rb = assemble_rb64(insn);
4130 unsigned ra = assemble_ra64(insn);
4131 do_fcmp_s(ctx, ra, rb, y, c);
4135 static bool trans_fcmp_d(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
4137 unsigned c = extract32(insn, 0, 5);
4138 unsigned y = extract32(insn, 13, 3);
4139 unsigned rb = extract32(insn, 16, 5);
4140 unsigned ra = extract32(insn, 21, 5);
4148 ty = tcg_const_i32(y);
4149 tc = tcg_const_i32(c);
4151 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
4153 tcg_temp_free_i64(ta);
4154 tcg_temp_free_i64(tb);
4155 tcg_temp_free_i32(ty);
4156 tcg_temp_free_i32(tc);
4158 return nullify_end(ctx);
4161 static bool trans_ftest_t(DisasContext *ctx, uint32_t insn,
4162 const DisasInsn *di)
4164 unsigned y = extract32(insn, 13, 3);
4165 unsigned cbit = (y ^ 1) - 1;
4171 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
4172 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
4173 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4176 return nullify_end(ctx);
4179 static bool trans_ftest_q(DisasContext *ctx, uint32_t insn,
4180 const DisasInsn *di)
4182 unsigned c = extract32(insn, 0, 5);
4190 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
4193 case 0: /* simple */
4194 tcg_gen_andi_reg(t, t, 0x4000000);
4195 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4219 return gen_illegal(ctx);
4222 TCGv_reg c = load_const(ctx, mask);
4223 tcg_gen_or_reg(t, t, c);
4224 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4226 tcg_gen_andi_reg(t, t, mask);
4227 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4230 return nullify_end(ctx);
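/* XMPYU: unsigned 32 x 32 -> 64 multiply in the FP register file; the
   single-word operands are zero-extended to 64 bits and the full
   product is written to the double-word destination. */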
4233 static bool trans_xmpyu(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
4235 unsigned rt = extract32(insn, 0, 5);
4236 unsigned rb = assemble_rb64(insn);
4237 unsigned ra = assemble_ra64(insn);
4242 a = load_frw0_i64(ra);
4243 b = load_frw0_i64(rb);
4244 tcg_gen_mul_i64(a, a, b);
4246 tcg_temp_free_i64(a);
4247 tcg_temp_free_i64(b);
4249 return nullify_end(ctx);
4252 #define FOP_DED trans_fop_ded, .f.ded
4253 #define FOP_DEDD trans_fop_dedd, .f.dedd
4255 #define FOP_WEW trans_fop_wew_0c, .f.wew
4256 #define FOP_DEW trans_fop_dew_0c, .f.dew
4257 #define FOP_WED trans_fop_wed_0c, .f.wed
4258 #define FOP_WEWW trans_fop_weww_0c, .f.weww
4260 static const DisasInsn table_float_0c[] = {
4261 /* floating point class zero */
4262 { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
4263 { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
4264 { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
4265 { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
4266 { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
4267 { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
4269 { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
4270 { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
4271 { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
4272 { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
4273 { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
4274 { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
4276 /* floating point class three */
4277 { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
4278 { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
4279 { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
4280 { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
4282 { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
4283 { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
4284 { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
4285 { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
4287 /* floating point class one */
4289 { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
4290 { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
4292 { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
4293 { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
4294 { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
4295 { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
4297 { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
4298 { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
4299 { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
4300 { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
4301 /* float/int truncate */
4302 { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
4303 { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
4304 { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
4305 { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
4307 { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
4308 { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
4309 { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
4310 { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
4312 { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
4313 { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
4314 { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
4315 { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
4316 /* float/uint truncate */
4317 { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
4318 { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
4319 { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
4320 { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
4322 /* floating point class two */
4323 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
4324 { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
4325 { 0x30002420, 0xffffffe0, trans_ftest_q },
4326 { 0x30000420, 0xffff1fff, trans_ftest_t },
4328 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
4329 This is machine/revision == 0, which is reserved for the simulator. */
4330 { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
4337 #define FOP_WEW trans_fop_wew_0e, .f.wew
4338 #define FOP_DEW trans_fop_dew_0e, .f.dew
4339 #define FOP_WED trans_fop_wed_0e, .f.wed
4340 #define FOP_WEWW trans_fop_weww_0e, .f.weww
4342 static const DisasInsn table_float_0e[] = {
4343 /* floating point class zero */
4344 { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
4345 { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
4346 { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
4347 { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
4348 { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
4349 { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
4351 { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
4352 { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
4353 { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
4354 { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
4355 { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
4356 { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
4358 /* floating point class three */
4359 { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
4360 { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
4361 { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
4362 { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
4364 { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
4365 { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
4366 { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
4367 { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
4369 { 0x38004700, 0xfc00ef60, trans_xmpyu },
4371 /* floating point class one */
4373 { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
4374 { 0x38002200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_d },
4376 { 0x38008200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_w_s },
4377 { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
4378 { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
4379 { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
4381 { 0x38010200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_w },
4382 { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
4383 { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
4384 { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
4385 /* float/int truncate */
4386 { 0x38018200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_w },
4387 { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
4388 { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
4389 { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
4391 { 0x38028200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_uw_s },
4392 { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
4393 { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
4394 { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
4396 { 0x38030200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_uw },
4397 { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
4398 { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
4399 { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
4400 /* float/uint truncate */
4401 { 0x38038200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_uw },
4402 { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
4403 { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
4404 { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
4406 /* floating point class two */
4407 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
4408 { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
4418 /* Convert the fmpyadd single-precision register encodings to standard. */
4419 static inline int fmpyadd_s_reg(unsigned r)
4421 return (r & 16) * 2 + 16 + (r & 15);
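/* In this file's 6-bit single-precision numbering, this maps encodings
   0-15 to registers 16-31 and encodings 16-31 to 48-63, i.e. the two
   word halves of fr16..fr31, the only registers FMPYADD may name. */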
4424 static bool trans_fmpyadd(DisasContext *ctx, uint32_t insn, bool is_sub)
4426 unsigned tm = extract32(insn, 0, 5);
4427 unsigned f = extract32(insn, 5, 1);
4428 unsigned ra = extract32(insn, 6, 5);
4429 unsigned ta = extract32(insn, 11, 5);
4430 unsigned rm2 = extract32(insn, 16, 5);
4431 unsigned rm1 = extract32(insn, 21, 5);
4435 /* Independent multiply & add/sub, with undefined behaviour
4436 if outputs overlap inputs. */
4438 tm = fmpyadd_s_reg(tm);
4439 ra = fmpyadd_s_reg(ra);
4440 ta = fmpyadd_s_reg(ta);
4441 rm2 = fmpyadd_s_reg(rm2);
4442 rm1 = fmpyadd_s_reg(rm1);
4443 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4444 do_fop_weww(ctx, ta, ta, ra,
4445 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4447 do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
4448 do_fop_dedd(ctx, ta, ta, ra,
4449 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4452 return nullify_end(ctx);
4455 static bool trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
4456 const DisasInsn *di)
4458 unsigned rt = assemble_rt64(insn);
4459 unsigned neg = extract32(insn, 5, 1);
4460 unsigned rm1 = assemble_ra64(insn);
4461 unsigned rm2 = assemble_rb64(insn);
4462 unsigned ra3 = assemble_rc64(insn);
4466 a = load_frw0_i32(rm1);
4467 b = load_frw0_i32(rm2);
4468 c = load_frw0_i32(ra3);
4471 gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
4473 gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
4476 tcg_temp_free_i32(b);
4477 tcg_temp_free_i32(c);
4478 save_frw_i32(rt, a);
4479 tcg_temp_free_i32(a);
4480 return nullify_end(ctx);
static bool trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = extract32(insn, 21, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i64 a, b, c;

    nullify_over(ctx);
    a = load_frd0(rm1);
    b = load_frd0(rm2);
    c = load_frd0(ra3);

    if (neg) {
        gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx);
}

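/* Dispatch table for the fused multiply-add group (major opcode 0x2E in
   translate_one below); bit 11 of the word distinguishes the single- and
   double-precision forms.  */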
static const DisasInsn table_fp_fused[] = {
    { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
    { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
};

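/* Linear scan: the first entry for which (insn & table[i].mask) ==
   table[i].insn wins and its translator runs.  E.g. any word with
   (insn & 0xfc000800) == 0xb8000000 dispatches to trans_fmpyfadd_s above.
   Unmatched words are logged as unimplemented and raise an illegal
   instruction exception.  */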
static void translate_table_int(DisasContext *ctx, uint32_t insn,
                                const DisasInsn table[], size_t n)
{
    size_t i;
    for (i = 0; i < n; ++i) {
        if ((insn & table[i].mask) == table[i].insn) {
            table[i].trans(ctx, insn, &table[i]);
            return;
        }
    }
    qemu_log_mask(LOG_UNIMP, "UNIMP insn %08x @ " TARGET_FMT_lx "\n",
                  insn, ctx->base.pc_next);
    gen_illegal(ctx);
}

#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))

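/* decode() is the decodetree-generated decoder for the patterns that have
   already been converted; it returns true once it has handled the insn.
   Anything it rejects falls through to the hand-written major-opcode
   switch below.  */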
static void translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc;

    /* Transition to the auto-generated decoder.  */
    if (decode(ctx, insn)) {
        return;
    }

    opc = extract32(insn, 26, 6);
    switch (opc) {
    case 0x00: /* system op */
        translate_table(ctx, insn, table_system);
        return;
    case 0x01:
        translate_table(ctx, insn, table_mem_mgmt);
        return;
    case 0x02:
        translate_table(ctx, insn, table_arith_log);
        return;
    case 0x03:
        translate_table(ctx, insn, table_index_mem);
        return;
    case 0x06:
        trans_fmpyadd(ctx, insn, false);
        return;
    case 0x08:
        trans_ldil(ctx, insn);
        return;
    case 0x09:
        trans_copr_w(ctx, insn);
        return;
    case 0x0A:
        trans_addil(ctx, insn);
        return;
    case 0x0B:
        trans_copr_dw(ctx, insn);
        return;
    case 0x0C:
        translate_table(ctx, insn, table_float_0c);
        return;
    case 0x0D:
        trans_ldo(ctx, insn);
        return;
    case 0x0E:
        translate_table(ctx, insn, table_float_0e);
        return;

    case 0x10:
        trans_load(ctx, insn, false, MO_UB);
        return;
    case 0x11:
        trans_load(ctx, insn, false, MO_TEUW);
        return;
    case 0x12:
        trans_load(ctx, insn, false, MO_TEUL);
        return;
    case 0x13:
        trans_load(ctx, insn, true, MO_TEUL);
        return;
    case 0x16:
        trans_fload_mod(ctx, insn);
        return;
    case 0x17:
        trans_load_w(ctx, insn);
        return;
    case 0x18:
        trans_store(ctx, insn, false, MO_UB);
        return;
    case 0x19:
        trans_store(ctx, insn, false, MO_TEUW);
        return;
    case 0x1A:
        trans_store(ctx, insn, false, MO_TEUL);
        return;
    case 0x1B:
        trans_store(ctx, insn, true, MO_TEUL);
        return;
    case 0x1E:
        trans_fstore_mod(ctx, insn);
        return;
    case 0x1F:
        trans_store_w(ctx, insn);
        return;

    case 0x20:
        trans_cmpb(ctx, insn, true, false, false);
        return;
    case 0x21:
        trans_cmpb(ctx, insn, true, true, false);
        return;
    case 0x22:
        trans_cmpb(ctx, insn, false, false, false);
        return;
    case 0x23:
        trans_cmpb(ctx, insn, false, true, false);
        return;
    case 0x24:
        trans_cmpiclr(ctx, insn);
        return;
    case 0x25:
        trans_subi(ctx, insn);
        return;
    case 0x26:
        trans_fmpyadd(ctx, insn, true);
        return;
    case 0x27:
        trans_cmpb(ctx, insn, true, false, true);
        return;
    case 0x28:
        trans_addb(ctx, insn, true, false);
        return;
    case 0x29:
        trans_addb(ctx, insn, true, true);
        return;
    case 0x2A:
        trans_addb(ctx, insn, false, false);
        return;
    case 0x2B:
        trans_addb(ctx, insn, false, true);
        return;
    case 0x2C:
    case 0x2D:
        trans_addi(ctx, insn);
        return;
    case 0x2E:
        translate_table(ctx, insn, table_fp_fused);
        return;
    case 0x2F:
        trans_cmpb(ctx, insn, false, false, true);
        return;

    case 0x30:
    case 0x31:
        trans_bb(ctx, insn);
        return;
    case 0x32:
        trans_movb(ctx, insn, false);
        return;
    case 0x33:
        trans_movb(ctx, insn, true);
        return;
    case 0x34:
        translate_table(ctx, insn, table_sh_ex);
        return;
    case 0x35:
        translate_table(ctx, insn, table_depw);
        return;
    case 0x38:
        trans_be(ctx, insn, false);
        return;
    case 0x39:
        trans_be(ctx, insn, true);
        return;
    case 0x3A:
        translate_table(ctx, insn, table_branch);
        return;

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
        break;
    case 0x3F:
#ifndef CONFIG_USER_ONLY
        /* Unassigned, but use as system-halt.  */
        if (insn == 0xfffdead0) {
            gen_hlt(ctx, 0); /* halt system */
            return;
        }
        if (insn == 0xfffdead1) {
            gen_hlt(ctx, 1); /* reset system */
            return;
        }
#endif
        break;
    }
    gen_illegal(ctx);
}

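/*
 * TranslatorOps hooks.  The generic translator_loop() calls
 * init_disas_context and tb_start once per TB, then insn_start and
 * translate_insn for each instruction, and finally tb_stop to emit
 * the TB epilogue.
 */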
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  The space is held in
       the high 32 bits of cs_base; the signed low 32 bits give the offset
       from IAOQ_F to IAOQ_B, with 0 meaning "not known".  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
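    /* E.g. with 4 KiB pages and pc_first == 0x1ff8, the OR sets every bit
       above the page offset, the negation leaves 8, and bound == 2: only
       the words at 0x1ff8 and 0x1ffc remain on this page.  */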
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                     const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    gen_excp(ctx, EXCP_DEBUG);
    ctx->base.pc_next += 4;
    return true;
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
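        /* A value of -1 in iaoq_b is the "unknown at translate time"
           sentinel: a prior branch left the back of the queue only in
           cpu_iaoq_b, so the next address must also be computed at
           run time.  */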
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* The insn is unconditionally nullified: skip it.  */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            translate_one(ctx, insn);
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
        return;
    }
    if (ctx->iaoq_f == -1) {
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
        nullify_save(ctx);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
    } else if (ctx->iaoq_b == -1) {
        tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

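/* In user-only mode, code on the zero page is emulated by do_page_zero()
   rather than translated from guest memory, so log the well-known entry
   points symbolically instead of disassembling them.  */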
static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        qemu_log("IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        qemu_log("IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    qemu_log("IN: %s\n", lookup_symbol(pc));
    log_target_disas(cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .breakpoint_check   = hppa_tr_breakpoint_check,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

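/* Target entry point: run the generic translator loop over one TB with
   the hooks above.  */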
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
}

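/* Recover state after an exception: DATA holds the (iaoq_f, iaoq_b) pair
   that hppa_tr_insn_start recorded for the faulting instruction.  */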
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != (target_ureg)-1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}