/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"
#include "exec/log.h"
/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */
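/* For example, with TARGET_REGISTER_BITS == 64 the tcg_gen_add_reg
   alias below expands to tcg_gen_add_i64; with 32, to tcg_gen_add_i32. */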
#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free
#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#define tcg_temp_free_tl tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_temp_free_tl tcg_temp_free_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif
#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg tcg_const_i64
#define tcg_const_local_reg tcg_const_local_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32

#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg tcg_const_i32
#define tcg_const_local_reg tcg_const_local_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;
/* Include the auto-generated decoder.  */
#include "decode.inc.c"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
typedef struct DisasInsn {
    uint32_t insn, mask;
    bool (*trans)(DisasContext *ctx, uint32_t insn,
                  const struct DisasInsn *f);
    union {
        void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
        void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
    } f;
} DisasInsn;
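/* The union member names above encode the helper signatures: 't' is a
   target register (TCGv_reg), 'w' a 32-bit word (TCGv_i32), 'd' a
   64-bit doubleword (TCGv_i64), and 'e' the cpu_env pointer; e.g.
   "wed" produces an i32 result from cpu_env and an i64 operand.  */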
/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true,
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };

    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);

    return r;
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}
static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}
static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif
static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}
#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}
static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp;

    nullify_over(ctx);
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}
#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif
static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
        || ctx->base.singlestep_enabled) {
        return false;
    }
    return true;
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}
/* PA has a habit of taking the LSB of a field and using that as the sign,
   with the rest of the field becoming the least significant bits.  */
static target_sreg low_sextract(uint32_t val, int pos, int len)
{
    target_ureg x = -(target_ureg)extract32(val, pos, 1);
    x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
    return x;
}
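/* For example, with pos = 0 and len = 5, the field 0b01011 has sign
   bit 1 and magnitude bits 0b0101 = 5, yielding (-1 << 4) | 5 = -11. */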
static unsigned assemble_rt64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 6, 1);
    unsigned r0 = extract32(insn, 0, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_ra64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 7, 1);
    unsigned r0 = extract32(insn, 21, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rb64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 12, 1);
    unsigned r0 = extract32(insn, 16, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rc64(uint32_t insn)
{
    unsigned r2 = extract32(insn, 8, 1);
    unsigned r1 = extract32(insn, 13, 3);
    unsigned r0 = extract32(insn, 9, 2);
    return r2 * 32 + r1 * 4 + r0;
}

static unsigned assemble_sr3(uint32_t insn)
{
    unsigned s2 = extract32(insn, 13, 1);
    unsigned s0 = extract32(insn, 14, 2);
    return s2 * 4 + s0;
}
static target_sreg assemble_12(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x;
}

static target_sreg assemble_16(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 16-bit number
       only with wide mode; otherwise a 14-bit number.  Since we don't
       implement wide mode, this is always the 14-bit number.  */
    return low_sextract(insn, 0, 14);
}

static target_sreg assemble_16a(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 14-bit shifted number
       only with wide mode; otherwise a 12-bit shifted number.  Since we
       don't implement wide mode, this is always the 12-bit number.  */
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 2, 11);
    return x << 2;
}

static target_sreg assemble_17(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x;
}

static target_sreg assemble_21(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 1, 11);
    x = (x << 2) | extract32(insn, 14, 2);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 2) | extract32(insn, 12, 2);
    return x << 11;
}

static target_sreg assemble_22(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 10) | extract32(insn, 16, 10);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x;
}
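/* Reading assemble_22 as a worked example: the result concatenates the
   sign (insn bit 0), then insn bits 25..16, bit 2, and bits 12..3;
   PA's usual scrambled layout for a signed branch displacement.  */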
/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation.  The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce.  */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/* Similar, but for logicals, where the carry and overflow bits are not
   computed, and use of them is undefined.  */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf >> 1) {
    case 4: case 5: case 6:
        cf &= 1;
        break;
    }
    return do_cond(cf, res, res, res);
}
/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}
/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
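        /* This evaluates the standard bitwise carry-out identity,
           carry_out = (in1 & in2) | ((in1 | in2) & ~res),
           for every bit position at once.  */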
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
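        /* Worked example per byte: 0x00 - 0x01 = 0xff sets the byte's
           high bit, and ~0x00 keeps it set, so a zero byte is flagged;
           a nonzero byte such as 0x80 gives 0x7f (high bit clear) and
           is not.  The result is nonzero iff some byte of RES is 0.  */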
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
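    /* I.e. sv = (res ^ in1) & ~(in1 ^ in2): the msb is set exactly when
       the operands have the same sign but the result's sign differs,
       the usual two's-complement overflow rule.  */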
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
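    /* Here sv = (res ^ in1) & (in1 ^ in2): for subtraction, overflow
       requires the operands to have opposite signs and the result's
       sign to differ from IN1's.  */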
    tcg_temp_free(tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || c == 4 || c == 5) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
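        /* Sanity check of the trick: in1 - in2 == in1 + ~in2 + 1, and
           seeding the high word with 1 supplies that +1 as carry-in,
           so CB_MSB ends up holding the "no borrow" carry-out bit.  */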
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}
static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}
#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
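/* E.g. a caller with a 3-bit specifier naming SR5 passes ~5, while an
   ordinary sp == 0 means "derive the space from the top bits of the
   base register".  */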
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif
static void do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    nullify_end(ctx);
}
static void do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    nullify_end(ctx);
}

static void do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    nullify_end(ctx);
}
static void do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, TCGMemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    nullify_end(ctx);
}

static void do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    nullify_end(ctx);
}

static void do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    nullify_end(ctx);
}
static void do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    nullify_end(ctx);
}

static void do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd0(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    nullify_end(ctx);
}

static void do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    nullify_end(ctx);
}

static void do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    nullify_end(ctx);
}

static void do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    nullify_end(ctx);
}

static void do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    nullify_end(ctx);
}
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static void do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static void do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        do_dbranch(ctx, dest, 0, is_n && disp >= 0);
        return;
    }
    if (c == TCG_COND_NEVER) {
        do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
        return;
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static void do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution of N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        nullify_end(ctx);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
        tcg_temp_free(tmp);
    }
}
/*
 * Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *         IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *         IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
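/* E.g. at privilege 1, a branch target whose low two bits are 0 is
   rewritten below to (target & -4) | 1, while a target already carrying
   2 or 3 (numerically larger, hence less privileged) is kept as is.  */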
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest, tmp;

    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = get_temp(ctx);
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, offset, -4);
        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, tmp, offset, tmp, offset);
        tcg_temp_free(tmp);
        break;
    }
    return dest;
}
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif
static bool trans_nop(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mfia(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfsp(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned rs = assemble_sr3(insn);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);
    tcg_temp_free(t1);
    tcg_temp_free_i64(t0);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mfctl(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv_reg tmp;

    switch (ctl) {
    case CR_SAR:
#ifdef TARGET_HPPA64
        if (extract32(insn, 14, 1) == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            gen_helper_read_interval_timer(tmp);
            gen_io_end();
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mtsp(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rr = extract32(insn, 16, 5);
    unsigned rs = assemble_sr3(insn);
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }
    tcg_temp_free_i64(t64);

    return nullify_end(ctx);
}
static bool trans_mtctl(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rin = extract32(insn, 16, 5);
    unsigned ctl = extract32(insn, 21, 5);
    TCGv_reg reg = load_gpr(ctx, rin);
    TCGv_reg tmp;

    if (ctl == CR_SAR) {
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);
        tcg_temp_free(tmp);

        cond_free(&ctx->null_cond);
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifdef CONFIG_USER_ONLY
    g_assert_not_reached();
#else
    nullify_over(ctx);
    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(cpu_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(cpu_env, reg);
        break;
    case CR_EIEM:
        gen_helper_write_eiem(cpu_env, reg);
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = get_temp(ctx);
        tcg_gen_ld_reg(tmp, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_reg(reg, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    default:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
#endif
    return nullify_end(ctx);
}
static bool trans_mtsarcm(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rin = extract32(insn, 16, 5);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, rin));
    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);
    tcg_temp_free(tmp);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_ldsid(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    TCGv_reg dest = dest_gpr(ctx, rt);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_reg(dest, 0);
#else
    unsigned rb = extract32(insn, 21, 5);
    unsigned sp = extract32(insn, 14, 2);
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, space_select(ctx, sp, load_gpr(ctx, rb)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);

    tcg_temp_free_i64(t0);
#endif
    save_gpr(ctx, rt, dest);

    cond_free(&ctx->null_cond);
    return true;
}
#ifndef CONFIG_USER_ONLY
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static target_ureg extract_sm_imm(uint32_t insn)
{
    target_ureg val = extract32(insn, 16, 10);

    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}
static bool trans_rsm(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    target_ureg sm = extract_sm_imm(insn);
    TCGv_reg tmp;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~sm);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, rt, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
}

static bool trans_ssm(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    target_ureg sm = extract_sm_imm(insn);
    TCGv_reg tmp;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, sm);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, rt, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
}

static bool trans_mtsm(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rr = extract32(insn, 16, 5);
    TCGv_reg tmp, reg;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    reg = load_gpr(ctx, rr);
    tmp = get_temp(ctx);
    gen_helper_swap_system_mask(tmp, cpu_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
}
static bool trans_rfi(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned comp = extract32(insn, 5, 4);

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);

    if (comp == 5) {
        gen_helper_rfi_r(cpu_env);
    } else {
        gen_helper_rfi(cpu_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    if (ctx->base.singlestep_enabled) {
        gen_excp_1(EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
}
static bool gen_hlt(DisasContext *ctx, int reset)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    if (reset) {
        gen_helper_reset(cpu_env);
    } else {
        gen_helper_halt(cpu_env);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
}
#endif /* !CONFIG_USER_ONLY */
2319 static const DisasInsn table_system[] = {
2320 { 0x00001820u, 0xffe01fffu, trans_mtsp },
2321 { 0x00001840u, 0xfc00ffffu, trans_mtctl },
2322 { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
2323 { 0x000014a0u, 0xffffffe0u, trans_mfia },
2324 { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
2325 { 0x000008a0u, 0xfc1fbfe0u, trans_mfctl },
2326 { 0x00000400u, 0xffffffffu, trans_sync }, /* sync */
2327 { 0x00100400u, 0xffffffffu, trans_sync }, /* syncdma */
2328 { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
2329 #ifndef CONFIG_USER_ONLY
2330 { 0x00000e60u, 0xfc00ffe0u, trans_rsm },
2331 { 0x00000d60u, 0xfc00ffe0u, trans_ssm },
2332 { 0x00001860u, 0xffe0ffffu, trans_mtsm },
2333 { 0x00000c00u, 0xfffffe1fu, trans_rfi },
2334 #endif
2335 };
2337 static bool trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
2338 const DisasInsn *di)
2340 unsigned rb = extract32(insn, 21, 5);
2341 unsigned rx = extract32(insn, 16, 5);
2342 TCGv_reg dest = dest_gpr(ctx, rb);
2343 TCGv_reg src1 = load_gpr(ctx, rb);
2344 TCGv_reg src2 = load_gpr(ctx, rx);
2346 /* The only thing we need to do is the base register modification. */
2347 tcg_gen_add_reg(dest, src1, src2);
2348 save_gpr(ctx, rb, dest);
2350 cond_free(&ctx->null_cond);
2354 static bool trans_probe(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2356 unsigned rt = extract32(insn, 0, 5);
2357 unsigned sp = extract32(insn, 14, 2);
2358 unsigned rr = extract32(insn, 16, 5);
2359 unsigned rb = extract32(insn, 21, 5);
2360 unsigned is_write = extract32(insn, 6, 1);
2361 unsigned is_imm = extract32(insn, 13, 1);
2363 TCGv_i32 level, want;
2368 dest = dest_gpr(ctx, rt);
2369 form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
2371 if (is_imm) {
2372 level = tcg_const_i32(extract32(insn, 16, 2));
2373 } else {
2374 level = tcg_temp_new_i32();
2375 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, rr));
2376 tcg_gen_andi_i32(level, level, 3);
2377 }
2378 want = tcg_const_i32(is_write ? PAGE_WRITE : PAGE_READ);
2380 gen_helper_probe(dest, cpu_env, addr, level, want);
2382 tcg_temp_free_i32(want);
2383 tcg_temp_free_i32(level);
2385 save_gpr(ctx, rt, dest);
2386 return nullify_end(ctx);
2389 #ifndef CONFIG_USER_ONLY
2390 static bool trans_ixtlbx(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2393 unsigned rr = extract32(insn, 16, 5);
2394 unsigned rb = extract32(insn, 21, 5);
2395 unsigned is_data = insn & 0x1000;
2396 unsigned is_addr = insn & 0x40;
2400 if (is_data) {
2401 sp = extract32(insn, 14, 2);
2402 } else {
2403 sp = ~assemble_sr3(insn);
2404 }
2406 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2409 form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
2410 reg = load_gpr(ctx, rr);
2411 if (is_addr) {
2412 gen_helper_itlba(cpu_env, addr, reg);
2413 } else {
2414 gen_helper_itlbp(cpu_env, addr, reg);
2415 }
2417 /* Exit TB for ITLB change if mmu is enabled. This *should* not be
2418 the case, since the OS TLB fill handler runs with mmu disabled. */
2419 if (!is_data && (ctx->tb_flags & PSW_C)) {
2420 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2422 return nullify_end(ctx);
2425 static bool trans_pxtlbx(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2427 unsigned m = extract32(insn, 5, 1);
2429 unsigned rx = extract32(insn, 16, 5);
2430 unsigned rb = extract32(insn, 21, 5);
2431 unsigned is_data = insn & 0x1000;
2432 unsigned is_local = insn & 0x40;
2436 if (is_data) {
2437 sp = extract32(insn, 14, 2);
2438 } else {
2439 sp = ~assemble_sr3(insn);
2440 }
2442 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2445 form_gva(ctx, &addr, &ofs, rb, rx, 0, 0, sp, m, false);
2446 if (m) {
2447 save_gpr(ctx, rb, ofs);
2448 }
2449 if (is_local) {
2450 gen_helper_ptlbe(cpu_env);
2451 } else {
2452 gen_helper_ptlb(cpu_env, addr);
2453 }
2455 /* Exit TB for TLB change if mmu is enabled. */
2456 if (!is_data && (ctx->tb_flags & PSW_C)) {
2457 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2459 return nullify_end(ctx);
2462 static bool trans_lpa(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2464 unsigned rt = extract32(insn, 0, 5);
2465 unsigned m = extract32(insn, 5, 1);
2466 unsigned sp = extract32(insn, 14, 2);
2467 unsigned rx = extract32(insn, 16, 5);
2468 unsigned rb = extract32(insn, 21, 5);
2470 TCGv_reg ofs, paddr;
2472 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2475 form_gva(ctx, &vaddr, &ofs, rb, rx, 0, 0, sp, m, false);
2477 paddr = tcg_temp_new();
2478 gen_helper_lpa(paddr, cpu_env, vaddr);
2480 /* Note that physical address result overrides base modification. */
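/* E.g. (hypothetical encoding) "lpa,m r4(sr0,r5),r5": the ofs write-back
   to r5 happens first and the translated address then overwrites it, so
   the physical address is what r5 finally holds. */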
2481 if (m) {
2482 save_gpr(ctx, rb, ofs);
2483 }
2484 save_gpr(ctx, rt, paddr);
2485 tcg_temp_free(paddr);
2487 return nullify_end(ctx);
2490 static bool trans_lci(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2492 unsigned rt = extract32(insn, 0, 5);
2495 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2497 /* The Coherence Index is an implementation-defined function of the
2498 physical address. Two addresses with the same CI have a coherent
2499 view of the cache. Our implementation is to return 0 for all,
2500 since the entire address space is coherent. */
2501 ci = tcg_const_reg(0);
2502 save_gpr(ctx, rt, ci);
2505 cond_free(&ctx->null_cond);
2508 #endif /* !CONFIG_USER_ONLY */
2510 static const DisasInsn table_mem_mgmt[] = {
2511 { 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
2512 { 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
2513 { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
2514 { 0x040012c0u, 0xfc003fffu, trans_nop }, /* fdce */
2515 { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
2516 { 0x04000280u, 0xfc001fffu, trans_nop }, /* fic 0a */
2517 { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
2518 { 0x040013c0u, 0xfc003fffu, trans_nop }, /* fic 4f */
2519 { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
2520 { 0x040002c0u, 0xfc001fffu, trans_nop }, /* fice */
2521 { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
2522 { 0x04002700u, 0xfc003fffu, trans_nop }, /* pdc */
2523 { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
2524 { 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
2525 { 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
2526 #ifndef CONFIG_USER_ONLY
2527 { 0x04000000u, 0xfc001fffu, trans_ixtlbx }, /* iitlbp */
2528 { 0x04000040u, 0xfc001fffu, trans_ixtlbx }, /* iitlba */
2529 { 0x04001000u, 0xfc001fffu, trans_ixtlbx }, /* idtlbp */
2530 { 0x04001040u, 0xfc001fffu, trans_ixtlbx }, /* idtlba */
2531 { 0x04000200u, 0xfc001fdfu, trans_pxtlbx }, /* pitlb */
2532 { 0x04000240u, 0xfc001fdfu, trans_pxtlbx }, /* pitlbe */
2533 { 0x04001200u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlb */
2534 { 0x04001240u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlbe */
2535 { 0x04001340u, 0xfc003fc0u, trans_lpa },
2536 { 0x04001300u, 0xfc003fe0u, trans_lci },
2537 #endif
2538 };
2540 static bool trans_add(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2542 unsigned r2 = extract32(insn, 21, 5);
2543 unsigned r1 = extract32(insn, 16, 5);
2544 unsigned cf = extract32(insn, 12, 4);
2545 unsigned ext = extract32(insn, 8, 4);
2546 unsigned shift = extract32(insn, 6, 2);
2547 unsigned rt = extract32(insn, 0, 5);
2548 TCGv_reg tcg_r1, tcg_r2;
2549 bool is_c = false;
2550 bool is_l = false;
2551 bool is_tc = false;
2552 bool is_tsv = false;
2554 switch (ext) {
2555 case 0x6: /* ADD, SHLADD */
2556 break;
2557 case 0xa: /* ADD,L, SHLADD,L */
2558 is_l = true;
2559 break;
2560 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
2561 is_tsv = true;
2562 break;
2563 case 0x7: /* ADD,C */
2564 is_c = true;
2565 break;
2566 case 0xf: /* ADD,C,TSV */
2567 is_c = is_tsv = true;
2568 break;
2569 default:
2570 return gen_illegal(ctx);
2571 }
2573 if (cf) {
2574 nullify_over(ctx);
2575 }
2576 tcg_r1 = load_gpr(ctx, r1);
2577 tcg_r2 = load_gpr(ctx, r2);
2578 do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
2579 return nullify_end(ctx);
2582 static bool trans_sub(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2584 unsigned r2 = extract32(insn, 21, 5);
2585 unsigned r1 = extract32(insn, 16, 5);
2586 unsigned cf = extract32(insn, 12, 4);
2587 unsigned ext = extract32(insn, 6, 6);
2588 unsigned rt = extract32(insn, 0, 5);
2589 TCGv_reg tcg_r1, tcg_r2;
2590 bool is_b = false;
2591 bool is_tc = false;
2592 bool is_tsv = false;
2594 switch (ext) {
2595 case 0x10: /* SUB */
2596 break;
2597 case 0x30: /* SUB,TSV */
2598 is_tsv = true;
2599 break;
2600 case 0x14: /* SUB,B */
2601 is_b = true;
2602 break;
2603 case 0x34: /* SUB,B,TSV */
2604 is_b = is_tsv = true;
2605 break;
2606 case 0x13: /* SUB,TC */
2607 is_tc = true;
2608 break;
2609 case 0x33: /* SUB,TSV,TC */
2610 is_tc = is_tsv = true;
2611 break;
2612 default:
2613 return gen_illegal(ctx);
2614 }
2616 if (cf) {
2617 nullify_over(ctx);
2618 }
2619 tcg_r1 = load_gpr(ctx, r1);
2620 tcg_r2 = load_gpr(ctx, r2);
2621 do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
2622 return nullify_end(ctx);
2625 static bool trans_log(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2627 unsigned r2 = extract32(insn, 21, 5);
2628 unsigned r1 = extract32(insn, 16, 5);
2629 unsigned cf = extract32(insn, 12, 4);
2630 unsigned rt = extract32(insn, 0, 5);
2631 TCGv_reg tcg_r1, tcg_r2;
2636 tcg_r1 = load_gpr(ctx, r1);
2637 tcg_r2 = load_gpr(ctx, r2);
2638 do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
2639 return nullify_end(ctx);
2642 /* OR r,0,t -> COPY (according to gas) */
2643 static bool trans_copy(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2645 unsigned r1 = extract32(insn, 16, 5);
2646 unsigned rt = extract32(insn, 0, 5);
2648 if (r1 == 0) {
2649 TCGv_reg dest = dest_gpr(ctx, rt);
2650 tcg_gen_movi_reg(dest, 0);
2651 save_gpr(ctx, rt, dest);
2652 } else {
2653 save_gpr(ctx, rt, cpu_gr[r1]);
2654 }
2655 cond_free(&ctx->null_cond);
2659 static bool trans_cmpclr(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2661 unsigned r2 = extract32(insn, 21, 5);
2662 unsigned r1 = extract32(insn, 16, 5);
2663 unsigned cf = extract32(insn, 12, 4);
2664 unsigned rt = extract32(insn, 0, 5);
2665 TCGv_reg tcg_r1, tcg_r2;
2670 tcg_r1 = load_gpr(ctx, r1);
2671 tcg_r2 = load_gpr(ctx, r2);
2672 do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
2673 return nullify_end(ctx);
2676 static bool trans_uxor(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2678 unsigned r2 = extract32(insn, 21, 5);
2679 unsigned r1 = extract32(insn, 16, 5);
2680 unsigned cf = extract32(insn, 12, 4);
2681 unsigned rt = extract32(insn, 0, 5);
2682 TCGv_reg tcg_r1, tcg_r2;
2687 tcg_r1 = load_gpr(ctx, r1);
2688 tcg_r2 = load_gpr(ctx, r2);
2689 do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_reg);
2690 return nullify_end(ctx);
2693 static bool trans_uaddcm(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2695 unsigned r2 = extract32(insn, 21, 5);
2696 unsigned r1 = extract32(insn, 16, 5);
2697 unsigned cf = extract32(insn, 12, 4);
2698 unsigned is_tc = extract32(insn, 6, 1);
2699 unsigned rt = extract32(insn, 0, 5);
2700 TCGv_reg tcg_r1, tcg_r2, tmp;
2705 tcg_r1 = load_gpr(ctx, r1);
2706 tcg_r2 = load_gpr(ctx, r2);
2707 tmp = get_temp(ctx);
2708 tcg_gen_not_reg(tmp, tcg_r2);
2709 do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_reg);
2710 return nullify_end(ctx);
2713 static bool trans_dcor(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2715 unsigned r2 = extract32(insn, 21, 5);
2716 unsigned cf = extract32(insn, 12, 4);
2717 unsigned is_i = extract32(insn, 6, 1);
2718 unsigned rt = extract32(insn, 0, 5);
2723 tmp = get_temp(ctx);
2724 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2725 if (!is_i) {
2726 tcg_gen_not_reg(tmp, tmp);
2727 }
2728 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2729 tcg_gen_muli_reg(tmp, tmp, 6);
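/* Worked example (illustrative): if decimal carries came out of digits 0
   and 4, the masked carry bits are 0x00010001, and multiplying by 6 gives
   0x00060006, the per-digit BCD correction that do_unit applies below
   (added or subtracted according to the i bit). */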
2730 do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2731 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2733 return nullify_end(ctx);
2736 static bool trans_ds(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2738 unsigned r2 = extract32(insn, 21, 5);
2739 unsigned r1 = extract32(insn, 16, 5);
2740 unsigned cf = extract32(insn, 12, 4);
2741 unsigned rt = extract32(insn, 0, 5);
2742 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2746 in1 = load_gpr(ctx, r1);
2747 in2 = load_gpr(ctx, r2);
2749 add1 = tcg_temp_new();
2750 add2 = tcg_temp_new();
2751 addc = tcg_temp_new();
2752 dest = tcg_temp_new();
2753 zero = tcg_const_reg(0);
2755 /* Form R1 << 1 | PSW[CB]{8}. */
2756 tcg_gen_add_reg(add1, in1, in1);
2757 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2759 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2760 carry{8} requires that we subtract via + ~R2 + 1, as described in
2761 the manual. By extracting and masking V, we can produce the
2762 proper inputs to the addition without movcond. */
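/* Sketch of the identity used here: sari turns the sign of PSW[V] into an
   all-ones or all-zero mask. With V negative, add2 = in2 ^ -1 = ~in2 and
   the masked carry-in is 1, so add1 + ~in2 + 1 == add1 - in2; with V
   non-negative, add2 = in2 with carry-in 0, a plain addition. */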
2763 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2764 tcg_gen_xor_reg(add2, in2, addc);
2765 tcg_gen_andi_reg(addc, addc, 1);
2766 /* ??? This is only correct for 32-bit. */
2767 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2768 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2770 tcg_temp_free(addc);
2771 tcg_temp_free(zero);
2773 /* Write back the result register. */
2774 save_gpr(ctx, rt, dest);
2776 /* Write back PSW[CB]. */
2777 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2778 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
2780 /* Write back PSW[V] for the division step. */
2781 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2782 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2784 /* Install the new nullification. */
2785 if (cf) {
2786 TCGv_reg sv = NULL;
2787 if (cf >> 1 == 6) {
2788 /* ??? The lshift is supposed to contribute to overflow. */
2789 sv = do_add_sv(ctx, dest, add1, add2);
2790 }
2791 ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2792 }
2794 tcg_temp_free(add1);
2795 tcg_temp_free(add2);
2796 tcg_temp_free(dest);
2798 return nullify_end(ctx);
2801 #ifndef CONFIG_USER_ONLY
2802 /* These are QEMU extensions and are nops in the real architecture:
2804 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2805 * or %r31,%r31,%r31 -- death loop; offline cpu
2806 * currently implemented as idle.
2808 static bool trans_pause(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2812 /* No need to check for supervisor, as userland can only pause
2813 until the next timer interrupt. */
2816 /* Advance the instruction queue. */
2817 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2818 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2819 nullify_set(ctx, 0);
2821 /* Tell the qemu main loop to halt until this cpu has work. */
2822 tmp = tcg_const_i32(1);
2823 tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2824 offsetof(CPUState, halted));
2825 tcg_temp_free_i32(tmp);
2826 gen_excp_1(EXCP_HALTED);
2827 ctx->base.is_jmp = DISAS_NORETURN;
2829 return nullify_end(ctx);
2833 static const DisasInsn table_arith_log[] = {
2834 { 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
2835 { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
2836 #ifndef CONFIG_USER_ONLY
2837 { 0x094a024au, 0xffffffffu, trans_pause }, /* or r10,r10,r10 */
2838 { 0x0bff025fu, 0xffffffffu, trans_pause }, /* or r31,r31,r31 */
2839 #endif
2840 { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_reg },
2841 { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_reg },
2842 { 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_reg },
2843 { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_reg },
2844 { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2845 { 0x08000380u, 0xfc000fe0u, trans_uxor },
2846 { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2847 { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2848 { 0x08000440u, 0xfc000fe0u, trans_ds },
2849 { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2850 { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2851 { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2852 { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
2855 static bool trans_addi(DisasContext *ctx, uint32_t insn)
2857 target_sreg im = low_sextract(insn, 0, 11);
2858 unsigned e1 = extract32(insn, 11, 1);
2859 unsigned cf = extract32(insn, 12, 4);
2860 unsigned rt = extract32(insn, 16, 5);
2861 unsigned r2 = extract32(insn, 21, 5);
2862 unsigned o1 = extract32(insn, 26, 1);
2863 TCGv_reg tcg_im, tcg_r2;
2869 tcg_im = load_const(ctx, im);
2870 tcg_r2 = load_gpr(ctx, r2);
2871 do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2873 return nullify_end(ctx);
2876 static bool trans_subi(DisasContext *ctx, uint32_t insn)
2878 target_sreg im = low_sextract(insn, 0, 11);
2879 unsigned e1 = extract32(insn, 11, 1);
2880 unsigned cf = extract32(insn, 12, 4);
2881 unsigned rt = extract32(insn, 16, 5);
2882 unsigned r2 = extract32(insn, 21, 5);
2883 TCGv_reg tcg_im, tcg_r2;
2889 tcg_im = load_const(ctx, im);
2890 tcg_r2 = load_gpr(ctx, r2);
2891 do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2893 return nullify_end(ctx);
2896 static bool trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2898 target_sreg im = low_sextract(insn, 0, 11);
2899 unsigned cf = extract32(insn, 12, 4);
2900 unsigned rt = extract32(insn, 16, 5);
2901 unsigned r2 = extract32(insn, 21, 5);
2902 TCGv_reg tcg_im, tcg_r2;
2908 tcg_im = load_const(ctx, im);
2909 tcg_r2 = load_gpr(ctx, r2);
2910 do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2912 return nullify_end(ctx);
2915 static bool trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2916 const DisasInsn *di)
2918 unsigned rt = extract32(insn, 0, 5);
2919 unsigned m = extract32(insn, 5, 1);
2920 unsigned sz = extract32(insn, 6, 2);
2921 unsigned a = extract32(insn, 13, 1);
2922 unsigned sp = extract32(insn, 14, 2);
2923 int disp = low_sextract(insn, 16, 5);
2924 unsigned rb = extract32(insn, 21, 5);
2925 int modify = (m ? (a ? -1 : 1) : 0);
2926 TCGMemOp mop = MO_TE | sz;
2928 do_load(ctx, rt, rb, 0, 0, disp, sp, modify, mop);
2932 static bool trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2933 const DisasInsn *di)
2935 unsigned rt = extract32(insn, 0, 5);
2936 unsigned m = extract32(insn, 5, 1);
2937 unsigned sz = extract32(insn, 6, 2);
2938 unsigned u = extract32(insn, 13, 1);
2939 unsigned sp = extract32(insn, 14, 2);
2940 unsigned rx = extract32(insn, 16, 5);
2941 unsigned rb = extract32(insn, 21, 5);
2942 TCGMemOp mop = MO_TE | sz;
2944 do_load(ctx, rt, rb, rx, u ? sz : 0, 0, sp, m, mop);
2948 static bool trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2949 const DisasInsn *di)
2951 int disp = low_sextract(insn, 0, 5);
2952 unsigned m = extract32(insn, 5, 1);
2953 unsigned sz = extract32(insn, 6, 2);
2954 unsigned a = extract32(insn, 13, 1);
2955 unsigned sp = extract32(insn, 14, 2);
2956 unsigned rr = extract32(insn, 16, 5);
2957 unsigned rb = extract32(insn, 21, 5);
2958 int modify = (m ? (a ? -1 : 1) : 0);
2959 TCGMemOp mop = MO_TE | sz;
2961 do_store(ctx, rr, rb, disp, sp, modify, mop);
2965 static bool trans_ldcw(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2967 unsigned rt = extract32(insn, 0, 5);
2968 unsigned m = extract32(insn, 5, 1);
2969 unsigned i = extract32(insn, 12, 1);
2970 unsigned au = extract32(insn, 13, 1);
2971 unsigned sp = extract32(insn, 14, 2);
2972 unsigned rx = extract32(insn, 16, 5);
2973 unsigned rb = extract32(insn, 21, 5);
2974 TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
2975 TCGv_reg zero, dest, ofs;
2976 TCGv_tl addr;
2977 int modify, disp = 0, scale = 0;
2979 nullify_over(ctx);
2981 if (i) {
2982 modify = (m ? (au ? -1 : 1) : 0);
2983 disp = low_sextract(rx, 0, 5);
2984 rx = 0;
2985 } else {
2986 modify = m;
2987 if (au) {
2988 scale = mop & MO_SIZE;
2989 }
2990 }
2991 if (modify) {
2992 /* Base register modification. Make sure if RT == RB,
2993 we see the result of the load. */
2994 dest = get_temp(ctx);
2995 } else {
2996 dest = dest_gpr(ctx, rt);
2997 }
2999 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
3000 ctx->mmu_idx == MMU_PHYS_IDX);
3001 zero = tcg_const_reg(0);
3002 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
3004 save_gpr(ctx, rb, ofs);
3006 save_gpr(ctx, rt, dest);
3008 return nullify_end(ctx);
3011 static bool trans_stby(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3013 target_sreg disp = low_sextract(insn, 0, 5);
3014 unsigned m = extract32(insn, 5, 1);
3015 unsigned a = extract32(insn, 13, 1);
3016 unsigned sp = extract32(insn, 14, 2);
3017 unsigned rt = extract32(insn, 16, 5);
3018 unsigned rb = extract32(insn, 21, 5);
3024 form_gva(ctx, &addr, &ofs, rb, 0, 0, disp, sp, m,
3025 ctx->mmu_idx == MMU_PHYS_IDX);
3026 val = load_gpr(ctx, rt);
3027 if (a) {
3028 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3029 gen_helper_stby_e_parallel(cpu_env, addr, val);
3030 } else {
3031 gen_helper_stby_e(cpu_env, addr, val);
3032 }
3033 } else {
3034 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3035 gen_helper_stby_b_parallel(cpu_env, addr, val);
3036 } else {
3037 gen_helper_stby_b(cpu_env, addr, val);
3038 }
3039 }
3041 if (m) {
3042 tcg_gen_andi_reg(ofs, ofs, ~3);
3043 save_gpr(ctx, rb, ofs);
3044 }
3046 return nullify_end(ctx);
3049 #ifndef CONFIG_USER_ONLY
3050 static bool trans_ldwa_idx_i(DisasContext *ctx, uint32_t insn,
3051 const DisasInsn *di)
3053 int hold_mmu_idx = ctx->mmu_idx;
3055 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3057 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3058 format wrt the sub-opcode in bits 6:9. */
3059 ctx->mmu_idx = MMU_PHYS_IDX;
3060 trans_ld_idx_i(ctx, insn, di);
3061 ctx->mmu_idx = hold_mmu_idx;
3065 static bool trans_ldwa_idx_x(DisasContext *ctx, uint32_t insn,
3066 const DisasInsn *di)
3068 int hold_mmu_idx = ctx->mmu_idx;
3070 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3072 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3073 format wrt the sub-opcode in bits 6:9. */
3074 ctx->mmu_idx = MMU_PHYS_IDX;
3075 trans_ld_idx_x(ctx, insn, di);
3076 ctx->mmu_idx = hold_mmu_idx;
3080 static bool trans_stwa_idx_i(DisasContext *ctx, uint32_t insn,
3081 const DisasInsn *di)
3083 int hold_mmu_idx = ctx->mmu_idx;
3085 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3087 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3088 format wrt the sub-opcode in bits 6:9. */
3089 ctx->mmu_idx = MMU_PHYS_IDX;
3090 trans_st_idx_i(ctx, insn, di);
3091 ctx->mmu_idx = hold_mmu_idx;
3096 static const DisasInsn table_index_mem[] = {
3097 { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
3098 { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
3099 { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
3100 { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
3101 { 0x0c001300u, 0xfc0013c0, trans_stby },
3102 #ifndef CONFIG_USER_ONLY
3103 { 0x0c000180u, 0xfc00d3c0, trans_ldwa_idx_x }, /* LDWA, rx */
3104 { 0x0c001180u, 0xfc00d3c0, trans_ldwa_idx_i }, /* LDWA, im */
3105 { 0x0c001380u, 0xfc00d3c0u, trans_stwa_idx_i }, /* STWA, im */
3106 #endif
3107 };
3109 static bool trans_ldil(DisasContext *ctx, uint32_t insn)
3111 unsigned rt = extract32(insn, 21, 5);
3112 target_sreg i = assemble_21(insn);
3113 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3115 tcg_gen_movi_reg(tcg_rt, i);
3116 save_gpr(ctx, rt, tcg_rt);
3117 cond_free(&ctx->null_cond);
3121 static bool trans_addil(DisasContext *ctx, uint32_t insn)
3123 unsigned rt = extract32(insn, 21, 5);
3124 target_sreg i = assemble_21(insn);
3125 TCGv_reg tcg_rt = load_gpr(ctx, rt);
3126 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3128 tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
3129 save_gpr(ctx, 1, tcg_r1);
3130 cond_free(&ctx->null_cond);
3134 static bool trans_ldo(DisasContext *ctx, uint32_t insn)
3136 unsigned rb = extract32(insn, 21, 5);
3137 unsigned rt = extract32(insn, 16, 5);
3138 target_sreg i = assemble_16(insn);
3139 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3141 /* Special case rb == 0, for the LDI pseudo-op.
3142 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
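/* E.g. gas turns "ldi 3,%r4" into "ldo 3(%r0),%r4", which takes the
   rb == 0 branch below and degenerates to a move-immediate. */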
3143 if (rb == 0) {
3144 tcg_gen_movi_reg(tcg_rt, i);
3145 } else {
3146 tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
3147 }
3148 save_gpr(ctx, rt, tcg_rt);
3149 cond_free(&ctx->null_cond);
3153 static bool trans_load(DisasContext *ctx, uint32_t insn,
3154 bool is_mod, TCGMemOp mop)
3156 unsigned rb = extract32(insn, 21, 5);
3157 unsigned rt = extract32(insn, 16, 5);
3158 unsigned sp = extract32(insn, 14, 2);
3159 target_sreg i = assemble_16(insn);
3161 do_load(ctx, rt, rb, 0, 0, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3165 static bool trans_load_w(DisasContext *ctx, uint32_t insn)
3167 unsigned rb = extract32(insn, 21, 5);
3168 unsigned rt = extract32(insn, 16, 5);
3169 unsigned sp = extract32(insn, 14, 2);
3170 target_sreg i = assemble_16a(insn);
3171 unsigned ext2 = extract32(insn, 1, 2);
3173 switch (ext2) {
3174 case 0:
3175 case 1:
3176 /* FLDW without modification. */
3177 do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3178 break;
3179 case 2:
3180 /* LDW with modification. Note that the sign of I selects
3181 post-dec vs pre-inc. */
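/* Reading the convention used here (an assumption drawn from the modify
   argument below): a negative displacement yields the post-decrement form,
   where the load uses the old base before it is lowered, while a positive
   displacement pre-increments the base before the access. */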
3182 do_load(ctx, rt, rb, 0, 0, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3183 break;
3184 default:
3185 return gen_illegal(ctx);
3186 }
3190 static bool trans_fload_mod(DisasContext *ctx, uint32_t insn)
3192 target_sreg i = assemble_16a(insn);
3193 unsigned t1 = extract32(insn, 1, 1);
3194 unsigned a = extract32(insn, 2, 1);
3195 unsigned sp = extract32(insn, 14, 2);
3196 unsigned t0 = extract32(insn, 16, 5);
3197 unsigned rb = extract32(insn, 21, 5);
3199 /* FLDW with modification. */
3200 do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3204 static bool trans_store(DisasContext *ctx, uint32_t insn,
3205 bool is_mod, TCGMemOp mop)
3207 unsigned rb = extract32(insn, 21, 5);
3208 unsigned rt = extract32(insn, 16, 5);
3209 unsigned sp = extract32(insn, 14, 2);
3210 target_sreg i = assemble_16(insn);
3212 do_store(ctx, rt, rb, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3216 static bool trans_store_w(DisasContext *ctx, uint32_t insn)
3218 unsigned rb = extract32(insn, 21, 5);
3219 unsigned rt = extract32(insn, 16, 5);
3220 unsigned sp = extract32(insn, 14, 2);
3221 target_sreg i = assemble_16a(insn);
3222 unsigned ext2 = extract32(insn, 1, 2);
3224 switch (ext2) {
3225 case 0:
3226 case 1:
3227 /* FSTW without modification. */
3228 do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3229 break;
3230 case 2:
3231 /* STW with modification. */
3232 do_store(ctx, rt, rb, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3233 break;
3234 default:
3235 return gen_illegal(ctx);
3236 }
3240 static bool trans_fstore_mod(DisasContext *ctx, uint32_t insn)
3242 target_sreg i = assemble_16a(insn);
3243 unsigned t1 = extract32(insn, 1, 1);
3244 unsigned a = extract32(insn, 2, 1);
3245 unsigned sp = extract32(insn, 14, 2);
3246 unsigned t0 = extract32(insn, 16, 5);
3247 unsigned rb = extract32(insn, 21, 5);
3249 /* FSTW with modification. */
3250 do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3254 static bool trans_copr_w(DisasContext *ctx, uint32_t insn)
3256 unsigned t0 = extract32(insn, 0, 5);
3257 unsigned m = extract32(insn, 5, 1);
3258 unsigned t1 = extract32(insn, 6, 1);
3259 unsigned ext3 = extract32(insn, 7, 3);
3260 /* unsigned cc = extract32(insn, 10, 2); */
3261 unsigned i = extract32(insn, 12, 1);
3262 unsigned ua = extract32(insn, 13, 1);
3263 unsigned sp = extract32(insn, 14, 2);
3264 unsigned rx = extract32(insn, 16, 5);
3265 unsigned rb = extract32(insn, 21, 5);
3266 unsigned rt = t1 * 32 + t0;
3267 int modify = (m ? (ua ? -1 : 1) : 0);
3271 scale = (ua ? 2 : 0);
3275 disp = low_sextract(rx, 0, 5);
3278 modify = (m ? (ua ? -1 : 1) : 0);
3281 switch (ext3) {
3282 case 0: /* FLDW */
3283 do_floadw(ctx, rt, rb, rx, scale, disp, sp, modify);
3284 break;
3285 case 4: /* FSTW */
3286 do_fstorew(ctx, rt, rb, rx, scale, disp, sp, modify);
3287 break;
3288 default:
3289 return gen_illegal(ctx);
3290 }
3294 static bool trans_copr_dw(DisasContext *ctx, uint32_t insn)
3296 unsigned rt = extract32(insn, 0, 5);
3297 unsigned m = extract32(insn, 5, 1);
3298 unsigned ext4 = extract32(insn, 6, 4);
3299 /* unsigned cc = extract32(insn, 10, 2); */
3300 unsigned i = extract32(insn, 12, 1);
3301 unsigned ua = extract32(insn, 13, 1);
3302 unsigned sp = extract32(insn, 14, 2);
3303 unsigned rx = extract32(insn, 16, 5);
3304 unsigned rb = extract32(insn, 21, 5);
3305 int modify = (m ? (ua ? -1 : 1) : 0);
3309 scale = (ua ? 3 : 0);
3313 disp = low_sextract(rx, 0, 5);
3316 modify = (m ? (ua ? -1 : 1) : 0);
3319 switch (ext4) {
3320 case 0: /* FLDD */
3321 do_floadd(ctx, rt, rb, rx, scale, disp, sp, modify);
3322 break;
3323 case 4: /* FSTD */
3324 do_fstored(ctx, rt, rb, rx, scale, disp, sp, modify);
3325 break;
3326 default:
3327 return gen_illegal(ctx);
3328 }
3332 static bool trans_cmpb(DisasContext *ctx, uint32_t insn,
3333 bool is_true, bool is_imm, bool is_dw)
3335 target_sreg disp = assemble_12(insn) * 4;
3336 unsigned n = extract32(insn, 1, 1);
3337 unsigned c = extract32(insn, 13, 3);
3338 unsigned r = extract32(insn, 21, 5);
3339 unsigned cf = c * 2 + !is_true;
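/* cf packs the 3-bit condition with a negate bit in bit 0. E.g., assuming
   the usual compare encoding where c = 1 means "=", the true form gives
   cf = 2 and the negated ,<> form gives cf = 3, which do_sub_cond then
   interprets as the inverted test. */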
3340 TCGv_reg dest, in1, in2, sv;
3341 DisasCond cond;
3343 nullify_over(ctx);
3345 if (is_imm) {
3346 in1 = load_const(ctx, low_sextract(insn, 16, 5));
3347 } else {
3348 in1 = load_gpr(ctx, extract32(insn, 16, 5));
3349 }
3350 in2 = load_gpr(ctx, r);
3351 dest = get_temp(ctx);
3353 tcg_gen_sub_reg(dest, in1, in2);
3355 sv = NULL;
3356 if (cf >> 1 == 6) {
3357 sv = do_sub_sv(ctx, dest, in1, in2);
3358 }
3360 cond = do_sub_cond(cf, dest, in1, in2, sv);
3361 do_cbranch(ctx, disp, n, &cond);
3365 static bool trans_addb(DisasContext *ctx, uint32_t insn,
3366 bool is_true, bool is_imm)
3368 target_sreg disp = assemble_12(insn) * 4;
3369 unsigned n = extract32(insn, 1, 1);
3370 unsigned c = extract32(insn, 13, 3);
3371 unsigned r = extract32(insn, 21, 5);
3372 unsigned cf = c * 2 + !is_true;
3373 TCGv_reg dest, in1, in2, sv, cb_msb;
3374 DisasCond cond;
3376 nullify_over(ctx);
3378 if (is_imm) {
3379 in1 = load_const(ctx, low_sextract(insn, 16, 5));
3380 } else {
3381 in1 = load_gpr(ctx, extract32(insn, 16, 5));
3382 }
3383 in2 = load_gpr(ctx, r);
3384 dest = dest_gpr(ctx, r);
3385 sv = NULL;
3386 cb_msb = NULL;
3388 switch (c) {
3389 default:
3390 tcg_gen_add_reg(dest, in1, in2);
3391 break;
3392 case 4: case 5:
3393 cb_msb = get_temp(ctx);
3394 tcg_gen_movi_reg(cb_msb, 0);
3395 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3396 break;
3397 case 6:
3398 tcg_gen_add_reg(dest, in1, in2);
3399 sv = do_add_sv(ctx, dest, in1, in2);
3400 break;
3401 }
3403 cond = do_cond(cf, dest, cb_msb, sv);
3404 do_cbranch(ctx, disp, n, &cond);
3408 static bool trans_bb(DisasContext *ctx, uint32_t insn)
3410 target_sreg disp = assemble_12(insn) * 4;
3411 unsigned n = extract32(insn, 1, 1);
3412 unsigned c = extract32(insn, 15, 1);
3413 unsigned r = extract32(insn, 16, 5);
3414 unsigned p = extract32(insn, 21, 5);
3415 unsigned i = extract32(insn, 26, 1);
3416 TCGv_reg tmp, tcg_r;
3421 tmp = tcg_temp_new();
3422 tcg_r = load_gpr(ctx, r);
3423 if (i) {
3424 tcg_gen_shli_reg(tmp, tcg_r, p);
3425 } else {
3426 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3427 }
3429 cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
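/* After the shift the selected bit sits in the MSB, so the sign test is
   the bit test: with c == 0, TCG_COND_LT (negative) fires when the bit is
   1; c == 1 flips the sense to TCG_COND_GE, branching on a clear bit. */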
3431 do_cbranch(ctx, disp, n, &cond);
3435 static bool trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
3437 target_sreg disp = assemble_12(insn) * 4;
3438 unsigned n = extract32(insn, 1, 1);
3439 unsigned c = extract32(insn, 13, 3);
3440 unsigned t = extract32(insn, 16, 5);
3441 unsigned r = extract32(insn, 21, 5);
3447 dest = dest_gpr(ctx, r);
3448 if (is_imm) {
3449 tcg_gen_movi_reg(dest, low_sextract(t, 0, 5));
3450 } else if (t == 0) {
3451 tcg_gen_movi_reg(dest, 0);
3452 } else {
3453 tcg_gen_mov_reg(dest, cpu_gr[t]);
3454 }
3456 cond = do_sed_cond(c, dest);
3457 do_cbranch(ctx, disp, n, &cond);
3461 static bool trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
3462 const DisasInsn *di)
3464 unsigned rt = extract32(insn, 0, 5);
3465 unsigned c = extract32(insn, 13, 3);
3466 unsigned r1 = extract32(insn, 16, 5);
3467 unsigned r2 = extract32(insn, 21, 5);
3474 dest = dest_gpr(ctx, rt);
3475 if (r1 == 0) {
3476 tcg_gen_ext32u_reg(dest, load_gpr(ctx, r2));
3477 tcg_gen_shr_reg(dest, dest, cpu_sar);
3478 } else if (r1 == r2) {
3479 TCGv_i32 t32 = tcg_temp_new_i32();
3480 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, r2));
3481 tcg_gen_rotr_i32(t32, t32, cpu_sar);
3482 tcg_gen_extu_i32_reg(dest, t32);
3483 tcg_temp_free_i32(t32);
3484 } else {
3485 TCGv_i64 t = tcg_temp_new_i64();
3486 TCGv_i64 s = tcg_temp_new_i64();
3488 tcg_gen_concat_reg_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
3489 tcg_gen_extu_reg_i64(s, cpu_sar);
3490 tcg_gen_shr_i64(t, t, s);
3491 tcg_gen_trunc_i64_reg(dest, t);
3493 tcg_temp_free_i64(t);
3494 tcg_temp_free_i64(s);
3495 }
3496 save_gpr(ctx, rt, dest);
3498 /* Install the new nullification. */
3499 cond_free(&ctx->null_cond);
3500 if (c) {
3501 ctx->null_cond = do_sed_cond(c, dest);
3502 }
3503 return nullify_end(ctx);
3506 static bool trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
3507 const DisasInsn *di)
3509 unsigned rt = extract32(insn, 0, 5);
3510 unsigned cpos = extract32(insn, 5, 5);
3511 unsigned c = extract32(insn, 13, 3);
3512 unsigned r1 = extract32(insn, 16, 5);
3513 unsigned r2 = extract32(insn, 21, 5);
3514 unsigned sa = 31 - cpos;
3521 dest = dest_gpr(ctx, rt);
3522 t2 = load_gpr(ctx, r2);
3523 if (r1 == r2) {
3524 TCGv_i32 t32 = tcg_temp_new_i32();
3525 tcg_gen_trunc_reg_i32(t32, t2);
3526 tcg_gen_rotri_i32(t32, t32, sa);
3527 tcg_gen_extu_i32_reg(dest, t32);
3528 tcg_temp_free_i32(t32);
3529 } else if (r1 == 0) {
3530 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3531 } else {
3532 TCGv_reg t0 = tcg_temp_new();
3533 tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3534 tcg_gen_deposit_reg(dest, t0, cpu_gr[r1], 32 - sa, sa);
3535 tcg_temp_free(t0);
3536 }
3537 save_gpr(ctx, rt, dest);
3539 /* Install the new nullification. */
3540 cond_free(&ctx->null_cond);
3541 if (c) {
3542 ctx->null_cond = do_sed_cond(c, dest);
3543 }
3544 return nullify_end(ctx);
3547 static bool trans_extrw_sar(DisasContext *ctx, uint32_t insn,
3548 const DisasInsn *di)
3550 unsigned clen = extract32(insn, 0, 5);
3551 unsigned is_se = extract32(insn, 10, 1);
3552 unsigned c = extract32(insn, 13, 3);
3553 unsigned rt = extract32(insn, 16, 5);
3554 unsigned rr = extract32(insn, 21, 5);
3555 unsigned len = 32 - clen;
3556 TCGv_reg dest, src, tmp;
3562 dest = dest_gpr(ctx, rt);
3563 src = load_gpr(ctx, rr);
3564 tmp = tcg_temp_new();
3566 /* Recall that SAR is using big-endian bit numbering. */
3567 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
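/* Worked example (32-bit case): SAR = 0 names the most-significant bit in
   big-endian numbering; 0 ^ 31 = 31, and a right shift by 31 brings that
   bit down to position 0 before the width extract below. */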
3568 if (is_se) {
3569 tcg_gen_sar_reg(dest, src, tmp);
3570 tcg_gen_sextract_reg(dest, dest, 0, len);
3571 } else {
3572 tcg_gen_shr_reg(dest, src, tmp);
3573 tcg_gen_extract_reg(dest, dest, 0, len);
3574 }
3575 tcg_temp_free(tmp);
3576 save_gpr(ctx, rt, dest);
3578 /* Install the new nullification. */
3579 cond_free(&ctx->null_cond);
3580 if (c) {
3581 ctx->null_cond = do_sed_cond(c, dest);
3582 }
3583 return nullify_end(ctx);
3586 static bool trans_extrw_imm(DisasContext *ctx, uint32_t insn,
3587 const DisasInsn *di)
3589 unsigned clen = extract32(insn, 0, 5);
3590 unsigned pos = extract32(insn, 5, 5);
3591 unsigned is_se = extract32(insn, 10, 1);
3592 unsigned c = extract32(insn, 13, 3);
3593 unsigned rt = extract32(insn, 16, 5);
3594 unsigned rr = extract32(insn, 21, 5);
3595 unsigned len = 32 - clen;
3596 unsigned cpos = 31 - pos;
3603 dest = dest_gpr(ctx, rt);
3604 src = load_gpr(ctx, rr);
3605 if (is_se) {
3606 tcg_gen_sextract_reg(dest, src, cpos, len);
3607 } else {
3608 tcg_gen_extract_reg(dest, src, cpos, len);
3609 }
3610 save_gpr(ctx, rt, dest);
3612 /* Install the new nullification. */
3613 cond_free(&ctx->null_cond);
3614 if (c) {
3615 ctx->null_cond = do_sed_cond(c, dest);
3616 }
3617 return nullify_end(ctx);
3620 static const DisasInsn table_sh_ex[] = {
3621 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
3622 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
3623 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
3624 { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
3627 static bool trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
3628 const DisasInsn *di)
3630 unsigned clen = extract32(insn, 0, 5);
3631 unsigned cpos = extract32(insn, 5, 5);
3632 unsigned nz = extract32(insn, 10, 1);
3633 unsigned c = extract32(insn, 13, 3);
3634 target_sreg val = low_sextract(insn, 16, 5);
3635 unsigned rt = extract32(insn, 21, 5);
3636 unsigned len = 32 - clen;
3637 target_sreg mask0, mask1;
3643 if (cpos + len > 32) {
3644 len = 32 - cpos;
3645 }
3647 dest = dest_gpr(ctx, rt);
3648 mask0 = deposit64(0, cpos, len, val);
3649 mask1 = deposit64(-1, cpos, len, val);
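/* Worked example (illustrative): cpos = 4, len = 4, val = 0b1010 gives
   mask0 = 0xa0 and mask1 = 0xffffffaf; and-ing with mask1 clears the field
   bits that must become 0, or-ing with mask0 sets those that must be 1. */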
3651 if (nz) {
3652 TCGv_reg src = load_gpr(ctx, rt);
3653 if (mask1 != -1) {
3654 tcg_gen_andi_reg(dest, src, mask1);
3655 src = dest;
3656 }
3657 tcg_gen_ori_reg(dest, src, mask0);
3658 } else {
3659 tcg_gen_movi_reg(dest, mask0);
3660 }
3661 save_gpr(ctx, rt, dest);
3663 /* Install the new nullification. */
3664 cond_free(&ctx->null_cond);
3665 if (c) {
3666 ctx->null_cond = do_sed_cond(c, dest);
3667 }
3668 return nullify_end(ctx);
3671 static bool trans_depw_imm(DisasContext *ctx, uint32_t insn,
3672 const DisasInsn *di)
3674 unsigned clen = extract32(insn, 0, 5);
3675 unsigned cpos = extract32(insn, 5, 5);
3676 unsigned nz = extract32(insn, 10, 1);
3677 unsigned c = extract32(insn, 13, 3);
3678 unsigned rr = extract32(insn, 16, 5);
3679 unsigned rt = extract32(insn, 21, 5);
3680 unsigned rs = nz ? rt : 0;
3681 unsigned len = 32 - clen;
3687 if (cpos + len > 32) {
3688 len = 32 - cpos;
3689 }
3691 dest = dest_gpr(ctx, rt);
3692 val = load_gpr(ctx, rr);
3693 if (rs == 0) {
3694 tcg_gen_deposit_z_reg(dest, val, cpos, len);
3695 } else {
3696 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, cpos, len);
3697 }
3698 save_gpr(ctx, rt, dest);
3700 /* Install the new nullification. */
3701 cond_free(&ctx->null_cond);
3702 if (c) {
3703 ctx->null_cond = do_sed_cond(c, dest);
3704 }
3705 return nullify_end(ctx);
3708 static bool trans_depw_sar(DisasContext *ctx, uint32_t insn,
3709 const DisasInsn *di)
3711 unsigned clen = extract32(insn, 0, 5);
3712 unsigned nz = extract32(insn, 10, 1);
3713 unsigned i = extract32(insn, 12, 1);
3714 unsigned c = extract32(insn, 13, 3);
3715 unsigned rt = extract32(insn, 21, 5);
3716 unsigned rs = nz ? rt : 0;
3717 unsigned len = 32 - clen;
3718 TCGv_reg val, mask, tmp, shift, dest;
3719 unsigned msb = 1U << (len - 1);
3725 if (i) {
3726 val = load_const(ctx, low_sextract(insn, 16, 5));
3727 } else {
3728 val = load_gpr(ctx, extract32(insn, 16, 5));
3729 }
3730 dest = dest_gpr(ctx, rt);
3731 shift = tcg_temp_new();
3732 tmp = tcg_temp_new();
3734 /* Convert big-endian bit numbering in SAR to left-shift. */
3735 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3737 mask = tcg_const_reg(msb + (msb - 1));
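/* msb + (msb - 1) builds the len-bit mask without overflow: for len = 32,
   msb = 0x80000000 and the sum is 0xffffffff, whereas (1 << len) - 1 would
   shift by the full type width, which is undefined behaviour in C. */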
3738 tcg_gen_and_reg(tmp, val, mask);
3739 if (rs) {
3740 tcg_gen_shl_reg(mask, mask, shift);
3741 tcg_gen_shl_reg(tmp, tmp, shift);
3742 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3743 tcg_gen_or_reg(dest, dest, tmp);
3744 } else {
3745 tcg_gen_shl_reg(dest, tmp, shift);
3746 }
3747 tcg_temp_free(shift);
3748 tcg_temp_free(mask);
3750 save_gpr(ctx, rt, dest);
3752 /* Install the new nullification. */
3753 cond_free(&ctx->null_cond);
3754 if (c) {
3755 ctx->null_cond = do_sed_cond(c, dest);
3756 }
3757 return nullify_end(ctx);
3760 static const DisasInsn table_depw[] = {
3761 { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
3762 { 0xd4000800u, 0xfc001800u, trans_depw_imm },
3763 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
3766 static bool trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
3768 unsigned n = extract32(insn, 1, 1);
3769 unsigned b = extract32(insn, 21, 5);
3770 target_sreg disp = assemble_17(insn);
3773 #ifdef CONFIG_USER_ONLY
3774 /* ??? It seems like there should be a good way of using
3775 "be disp(sr2, r0)", the canonical gateway entry mechanism
3776 to our advantage. But that appears to be inconvenient to
3777 manage along side branch delay slots. Therefore we handle
3778 entry into the gateway page via absolute address. */
3779 /* Since we don't implement spaces, just branch. Do notice the special
3780 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3781 goto_tb to the TB containing the syscall. */
3782 if (b == 0) {
3783 do_dbranch(ctx, disp, is_l ? 31 : 0, n);
3784 return true;
3785 }
3786 #else
3787 int sp = assemble_sr3(insn);
3788 nullify_over(ctx);
3789 #endif
3791 tmp = get_temp(ctx);
3792 tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
3793 tmp = do_ibranch_priv(ctx, tmp);
3795 #ifdef CONFIG_USER_ONLY
3796 do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
3797 #else
3798 TCGv_i64 new_spc = tcg_temp_new_i64();
3800 load_spr(ctx, new_spc, sp);
3802 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3803 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3805 if (n && use_nullify_skip(ctx)) {
3806 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3807 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3808 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3809 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3810 } else {
3811 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3812 if (ctx->iaoq_b == -1) {
3813 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3814 }
3815 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3816 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3817 nullify_set(ctx, n);
3818 }
3819 tcg_temp_free_i64(new_spc);
3820 tcg_gen_lookup_and_goto_ptr();
3821 ctx->base.is_jmp = DISAS_NORETURN;
3822 return nullify_end(ctx);
3823 #endif
3827 static bool trans_bl(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3829 unsigned n = extract32(insn, 1, 1);
3830 unsigned link = extract32(insn, 21, 5);
3831 target_sreg disp = assemble_17(insn);
3833 do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
3837 static bool trans_b_gate(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3839 unsigned n = extract32(insn, 1, 1);
3840 unsigned link = extract32(insn, 21, 5);
3841 target_sreg disp = assemble_17(insn);
3842 target_ureg dest = iaoq_dest(ctx, disp);
3844 /* Make sure the caller hasn't done something weird with the queue.
3845 * ??? This is not quite the same as the PSW[B] bit, which would be
3846 * expensive to track. Real hardware will trap for
3847 * b gateway
3848 * b gateway+4 (in delay slot of first branch)
3849 * However, checking for a non-sequential instruction queue *will*
3850 * diagnose the security hole
3851 * b gateway
3852 * b evil
3853 * in which instructions at evil would run with increased privs.
3855 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3856 return gen_illegal(ctx);
3859 #ifndef CONFIG_USER_ONLY
3860 if (ctx->tb_flags & PSW_C) {
3861 CPUHPPAState *env = ctx->cs->env_ptr;
3862 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3863 /* If we could not find a TLB entry, then we need to generate an
3864 ITLB miss exception so the kernel will provide it.
3865 The resulting TLB fill operation will invalidate this TB and
3866 we will re-translate, at which point we *will* be able to find
3867 the TLB entry and determine if this is in fact a gateway page. */
3868 if (type < 0) {
3869 gen_excp(ctx, EXCP_ITLB_MISS);
3870 return true;
3871 }
3872 /* No change for non-gateway pages or for priv decrease. */
3873 if (type >= 4 && type - 4 < ctx->privilege) {
3874 dest = deposit32(dest, 0, 2, type - 4);
3875 }
3876 } else {
3877 dest &= -4; /* priv = 0 */
3878 }
3879 #endif
3881 do_dbranch(ctx, dest, link, n);
3885 static bool trans_bl_long(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3887 unsigned n = extract32(insn, 1, 1);
3888 target_sreg disp = assemble_22(insn);
3890 do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
3894 static bool trans_blr(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3896 unsigned n = extract32(insn, 1, 1);
3897 unsigned rx = extract32(insn, 16, 5);
3898 unsigned link = extract32(insn, 21, 5);
3899 TCGv_reg tmp = get_temp(ctx);
3901 tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
3902 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3903 /* The computation here never changes privilege level. */
3904 do_ibranch(ctx, tmp, link, n);
3908 static bool trans_bv(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3910 unsigned n = extract32(insn, 1, 1);
3911 unsigned rx = extract32(insn, 16, 5);
3912 unsigned rb = extract32(insn, 21, 5);
3914 TCGv_reg dest;
3915 if (rx == 0) {
3916 dest = load_gpr(ctx, rb);
3917 } else {
3918 dest = get_temp(ctx);
3919 tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
3920 tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
3921 }
3922 dest = do_ibranch_priv(ctx, dest);
3923 do_ibranch(ctx, dest, 0, n);
3927 static bool trans_bve(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3929 unsigned n = extract32(insn, 1, 1);
3930 unsigned rb = extract32(insn, 21, 5);
3931 unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3934 #ifdef CONFIG_USER_ONLY
3935 dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
3936 do_ibranch(ctx, dest, link, n);
3937 #else
3939 dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
3941 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3942 if (ctx->iaoq_b == -1) {
3943 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3945 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3946 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3947 if (link) {
3948 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
3949 }
3950 nullify_set(ctx, n);
3951 tcg_gen_lookup_and_goto_ptr();
3952 ctx->base.is_jmp = DISAS_NORETURN;
3953 return nullify_end(ctx);
3954 #endif
3958 static const DisasInsn table_branch[] = {
3959 { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3960 { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3961 { 0xe8004000u, 0xfc00fffdu, trans_blr },
3962 { 0xe800c000u, 0xfc00fffdu, trans_bv },
3963 { 0xe800d000u, 0xfc00dffcu, trans_bve },
3964 { 0xe8002000u, 0xfc00e000u, trans_b_gate },
3967 static bool trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3968 const DisasInsn *di)
3970 unsigned rt = extract32(insn, 0, 5);
3971 unsigned ra = extract32(insn, 21, 5);
3972 do_fop_wew(ctx, rt, ra, di->f.wew);
3976 static bool trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3977 const DisasInsn *di)
3979 unsigned rt = assemble_rt64(insn);
3980 unsigned ra = assemble_ra64(insn);
3981 do_fop_wew(ctx, rt, ra, di->f.wew);
3985 static bool trans_fop_ded(DisasContext *ctx, uint32_t insn,
3986 const DisasInsn *di)
3988 unsigned rt = extract32(insn, 0, 5);
3989 unsigned ra = extract32(insn, 21, 5);
3990 do_fop_ded(ctx, rt, ra, di->f.ded);
3994 static bool trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3995 const DisasInsn *di)
3997 unsigned rt = extract32(insn, 0, 5);
3998 unsigned ra = extract32(insn, 21, 5);
3999 do_fop_wed(ctx, rt, ra, di->f.wed);
4003 static bool trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
4004 const DisasInsn *di)
4006 unsigned rt = assemble_rt64(insn);
4007 unsigned ra = extract32(insn, 21, 5);
4008 do_fop_wed(ctx, rt, ra, di->f.wed);
4012 static bool trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
4013 const DisasInsn *di)
4015 unsigned rt = extract32(insn, 0, 5);
4016 unsigned ra = extract32(insn, 21, 5);
4017 do_fop_dew(ctx, rt, ra, di->f.dew);
4021 static bool trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
4022 const DisasInsn *di)
4024 unsigned rt = extract32(insn, 0, 5);
4025 unsigned ra = assemble_ra64(insn);
4026 do_fop_dew(ctx, rt, ra, di->f.dew);
4030 static bool trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
4031 const DisasInsn *di)
4033 unsigned rt = extract32(insn, 0, 5);
4034 unsigned rb = extract32(insn, 16, 5);
4035 unsigned ra = extract32(insn, 21, 5);
4036 do_fop_weww(ctx, rt, ra, rb, di->f.weww);
4040 static bool trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
4041 const DisasInsn *di)
4043 unsigned rt = assemble_rt64(insn);
4044 unsigned rb = assemble_rb64(insn);
4045 unsigned ra = assemble_ra64(insn);
4046 do_fop_weww(ctx, rt, ra, rb, di->f.weww);
4050 static bool trans_fop_dedd(DisasContext *ctx, uint32_t insn,
4051 const DisasInsn *di)
4053 unsigned rt = extract32(insn, 0, 5);
4054 unsigned rb = extract32(insn, 16, 5);
4055 unsigned ra = extract32(insn, 21, 5);
4056 do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
4060 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4062 tcg_gen_mov_i32(dst, src);
4065 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4067 tcg_gen_mov_i64(dst, src);
4070 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4072 tcg_gen_andi_i32(dst, src, INT32_MAX);
4075 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4077 tcg_gen_andi_i64(dst, src, INT64_MAX);
4080 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4082 tcg_gen_xori_i32(dst, src, INT32_MIN);
4085 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4087 tcg_gen_xori_i64(dst, src, INT64_MIN);
4090 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4092 tcg_gen_ori_i32(dst, src, INT32_MIN);
4095 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4097 tcg_gen_ori_i64(dst, src, INT64_MIN);
4100 static void do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
4101 unsigned y, unsigned c)
4103 TCGv_i32 ta, tb, tc, ty;
4107 ta = load_frw0_i32(ra);
4108 tb = load_frw0_i32(rb);
4109 ty = tcg_const_i32(y);
4110 tc = tcg_const_i32(c);
4112 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
4114 tcg_temp_free_i32(ta);
4115 tcg_temp_free_i32(tb);
4116 tcg_temp_free_i32(ty);
4117 tcg_temp_free_i32(tc);
4122 static bool trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
4123 const DisasInsn *di)
4125 unsigned c = extract32(insn, 0, 5);
4126 unsigned y = extract32(insn, 13, 3);
4127 unsigned rb = extract32(insn, 16, 5);
4128 unsigned ra = extract32(insn, 21, 5);
4129 do_fcmp_s(ctx, ra, rb, y, c);
4133 static bool trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
4134 const DisasInsn *di)
4136 unsigned c = extract32(insn, 0, 5);
4137 unsigned y = extract32(insn, 13, 3);
4138 unsigned rb = assemble_rb64(insn);
4139 unsigned ra = assemble_ra64(insn);
4140 do_fcmp_s(ctx, ra, rb, y, c);
4144 static bool trans_fcmp_d(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
4146 unsigned c = extract32(insn, 0, 5);
4147 unsigned y = extract32(insn, 13, 3);
4148 unsigned rb = extract32(insn, 16, 5);
4149 unsigned ra = extract32(insn, 21, 5);
4155 ta = load_frd0(ra);
4156 tb = load_frd0(rb);
4157 ty = tcg_const_i32(y);
4158 tc = tcg_const_i32(c);
4160 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
4162 tcg_temp_free_i64(ta);
4163 tcg_temp_free_i64(tb);
4164 tcg_temp_free_i32(ty);
4165 tcg_temp_free_i32(tc);
4167 return nullify_end(ctx);
4170 static bool trans_ftest_t(DisasContext *ctx, uint32_t insn,
4171 const DisasInsn *di)
4173 unsigned y = extract32(insn, 13, 3);
4174 unsigned cbit = (y ^ 1) - 1;
4180 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
4181 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
4182 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4185 return nullify_end(ctx);
4188 static bool trans_ftest_q(DisasContext *ctx, uint32_t insn,
4189 const DisasInsn *di)
4191 unsigned c = extract32(insn, 0, 5);
4199 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
4202 case 0: /* simple */
4203 tcg_gen_andi_reg(t, t, 0x4000000);
4204 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4228 return gen_illegal(ctx);
4229 }
4230 if (inv) {
4231 TCGv_reg c = load_const(ctx, mask);
4232 tcg_gen_or_reg(t, t, c);
4233 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4234 } else {
4235 tcg_gen_andi_reg(t, t, mask);
4236 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4237 }
4239 return nullify_end(ctx);
4242 static bool trans_xmpyu(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
4244 unsigned rt = extract32(insn, 0, 5);
4245 unsigned rb = assemble_rb64(insn);
4246 unsigned ra = assemble_ra64(insn);
4251 a = load_frw0_i64(ra);
4252 b = load_frw0_i64(rb);
4253 tcg_gen_mul_i64(a, a, b);
4254 save_frd(rt, a);
4255 tcg_temp_free_i64(a);
4256 tcg_temp_free_i64(b);
4258 return nullify_end(ctx);
4261 #define FOP_DED trans_fop_ded, .f.ded
4262 #define FOP_DEDD trans_fop_dedd, .f.dedd
4264 #define FOP_WEW trans_fop_wew_0c, .f.wew
4265 #define FOP_DEW trans_fop_dew_0c, .f.dew
4266 #define FOP_WED trans_fop_wed_0c, .f.wed
4267 #define FOP_WEWW trans_fop_weww_0c, .f.weww
4269 static const DisasInsn table_float_0c[] = {
4270 /* floating point class zero */
4271 { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
4272 { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
4273 { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
4274 { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
4275 { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
4276 { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
4278 { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
4279 { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
4280 { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
4281 { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
4282 { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
4283 { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
4285 /* floating point class three */
4286 { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
4287 { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
4288 { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
4289 { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
4291 { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
4292 { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
4293 { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
4294 { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
4296 /* floating point class one */
4298 { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
4299 { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
4301 { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
4302 { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
4303 { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
4304 { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
4306 { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
4307 { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
4308 { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
4309 { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
4310 /* float/int truncate */
4311 { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
4312 { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
4313 { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
4314 { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
4316 { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
4317 { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
4318 { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
4319 { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
4321 { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
4322 { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
4323 { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
4324 { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
4325 /* float/uint truncate */
4326 { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
4327 { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
4328 { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
4329 { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
4331 /* floating point class two */
4332 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
4333 { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
4334 { 0x30002420, 0xffffffe0, trans_ftest_q },
4335 { 0x30000420, 0xffff1fff, trans_ftest_t },
4337 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
4338 This is machine/revision == 0, which is reserved for simulator. */
4339 { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
4340 };
4342 #undef FOP_WEW
4343 #undef FOP_DEW
4344 #undef FOP_WED
4345 #undef FOP_WEWW
4346 #define FOP_WEW trans_fop_wew_0e, .f.wew
4347 #define FOP_DEW trans_fop_dew_0e, .f.dew
4348 #define FOP_WED trans_fop_wed_0e, .f.wed
4349 #define FOP_WEWW trans_fop_weww_0e, .f.weww
4351 static const DisasInsn table_float_0e[] = {
4352 /* floating point class zero */
4353 { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
4354 { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
4355 { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
4356 { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
4357 { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
4358 { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
4360 { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
4361 { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
4362 { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
4363 { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
4364 { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
4365 { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
4367 /* floating point class three */
4368 { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
4369 { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
4370 { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
4371 { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
4373 { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
4374 { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
4375 { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
4376 { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
4378 { 0x38004700, 0xfc00ef60, trans_xmpyu },
4380 /* floating point class one */
4382 { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
4383 { 0x38002200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_d },
4385 { 0x38008200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_w_s },
4386 { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
4387 { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
4388 { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
4390 { 0x38010200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_w },
4391 { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
4392 { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
4393 { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
4394 /* float/int truncate */
4395 { 0x38018200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_w },
4396 { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
4397 { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
4398 { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
4400 { 0x38028200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_uw_s },
4401 { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
4402 { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
4403 { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
4405 { 0x38030200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_uw },
4406 { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
4407 { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
4408 { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
4409 /* float/uint truncate */
4410 { 0x38038200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_uw },
4411 { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
4412 { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
4413 { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
4415 /* floating point class two */
4416 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
4417 { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
4418 };
4420 #undef FOP_WEW
4421 #undef FOP_DEW
4422 #undef FOP_WED
4423 #undef FOP_WEWW
4424 #undef FOP_DED
4425 #undef FOP_DEDD
4427 /* Convert the fmpyadd single-precision register encodings to standard. */
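/* E.g. r = 5 maps to 21 and r = 21 maps to 53 (= 32 + 21): both land in
   fr16-fr31, with the +32 bias selecting the second single-precision half,
   mirroring the assemble_rt64-style numbering used elsewhere in this file. */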
4428 static inline int fmpyadd_s_reg(unsigned r)
4430 return (r & 16) * 2 + 16 + (r & 15);
4433 static bool trans_fmpyadd(DisasContext *ctx, uint32_t insn, bool is_sub)
4435 unsigned tm = extract32(insn, 0, 5);
4436 unsigned f = extract32(insn, 5, 1);
4437 unsigned ra = extract32(insn, 6, 5);
4438 unsigned ta = extract32(insn, 11, 5);
4439 unsigned rm2 = extract32(insn, 16, 5);
4440 unsigned rm1 = extract32(insn, 21, 5);
4444 /* Independent multiply & add/sub, with undefined behaviour
4445 if outputs overlap inputs. */
4446 if (f == 0) {
4447 tm = fmpyadd_s_reg(tm);
4448 ra = fmpyadd_s_reg(ra);
4449 ta = fmpyadd_s_reg(ta);
4450 rm2 = fmpyadd_s_reg(rm2);
4451 rm1 = fmpyadd_s_reg(rm1);
4452 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4453 do_fop_weww(ctx, ta, ta, ra,
4454 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4455 } else {
4456 do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
4457 do_fop_dedd(ctx, ta, ta, ra,
4458 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4459 }
4461 return nullify_end(ctx);
static bool trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned rt = assemble_rt64(insn);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = assemble_ra64(insn);
    unsigned rm2 = assemble_rb64(insn);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i32 a, b, c;

    nullify_over(ctx);
    a = load_frw0_i32(rm1);
    b = load_frw0_i32(rm2);
    c = load_frw0_i32(ra3);

    if (neg) {
        gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i32(b);
    tcg_temp_free_i32(c);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
                             const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned neg = extract32(insn, 5, 1);
    unsigned rm1 = extract32(insn, 21, 5);
    unsigned rm2 = extract32(insn, 16, 5);
    unsigned ra3 = assemble_rc64(insn);
    TCGv_i64 a, b, c;

    nullify_over(ctx);
    a = load_frd0(rm1);
    b = load_frd0(rm2);
    c = load_frd0(ra3);

    if (neg) {
        gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
    } else {
        gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
    }

    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx);
}

static const DisasInsn table_fp_fused[] = {
    { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
    { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
};
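
/* Each DisasInsn entry gives a fixed bit pattern and the mask of bits
   that must match; e.g. the trans_fmpyfadd_d entry above fires exactly
   when (insn & 0xfc0019c0) == 0xb8000800.  */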
static void translate_table_int(DisasContext *ctx, uint32_t insn,
                                const DisasInsn table[], size_t n)
{
    size_t i;

    for (i = 0; i < n; ++i) {
        if ((insn & table[i].mask) == table[i].insn) {
            table[i].trans(ctx, insn, &table[i]);
            return;
        }
    }
    qemu_log_mask(LOG_UNIMP, "UNIMP insn %08x @ " TARGET_FMT_lx "\n",
                  insn, ctx->base.pc_next);
    gen_illegal(ctx);
}

#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))

static void translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc;

    /* Transition to the auto-generated decoder.  */
    if (decode(ctx, insn)) {
        return;
    }
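
    /* Everything not yet converted to decodetree is dispatched on the
       major opcode, the top six bits of the instruction word.  */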
    opc = extract32(insn, 26, 6);
    switch (opc) {
    case 0x00: /* system op */
        translate_table(ctx, insn, table_system);
        return;
    case 0x01:
        translate_table(ctx, insn, table_mem_mgmt);
        return;
    case 0x02:
        translate_table(ctx, insn, table_arith_log);
        return;
    case 0x03:
        translate_table(ctx, insn, table_index_mem);
        return;
    case 0x06:
        trans_fmpyadd(ctx, insn, false);
        return;
    case 0x08:
        trans_ldil(ctx, insn);
        return;
    case 0x09:
        trans_copr_w(ctx, insn);
        return;
    case 0x0A:
        trans_addil(ctx, insn);
        return;
    case 0x0B:
        trans_copr_dw(ctx, insn);
        return;
    case 0x0C:
        translate_table(ctx, insn, table_float_0c);
        return;
    case 0x0D:
        trans_ldo(ctx, insn);
        return;
    case 0x0E:
        translate_table(ctx, insn, table_float_0e);
        return;

    case 0x10:
        trans_load(ctx, insn, false, MO_UB);
        return;
    case 0x11:
        trans_load(ctx, insn, false, MO_TEUW);
        return;
    case 0x12:
        trans_load(ctx, insn, false, MO_TEUL);
        return;
    case 0x13:
        trans_load(ctx, insn, true, MO_TEUL);
        return;
    case 0x16:
        trans_fload_mod(ctx, insn);
        return;
    case 0x17:
        trans_load_w(ctx, insn);
        return;
    case 0x18:
        trans_store(ctx, insn, false, MO_UB);
        return;
    case 0x19:
        trans_store(ctx, insn, false, MO_TEUW);
        return;
    case 0x1A:
        trans_store(ctx, insn, false, MO_TEUL);
        return;
    case 0x1B:
        trans_store(ctx, insn, true, MO_TEUL);
        return;
    case 0x1E:
        trans_fstore_mod(ctx, insn);
        return;
    case 0x1F:
        trans_store_w(ctx, insn);
        return;

    case 0x20:
        trans_cmpb(ctx, insn, true, false, false);
        return;
    case 0x21:
        trans_cmpb(ctx, insn, true, true, false);
        return;
    case 0x22:
        trans_cmpb(ctx, insn, false, false, false);
        return;
    case 0x23:
        trans_cmpb(ctx, insn, false, true, false);
        return;
    case 0x24:
        trans_cmpiclr(ctx, insn);
        return;
    case 0x25:
        trans_subi(ctx, insn);
        return;
    case 0x26:
        trans_fmpyadd(ctx, insn, true);
        return;
    case 0x27:
        trans_cmpb(ctx, insn, true, false, true);
        return;
    case 0x28:
        trans_addb(ctx, insn, true, false);
        return;
    case 0x29:
        trans_addb(ctx, insn, true, true);
        return;
    case 0x2A:
        trans_addb(ctx, insn, false, false);
        return;
    case 0x2B:
        trans_addb(ctx, insn, false, true);
        return;
    case 0x2C:
    case 0x2D:
        trans_addi(ctx, insn);
        return;
    case 0x2E:
        translate_table(ctx, insn, table_fp_fused);
        return;
    case 0x2F:
        trans_cmpb(ctx, insn, false, false, true);
        return;

    case 0x30:
    case 0x31:
        trans_bb(ctx, insn);
        return;
    case 0x32:
        trans_movb(ctx, insn, false);
        return;
    case 0x33:
        trans_movb(ctx, insn, true);
        return;
    case 0x34:
        translate_table(ctx, insn, table_sh_ex);
        return;
    case 0x35:
        translate_table(ctx, insn, table_depw);
        return;
    case 0x38:
        trans_be(ctx, insn, false);
        return;
    case 0x39:
        trans_be(ctx, insn, true);
        return;
    case 0x3A:
        translate_table(ctx, insn, table_branch);
        return;

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
        break;
    case 0x3F:
#ifndef CONFIG_USER_ONLY
        /* Unassigned, but use as system-halt.  */
        if (insn == 0xfffdead0) {
            gen_hlt(ctx, 0); /* halt system */
            return;
        }
        if (insn == 0xfffdead1) {
            gen_hlt(ctx, 1); /* reset system */
            return;
        }
#endif
        break;
    }
    gen_illegal(ctx);
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
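
    /* Here cs_base packs two fields: the upper 32 bits are the IASQ_F
       space, and the low 32 bits are the signed displacement from
       IAOQ_F to IAOQ_B, with 0 meaning "unknown" (recorded as -1).  */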
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
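    /* E.g. with 4 KiB pages, a pc_first ending in 0xff0 gives
       -(0xff0 | 0xfffff000) / 4 = 16 / 4 = 4 insns to the page end.  */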

    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                     const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    gen_excp(ctx, EXCP_DEBUG);
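
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be properly
       cleared; advancing pc_next gives this TB a non-zero size.  */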
    ctx->base.pc_next += 4;
    return true;
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }
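
        /* If this insn is statically known to be nullified, consume
           the nullification and translate nothing.  */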
        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            translate_one(ctx, insn);
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;
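
    /* When the TB ends, the queue registers are written back in
       tb_stop; here we only need to catch the cases where the front
       of the queue has gone variable.  */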
    if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
        return;
    }
    if (ctx->iaoq_f == -1) {
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
        nullify_save(ctx);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
    } else if (ctx->iaoq_b == -1) {
        tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    target_ulong pc = dcbase->pc_first;
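
    /* Under user-mode emulation, page zero is the kernel gateway page
       rather than real guest code; log its well-known entry points by
       name instead of disassembling them.  */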
#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        qemu_log("IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        qemu_log("IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    qemu_log("IN: %s\n", lookup_symbol(pc));
    log_target_disas(cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start = hppa_tr_tb_start,
    .insn_start = hppa_tr_insn_start,
    .breakpoint_check = hppa_tr_breakpoint_check,
    .translate_insn = hppa_tr_translate_insn,
    .tb_stop = hppa_tr_tb_stop,
    .disas_log = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
}
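
/* The two data words restored below are the (iaoq_f, iaoq_b) pair
   recorded by tcg_gen_insn_start in hppa_tr_insn_start above.  */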
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != (target_ureg)-1) {
        env->iaoq_b = data[1];
    }

    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}