/*
 * HPPA emulation cpu translation for qemu.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "trace-tcg.h"
/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */
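/* For example, with TARGET_REGISTER_BITS == 32 but 64-bit TARGET_LONG_BITS,
   TCGv_reg below maps to TCGv_i32 while TCGv_tl maps to TCGv_i64; each
   tcg_gen_*_reg / *_tl wrapper picks the matching-width TCG opcode.  */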
#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free
#if TARGET_LONG_BITS == 64
#define TCGv_tl             TCGv_i64
#define tcg_temp_new_tl     tcg_temp_new_i64
#define tcg_temp_free_tl    tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl             TCGv_i32
#define tcg_temp_new_tl     tcg_temp_new_i32
#define tcg_temp_free_tl    tcg_temp_free_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif
#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_reg_new tcg_global_reg_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new tcg_temp_local_new_i64
#define tcg_temp_free tcg_temp_free_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg tcg_const_i64
#define tcg_const_local_reg tcg_const_local_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_reg_new tcg_global_reg_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new tcg_temp_local_new_i32
#define tcg_temp_free tcg_temp_free_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg tcg_const_i32
#define tcg_const_local_reg tcg_const_local_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
    bool a0_is_n;
    bool a1_is_0;
} DisasCond;
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
} DisasContext;
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}
/* Include the auto-generated decoder.  */
#include "decode.inc.c"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
typedef struct DisasInsn {
    uint32_t insn, mask;
    bool (*trans)(DisasContext *ctx, uint32_t insn,
                  const struct DisasInsn *f);
    union {
        void (*ttt)(TCGv_reg, TCGv_reg, TCGv_reg);
        void (*weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
        void (*dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
        void (*wew)(TCGv_i32, TCGv_env, TCGv_i32);
        void (*ded)(TCGv_i64, TCGv_env, TCGv_i64);
        void (*wed)(TCGv_i32, TCGv_env, TCGv_i64);
        void (*dew)(TCGv_i64, TCGv_env, TCGv_i32);
    } f;
} DisasInsn;
/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true,
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);

    return r;
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}
static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}
static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif
static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}
#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp;

    nullify_over(ctx);
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}
#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif
static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
        || ctx->base.singlestep_enabled) {
        return false;
    }
    return true;
}
/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}
/* PA has a habit of taking the LSB of a field and using that as the sign,
   with the rest of the field becoming the least significant bits.  */
static target_sreg low_sextract(uint32_t val, int pos, int len)
{
    target_ureg x = -(target_ureg)extract32(val, pos, 1);
    x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
    return x;
}
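/* Worked example: low_sextract(insn, 0, 14), as used by assemble_16 below,
   replicates insn bit 0 as the sign into the high bits and takes bits
   [13:1] as the low 13 bits of the magnitude.  */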
static unsigned assemble_rt64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 6, 1);
    unsigned r0 = extract32(insn, 0, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_ra64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 7, 1);
    unsigned r0 = extract32(insn, 21, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rb64(uint32_t insn)
{
    unsigned r1 = extract32(insn, 12, 1);
    unsigned r0 = extract32(insn, 16, 5);
    return r1 * 32 + r0;
}

static unsigned assemble_rc64(uint32_t insn)
{
    unsigned r2 = extract32(insn, 8, 1);
    unsigned r1 = extract32(insn, 13, 3);
    unsigned r0 = extract32(insn, 9, 2);
    return r2 * 32 + r1 * 4 + r0;
}

static inline unsigned assemble_sr3(uint32_t insn)
{
    unsigned s2 = extract32(insn, 13, 1);
    unsigned s0 = extract32(insn, 14, 2);
    return s2 * 4 + s0;
}
static target_sreg assemble_12(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x;
}

static target_sreg assemble_16(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 16-bit number
       only with wide mode; otherwise a 14-bit number.  Since we don't
       implement wide mode, this is always the 14-bit number.  */
    return low_sextract(insn, 0, 14);
}

static target_sreg assemble_16a(uint32_t insn)
{
    /* Take the name from PA2.0, which produces a 14-bit shifted number
       only with wide mode; otherwise a 12-bit shifted number.  Since we
       don't implement wide mode, this is always the 12-bit number.  */
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 2, 11);
    return x << 2;
}

static target_sreg assemble_17(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x;
}

static target_sreg assemble_21(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 11) | extract32(insn, 1, 11);
    x = (x << 2) | extract32(insn, 14, 2);
    x = (x << 5) | extract32(insn, 16, 5);
    x = (x << 2) | extract32(insn, 12, 2);
    return x << 11;
}

static target_sreg assemble_22(uint32_t insn)
{
    target_ureg x = -(target_ureg)(insn & 1);
    x = (x << 10) | extract32(insn, 16, 10);
    x = (x << 1) | extract32(insn, 2, 1);
    x = (x << 10) | extract32(insn, 3, 10);
    return x;
}
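/* Worked example of the scrambled fields: assemble_12 rebuilds a signed
   12-bit word displacement as {sign: insn[0], insn[2], insn[12:3]}, msb
   first, so an encoding with only the sign bit set decodes to -2048
   words.  (Scaling word displacements to byte offsets is assumed to
   happen at the call sites.)  */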
/* The parisc documentation describes only the general interpretation of
   the conditions, without describing their exact implementation.  The
   interpretations do not stand up well when considering ADD,C and SUB,B.
   However, considering the Addition, Subtraction and Logical conditions
   as a whole it would appear that these relations are similar to what
   a traditional NZCV set of flags would produce.  */

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N / !N) */
        cond = cond_make_0(TCG_COND_LT, res);
        break;
    case 3: /* <= / >        (N | Z / !N & !Z) */
        cond = cond_make_0(TCG_COND_LE, res);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0(TCG_COND_EQ, tmp);
        tcg_temp_free(tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, sv, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/* Similar, but for logicals, where the carry and overflow bits are not
   computed, and use of them is undefined.  */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf >> 1) {
    case 4: case 5: case 6:
        cf &= 1;
        break;
    }
    return do_cond(cf, res, res, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    f = (orig & 4) / 4;
    if (c == 3) {
        c = 7;
    }
    return do_log_cond(c * 2 + f, res);
}
/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
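        /* Per bit, cb is now (in1 & in2) | ((in1 | in2) & ~res): the
           carry-out of in1 + in2 at each bit position, since a carry
           escapes a bit either when both inputs were set, or when at
           least one input was set and the sum bit came out clear.  */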
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
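        /* I.e. (res - 0x01010101) & ~res & 0x80808080 is nonzero iff
           some byte of res is zero: the subtraction borrows through a
           zero byte and flips its msb, while the andc discards msbs
           that were already set in res.  */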
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
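    /* The sign bit of sv is (res ^ in1) & ~(in1 ^ in2): set exactly when
       the operands had equal signs but the result's sign differs, which
       is the signed-overflow condition tested by do_cond case 6.  */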
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || c == 4 || c == 5) {
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
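    /* A sketch of why the seeding works: sub2 computes {1:in1} - {0:in2},
       whose low word is in1 - in2 and whose high word is 1 - borrow.
       That high word equals the carry-out of in1 + ~in2 + 1, which is
       exactly the cb_msb the is_b path computes explicitly.  */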
    tcg_temp_free(zero);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || c == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if ((cf >> 1) == 6) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}
static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);
        tcg_temp_free(dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}
#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
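/* (In PA-RISC assembler terms these correspond to the ,mb and ,ma
 * completers, modify-before and modify-after respectively.)
 */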
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif
static void do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    nullify_end(ctx);
}
static void do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    nullify_end(ctx);
}

static void do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    nullify_end(ctx);
}
static void do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, TCGMemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    nullify_end(ctx);
}

static void do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    nullify_end(ctx);
}

static void do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    nullify_end(ctx);
}
static void do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    nullify_end(ctx);
}

static void do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd0(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    nullify_end(ctx);
}

static void do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    nullify_end(ctx);
}

static void do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    nullify_end(ctx);
}

static void do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    nullify_end(ctx);
}

static void do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    nullify_end(ctx);
}
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static void do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static void do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        do_dbranch(ctx, dest, 0, is_n && disp >= 0);
        return;
    }
    if (c == TCG_COND_NEVER) {
        do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
        return;
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static void do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        nullify_end(ctx);
    } else {
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
}
/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *        IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *        IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
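        /* dest and offset differ only in their low 2 bits, so the
           unsigned-GTU movcond keeps whichever has the larger low bits,
           i.e. the numerically weaker privilege: the branch can only
           lower, never raise, the current privilege level.  */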
        break;
    }
    return dest;
}
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif
static bool trans_nop(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    unsigned rt = a->t;
    TCGv_reg tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);
    tcg_temp_free(t1);
    tcg_temp_free_i64(t0);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_reg tmp;

    switch (ctl) {
    case CR_SAR:
#ifdef TARGET_HPPA64
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            gen_helper_read_interval_timer(tmp);
            gen_io_end();
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }
    tcg_temp_free_i64(t64);

    return nullify_end(ctx);
}
static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_reg reg = load_gpr(ctx, a->r);
    TCGv_reg tmp;

    if (ctl == CR_SAR) {
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);
        tcg_temp_free(tmp);

        cond_free(&ctx->null_cond);
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(cpu_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(cpu_env, reg);
        break;
    case CR_EIEM:
        gen_helper_write_eiem(cpu_env, reg);
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = get_temp(ctx);
        tcg_gen_ld_reg(tmp, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_reg(reg, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    default:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}
static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);
    tcg_temp_free(tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_reg dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_reg(dest, 0);
#else
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);

    tcg_temp_free_i64(t0);
#endif
    save_gpr(ctx, a->t, dest);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = get_temp(ctx);
    gen_helper_swap_system_mask(tmp, cpu_env, reg);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(cpu_env);
    } else {
        gen_helper_rfi(cpu_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    if (ctx->base.singlestep_enabled) {
        gen_excp_1(EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}

static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}

static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}
#ifndef CONFIG_USER_ONLY
static bool gen_hlt(DisasContext *ctx, int reset)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    if (reset) {
        gen_helper_reset(cpu_env);
    } else {
        gen_helper_halt(cpu_env);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
}
#endif /* !CONFIG_USER_ONLY */
static bool trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
                               const DisasInsn *di)
{
    unsigned rb = extract32(insn, 21, 5);
    unsigned rx = extract32(insn, 16, 5);
    TCGv_reg dest = dest_gpr(ctx, rb);
    TCGv_reg src1 = load_gpr(ctx, rb);
    TCGv_reg src2 = load_gpr(ctx, rx);

    /* The only thing we need to do is the base register modification.  */
    tcg_gen_add_reg(dest, src1, src2);
    save_gpr(ctx, rb, dest);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_probe(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
{
    unsigned rt = extract32(insn, 0, 5);
    unsigned sp = extract32(insn, 14, 2);
    unsigned rr = extract32(insn, 16, 5);
    unsigned rb = extract32(insn, 21, 5);
    unsigned is_write = extract32(insn, 6, 1);
    unsigned is_imm = extract32(insn, 13, 1);
    TCGv_reg dest, ofs;
    TCGv_i32 level, want;
    TCGv_tl addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, rt);
    form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);

    if (is_imm) {
        level = tcg_const_i32(extract32(insn, 16, 2));
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(level, load_gpr(ctx, rr));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_const_i32(is_write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, cpu_env, addr, level, want);

    tcg_temp_free_i32(want);
    tcg_temp_free_i32(level);

    save_gpr(ctx, rt, dest);
    return nullify_end(ctx);
}
2372 #ifndef CONFIG_USER_ONLY
2373 static bool trans_ixtlbx(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2376 unsigned rr = extract32(insn, 16, 5);
2377 unsigned rb = extract32(insn, 21, 5);
2378 unsigned is_data = insn & 0x1000;
2379 unsigned is_addr = insn & 0x40;
2384 sp = extract32(insn, 14, 2);
2386 sp = ~assemble_sr3(insn);
2389 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2392 form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
2393 reg = load_gpr(ctx, rr);
2395 gen_helper_itlba(cpu_env, addr, reg);
2397 gen_helper_itlbp(cpu_env, addr, reg);
2400 /* Exit TB for ITLB change if mmu is enabled. This *should* not be
2401 the case, since the OS TLB fill handler runs with mmu disabled. */
2402 if (!is_data && (ctx->tb_flags & PSW_C)) {
2403 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2405 return nullify_end(ctx);
2408 static bool trans_pxtlbx(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2410 unsigned m = extract32(insn, 5, 1);
2412 unsigned rx = extract32(insn, 16, 5);
2413 unsigned rb = extract32(insn, 21, 5);
2414 unsigned is_data = insn & 0x1000;
2415 unsigned is_local = insn & 0x40;
2420 sp = extract32(insn, 14, 2);
2422 sp = ~assemble_sr3(insn);
2425 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2428 form_gva(ctx, &addr, &ofs, rb, rx, 0, 0, sp, m, false);
2430 save_gpr(ctx, rb, ofs);
2433 gen_helper_ptlbe(cpu_env);
2435 gen_helper_ptlb(cpu_env, addr);
2438 /* Exit TB for TLB change if mmu is enabled. */
2439 if (!is_data && (ctx->tb_flags & PSW_C)) {
2440 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2442 return nullify_end(ctx);
2445 static bool trans_lpa(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2447 unsigned rt = extract32(insn, 0, 5);
2448 unsigned m = extract32(insn, 5, 1);
2449 unsigned sp = extract32(insn, 14, 2);
2450 unsigned rx = extract32(insn, 16, 5);
2451 unsigned rb = extract32(insn, 21, 5);
2453 TCGv_reg ofs, paddr;
2455 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2458 form_gva(ctx, &vaddr, &ofs, rb, rx, 0, 0, sp, m, false);
2460 paddr = tcg_temp_new();
2461 gen_helper_lpa(paddr, cpu_env, vaddr);
2463 /* Note that physical address result overrides base modification. */
2465 save_gpr(ctx, rb, ofs);
2467 save_gpr(ctx, rt, paddr);
2468 tcg_temp_free(paddr);
2470 return nullify_end(ctx);
2473 static bool trans_lci(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2475 unsigned rt = extract32(insn, 0, 5);
2478 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2480 /* The Coherence Index is an implementation-defined function of the
2481 physical address. Two addresses with the same CI have a coherent
2482 view of the cache.  Our implementation returns 0 for all addresses,
2483 since the entire address space is coherent.  */
2484 ci = tcg_const_reg(0);
2485 save_gpr(ctx, rt, ci);
2488 cond_free(&ctx->null_cond);
2491 #endif /* !CONFIG_USER_ONLY */
2493 static const DisasInsn table_mem_mgmt[] = {
2494 { 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
2495 { 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
2496 { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
2497 { 0x040012c0u, 0xfc003fffu, trans_nop }, /* fdce */
2498 { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
2499 { 0x04000280u, 0xfc001fffu, trans_nop }, /* fic 0a */
2500 { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
2501 { 0x040013c0u, 0xfc003fffu, trans_nop }, /* fic 4f */
2502 { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
2503 { 0x040002c0u, 0xfc001fffu, trans_nop }, /* fice */
2504 { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
2505 { 0x04002700u, 0xfc003fffu, trans_nop }, /* pdc */
2506 { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
2507 { 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
2508 { 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
2509 #ifndef CONFIG_USER_ONLY
2510 { 0x04000000u, 0xfc001fffu, trans_ixtlbx }, /* iitlbp */
2511 { 0x04000040u, 0xfc001fffu, trans_ixtlbx }, /* iitlba */
2512 { 0x04001000u, 0xfc001fffu, trans_ixtlbx }, /* idtlbp */
2513 { 0x04001040u, 0xfc001fffu, trans_ixtlbx }, /* idtlba */
2514 { 0x04000200u, 0xfc001fdfu, trans_pxtlbx }, /* pitlb */
2515 { 0x04000240u, 0xfc001fdfu, trans_pxtlbx }, /* pitlbe */
2516 { 0x04001200u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlb */
2517 { 0x04001240u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlbe */
2518 { 0x04001340u, 0xfc003fc0u, trans_lpa },
2519 { 0x04001300u, 0xfc003fe0u, trans_lci },
2523 static bool trans_add(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2525 unsigned r2 = extract32(insn, 21, 5);
2526 unsigned r1 = extract32(insn, 16, 5);
2527 unsigned cf = extract32(insn, 12, 4);
2528 unsigned ext = extract32(insn, 8, 4);
2529 unsigned shift = extract32(insn, 6, 2);
2530 unsigned rt = extract32(insn, 0, 5);
2531 TCGv_reg tcg_r1, tcg_r2;
2535 bool is_tsv = false;
2538 case 0x6: /* ADD, SHLADD */
2540 case 0xa: /* ADD,L, SHLADD,L */
2543 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
2546 case 0x7: /* ADD,C */
2549 case 0xf: /* ADD,C,TSV */
2550 is_c = is_tsv = true;
2553 return gen_illegal(ctx);
2559 tcg_r1 = load_gpr(ctx, r1);
2560 tcg_r2 = load_gpr(ctx, r2);
2561 do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
2562 return nullify_end(ctx);
2565 static bool trans_sub(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2567 unsigned r2 = extract32(insn, 21, 5);
2568 unsigned r1 = extract32(insn, 16, 5);
2569 unsigned cf = extract32(insn, 12, 4);
2570 unsigned ext = extract32(insn, 6, 6);
2571 unsigned rt = extract32(insn, 0, 5);
2572 TCGv_reg tcg_r1, tcg_r2;
2575 bool is_tsv = false;
2578 case 0x10: /* SUB */
2580 case 0x30: /* SUB,TSV */
2583 case 0x14: /* SUB,B */
2586 case 0x34: /* SUB,B,TSV */
2587 is_b = is_tsv = true;
2589 case 0x13: /* SUB,TC */
2592 case 0x33: /* SUB,TSV,TC */
2593 is_tc = is_tsv = true;
2596 return gen_illegal(ctx);
2602 tcg_r1 = load_gpr(ctx, r1);
2603 tcg_r2 = load_gpr(ctx, r2);
2604 do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
2605 return nullify_end(ctx);
2608 static bool trans_log(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2610 unsigned r2 = extract32(insn, 21, 5);
2611 unsigned r1 = extract32(insn, 16, 5);
2612 unsigned cf = extract32(insn, 12, 4);
2613 unsigned rt = extract32(insn, 0, 5);
2614 TCGv_reg tcg_r1, tcg_r2;
2619 tcg_r1 = load_gpr(ctx, r1);
2620 tcg_r2 = load_gpr(ctx, r2);
2621 do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f.ttt);
2622 return nullify_end(ctx);
2625 static bool trans_or(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2627 unsigned r2 = extract32(insn, 21, 5);
2628 unsigned r1 = extract32(insn, 16, 5);
2629 unsigned cf = extract32(insn, 12, 4);
2630 unsigned rt = extract32(insn, 0, 5);
2631 TCGv_reg tcg_r1, tcg_r2;
2634 if (rt == 0) { /* NOP */
2635 cond_free(&ctx->null_cond);
2638 if (r2 == 0) { /* COPY */
2640 TCGv_reg dest = dest_gpr(ctx, rt);
2641 tcg_gen_movi_reg(dest, 0);
2642 save_gpr(ctx, rt, dest);
2644 save_gpr(ctx, rt, cpu_gr[r1]);
2646 cond_free(&ctx->null_cond);
2649 #ifndef CONFIG_USER_ONLY
2650 /* These are QEMU extensions and are nops in the real architecture:
2652 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2653 * or %r31,%r31,%r31 -- death loop; offline cpu,
2654 * currently implemented as idle.
2656 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2659 /* No need to check for supervisor, as userland can only pause
2660 until the next timer interrupt. */
2663 /* Advance the instruction queue. */
2664 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2665 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2666 nullify_set(ctx, 0);
2668 /* Tell the qemu main loop to halt until this cpu has work. */
2669 tmp = tcg_const_i32(1);
2670 tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2671 offsetof(CPUState, halted));
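/* The offsetof arithmetic above steps from the env pointer back to the
   enclosing HPPACPU and then forward to the halted field of its CPUState
   parent (which sits at offset 0 of the CPU structure). */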
2672 tcg_temp_free_i32(tmp);
2673 gen_excp_1(EXCP_HALTED);
2674 ctx->base.is_jmp = DISAS_NORETURN;
2676 return nullify_end(ctx);
2684 tcg_r1 = load_gpr(ctx, r1);
2685 tcg_r2 = load_gpr(ctx, r2);
2686 do_log(ctx, rt, tcg_r1, tcg_r2, cf, tcg_gen_or_reg);
2687 return nullify_end(ctx);
2690 static bool trans_cmpclr(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2692 unsigned r2 = extract32(insn, 21, 5);
2693 unsigned r1 = extract32(insn, 16, 5);
2694 unsigned cf = extract32(insn, 12, 4);
2695 unsigned rt = extract32(insn, 0, 5);
2696 TCGv_reg tcg_r1, tcg_r2;
2701 tcg_r1 = load_gpr(ctx, r1);
2702 tcg_r2 = load_gpr(ctx, r2);
2703 do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
2704 return nullify_end(ctx);
2707 static bool trans_uxor(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2709 unsigned r2 = extract32(insn, 21, 5);
2710 unsigned r1 = extract32(insn, 16, 5);
2711 unsigned cf = extract32(insn, 12, 4);
2712 unsigned rt = extract32(insn, 0, 5);
2713 TCGv_reg tcg_r1, tcg_r2;
2718 tcg_r1 = load_gpr(ctx, r1);
2719 tcg_r2 = load_gpr(ctx, r2);
2720 do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_reg);
2721 return nullify_end(ctx);
2724 static bool trans_uaddcm(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2726 unsigned r2 = extract32(insn, 21, 5);
2727 unsigned r1 = extract32(insn, 16, 5);
2728 unsigned cf = extract32(insn, 12, 4);
2729 unsigned is_tc = extract32(insn, 6, 1);
2730 unsigned rt = extract32(insn, 0, 5);
2731 TCGv_reg tcg_r1, tcg_r2, tmp;
2736 tcg_r1 = load_gpr(ctx, r1);
2737 tcg_r2 = load_gpr(ctx, r2);
2738 tmp = get_temp(ctx);
2739 tcg_gen_not_reg(tmp, tcg_r2);
2740 do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_reg);
2741 return nullify_end(ctx);
2744 static bool trans_dcor(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2746 unsigned r2 = extract32(insn, 21, 5);
2747 unsigned cf = extract32(insn, 12, 4);
2748 unsigned is_i = extract32(insn, 6, 1);
2749 unsigned rt = extract32(insn, 0, 5);
2754 tmp = get_temp(ctx);
2755 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
2757 tcg_gen_not_reg(tmp, tmp);
2759 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2760 tcg_gen_muli_reg(tmp, tmp, 6);
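/* After the shift/mask/multiply above, each nibble of tmp holds 6 where
   the (possibly inverted) per-digit carry extracted from PSW[CB] was set
   and 0 elsewhere; the add/sub below applies that per-digit BCD
   correction. */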
2761 do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
2762 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
2764 return nullify_end(ctx);
2767 static bool trans_ds(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2769 unsigned r2 = extract32(insn, 21, 5);
2770 unsigned r1 = extract32(insn, 16, 5);
2771 unsigned cf = extract32(insn, 12, 4);
2772 unsigned rt = extract32(insn, 0, 5);
2773 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
2777 in1 = load_gpr(ctx, r1);
2778 in2 = load_gpr(ctx, r2);
2780 add1 = tcg_temp_new();
2781 add2 = tcg_temp_new();
2782 addc = tcg_temp_new();
2783 dest = tcg_temp_new();
2784 zero = tcg_const_reg(0);
2786 /* Form R1 << 1 | PSW[CB]{8}. */
2787 tcg_gen_add_reg(add1, in1, in1);
2788 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
2790 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2791 carry{8} requires that we subtract via + ~R2 + 1, as described in
2792 the manual. By extracting and masking V, we can produce the
2793 proper inputs to the addition without movcond. */
2794 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2795 tcg_gen_xor_reg(add2, in2, addc);
2796 tcg_gen_andi_reg(addc, addc, 1);
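/* Worked through: if PSW[V] is negative, addc starts as all-ones, so
   add2 = ~R2 and the masked addc supplies the +1, giving add1 - R2;
   if PSW[V] is non-negative, addc is 0 and we simply add R2. */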
2797 /* ??? This is only correct for 32-bit. */
2798 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2799 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2801 tcg_temp_free(addc);
2802 tcg_temp_free(zero);
2804 /* Write back the result register. */
2805 save_gpr(ctx, rt, dest);
2807 /* Write back PSW[CB]. */
2808 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2809 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
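/* The two xors compute add1 ^ add2 ^ dest, i.e. the vector of carries
   into each bit position, which is the representation used for PSW[CB]
   throughout this file. */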
2811 /* Write back PSW[V] for the division step. */
2812 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2813 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
2815 /* Install the new nullification. */
2819 /* ??? The lshift is supposed to contribute to overflow. */
2820 sv = do_add_sv(ctx, dest, add1, add2);
2822 ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
2825 tcg_temp_free(add1);
2826 tcg_temp_free(add2);
2827 tcg_temp_free(dest);
2829 return nullify_end(ctx);
2832 static const DisasInsn table_arith_log[] = {
2833 { 0x08000240u, 0xfc000fe0u, trans_or },
2834 { 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_reg },
2835 { 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_reg },
2836 { 0x08000280u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_xor_reg },
2837 { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
2838 { 0x08000380u, 0xfc000fe0u, trans_uxor },
2839 { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
2840 { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
2841 { 0x08000440u, 0xfc000fe0u, trans_ds },
2842 { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
2843 { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
2844 { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
2845 { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
2848 static bool trans_addi(DisasContext *ctx, uint32_t insn)
2850 target_sreg im = low_sextract(insn, 0, 11);
2851 unsigned e1 = extract32(insn, 11, 1);
2852 unsigned cf = extract32(insn, 12, 4);
2853 unsigned rt = extract32(insn, 16, 5);
2854 unsigned r2 = extract32(insn, 21, 5);
2855 unsigned o1 = extract32(insn, 26, 1);
2856 TCGv_reg tcg_im, tcg_r2;
2862 tcg_im = load_const(ctx, im);
2863 tcg_r2 = load_gpr(ctx, r2);
2864 do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
2866 return nullify_end(ctx);
2869 static bool trans_subi(DisasContext *ctx, uint32_t insn)
2871 target_sreg im = low_sextract(insn, 0, 11);
2872 unsigned e1 = extract32(insn, 11, 1);
2873 unsigned cf = extract32(insn, 12, 4);
2874 unsigned rt = extract32(insn, 16, 5);
2875 unsigned r2 = extract32(insn, 21, 5);
2876 TCGv_reg tcg_im, tcg_r2;
2882 tcg_im = load_const(ctx, im);
2883 tcg_r2 = load_gpr(ctx, r2);
2884 do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
2886 return nullify_end(ctx);
2889 static bool trans_cmpiclr(DisasContext *ctx, uint32_t insn)
2891 target_sreg im = low_sextract(insn, 0, 11);
2892 unsigned cf = extract32(insn, 12, 4);
2893 unsigned rt = extract32(insn, 16, 5);
2894 unsigned r2 = extract32(insn, 21, 5);
2895 TCGv_reg tcg_im, tcg_r2;
2901 tcg_im = load_const(ctx, im);
2902 tcg_r2 = load_gpr(ctx, r2);
2903 do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
2905 return nullify_end(ctx);
2908 static bool trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
2909 const DisasInsn *di)
2911 unsigned rt = extract32(insn, 0, 5);
2912 unsigned m = extract32(insn, 5, 1);
2913 unsigned sz = extract32(insn, 6, 2);
2914 unsigned a = extract32(insn, 13, 1);
2915 unsigned sp = extract32(insn, 14, 2);
2916 int disp = low_sextract(insn, 16, 5);
2917 unsigned rb = extract32(insn, 21, 5);
2918 int modify = (m ? (a ? -1 : 1) : 0);
2919 TCGMemOp mop = MO_TE | sz;
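/* MO_TE | sz combines the target-endian flag with the 2-bit size field
   from the insn, yielding MO_UB, MO_TEUW, MO_TEUL or MO_TEQ. */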
2921 do_load(ctx, rt, rb, 0, 0, disp, sp, modify, mop);
2925 static bool trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
2926 const DisasInsn *di)
2928 unsigned rt = extract32(insn, 0, 5);
2929 unsigned m = extract32(insn, 5, 1);
2930 unsigned sz = extract32(insn, 6, 2);
2931 unsigned u = extract32(insn, 13, 1);
2932 unsigned sp = extract32(insn, 14, 2);
2933 unsigned rx = extract32(insn, 16, 5);
2934 unsigned rb = extract32(insn, 21, 5);
2935 TCGMemOp mop = MO_TE | sz;
2937 do_load(ctx, rt, rb, rx, u ? sz : 0, 0, sp, m, mop);
2941 static bool trans_st_idx_i(DisasContext *ctx, uint32_t insn,
2942 const DisasInsn *di)
2944 int disp = low_sextract(insn, 0, 5);
2945 unsigned m = extract32(insn, 5, 1);
2946 unsigned sz = extract32(insn, 6, 2);
2947 unsigned a = extract32(insn, 13, 1);
2948 unsigned sp = extract32(insn, 14, 2);
2949 unsigned rr = extract32(insn, 16, 5);
2950 unsigned rb = extract32(insn, 21, 5);
2951 int modify = (m ? (a ? -1 : 1) : 0);
2952 TCGMemOp mop = MO_TE | sz;
2954 do_store(ctx, rr, rb, disp, sp, modify, mop);
2958 static bool trans_ldcw(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
2960 unsigned rt = extract32(insn, 0, 5);
2961 unsigned m = extract32(insn, 5, 1);
2962 unsigned i = extract32(insn, 12, 1);
2963 unsigned au = extract32(insn, 13, 1);
2964 unsigned sp = extract32(insn, 14, 2);
2965 unsigned rx = extract32(insn, 16, 5);
2966 unsigned rb = extract32(insn, 21, 5);
2967 TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
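/* LDCW (load and clear word) requires a 16-byte aligned operand, hence
   MO_ALIGN_16; the load-and-clear itself is implemented below as an
   atomic exchange with zero. */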
2968 TCGv_reg zero, dest, ofs;
2970 int modify, disp = 0, scale = 0;
2975 modify = (m ? (au ? -1 : 1) : 0);
2976 disp = low_sextract(rx, 0, 5);
2981 scale = mop & MO_SIZE;
2985 /* Base register modification.  Make sure that if RT == RB,
2986 we still see the result of the load. */
2987 dest = get_temp(ctx);
2989 dest = dest_gpr(ctx, rt);
2992 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
2993 ctx->mmu_idx == MMU_PHYS_IDX);
2994 zero = tcg_const_reg(0);
2995 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
2997 save_gpr(ctx, rb, ofs);
2999 save_gpr(ctx, rt, dest);
3001 return nullify_end(ctx);
3004 static bool trans_stby(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3006 target_sreg disp = low_sextract(insn, 0, 5);
3007 unsigned m = extract32(insn, 5, 1);
3008 unsigned a = extract32(insn, 13, 1);
3009 unsigned sp = extract32(insn, 14, 2);
3010 unsigned rt = extract32(insn, 16, 5);
3011 unsigned rb = extract32(insn, 21, 5);
3017 form_gva(ctx, &addr, &ofs, rb, 0, 0, disp, sp, m,
3018 ctx->mmu_idx == MMU_PHYS_IDX);
3019 val = load_gpr(ctx, rt);
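/* STBY stores only the bytes of the word selected by the low bits of
   the address; the _b/_e helpers implement the begin/end variants, and
   the _parallel versions supply the atomicity needed under CF_PARALLEL. */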
3021 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3022 gen_helper_stby_e_parallel(cpu_env, addr, val);
3024 gen_helper_stby_e(cpu_env, addr, val);
3027 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3028 gen_helper_stby_b_parallel(cpu_env, addr, val);
3030 gen_helper_stby_b(cpu_env, addr, val);
3035 tcg_gen_andi_reg(ofs, ofs, ~3);
3036 save_gpr(ctx, rb, ofs);
3039 return nullify_end(ctx);
3042 #ifndef CONFIG_USER_ONLY
3043 static bool trans_ldwa_idx_i(DisasContext *ctx, uint32_t insn,
3044 const DisasInsn *di)
3046 int hold_mmu_idx = ctx->mmu_idx;
3048 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3050 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3051 format wrt the sub-opcode in bits 6:9. */
3052 ctx->mmu_idx = MMU_PHYS_IDX;
3053 trans_ld_idx_i(ctx, insn, di);
3054 ctx->mmu_idx = hold_mmu_idx;
3058 static bool trans_ldwa_idx_x(DisasContext *ctx, uint32_t insn,
3059 const DisasInsn *di)
3061 int hold_mmu_idx = ctx->mmu_idx;
3063 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3065 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3066 format wrt the sub-opcode in bits 6:9. */
3067 ctx->mmu_idx = MMU_PHYS_IDX;
3068 trans_ld_idx_x(ctx, insn, di);
3069 ctx->mmu_idx = hold_mmu_idx;
3073 static bool trans_stwa_idx_i(DisasContext *ctx, uint32_t insn,
3074 const DisasInsn *di)
3076 int hold_mmu_idx = ctx->mmu_idx;
3078 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3080 /* ??? needs fixing for hppa64 -- ldda does not follow the same
3081 format wrt the sub-opcode in bits 6:9. */
3082 ctx->mmu_idx = MMU_PHYS_IDX;
3083 trans_st_idx_i(ctx, insn, di);
3084 ctx->mmu_idx = hold_mmu_idx;
3089 static const DisasInsn table_index_mem[] = {
3090 { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
3091 { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
3092 { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
3093 { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
3094 { 0x0c001300u, 0xfc0013c0, trans_stby },
3095 #ifndef CONFIG_USER_ONLY
3096 { 0x0c000180u, 0xfc00d3c0, trans_ldwa_idx_x }, /* LDWA, rx */
3097 { 0x0c001180u, 0xfc00d3c0, trans_ldwa_idx_i }, /* LDWA, im */
3098 { 0x0c001380u, 0xfc00d3c0, trans_stwa_idx_i }, /* STWA, im */
3102 static bool trans_ldil(DisasContext *ctx, uint32_t insn)
3104 unsigned rt = extract32(insn, 21, 5);
3105 target_sreg i = assemble_21(insn);
3106 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3108 tcg_gen_movi_reg(tcg_rt, i);
3109 save_gpr(ctx, rt, tcg_rt);
3110 cond_free(&ctx->null_cond);
3114 static bool trans_addil(DisasContext *ctx, uint32_t insn)
3116 unsigned rt = extract32(insn, 21, 5);
3117 target_sreg i = assemble_21(insn);
3118 TCGv_reg tcg_rt = load_gpr(ctx, rt);
3119 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
3121 tcg_gen_addi_reg(tcg_r1, tcg_rt, i);
3122 save_gpr(ctx, 1, tcg_r1);
3123 cond_free(&ctx->null_cond);
3127 static bool trans_ldo(DisasContext *ctx, uint32_t insn)
3129 unsigned rb = extract32(insn, 21, 5);
3130 unsigned rt = extract32(insn, 16, 5);
3131 target_sreg i = assemble_16(insn);
3132 TCGv_reg tcg_rt = dest_gpr(ctx, rt);
3134 /* Special case rb == 0, for the LDI pseudo-op.
3135 The COPY pseudo-op is handled for free within tcg_gen_addi_reg. */
3137 tcg_gen_movi_reg(tcg_rt, i);
3139 tcg_gen_addi_reg(tcg_rt, cpu_gr[rb], i);
3141 save_gpr(ctx, rt, tcg_rt);
3142 cond_free(&ctx->null_cond);
3146 static bool trans_load(DisasContext *ctx, uint32_t insn,
3147 bool is_mod, TCGMemOp mop)
3149 unsigned rb = extract32(insn, 21, 5);
3150 unsigned rt = extract32(insn, 16, 5);
3151 unsigned sp = extract32(insn, 14, 2);
3152 target_sreg i = assemble_16(insn);
3154 do_load(ctx, rt, rb, 0, 0, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3158 static bool trans_load_w(DisasContext *ctx, uint32_t insn)
3160 unsigned rb = extract32(insn, 21, 5);
3161 unsigned rt = extract32(insn, 16, 5);
3162 unsigned sp = extract32(insn, 14, 2);
3163 target_sreg i = assemble_16a(insn);
3164 unsigned ext2 = extract32(insn, 1, 2);
3169 /* FLDW without modification. */
3170 do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3173 /* LDW with modification. Note that the sign of I selects
3174 post-dec vs pre-inc. */
3175 do_load(ctx, rt, rb, 0, 0, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3178 return gen_illegal(ctx);
3183 static bool trans_fload_mod(DisasContext *ctx, uint32_t insn)
3185 target_sreg i = assemble_16a(insn);
3186 unsigned t1 = extract32(insn, 1, 1);
3187 unsigned a = extract32(insn, 2, 1);
3188 unsigned sp = extract32(insn, 14, 2);
3189 unsigned t0 = extract32(insn, 16, 5);
3190 unsigned rb = extract32(insn, 21, 5);
3192 /* FLDW with modification. */
3193 do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3197 static bool trans_store(DisasContext *ctx, uint32_t insn,
3198 bool is_mod, TCGMemOp mop)
3200 unsigned rb = extract32(insn, 21, 5);
3201 unsigned rt = extract32(insn, 16, 5);
3202 unsigned sp = extract32(insn, 14, 2);
3203 target_sreg i = assemble_16(insn);
3205 do_store(ctx, rt, rb, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
3209 static bool trans_store_w(DisasContext *ctx, uint32_t insn)
3211 unsigned rb = extract32(insn, 21, 5);
3212 unsigned rt = extract32(insn, 16, 5);
3213 unsigned sp = extract32(insn, 14, 2);
3214 target_sreg i = assemble_16a(insn);
3215 unsigned ext2 = extract32(insn, 1, 2);
3220 /* FSTW without modification. */
3221 do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
3224 /* STW with modification. */
3225 do_store(ctx, rt, rb, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
3228 return gen_illegal(ctx);
3233 static bool trans_fstore_mod(DisasContext *ctx, uint32_t insn)
3235 target_sreg i = assemble_16a(insn);
3236 unsigned t1 = extract32(insn, 1, 1);
3237 unsigned a = extract32(insn, 2, 1);
3238 unsigned sp = extract32(insn, 14, 2);
3239 unsigned t0 = extract32(insn, 16, 5);
3240 unsigned rb = extract32(insn, 21, 5);
3242 /* FSTW with modification. */
3243 do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
3247 static bool trans_copr_w(DisasContext *ctx, uint32_t insn)
3249 unsigned t0 = extract32(insn, 0, 5);
3250 unsigned m = extract32(insn, 5, 1);
3251 unsigned t1 = extract32(insn, 6, 1);
3252 unsigned ext3 = extract32(insn, 7, 3);
3253 /* unsigned cc = extract32(insn, 10, 2); */
3254 unsigned i = extract32(insn, 12, 1);
3255 unsigned ua = extract32(insn, 13, 1);
3256 unsigned sp = extract32(insn, 14, 2);
3257 unsigned rx = extract32(insn, 16, 5);
3258 unsigned rb = extract32(insn, 21, 5);
3259 unsigned rt = t1 * 32 + t0;
3260 int modify = (m ? (ua ? -1 : 1) : 0);
3264 scale = (ua ? 2 : 0);
3268 disp = low_sextract(rx, 0, 5);
3271 modify = (m ? (ua ? -1 : 1) : 0);
3276 do_floadw(ctx, rt, rb, rx, scale, disp, sp, modify);
3279 do_fstorew(ctx, rt, rb, rx, scale, disp, sp, modify);
3282 return gen_illegal(ctx);
3287 static bool trans_copr_dw(DisasContext *ctx, uint32_t insn)
3289 unsigned rt = extract32(insn, 0, 5);
3290 unsigned m = extract32(insn, 5, 1);
3291 unsigned ext4 = extract32(insn, 6, 4);
3292 /* unsigned cc = extract32(insn, 10, 2); */
3293 unsigned i = extract32(insn, 12, 1);
3294 unsigned ua = extract32(insn, 13, 1);
3295 unsigned sp = extract32(insn, 14, 2);
3296 unsigned rx = extract32(insn, 16, 5);
3297 unsigned rb = extract32(insn, 21, 5);
3298 int modify = (m ? (ua ? -1 : 1) : 0);
3302 scale = (ua ? 3 : 0);
3306 disp = low_sextract(rx, 0, 5);
3309 modify = (m ? (ua ? -1 : 1) : 0);
3314 do_floadd(ctx, rt, rb, rx, scale, disp, sp, modify);
3317 do_fstored(ctx, rt, rb, rx, scale, disp, sp, modify);
3320 return gen_illegal(ctx);
3325 static bool trans_cmpb(DisasContext *ctx, uint32_t insn,
3326 bool is_true, bool is_imm, bool is_dw)
3328 target_sreg disp = assemble_12(insn) * 4;
3329 unsigned n = extract32(insn, 1, 1);
3330 unsigned c = extract32(insn, 13, 3);
3331 unsigned r = extract32(insn, 21, 5);
3332 unsigned cf = c * 2 + !is_true;
3333 TCGv_reg dest, in1, in2, sv;
3339 in1 = load_const(ctx, low_sextract(insn, 16, 5));
3341 in1 = load_gpr(ctx, extract32(insn, 16, 5));
3343 in2 = load_gpr(ctx, r);
3344 dest = get_temp(ctx);
3346 tcg_gen_sub_reg(dest, in1, in2);
3350 sv = do_sub_sv(ctx, dest, in1, in2);
3353 cond = do_sub_cond(cf, dest, in1, in2, sv);
3354 do_cbranch(ctx, disp, n, &cond);
3358 static bool trans_addb(DisasContext *ctx, uint32_t insn,
3359 bool is_true, bool is_imm)
3361 target_sreg disp = assemble_12(insn) * 4;
3362 unsigned n = extract32(insn, 1, 1);
3363 unsigned c = extract32(insn, 13, 3);
3364 unsigned r = extract32(insn, 21, 5);
3365 unsigned cf = c * 2 + !is_true;
3366 TCGv_reg dest, in1, in2, sv, cb_msb;
3372 in1 = load_const(ctx, low_sextract(insn, 16, 5));
3374 in1 = load_gpr(ctx, extract32(insn, 16, 5));
3376 in2 = load_gpr(ctx, r);
3377 dest = dest_gpr(ctx, r);
3383 tcg_gen_add_reg(dest, in1, in2);
3386 cb_msb = get_temp(ctx);
3387 tcg_gen_movi_reg(cb_msb, 0);
3388 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3391 tcg_gen_add_reg(dest, in1, in2);
3392 sv = do_add_sv(ctx, dest, in1, in2);
3396 cond = do_cond(cf, dest, cb_msb, sv);
3397 do_cbranch(ctx, disp, n, &cond);
3401 static bool trans_bb(DisasContext *ctx, uint32_t insn)
3403 target_sreg disp = assemble_12(insn) * 4;
3404 unsigned n = extract32(insn, 1, 1);
3405 unsigned c = extract32(insn, 15, 1);
3406 unsigned r = extract32(insn, 16, 5);
3407 unsigned p = extract32(insn, 21, 5);
3408 unsigned i = extract32(insn, 26, 1);
3409 TCGv_reg tmp, tcg_r;
3414 tmp = tcg_temp_new();
3415 tcg_r = load_gpr(ctx, r);
3417 tcg_gen_shli_reg(tmp, tcg_r, p);
3419 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
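/* Shifting left by the (big-endian) bit number moves the selected bit
   into the sign position, where a signed LT/GE test can observe it. */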
3422 cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
3424 do_cbranch(ctx, disp, n, &cond);
3428 static bool trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
3430 target_sreg disp = assemble_12(insn) * 4;
3431 unsigned n = extract32(insn, 1, 1);
3432 unsigned c = extract32(insn, 13, 3);
3433 unsigned t = extract32(insn, 16, 5);
3434 unsigned r = extract32(insn, 21, 5);
3440 dest = dest_gpr(ctx, r);
3442 tcg_gen_movi_reg(dest, low_sextract(t, 0, 5));
3443 } else if (t == 0) {
3444 tcg_gen_movi_reg(dest, 0);
3446 tcg_gen_mov_reg(dest, cpu_gr[t]);
3449 cond = do_sed_cond(c, dest);
3450 do_cbranch(ctx, disp, n, &cond);
3454 static bool trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
3455 const DisasInsn *di)
3457 unsigned rt = extract32(insn, 0, 5);
3458 unsigned c = extract32(insn, 13, 3);
3459 unsigned r1 = extract32(insn, 16, 5);
3460 unsigned r2 = extract32(insn, 21, 5);
3467 dest = dest_gpr(ctx, rt);
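/* Three strategies: r1 == 0 reduces to a plain 32-bit shift right by
   SAR; r1 == r2 is a 32-bit rotate right; otherwise shift the 64-bit
   r1:r2 pair (r1 in the high half) right and truncate. */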
3469 tcg_gen_ext32u_reg(dest, load_gpr(ctx, r2));
3470 tcg_gen_shr_reg(dest, dest, cpu_sar);
3471 } else if (r1 == r2) {
3472 TCGv_i32 t32 = tcg_temp_new_i32();
3473 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, r2));
3474 tcg_gen_rotr_i32(t32, t32, cpu_sar);
3475 tcg_gen_extu_i32_reg(dest, t32);
3476 tcg_temp_free_i32(t32);
3478 TCGv_i64 t = tcg_temp_new_i64();
3479 TCGv_i64 s = tcg_temp_new_i64();
3481 tcg_gen_concat_reg_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
3482 tcg_gen_extu_reg_i64(s, cpu_sar);
3483 tcg_gen_shr_i64(t, t, s);
3484 tcg_gen_trunc_i64_reg(dest, t);
3486 tcg_temp_free_i64(t);
3487 tcg_temp_free_i64(s);
3489 save_gpr(ctx, rt, dest);
3491 /* Install the new nullification. */
3492 cond_free(&ctx->null_cond);
3494 ctx->null_cond = do_sed_cond(c, dest);
3496 return nullify_end(ctx);
3499 static bool trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
3500 const DisasInsn *di)
3502 unsigned rt = extract32(insn, 0, 5);
3503 unsigned cpos = extract32(insn, 5, 5);
3504 unsigned c = extract32(insn, 13, 3);
3505 unsigned r1 = extract32(insn, 16, 5);
3506 unsigned r2 = extract32(insn, 21, 5);
3507 unsigned sa = 31 - cpos;
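/* PA-RISC numbers bits big-endian (bit 0 is the MSB), so the encoded
   position cpos becomes the conventional shift amount 31 - cpos. */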
3514 dest = dest_gpr(ctx, rt);
3515 t2 = load_gpr(ctx, r2);
3517 TCGv_i32 t32 = tcg_temp_new_i32();
3518 tcg_gen_trunc_reg_i32(t32, t2);
3519 tcg_gen_rotri_i32(t32, t32, sa);
3520 tcg_gen_extu_i32_reg(dest, t32);
3521 tcg_temp_free_i32(t32);
3522 } else if (r1 == 0) {
3523 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3525 TCGv_reg t0 = tcg_temp_new();
3526 tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
3527 tcg_gen_deposit_reg(dest, t0, cpu_gr[r1], 32 - sa, sa);
3530 save_gpr(ctx, rt, dest);
3532 /* Install the new nullification. */
3533 cond_free(&ctx->null_cond);
3535 ctx->null_cond = do_sed_cond(c, dest);
3537 return nullify_end(ctx);
3540 static bool trans_extrw_sar(DisasContext *ctx, uint32_t insn,
3541 const DisasInsn *di)
3543 unsigned clen = extract32(insn, 0, 5);
3544 unsigned is_se = extract32(insn, 10, 1);
3545 unsigned c = extract32(insn, 13, 3);
3546 unsigned rt = extract32(insn, 16, 5);
3547 unsigned rr = extract32(insn, 21, 5);
3548 unsigned len = 32 - clen;
3549 TCGv_reg dest, src, tmp;
3555 dest = dest_gpr(ctx, rt);
3556 src = load_gpr(ctx, rr);
3557 tmp = tcg_temp_new();
3559 /* Recall that SAR uses big-endian bit numbering. */
3560 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
3562 tcg_gen_sar_reg(dest, src, tmp);
3563 tcg_gen_sextract_reg(dest, dest, 0, len);
3565 tcg_gen_shr_reg(dest, src, tmp);
3566 tcg_gen_extract_reg(dest, dest, 0, len);
3569 save_gpr(ctx, rt, dest);
3571 /* Install the new nullification. */
3572 cond_free(&ctx->null_cond);
3574 ctx->null_cond = do_sed_cond(c, dest);
3576 return nullify_end(ctx);
3579 static bool trans_extrw_imm(DisasContext *ctx, uint32_t insn,
3580 const DisasInsn *di)
3582 unsigned clen = extract32(insn, 0, 5);
3583 unsigned pos = extract32(insn, 5, 5);
3584 unsigned is_se = extract32(insn, 10, 1);
3585 unsigned c = extract32(insn, 13, 3);
3586 unsigned rt = extract32(insn, 16, 5);
3587 unsigned rr = extract32(insn, 21, 5);
3588 unsigned len = 32 - clen;
3589 unsigned cpos = 31 - pos;
3596 dest = dest_gpr(ctx, rt);
3597 src = load_gpr(ctx, rr);
3599 tcg_gen_sextract_reg(dest, src, cpos, len);
3601 tcg_gen_extract_reg(dest, src, cpos, len);
3603 save_gpr(ctx, rt, dest);
3605 /* Install the new nullification. */
3606 cond_free(&ctx->null_cond);
3608 ctx->null_cond = do_sed_cond(c, dest);
3610 return nullify_end(ctx);
3613 static const DisasInsn table_sh_ex[] = {
3614 { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
3615 { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
3616 { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
3617 { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
3620 static bool trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
3621 const DisasInsn *di)
3623 unsigned clen = extract32(insn, 0, 5);
3624 unsigned cpos = extract32(insn, 5, 5);
3625 unsigned nz = extract32(insn, 10, 1);
3626 unsigned c = extract32(insn, 13, 3);
3627 target_sreg val = low_sextract(insn, 16, 5);
3628 unsigned rt = extract32(insn, 21, 5);
3629 unsigned len = 32 - clen;
3630 target_sreg mask0, mask1;
3636 if (cpos + len > 32) {
3637 len = 32 - cpos;
3638 }
3640 dest = dest_gpr(ctx, rt);
3641 mask0 = deposit64(0, cpos, len, val);
3642 mask1 = deposit64(-1, cpos, len, val);
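/* mask0 places val's field bits over zeros and mask1 places them over
   ones, so (src & mask1) | mask0 deposits val into the field; below,
   whichever of the and/or is a no-op can be skipped. */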
3645 TCGv_reg src = load_gpr(ctx, rt);
3647 tcg_gen_andi_reg(dest, src, mask1);
3650 tcg_gen_ori_reg(dest, src, mask0);
3652 tcg_gen_movi_reg(dest, mask0);
3654 save_gpr(ctx, rt, dest);
3656 /* Install the new nullification. */
3657 cond_free(&ctx->null_cond);
3659 ctx->null_cond = do_sed_cond(c, dest);
3661 return nullify_end(ctx);
3664 static bool trans_depw_imm(DisasContext *ctx, uint32_t insn,
3665 const DisasInsn *di)
3667 unsigned clen = extract32(insn, 0, 5);
3668 unsigned cpos = extract32(insn, 5, 5);
3669 unsigned nz = extract32(insn, 10, 1);
3670 unsigned c = extract32(insn, 13, 3);
3671 unsigned rr = extract32(insn, 16, 5);
3672 unsigned rt = extract32(insn, 21, 5);
3673 unsigned rs = nz ? rt : 0;
3674 unsigned len = 32 - clen;
3680 if (cpos + len > 32) {
3681 len = 32 - cpos;
3682 }
3684 dest = dest_gpr(ctx, rt);
3685 val = load_gpr(ctx, rr);
3687 tcg_gen_deposit_z_reg(dest, val, cpos, len);
3689 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, cpos, len);
3691 save_gpr(ctx, rt, dest);
3693 /* Install the new nullification. */
3694 cond_free(&ctx->null_cond);
3696 ctx->null_cond = do_sed_cond(c, dest);
3698 return nullify_end(ctx);
3701 static bool trans_depw_sar(DisasContext *ctx, uint32_t insn,
3702 const DisasInsn *di)
3704 unsigned clen = extract32(insn, 0, 5);
3705 unsigned nz = extract32(insn, 10, 1);
3706 unsigned i = extract32(insn, 12, 1);
3707 unsigned c = extract32(insn, 13, 3);
3708 unsigned rt = extract32(insn, 21, 5);
3709 unsigned rs = nz ? rt : 0;
3710 unsigned len = 32 - clen;
3711 TCGv_reg val, mask, tmp, shift, dest;
3712 unsigned msb = 1U << (len - 1);
3719 val = load_const(ctx, low_sextract(insn, 16, 5));
3721 val = load_gpr(ctx, extract32(insn, 16, 5));
3723 dest = dest_gpr(ctx, rt);
3724 shift = tcg_temp_new();
3725 tmp = tcg_temp_new();
3727 /* Convert big-endian bit numbering in SAR to left-shift. */
3728 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
3730 mask = tcg_const_reg(msb + (msb - 1));
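/* msb + (msb - 1) == (1 << len) - 1, an all-ones mask len bits wide. */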
3731 tcg_gen_and_reg(tmp, val, mask);
3733 tcg_gen_shl_reg(mask, mask, shift);
3734 tcg_gen_shl_reg(tmp, tmp, shift);
3735 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3736 tcg_gen_or_reg(dest, dest, tmp);
3738 tcg_gen_shl_reg(dest, tmp, shift);
3740 tcg_temp_free(shift);
3741 tcg_temp_free(mask);
3743 save_gpr(ctx, rt, dest);
3745 /* Install the new nullification. */
3746 cond_free(&ctx->null_cond);
3748 ctx->null_cond = do_sed_cond(c, dest);
3750 return nullify_end(ctx);
3753 static const DisasInsn table_depw[] = {
3754 { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
3755 { 0xd4000800u, 0xfc001800u, trans_depw_imm },
3756 { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
3759 static bool trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
3761 unsigned n = extract32(insn, 1, 1);
3762 unsigned b = extract32(insn, 21, 5);
3763 target_sreg disp = assemble_17(insn);
3766 #ifdef CONFIG_USER_ONLY
3767 /* ??? It seems like there should be a good way of using
3768 "be disp(sr2, r0)", the canonical gateway entry mechanism,
3769 to our advantage. But that appears to be inconvenient to
3770 manage alongside branch delay slots. Therefore we handle
3771 entry into the gateway page via absolute address. */
3772 /* Since we don't implement spaces, just branch. Do notice the special
3773 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3774 goto_tb to the TB containing the syscall. */
3776 do_dbranch(ctx, disp, is_l ? 31 : 0, n);
3780 int sp = assemble_sr3(insn);
3784 tmp = get_temp(ctx);
3785 tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
3786 tmp = do_ibranch_priv(ctx, tmp);
3788 #ifdef CONFIG_USER_ONLY
3789 do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
3791 TCGv_i64 new_spc = tcg_temp_new_i64();
3793 load_spr(ctx, new_spc, sp);
3795 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3796 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3798 if (n && use_nullify_skip(ctx)) {
3799 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3800 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3801 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3802 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3804 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3805 if (ctx->iaoq_b == -1) {
3806 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3808 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3809 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3810 nullify_set(ctx, n);
3812 tcg_temp_free_i64(new_spc);
3813 tcg_gen_lookup_and_goto_ptr();
3814 ctx->base.is_jmp = DISAS_NORETURN;
3815 return nullify_end(ctx);
3820 static bool trans_bl(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3822 unsigned n = extract32(insn, 1, 1);
3823 unsigned link = extract32(insn, 21, 5);
3824 target_sreg disp = assemble_17(insn);
3826 do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
3830 static bool trans_b_gate(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3832 unsigned n = extract32(insn, 1, 1);
3833 unsigned link = extract32(insn, 21, 5);
3834 target_sreg disp = assemble_17(insn);
3835 target_ureg dest = iaoq_dest(ctx, disp);
3837 /* Make sure the caller hasn't done something weird with the queue.
3838 * ??? This is not quite the same as the PSW[B] bit, which would be
3839 * expensive to track. Real hardware will trap for
3840 * b gateway
3841 * b gateway+4 (in delay slot of first branch)
3842 * However, checking for a non-sequential instruction queue *will*
3843 * diagnose the security hole
3844 * b gateway
3845 * b evil
3846 * in which instructions at evil would run with increased privs.
3848 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3849 return gen_illegal(ctx);
3852 #ifndef CONFIG_USER_ONLY
3853 if (ctx->tb_flags & PSW_C) {
3854 CPUHPPAState *env = ctx->cs->env_ptr;
3855 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3856 /* If we could not find a TLB entry, then we need to generate an
3857 ITLB miss exception so the kernel will provide it.
3858 The resulting TLB fill operation will invalidate this TB and
3859 we will re-translate, at which point we *will* be able to find
3860 the TLB entry and determine if this is in fact a gateway page. */
3862 gen_excp(ctx, EXCP_ITLB_MISS);
3865 /* No change for non-gateway pages or for priv decrease. */
3866 if (type >= 4 && type - 4 < ctx->privilege) {
3867 dest = deposit32(dest, 0, 2, type - 4);
3870 dest &= -4; /* priv = 0 */
3874 do_dbranch(ctx, dest, link, n);
3878 static bool trans_bl_long(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3880 unsigned n = extract32(insn, 1, 1);
3881 target_sreg disp = assemble_22(insn);
3883 do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
3887 static bool trans_blr(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3889 unsigned n = extract32(insn, 1, 1);
3890 unsigned rx = extract32(insn, 16, 5);
3891 unsigned link = extract32(insn, 21, 5);
3892 TCGv_reg tmp = get_temp(ctx);
3894 tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
3895 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
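/* BLR's target is IAOQ_Front + 8 + (GR[rx] << 3), i.e. a word-scaled
   index from the instruction following the branch's delay slot. */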
3896 /* The computation here never changes privilege level. */
3897 do_ibranch(ctx, tmp, link, n);
3901 static bool trans_bv(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3903 unsigned n = extract32(insn, 1, 1);
3904 unsigned rx = extract32(insn, 16, 5);
3905 unsigned rb = extract32(insn, 21, 5);
3909 dest = load_gpr(ctx, rb);
3911 dest = get_temp(ctx);
3912 tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
3913 tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
3915 dest = do_ibranch_priv(ctx, dest);
3916 do_ibranch(ctx, dest, 0, n);
3920 static bool trans_bve(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
3922 unsigned n = extract32(insn, 1, 1);
3923 unsigned rb = extract32(insn, 21, 5);
3924 unsigned link = extract32(insn, 13, 1) ? 2 : 0;
3927 #ifdef CONFIG_USER_ONLY
3928 dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
3929 do_ibranch(ctx, dest, link, n);
3932 dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
3934 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3935 if (ctx->iaoq_b == -1) {
3936 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3938 copy_iaoq_entry(cpu_iaoq_b, -1, dest);
3939 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3941 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
3943 nullify_set(ctx, n);
3944 tcg_gen_lookup_and_goto_ptr();
3945 ctx->base.is_jmp = DISAS_NORETURN;
3946 return nullify_end(ctx);
3951 static const DisasInsn table_branch[] = {
3952 { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
3953 { 0xe800a000u, 0xfc00e000u, trans_bl_long },
3954 { 0xe8004000u, 0xfc00fffdu, trans_blr },
3955 { 0xe800c000u, 0xfc00fffdu, trans_bv },
3956 { 0xe800d000u, 0xfc00dffcu, trans_bve },
3957 { 0xe8002000u, 0xfc00e000u, trans_b_gate },
3960 static bool trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
3961 const DisasInsn *di)
3963 unsigned rt = extract32(insn, 0, 5);
3964 unsigned ra = extract32(insn, 21, 5);
3965 do_fop_wew(ctx, rt, ra, di->f.wew);
3969 static bool trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
3970 const DisasInsn *di)
3972 unsigned rt = assemble_rt64(insn);
3973 unsigned ra = assemble_ra64(insn);
3974 do_fop_wew(ctx, rt, ra, di->f.wew);
3978 static bool trans_fop_ded(DisasContext *ctx, uint32_t insn,
3979 const DisasInsn *di)
3981 unsigned rt = extract32(insn, 0, 5);
3982 unsigned ra = extract32(insn, 21, 5);
3983 do_fop_ded(ctx, rt, ra, di->f.ded);
3987 static bool trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
3988 const DisasInsn *di)
3990 unsigned rt = extract32(insn, 0, 5);
3991 unsigned ra = extract32(insn, 21, 5);
3992 do_fop_wed(ctx, rt, ra, di->f.wed);
3996 static bool trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
3997 const DisasInsn *di)
3999 unsigned rt = assemble_rt64(insn);
4000 unsigned ra = extract32(insn, 21, 5);
4001 do_fop_wed(ctx, rt, ra, di->f.wed);
4005 static bool trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
4006 const DisasInsn *di)
4008 unsigned rt = extract32(insn, 0, 5);
4009 unsigned ra = extract32(insn, 21, 5);
4010 do_fop_dew(ctx, rt, ra, di->f.dew);
4014 static bool trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
4015 const DisasInsn *di)
4017 unsigned rt = extract32(insn, 0, 5);
4018 unsigned ra = assemble_ra64(insn);
4019 do_fop_dew(ctx, rt, ra, di->f.dew);
4023 static bool trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
4024 const DisasInsn *di)
4026 unsigned rt = extract32(insn, 0, 5);
4027 unsigned rb = extract32(insn, 16, 5);
4028 unsigned ra = extract32(insn, 21, 5);
4029 do_fop_weww(ctx, rt, ra, rb, di->f.weww);
4033 static bool trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
4034 const DisasInsn *di)
4036 unsigned rt = assemble_rt64(insn);
4037 unsigned rb = assemble_rb64(insn);
4038 unsigned ra = assemble_ra64(insn);
4039 do_fop_weww(ctx, rt, ra, rb, di->f.weww);
4043 static bool trans_fop_dedd(DisasContext *ctx, uint32_t insn,
4044 const DisasInsn *di)
4046 unsigned rt = extract32(insn, 0, 5);
4047 unsigned rb = extract32(insn, 16, 5);
4048 unsigned ra = extract32(insn, 21, 5);
4049 do_fop_dedd(ctx, rt, ra, rb, di->f.dedd);
4053 static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4055 tcg_gen_mov_i32(dst, src);
4058 static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4060 tcg_gen_mov_i64(dst, src);
4063 static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4065 tcg_gen_andi_i32(dst, src, INT32_MAX);
4068 static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4070 tcg_gen_andi_i64(dst, src, INT64_MAX);
4073 static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4075 tcg_gen_xori_i32(dst, src, INT32_MIN);
4078 static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4080 tcg_gen_xori_i64(dst, src, INT64_MIN);
4083 static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4085 tcg_gen_ori_i32(dst, src, INT32_MIN);
4088 static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4090 tcg_gen_ori_i64(dst, src, INT64_MIN);
4093 static void do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
4094 unsigned y, unsigned c)
4096 TCGv_i32 ta, tb, tc, ty;
4100 ta = load_frw0_i32(ra);
4101 tb = load_frw0_i32(rb);
4102 ty = tcg_const_i32(y);
4103 tc = tcg_const_i32(c);
4105 gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
4107 tcg_temp_free_i32(ta);
4108 tcg_temp_free_i32(tb);
4109 tcg_temp_free_i32(ty);
4110 tcg_temp_free_i32(tc);
4115 static bool trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
4116 const DisasInsn *di)
4118 unsigned c = extract32(insn, 0, 5);
4119 unsigned y = extract32(insn, 13, 3);
4120 unsigned rb = extract32(insn, 16, 5);
4121 unsigned ra = extract32(insn, 21, 5);
4122 do_fcmp_s(ctx, ra, rb, y, c);
4126 static bool trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
4127 const DisasInsn *di)
4129 unsigned c = extract32(insn, 0, 5);
4130 unsigned y = extract32(insn, 13, 3);
4131 unsigned rb = assemble_rb64(insn);
4132 unsigned ra = assemble_ra64(insn);
4133 do_fcmp_s(ctx, ra, rb, y, c);
4137 static bool trans_fcmp_d(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
4139 unsigned c = extract32(insn, 0, 5);
4140 unsigned y = extract32(insn, 13, 3);
4141 unsigned rb = extract32(insn, 16, 5);
4142 unsigned ra = extract32(insn, 21, 5);
4150 ty = tcg_const_i32(y);
4151 tc = tcg_const_i32(c);
4153 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
4155 tcg_temp_free_i64(ta);
4156 tcg_temp_free_i64(tb);
4157 tcg_temp_free_i32(ty);
4158 tcg_temp_free_i32(tc);
4160 return nullify_end(ctx);
4163 static bool trans_ftest_t(DisasContext *ctx, uint32_t insn,
4164 const DisasInsn *di)
4166 unsigned y = extract32(insn, 13, 3);
4167 unsigned cbit = (y ^ 1) - 1;
4173 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
4174 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
4175 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4178 return nullify_end(ctx);
4181 static bool trans_ftest_q(DisasContext *ctx, uint32_t insn,
4182 const DisasInsn *di)
4184 unsigned c = extract32(insn, 0, 5);
4192 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
4195 case 0: /* simple */
4196 tcg_gen_andi_reg(t, t, 0x4000000);
4197 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4221 return gen_illegal(ctx);
4224 TCGv_reg c = load_const(ctx, mask);
4225 tcg_gen_or_reg(t, t, c);
4226 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4228 tcg_gen_andi_reg(t, t, mask);
4229 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4232 return nullify_end(ctx);
4235 static bool trans_xmpyu(DisasContext *ctx, uint32_t insn, const DisasInsn *di)
4237 unsigned rt = extract32(insn, 0, 5);
4238 unsigned rb = assemble_rb64(insn);
4239 unsigned ra = assemble_ra64(insn);
4244 a = load_frw0_i64(ra);
4245 b = load_frw0_i64(rb);
4246 tcg_gen_mul_i64(a, a, b);
4248 tcg_temp_free_i64(a);
4249 tcg_temp_free_i64(b);
4251 return nullify_end(ctx);
4254 #define FOP_DED trans_fop_ded, .f.ded
4255 #define FOP_DEDD trans_fop_dedd, .f.dedd
4257 #define FOP_WEW trans_fop_wew_0c, .f.wew
4258 #define FOP_DEW trans_fop_dew_0c, .f.dew
4259 #define FOP_WED trans_fop_wed_0c, .f.wed
4260 #define FOP_WEWW trans_fop_weww_0c, .f.weww
4262 static const DisasInsn table_float_0c[] = {
4263 /* floating point class zero */
4264 { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
4265 { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
4266 { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
4267 { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
4268 { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
4269 { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
4271 { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
4272 { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
4273 { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
4274 { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
4275 { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
4276 { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
4278 /* floating point class three */
4279 { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
4280 { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
4281 { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
4282 { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
4284 { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
4285 { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
4286 { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
4287 { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
4289 /* floating point class one */
4291 { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
4292 { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
4294 { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
4295 { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
4296 { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
4297 { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
4299 { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
4300 { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
4301 { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
4302 { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
4303 /* float/int truncate */
4304 { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
4305 { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
4306 { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
4307 { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
4309 { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
4310 { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
4311 { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
4312 { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
4314 { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
4315 { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
4316 { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
4317 { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
4318 /* float/uint truncate */
4319 { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
4320 { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
4321 { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
4322 { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
4324 /* floating point class two */
4325 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
4326 { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
4327 { 0x30002420, 0xffffffe0, trans_ftest_q },
4328 { 0x30000420, 0xffff1fff, trans_ftest_t },
4330 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
4331 This is machine/revision == 0, which is reserved for the simulator. */
4332 { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
4339 #define FOP_WEW trans_fop_wew_0e, .f.wew
4340 #define FOP_DEW trans_fop_dew_0e, .f.dew
4341 #define FOP_WED trans_fop_wed_0e, .f.wed
4342 #define FOP_WEWW trans_fop_weww_0e, .f.weww
4344 static const DisasInsn table_float_0e[] = {
4345 /* floating point class zero */
4346 { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
4347 { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
4348 { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
4349 { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
4350 { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
4351 { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
4353 { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
4354 { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
4355 { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
4356 { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
4357 { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
4358 { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
4360 /* floating point class three */
4361 { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
4362 { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
4363 { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
4364 { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
4366 { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
4367 { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
4368 { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
4369 { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
4371 { 0x38004700, 0xfc00ef60, trans_xmpyu },
4373 /* floating point class one */
4375 { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
4376 { 0x38002200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_d },
4378 { 0x38008200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_w_s },
4379 { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
4380 { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
4381 { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
4383 { 0x38010200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_w },
4384 { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
4385 { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
4386 { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
4387 /* float/int truncate */
4388 { 0x38018200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_w },
4389 { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
4390 { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
4391 { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
4393 { 0x38028200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_uw_s },
4394 { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
4395 { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
4396 { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
4398 { 0x38030200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_uw },
4399 { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
4400 { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
4401 { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
4402 /* float/uint truncate */
4403 { 0x38038200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_uw },
4404 { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
4405 { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
4406 { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
4408 /* floating point class two */
4409 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
4410 { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
4420 /* Convert the fmpyadd single-precision register encodings to standard. */
4421 static inline int fmpyadd_s_reg(unsigned r)
4423 return (r & 16) * 2 + 16 + (r & 15);
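/* Sketch of the mapping, assuming the 64-entry half-register numbering
   used elsewhere in this file (index 32 + n selecting the right half of
   FR[n]): r = 0..15 yields 16..31 (FR16L..FR31L) and r = 16..31 yields
   48..63 (FR16R..FR31R). */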
4426 static bool trans_fmpyadd(DisasContext *ctx, uint32_t insn, bool is_sub)
4428 unsigned tm = extract32(insn, 0, 5);
4429 unsigned f = extract32(insn, 5, 1);
4430 unsigned ra = extract32(insn, 6, 5);
4431 unsigned ta = extract32(insn, 11, 5);
4432 unsigned rm2 = extract32(insn, 16, 5);
4433 unsigned rm1 = extract32(insn, 21, 5);
4437 /* Independent multiply & add/sub, with undefined behaviour
4438 if outputs overlap inputs. */
4440 tm = fmpyadd_s_reg(tm);
4441 ra = fmpyadd_s_reg(ra);
4442 ta = fmpyadd_s_reg(ta);
4443 rm2 = fmpyadd_s_reg(rm2);
4444 rm1 = fmpyadd_s_reg(rm1);
4445 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4446 do_fop_weww(ctx, ta, ta, ra,
4447 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4449 do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
4450 do_fop_dedd(ctx, ta, ta, ra,
4451 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4454 return nullify_end(ctx);
4457 static bool trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
4458 const DisasInsn *di)
4460 unsigned rt = assemble_rt64(insn);
4461 unsigned neg = extract32(insn, 5, 1);
4462 unsigned rm1 = assemble_ra64(insn);
4463 unsigned rm2 = assemble_rb64(insn);
4464 unsigned ra3 = assemble_rc64(insn);
4468 a = load_frw0_i32(rm1);
4469 b = load_frw0_i32(rm2);
4470 c = load_frw0_i32(ra3);
4473 gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
4475 gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
4478 tcg_temp_free_i32(b);
4479 tcg_temp_free_i32(c);
4480 save_frw_i32(rt, a);
4481 tcg_temp_free_i32(a);
4482 return nullify_end(ctx);
4485 static bool trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
4486 const DisasInsn *di)
4488 unsigned rt = extract32(insn, 0, 5);
4489 unsigned neg = extract32(insn, 5, 1);
4490 unsigned rm1 = extract32(insn, 21, 5);
4491 unsigned rm2 = extract32(insn, 16, 5);
4492 unsigned ra3 = assemble_rc64(insn);
4501 gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
4503 gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
4506 tcg_temp_free_i64(b);
4507 tcg_temp_free_i64(c);
4509 tcg_temp_free_i64(a);
4510 return nullify_end(ctx);
4513 static const DisasInsn table_fp_fused[] = {
4514 { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
4515 { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }

static void translate_table_int(DisasContext *ctx, uint32_t insn,
                                const DisasInsn table[], size_t n)
{
    size_t i;
    for (i = 0; i < n; ++i) {
        if ((insn & table[i].mask) == table[i].insn) {
            table[i].trans(ctx, insn, &table[i]);
            return;
        }
    }
    qemu_log_mask(LOG_UNIMP, "UNIMP insn %08x @ " TARGET_FMT_lx "\n",
                  insn, ctx->base.pc_next);
    gen_illegal(ctx);
}

#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
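
/*
 * Note (added for clarity): entries are matched in table order and the
 * first entry whose fixed bits equal (insn & mask) wins, so more
 * specific patterns must precede less specific ones within a table.
 */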

static void translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc;

    /* Transition to the auto-generated decoder.  */
    if (decode(ctx, insn)) {
        return;
    }
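
    /*
     * Explanatory note: decode() is the decodetree-generated entry
     * point.  It returns true when the insn matched a pattern already
     * converted to the .decode file, so only not-yet-converted opcodes
     * fall through to the switch below.
     */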
    opc = extract32(insn, 26, 6);
    switch (opc) {
    case 0x01:
        translate_table(ctx, insn, table_mem_mgmt);
        return;
    case 0x02:
        translate_table(ctx, insn, table_arith_log);
        return;
    case 0x03:
        translate_table(ctx, insn, table_index_mem);
        return;
    case 0x06:
        trans_fmpyadd(ctx, insn, false);
        return;
    case 0x08:
        trans_ldil(ctx, insn);
        return;
    case 0x09:
        trans_copr_w(ctx, insn);
        return;
    case 0x0A:
        trans_addil(ctx, insn);
        return;
    case 0x0B:
        trans_copr_dw(ctx, insn);
        return;
    case 0x0C:
        translate_table(ctx, insn, table_float_0c);
        return;
    case 0x0D:
        trans_ldo(ctx, insn);
        return;
    case 0x0E:
        translate_table(ctx, insn, table_float_0e);
        return;

    case 0x10:
        trans_load(ctx, insn, false, MO_UB);
        return;
    case 0x11:
        trans_load(ctx, insn, false, MO_TEUW);
        return;
    case 0x12:
        trans_load(ctx, insn, false, MO_TEUL);
        return;
    case 0x13:
        trans_load(ctx, insn, true, MO_TEUL);
        return;
    case 0x16:
        trans_fload_mod(ctx, insn);
        return;
    case 0x17:
        trans_load_w(ctx, insn);
        return;
    case 0x18:
        trans_store(ctx, insn, false, MO_UB);
        return;
    case 0x19:
        trans_store(ctx, insn, false, MO_TEUW);
        return;
    case 0x1A:
        trans_store(ctx, insn, false, MO_TEUL);
        return;
    case 0x1B:
        trans_store(ctx, insn, true, MO_TEUL);
        return;
    case 0x1E:
        trans_fstore_mod(ctx, insn);
        return;
    case 0x1F:
        trans_store_w(ctx, insn);
        return;

    case 0x20:
        trans_cmpb(ctx, insn, true, false, false);
        return;
    case 0x21:
        trans_cmpb(ctx, insn, true, true, false);
        return;
    case 0x22:
        trans_cmpb(ctx, insn, false, false, false);
        return;
    case 0x23:
        trans_cmpb(ctx, insn, false, true, false);
        return;
    case 0x24:
        trans_cmpiclr(ctx, insn);
        return;
    case 0x25:
        trans_subi(ctx, insn);
        return;
    case 0x26:
        trans_fmpyadd(ctx, insn, true);
        return;
    case 0x27:
        trans_cmpb(ctx, insn, true, false, true);
        return;
    case 0x28:
        trans_addb(ctx, insn, true, false);
        return;
    case 0x29:
        trans_addb(ctx, insn, true, true);
        return;
    case 0x2A:
        trans_addb(ctx, insn, false, false);
        return;
    case 0x2B:
        trans_addb(ctx, insn, false, true);
        return;
    case 0x2C:
    case 0x2D:
        trans_addi(ctx, insn);
        return;
    case 0x2E:
        translate_table(ctx, insn, table_fp_fused);
        return;
    case 0x2F:
        trans_cmpb(ctx, insn, false, false, true);
        return;

    case 0x30:
    case 0x31:
        trans_bb(ctx, insn);
        return;
    case 0x32:
        trans_movb(ctx, insn, false);
        return;
    case 0x33:
        trans_movb(ctx, insn, true);
        return;
    case 0x34:
        translate_table(ctx, insn, table_sh_ex);
        return;
    case 0x35:
        translate_table(ctx, insn, table_depw);
        return;
    case 0x38:
        trans_be(ctx, insn, false);
        return;
    case 0x39:
        trans_be(ctx, insn, true);
        return;
    case 0x3A:
        translate_table(ctx, insn, table_branch);
        return;

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
    case 0x3F: /* unassigned */
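        /*
         * Explanatory note (an assumption about usage, not from the
         * original source): 0xfffdead0 and 0xfffdead1 are not real
         * PA-RISC encodings but magic words in unassigned opcode
         * space, which guest code such as firmware can execute to
         * power off or reset the emulated machine.
         */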
#ifndef CONFIG_USER_ONLY
        /* Unassigned, but use as system-halt.  */
        if (insn == 0xfffdead0) {
            gen_hlt(ctx, 0); /* halt system */
            return;
        }
        if (insn == 0xfffdead1) {
            gen_hlt(ctx, 1); /* reset system */
            return;
        }
#endif
        break;

    default:
        break;
    }
    gen_illegal(ctx);
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
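
    /*
     * Explanatory note: cs_base packs the front space identifier
     * (IASQ_F) into its high 32 bits and the signed IAOQ_B - IAOQ_F
     * offset into its low 32 bits; a zero offset means IAOQ_B was not
     * known when the TB was created, represented here as -1.
     */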
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
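
    /*
     * Worked example (added for clarity): with 4 KiB pages,
     * TARGET_PAGE_MASK is 0xfffff000, so pc_first == 0x1ff8 yields
     * -(0x1ff8 | 0xfffff000) / 4 == 8 / 4 == 2 insns to the boundary.
     */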

    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}
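
/*
 * Note (added for clarity): PSW[N] set at TB entry means the first
 * insn of the block is nullified; TCG_COND_ALWAYS in null_cond encodes
 * "known to be nullified" in the translator's static tracking.
 */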

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                     const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    gen_excp(ctx, EXCP_DEBUG);
    ctx->base.pc_next += 4;
    return true;
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
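
        /*
         * Explanatory note: the translator mirrors the architectural
         * two-entry instruction address queue.  iaoq_f/iaoq_b track
         * the front and back entries; iaoq_n is the next back entry.
         * The value -1 means "not known at translate time", in which
         * case iaoq_n_var holds the run-time value instead.
         */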

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            translate_one(ctx, insn);
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
        return;
    }
    if (ctx->iaoq_f == -1) {
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
        nullify_save(ctx);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
    } else if (ctx->iaoq_b == -1) {
        tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        qemu_log("IN:\n0x00000000:  (null)\n");
        return;
    case 0xb0:
        qemu_log("IN:\n0x000000b0:  light-weight-syscall\n");
        return;
    case 0xe0:
        qemu_log("IN:\n0x000000e0:  set-thread-pointer-syscall\n");
        return;
    case 0x100:
        qemu_log("IN:\n0x00000100:  syscall\n");
        return;
    }
#endif

    qemu_log("IN: %s\n", lookup_symbol(pc));
    log_target_disas(cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .breakpoint_check   = hppa_tr_breakpoint_check,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
}

void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != (target_ureg)-1) {
        env->iaoq_b = data[1];
    }
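
    /*
     * Explanatory note: data[] holds the two values recorded by
     * tcg_gen_insn_start(), i.e. IAOQ_F and IAOQ_B of the faulting
     * insn; IAOQ_B is restored only when it was statically known,
     * with (target_ureg)-1 marking the unknown case.
     */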

    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified.  */
    env->psw_n = 0;
}