]> Git Repo - qemu.git/blame - target/hppa/translate.c
target/hppa: add TLB protection id check
[qemu.git] / target / hppa / translate.c
CommitLineData
61766fe9
RH
1/*
2 * HPPA emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2016 Richard Henderson <[email protected]>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "disas/disas.h"
23#include "qemu/host-utils.h"
24#include "exec/exec-all.h"
25#include "tcg-op.h"
26#include "exec/cpu_ldst.h"
61766fe9
RH
27#include "exec/helper-proto.h"
28#include "exec/helper-gen.h"
869051ea 29#include "exec/translator.h"
61766fe9
RH
30#include "trace-tcg.h"
31#include "exec/log.h"
32
eaa3783b
RH
33/* Since we have a distinction between register size and address size,
34 we need to redefine all of these. */
35
36#undef TCGv
37#undef tcg_temp_new
38#undef tcg_global_reg_new
39#undef tcg_global_mem_new
40#undef tcg_temp_local_new
41#undef tcg_temp_free
42
43#if TARGET_LONG_BITS == 64
44#define TCGv_tl TCGv_i64
45#define tcg_temp_new_tl tcg_temp_new_i64
46#define tcg_temp_free_tl tcg_temp_free_i64
47#if TARGET_REGISTER_BITS == 64
48#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
49#else
50#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
51#endif
52#else
53#define TCGv_tl TCGv_i32
54#define tcg_temp_new_tl tcg_temp_new_i32
55#define tcg_temp_free_tl tcg_temp_free_i32
56#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
57#endif
58
59#if TARGET_REGISTER_BITS == 64
60#define TCGv_reg TCGv_i64
61
62#define tcg_temp_new tcg_temp_new_i64
63#define tcg_global_reg_new tcg_global_reg_new_i64
64#define tcg_global_mem_new tcg_global_mem_new_i64
65#define tcg_temp_local_new tcg_temp_local_new_i64
66#define tcg_temp_free tcg_temp_free_i64
67
68#define tcg_gen_movi_reg tcg_gen_movi_i64
69#define tcg_gen_mov_reg tcg_gen_mov_i64
70#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
71#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
72#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
73#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
74#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
75#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
76#define tcg_gen_ld_reg tcg_gen_ld_i64
77#define tcg_gen_st8_reg tcg_gen_st8_i64
78#define tcg_gen_st16_reg tcg_gen_st16_i64
79#define tcg_gen_st32_reg tcg_gen_st32_i64
80#define tcg_gen_st_reg tcg_gen_st_i64
81#define tcg_gen_add_reg tcg_gen_add_i64
82#define tcg_gen_addi_reg tcg_gen_addi_i64
83#define tcg_gen_sub_reg tcg_gen_sub_i64
84#define tcg_gen_neg_reg tcg_gen_neg_i64
85#define tcg_gen_subfi_reg tcg_gen_subfi_i64
86#define tcg_gen_subi_reg tcg_gen_subi_i64
87#define tcg_gen_and_reg tcg_gen_and_i64
88#define tcg_gen_andi_reg tcg_gen_andi_i64
89#define tcg_gen_or_reg tcg_gen_or_i64
90#define tcg_gen_ori_reg tcg_gen_ori_i64
91#define tcg_gen_xor_reg tcg_gen_xor_i64
92#define tcg_gen_xori_reg tcg_gen_xori_i64
93#define tcg_gen_not_reg tcg_gen_not_i64
94#define tcg_gen_shl_reg tcg_gen_shl_i64
95#define tcg_gen_shli_reg tcg_gen_shli_i64
96#define tcg_gen_shr_reg tcg_gen_shr_i64
97#define tcg_gen_shri_reg tcg_gen_shri_i64
98#define tcg_gen_sar_reg tcg_gen_sar_i64
99#define tcg_gen_sari_reg tcg_gen_sari_i64
100#define tcg_gen_brcond_reg tcg_gen_brcond_i64
101#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
102#define tcg_gen_setcond_reg tcg_gen_setcond_i64
103#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
104#define tcg_gen_mul_reg tcg_gen_mul_i64
105#define tcg_gen_muli_reg tcg_gen_muli_i64
106#define tcg_gen_div_reg tcg_gen_div_i64
107#define tcg_gen_rem_reg tcg_gen_rem_i64
108#define tcg_gen_divu_reg tcg_gen_divu_i64
109#define tcg_gen_remu_reg tcg_gen_remu_i64
110#define tcg_gen_discard_reg tcg_gen_discard_i64
111#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
112#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
113#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
114#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
115#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
116#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
117#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
118#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
119#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
120#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
121#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
122#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
123#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
124#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
125#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
126#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
127#define tcg_gen_andc_reg tcg_gen_andc_i64
128#define tcg_gen_eqv_reg tcg_gen_eqv_i64
129#define tcg_gen_nand_reg tcg_gen_nand_i64
130#define tcg_gen_nor_reg tcg_gen_nor_i64
131#define tcg_gen_orc_reg tcg_gen_orc_i64
132#define tcg_gen_clz_reg tcg_gen_clz_i64
133#define tcg_gen_ctz_reg tcg_gen_ctz_i64
134#define tcg_gen_clzi_reg tcg_gen_clzi_i64
135#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
136#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
137#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
138#define tcg_gen_rotl_reg tcg_gen_rotl_i64
139#define tcg_gen_rotli_reg tcg_gen_rotli_i64
140#define tcg_gen_rotr_reg tcg_gen_rotr_i64
141#define tcg_gen_rotri_reg tcg_gen_rotri_i64
142#define tcg_gen_deposit_reg tcg_gen_deposit_i64
143#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
144#define tcg_gen_extract_reg tcg_gen_extract_i64
145#define tcg_gen_sextract_reg tcg_gen_sextract_i64
146#define tcg_const_reg tcg_const_i64
147#define tcg_const_local_reg tcg_const_local_i64
148#define tcg_gen_movcond_reg tcg_gen_movcond_i64
149#define tcg_gen_add2_reg tcg_gen_add2_i64
150#define tcg_gen_sub2_reg tcg_gen_sub2_i64
151#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
152#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
153#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
5bfa8034 154#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
eaa3783b
RH
155#else
156#define TCGv_reg TCGv_i32
157#define tcg_temp_new tcg_temp_new_i32
158#define tcg_global_reg_new tcg_global_reg_new_i32
159#define tcg_global_mem_new tcg_global_mem_new_i32
160#define tcg_temp_local_new tcg_temp_local_new_i32
161#define tcg_temp_free tcg_temp_free_i32
162
163#define tcg_gen_movi_reg tcg_gen_movi_i32
164#define tcg_gen_mov_reg tcg_gen_mov_i32
165#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
166#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
167#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
168#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
169#define tcg_gen_ld32u_reg tcg_gen_ld_i32
170#define tcg_gen_ld32s_reg tcg_gen_ld_i32
171#define tcg_gen_ld_reg tcg_gen_ld_i32
172#define tcg_gen_st8_reg tcg_gen_st8_i32
173#define tcg_gen_st16_reg tcg_gen_st16_i32
174#define tcg_gen_st32_reg tcg_gen_st32_i32
175#define tcg_gen_st_reg tcg_gen_st_i32
176#define tcg_gen_add_reg tcg_gen_add_i32
177#define tcg_gen_addi_reg tcg_gen_addi_i32
178#define tcg_gen_sub_reg tcg_gen_sub_i32
179#define tcg_gen_neg_reg tcg_gen_neg_i32
180#define tcg_gen_subfi_reg tcg_gen_subfi_i32
181#define tcg_gen_subi_reg tcg_gen_subi_i32
182#define tcg_gen_and_reg tcg_gen_and_i32
183#define tcg_gen_andi_reg tcg_gen_andi_i32
184#define tcg_gen_or_reg tcg_gen_or_i32
185#define tcg_gen_ori_reg tcg_gen_ori_i32
186#define tcg_gen_xor_reg tcg_gen_xor_i32
187#define tcg_gen_xori_reg tcg_gen_xori_i32
188#define tcg_gen_not_reg tcg_gen_not_i32
189#define tcg_gen_shl_reg tcg_gen_shl_i32
190#define tcg_gen_shli_reg tcg_gen_shli_i32
191#define tcg_gen_shr_reg tcg_gen_shr_i32
192#define tcg_gen_shri_reg tcg_gen_shri_i32
193#define tcg_gen_sar_reg tcg_gen_sar_i32
194#define tcg_gen_sari_reg tcg_gen_sari_i32
195#define tcg_gen_brcond_reg tcg_gen_brcond_i32
196#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
197#define tcg_gen_setcond_reg tcg_gen_setcond_i32
198#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
199#define tcg_gen_mul_reg tcg_gen_mul_i32
200#define tcg_gen_muli_reg tcg_gen_muli_i32
201#define tcg_gen_div_reg tcg_gen_div_i32
202#define tcg_gen_rem_reg tcg_gen_rem_i32
203#define tcg_gen_divu_reg tcg_gen_divu_i32
204#define tcg_gen_remu_reg tcg_gen_remu_i32
205#define tcg_gen_discard_reg tcg_gen_discard_i32
206#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
207#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
208#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
209#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
210#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
211#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
212#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
213#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
214#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
215#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
216#define tcg_gen_ext32u_reg tcg_gen_mov_i32
217#define tcg_gen_ext32s_reg tcg_gen_mov_i32
218#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
219#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
220#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
221#define tcg_gen_andc_reg tcg_gen_andc_i32
222#define tcg_gen_eqv_reg tcg_gen_eqv_i32
223#define tcg_gen_nand_reg tcg_gen_nand_i32
224#define tcg_gen_nor_reg tcg_gen_nor_i32
225#define tcg_gen_orc_reg tcg_gen_orc_i32
226#define tcg_gen_clz_reg tcg_gen_clz_i32
227#define tcg_gen_ctz_reg tcg_gen_ctz_i32
228#define tcg_gen_clzi_reg tcg_gen_clzi_i32
229#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
230#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
231#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
232#define tcg_gen_rotl_reg tcg_gen_rotl_i32
233#define tcg_gen_rotli_reg tcg_gen_rotli_i32
234#define tcg_gen_rotr_reg tcg_gen_rotr_i32
235#define tcg_gen_rotri_reg tcg_gen_rotri_i32
236#define tcg_gen_deposit_reg tcg_gen_deposit_i32
237#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
238#define tcg_gen_extract_reg tcg_gen_extract_i32
239#define tcg_gen_sextract_reg tcg_gen_sextract_i32
240#define tcg_const_reg tcg_const_i32
241#define tcg_const_local_reg tcg_const_local_i32
242#define tcg_gen_movcond_reg tcg_gen_movcond_i32
243#define tcg_gen_add2_reg tcg_gen_add2_i32
244#define tcg_gen_sub2_reg tcg_gen_sub2_i32
245#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
246#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
247#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
5bfa8034 248#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
eaa3783b
RH
249#endif /* TARGET_REGISTER_BITS */
250
61766fe9
RH
251typedef struct DisasCond {
252 TCGCond c;
eaa3783b 253 TCGv_reg a0, a1;
61766fe9
RH
254 bool a0_is_n;
255 bool a1_is_0;
256} DisasCond;
257
258typedef struct DisasContext {
d01a3625 259 DisasContextBase base;
61766fe9
RH
260 CPUState *cs;
261
eaa3783b
RH
262 target_ureg iaoq_f;
263 target_ureg iaoq_b;
264 target_ureg iaoq_n;
265 TCGv_reg iaoq_n_var;
61766fe9 266
86f8d05f 267 int ntempr, ntempl;
5eecd37a 268 TCGv_reg tempr[8];
86f8d05f 269 TCGv_tl templ[4];
61766fe9
RH
270
271 DisasCond null_cond;
272 TCGLabel *null_lab;
273
1a19da0d 274 uint32_t insn;
494737b7 275 uint32_t tb_flags;
3d68ee7b
RH
276 int mmu_idx;
277 int privilege;
61766fe9
RH
278 bool psw_n_nonzero;
279} DisasContext;
280
e36f27ef
RH
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(int val)
{
    /* Remap the insn-encoding E and W bit positions to the PSW layout. */
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}
292
deee69a1
RH
/* Inverted space register indicates 0 means sr0 not inferred from base. */
static int expand_sr3x(int val)
{
    /* The field is stored complemented; -1 - val == ~val in two's
       complement, undoing the inversion. */
    return -1 - val;
}
298
1cd012a5
RH
/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M: 0 = no modify, 1 = pre, -1 = post. */
static int ma_to_m(int val)
{
    if (!(val & 2)) {
        /* M clear: no base-register modification. */
        return 0;
    }
    return (val & 1) ? -1 : 1;
}
305
740038d7
RH
/* Convert the sign of the displacement to a pre or post-modify. */
static int pos_to_m(int val)
{
    if (val) {
        return 1;
    }
    return -1;
}
311
/* As pos_to_m, with the result senses reversed. */
static int neg_to_m(int val)
{
    if (val) {
        return -1;
    }
    return 1;
}
316
/* Used for branch targets and fp memory ops. */
static int expand_shl2(int val)
{
    /* Scale by 4: equivalent to val << 2 on two's-complement targets. */
    return val * 4;
}
322
740038d7
RH
/* Used for fp memory ops. */
static int expand_shl3(int val)
{
    /* Scale by 8: equivalent to val << 3 on two's-complement targets. */
    return val * 8;
}
328
0588e061
RH
/* Used for assemble_21. */
static int expand_shl11(int val)
{
    /* Scale by 2048: equivalent to val << 11. */
    return val * 2048;
}
334
01afb7be 335
40f9f908
RH
336/* Include the auto-generated decoder. */
337#include "decode.inc.c"
338
869051ea
RH
339/* We are not using a goto_tb (for whatever reason), but have updated
340 the iaq (for whatever reason), so don't do it again on exit. */
341#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
61766fe9 342
869051ea
RH
343/* We are exiting the TB, but have neither emitted a goto_tb, nor
344 updated the iaq for the next instruction to be executed. */
345#define DISAS_IAQ_N_STALE DISAS_TARGET_1
61766fe9 346
e1b5a5ed
RH
347/* Similarly, but we want to return to the main loop immediately
348 to recognize unmasked interrupts. */
349#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
350
61766fe9 351/* global register indexes */
eaa3783b 352static TCGv_reg cpu_gr[32];
33423472 353static TCGv_i64 cpu_sr[4];
494737b7 354static TCGv_i64 cpu_srH;
eaa3783b
RH
355static TCGv_reg cpu_iaoq_f;
356static TCGv_reg cpu_iaoq_b;
c301f34e
RH
357static TCGv_i64 cpu_iasq_f;
358static TCGv_i64 cpu_iasq_b;
eaa3783b
RH
359static TCGv_reg cpu_sar;
360static TCGv_reg cpu_psw_n;
361static TCGv_reg cpu_psw_v;
362static TCGv_reg cpu_psw_cb;
363static TCGv_reg cpu_psw_cb_msb;
61766fe9
RH
364
365#include "exec/gen-icount.h"
366
/* Create the TCG globals that mirror the HPPA CPU state: general
   registers, space registers, the IA queues, and the PSW condition
   bits that are tracked individually during translation. */
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them. */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    /* r0 always reads as zero and is never a real global. */
    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    /* srH aliases sr[4]; see load_spr for its use with TB_FLAG_SR_SAME. */
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
425
129e9cc3
RH
/* Build a condition that is never true (no nullification). */
static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}
434
df0232fe
RH
/* Build a condition that is always true. */
static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}
443
129e9cc3
RH
/* Build the condition "PSW[N] != 0", referencing the psw_n global
   directly (a0_is_n records this so it is not freed). */
static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a0_is_n = true,
        .a1 = NULL,
        .a1_is_0 = true
    };
}
454
/* Build the condition "a0 <c> 0", taking ownership of temporary A0. */
static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1_is_0 = true
    };
}
129e9cc3 462
b47a4a02
SS
/* Build the condition "a0 <c> 0", copying A0 into a fresh temp so
   later writes to the caller's value do not change the condition. */
static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}
469
/* Build the condition "a0 <c> a1"; both operands are copied into
   fresh temporaries owned by the returned condition. */
static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}
482
/* Materialize the implicit zero second operand as a real constant,
   so COND may be fed to brcond/setcond/movcond. */
static void cond_prep(DisasCond *cond)
{
    if (cond->a1_is_0) {
        cond->a1_is_0 = false;
        cond->a1 = tcg_const_reg(0);
    }
}
490
/* Release any temporaries owned by COND and reset it to "never". */
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        /* Do not free a0 when it aliases the psw_n global, nor a1 when
           the implicit zero was never materialized by cond_prep. */
        if (!cond->a0_is_n) {
            tcg_temp_free(cond->a0);
        }
        if (!cond->a1_is_0) {
            tcg_temp_free(cond->a1);
        }
        cond->a0_is_n = false;
        cond->a1_is_0 = false;
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        /* Already free. */
        break;
    }
}
513
/* Allocate a register-sized temporary, recorded in the context so it
   can be freed at the end of the instruction. */
static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}
520
86f8d05f
RH
#ifndef CONFIG_USER_ONLY
/* Allocate an address-sized temporary, tracked like get_temp's. */
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif
529
/* Return a fresh tracked temporary holding the constant V. */
static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}
536
/* Read general register REG.  r0 has no backing global (it always
   reads as zero), so return a zeroed temporary for it. */
static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}
547
/* Return a destination for a write to register REG.  Writes to r0 are
   discarded, and while a nullification condition is pending we must
   not clobber the real register, so both cases get a temporary; the
   caller completes the store via save_gpr. */
static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}
556
/* Store T into DEST, unless the current insn is being nullified, in
   which case DEST keeps its old value via a conditional move. */
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        cond_prep(&ctx->null_cond);
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}
567
/* Write T to general register REG, honoring pending nullification.
   Writes to r0 are silently discarded. */
static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}
574
96d6407f
RH
575#ifdef HOST_WORDS_BIGENDIAN
576# define HI_OFS 0
577# define LO_OFS 4
578#else
579# define HI_OFS 4
580# define LO_OFS 0
581#endif
582
/* Read one 32-bit half of FP register RT&31; bit 5 of RT selects
   which half (HI_OFS/LO_OFS account for host endianness). */
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}
591
ebe9383c
RH
/* As load_frw_i32, but register 0 reads as constant zero. */
static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}
600
/* As load_frw0_i32, but zero-extend the 32-bit half into an i64. */
static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}
613
96d6407f
RH
/* Write VAL to the 32-bit half of FP register RT&31 selected by bit 5. */
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}
620
621#undef HI_OFS
622#undef LO_OFS
623
/* Read the full 64-bit FP register RT. */
static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}
630
ebe9383c
RH
/* As load_frd, but register 0 reads as constant zero. */
static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}
639
96d6407f
RH
/* Write VAL to the full 64-bit FP register RT. */
static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}
644
33423472
RH
/* Load space register REG into DEST.  SR[0-3] are TCG globals; for
   SR[4-7], when TB_FLAG_SR_SAME is set the cached srH (alias of sr[4])
   is used, otherwise the value is loaded from env. */
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    /* User mode: all space registers read as zero. */
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
659
129e9cc3
RH
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();
        cond_prep(&ctx->null_cond);

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0_is_n) {
            ctx->null_cond.a0_is_n = false;
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        /* Branch over the insn body when the condition holds. */
        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}
690
/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Next insn executes unconditionally; ensure PSW[N] is clear. */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    /* If a0 already aliases psw_n, the value is in place. */
    if (!ctx->null_cond.a0_is_n) {
        cond_prep(&ctx->null_cond);
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}
708
/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}
718
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function. */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    /* NORETURN applied only to the nullified branch path; the
       fall-through (skipped) path continues with the next insn. */
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
757
/* Copy an IA offset queue entry into DEST: the compile-time constant
   IVAL when known (!= -1), otherwise the run-time variable VVAL. */
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}
766
/* Compute a branch destination relative to the front of the IA queue:
   current offset + displacement + 8. */
static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}
771
/* Emit the raw call to the exception helper with code EXCEPTION. */
static void gen_excp_1(int exception)
{
    TCGv_i32 t = tcg_const_i32(exception);
    gen_helper_excp(cpu_env, t);
    tcg_temp_free_i32(t);
}
778
/* Raise EXCEPTION, first synchronizing the IA queue globals and
   PSW[N] so the handler observes consistent state. */
static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}
787
/* Raise exception EXC, recording the raw opcode of the faulting
   instruction in cr[CR_IIR] first.  Honors nullification. */
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    TCGv_reg tmp;

    nullify_over(ctx);
    tmp = tcg_const_reg(ctx->insn);
    tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    tcg_temp_free(tmp);
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}
799
/* Raise an illegal-instruction exception for the current insn. */
static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}
804
40f9f908
RH
/* Trap with EXCP unless executing at the most privileged level (0).
   In user-only mode no privileged execution exists, so always trap. */
#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif
e1b5a5ed 816
/* Return true if chaining directly to DEST via goto_tb is allowed. */
static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    /* Suppress goto_tb for page crossing, IO, or single-steping. */
    return !(((ctx->base.pc_first ^ dest) & TARGET_PAGE_MASK)
             || (tb_cflags(ctx->base.tb) & CF_LAST_IO)
             || ctx->base.singlestep_enabled);
}
824
129e9cc3
RH
/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}
834
/* Transfer control to the TB whose IA queue is {F, B}.  WHICH selects
   the goto_tb slot.  A value of -1 means the entry is only known at
   run time (in cpu_iaoq_b / ctx->iaoq_n_var). */
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        /* Both entries are compile-time constants: chain directly. */
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        /* Indirect transfer: update the queue, then look up the TB. */
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
}
853
b47a4a02
SS
/* True for the arithmetic conditions that consume the overflow (SV)
   value: 2 (<), 3 (<=) and 6 (SV); see do_cond. */
static bool cond_need_sv(int c)
{
    switch (c) {
    case 2:
    case 3:
    case 6:
        return true;
    default:
        return false;
    }
}
858
/* True for the arithmetic conditions that consume the carry-out bit:
   4 (NUV) and 5 (ZNV); see do_cond. */
static bool cond_need_cb(int c)
{
    return (c & ~1) == 4;
}
863
/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 *
 * RES is the operation result; CB_MSB the most-significant carry-out
 * bit; SV the signed-overflow value.  CB_MSB/SV are only dereferenced
 * for the cases that need them (see cond_need_cb / cond_need_sv).
 */
static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    /* The low bit of CF selects the negated sense of the condition. */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
929
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */
static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        /* Remaining conditions cannot be phrased as an input compare;
           fall back to the result-based computation. */
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
964
df0232fe
RH
/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */
static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        /* Parity tests are shared with the arithmetic conditions. */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}
1010
98cd9ca7
RH
1011/* Similar, but for shift/extract/deposit conditions. */
1012
eaa3783b 1013static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
98cd9ca7
RH
1014{
1015 unsigned c, f;
1016
1017 /* Convert the compressed condition codes to standard.
1018 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
1019 4-7 are the reverse of 0-3. */
1020 c = orig & 3;
1021 if (c == 3) {
1022 c = 7;
1023 }
1024 f = (orig & 4) / 4;
1025
1026 return do_log_cond(c * 2 + f, res);
1027}
1028
/* Similar, but for unit conditions. */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        /* Reconstruct per-bit carry-out with the majority function:
           cb = (in1 & in2) | ((in1 | in2) & ~res).  */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        /* Same trick applied to 16-bit halfwords.  */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        /* Test carry-out of any 4-bit digit.  */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        /* Test carry-out of any byte.  */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        /* Test carry-out of any halfword.  */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
1106
1107/* Compute signed overflow for addition. */
eaa3783b
RH
1108static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1109 TCGv_reg in1, TCGv_reg in2)
b2167459 1110{
eaa3783b
RH
1111 TCGv_reg sv = get_temp(ctx);
1112 TCGv_reg tmp = tcg_temp_new();
b2167459 1113
eaa3783b
RH
1114 tcg_gen_xor_reg(sv, res, in1);
1115 tcg_gen_xor_reg(tmp, in1, in2);
1116 tcg_gen_andc_reg(sv, sv, tmp);
b2167459
RH
1117 tcg_temp_free(tmp);
1118
1119 return sv;
1120}
1121
1122/* Compute signed overflow for subtraction. */
eaa3783b
RH
1123static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1124 TCGv_reg in1, TCGv_reg in2)
b2167459 1125{
eaa3783b
RH
1126 TCGv_reg sv = get_temp(ctx);
1127 TCGv_reg tmp = tcg_temp_new();
b2167459 1128
eaa3783b
RH
1129 tcg_gen_xor_reg(sv, res, in1);
1130 tcg_gen_xor_reg(tmp, in1, in2);
1131 tcg_gen_and_reg(sv, sv, tmp);
b2167459
RH
1132 tcg_temp_free(tmp);
1133
1134 return sv;
1135}
1136
/* Generate an add-family operation: RT = (IN1 << SHIFT) + IN2.
 * IS_L:   "logical" add -- carry bits are not written back to the PSW.
 * IS_TSV: trap on signed overflow.
 * IS_TC:  trap on condition CF.
 * IS_C:   also add the PSW carry bit (add-with-carry forms).
 * CF:     4-bit condition field installed as the new nullification.
 */
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        /* Carry is needed: compute the double-word sum so that the
           carry out of the MSB lands in cb_msb.  */
        TCGv_reg zero = tcg_const_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        tcg_temp_free(zero);
        if (!is_l) {
            /* Per-bit carry vector: cb = in1 ^ in2 ^ dest.  */
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        /* Logical add without a carry-based condition: plain add.  */
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift. */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback. */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result. */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
1207
0c982a28
RH
1208static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
1209 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1210{
1211 TCGv_reg tcg_r1, tcg_r2;
1212
1213 if (a->cf) {
1214 nullify_over(ctx);
1215 }
1216 tcg_r1 = load_gpr(ctx, a->r1);
1217 tcg_r2 = load_gpr(ctx, a->r2);
1218 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
1219 return nullify_end(ctx);
1220}
1221
0588e061
RH
1222static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1223 bool is_tsv, bool is_tc)
1224{
1225 TCGv_reg tcg_im, tcg_r2;
1226
1227 if (a->cf) {
1228 nullify_over(ctx);
1229 }
1230 tcg_im = load_const(ctx, a->i);
1231 tcg_r2 = load_gpr(ctx, a->r);
1232 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
1233 return nullify_end(ctx);
1234}
1235
/* Generate a subtract-family operation: RT = IN1 - IN2.
 * IS_TSV: trap on signed overflow.
 * IS_B:   subtract-with-borrow (uses the PSW carry bit).
 * IS_TC:  trap on condition CF.
 * Carry/borrow bits are always written back to the PSW.
 */
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_const_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        /* Per-bit carry vector: cb = ~in2 ^ in1 ^ dest.  */
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting. */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        /* Per-bit carry vector: cb = ~(in1 ^ in2) ^ dest.  */
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }
    tcg_temp_free(zero);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow. */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback. */
    if (is_tc) {
        cond_prep(&cond);
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result. */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
1301
0c982a28
RH
1302static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1303 bool is_tsv, bool is_b, bool is_tc)
1304{
1305 TCGv_reg tcg_r1, tcg_r2;
1306
1307 if (a->cf) {
1308 nullify_over(ctx);
1309 }
1310 tcg_r1 = load_gpr(ctx, a->r1);
1311 tcg_r2 = load_gpr(ctx, a->r2);
1312 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1313 return nullify_end(ctx);
1314}
1315
0588e061
RH
1316static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1317{
1318 TCGv_reg tcg_im, tcg_r2;
1319
1320 if (a->cf) {
1321 nullify_over(ctx);
1322 }
1323 tcg_im = load_const(ctx, a->i);
1324 tcg_r2 = load_gpr(ctx, a->r);
1325 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1326 return nullify_end(ctx);
1327}
1328
/* Compare IN1 - IN2 under condition CF, then write zero to RT
   (compare-and-clear): the difference is used only for the condition,
   never stored.  */
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare. */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear. */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
1356
/* Apply the logical operation FN to IN1, IN2, write the result to RT,
   and install the logical condition CF (if non-zero) as the new
   nullification.  */
static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback. */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}
1373
0c982a28
RH
1374static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
1375 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1376{
1377 TCGv_reg tcg_r1, tcg_r2;
1378
1379 if (a->cf) {
1380 nullify_over(ctx);
1381 }
1382 tcg_r1 = load_gpr(ctx, a->r1);
1383 tcg_r2 = load_gpr(ctx, a->r2);
1384 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
1385 return nullify_end(ctx);
1386}
1387
/* Apply the unit operation FN to IN1, IN2, write the result to RT,
   and install the unit condition CF.  IS_TC additionally raises a
   conditional trap when the condition holds.  */
static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        /* No condition: write the result and drop any pending
           nullification state.  */
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            /* Emit the conditional trap before writeback.  */
            TCGv_reg tmp = tcg_temp_new();
            cond_prep(&cond);
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}
1419
#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        /* Explicit space register: undo the ~SP encoding if present
           and load SR[sp] directly.  */
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    /* NOTE(review): TB_FLAG_SR_SAME presumably records that the
       selectable space registers hold one common value, cached in
       cpu_srH -- confirm against the flag's producer.  */
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    /* Index sr[4 + base{top 2 bits}]: shift the top 5 bits down and
       mask with 030 (0b11000), yielding top2 * 8 -- a byte offset
       into the uint64_t sr[] array starting at sr[4].  */
    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
#endif
1459
/* Form the guest virtual address for a memory access from base
   register RB, an optional scaled index RX or displacement DISP, and
   the space selected by SP.  *POFS receives the offset value used for
   base-register writeback; *PGVA receives the full address.  MODIFY
   <= 0 addresses with the (pre-)modified offset, MODIFY > 0
   (post-modify) addresses with the original base.  IS_PHYS suppresses
   the space-register OR for physical-mode accesses.  */
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP. */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        /* Wide mode: mask off the top two bits of the offset.  */
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
1494
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
    /* Base-register update for pre/post-modify forms.  */
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
1517
/* As do_load_32, but for a 64-bit destination.  */
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
    /* Base-register update for pre/post-modify forms.  */
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
1535
/* Emit a 32-bit memory store; modify as for do_load_32.  */
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
    /* Base-register update for pre/post-modify forms.  */
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
1553
/* Emit a 64-bit memory store; modify as for do_load_32.  */
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
    /* Base-register update for pre/post-modify forms.  */
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}
1571
eaa3783b
RH
1572#if TARGET_REGISTER_BITS == 64
1573#define do_load_reg do_load_64
1574#define do_store_reg do_store_64
96d6407f 1575#else
eaa3783b
RH
1576#define do_load_reg do_load_32
1577#define do_store_reg do_store_32
96d6407f
RH
1578#endif
1579
/* Integer load into general register RT, with its own nullification
   bracket (unlike do_load_32/64, which rely on the caller's).  */
static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, TCGMemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update. */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load. */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}
1600
/* Floating-point word load into FR[rt].  */
static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    /* A load to FR0 requires extra work in the helper.  */
    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}
1620
1621static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1622{
1623 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1624 a->disp, a->sp, a->m);
96d6407f
RH
1625}
1626
/* Floating-point doubleword load into FR[rt].  */
static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    /* A load to FR0 requires extra work in the helper.  */
    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}
1646
1647static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1648{
1649 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1650 a->disp, a->sp, a->m);
96d6407f
RH
1651}
1652
1cd012a5 1653static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
31234768
RH
1654 target_sreg disp, unsigned sp,
1655 int modify, TCGMemOp mop)
96d6407f
RH
1656{
1657 nullify_over(ctx);
86f8d05f 1658 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1cd012a5 1659 return nullify_end(ctx);
96d6407f
RH
1660}
1661
/* Floating-point word store from FR[rt].  */
static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx);
}
1676
1677static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1678{
1679 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1680 a->disp, a->sp, a->m);
96d6407f
RH
1681}
1682
/* Floating-point doubleword store from FR[rt].  */
static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx);
}
1697
1698static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1699{
1700 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1701 a->disp, a->sp, a->m);
96d6407f
RH
1702}
1703
1ca74648 1704static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1705 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
ebe9383c
RH
1706{
1707 TCGv_i32 tmp;
1708
1709 nullify_over(ctx);
1710 tmp = load_frw0_i32(ra);
1711
1712 func(tmp, cpu_env, tmp);
1713
1714 save_frw_i32(rt, tmp);
1715 tcg_temp_free_i32(tmp);
1ca74648 1716 return nullify_end(ctx);
ebe9383c
RH
1717}
1718
1ca74648 1719static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1720 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
ebe9383c
RH
1721{
1722 TCGv_i32 dst;
1723 TCGv_i64 src;
1724
1725 nullify_over(ctx);
1726 src = load_frd(ra);
1727 dst = tcg_temp_new_i32();
1728
1729 func(dst, cpu_env, src);
1730
1731 tcg_temp_free_i64(src);
1732 save_frw_i32(rt, dst);
1733 tcg_temp_free_i32(dst);
1ca74648 1734 return nullify_end(ctx);
ebe9383c
RH
1735}
1736
1ca74648 1737static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1738 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
ebe9383c
RH
1739{
1740 TCGv_i64 tmp;
1741
1742 nullify_over(ctx);
1743 tmp = load_frd0(ra);
1744
1745 func(tmp, cpu_env, tmp);
1746
1747 save_frd(rt, tmp);
1748 tcg_temp_free_i64(tmp);
1ca74648 1749 return nullify_end(ctx);
ebe9383c
RH
1750}
1751
1ca74648 1752static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1753 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
ebe9383c
RH
1754{
1755 TCGv_i32 src;
1756 TCGv_i64 dst;
1757
1758 nullify_over(ctx);
1759 src = load_frw0_i32(ra);
1760 dst = tcg_temp_new_i64();
1761
1762 func(dst, cpu_env, src);
1763
1764 tcg_temp_free_i32(src);
1765 save_frd(rt, dst);
1766 tcg_temp_free_i64(dst);
1ca74648 1767 return nullify_end(ctx);
ebe9383c
RH
1768}
1769
1ca74648 1770static bool do_fop_weww(DisasContext *ctx, unsigned rt,
31234768
RH
1771 unsigned ra, unsigned rb,
1772 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
ebe9383c
RH
1773{
1774 TCGv_i32 a, b;
1775
1776 nullify_over(ctx);
1777 a = load_frw0_i32(ra);
1778 b = load_frw0_i32(rb);
1779
1780 func(a, cpu_env, a, b);
1781
1782 tcg_temp_free_i32(b);
1783 save_frw_i32(rt, a);
1784 tcg_temp_free_i32(a);
1ca74648 1785 return nullify_end(ctx);
ebe9383c
RH
1786}
1787
1ca74648 1788static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
31234768
RH
1789 unsigned ra, unsigned rb,
1790 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
ebe9383c
RH
1791{
1792 TCGv_i64 a, b;
1793
1794 nullify_over(ctx);
1795 a = load_frd0(ra);
1796 b = load_frd0(rb);
1797
1798 func(a, cpu_env, a, b);
1799
1800 tcg_temp_free_i64(b);
1801 save_frd(rt, a);
1802 tcg_temp_free_i64(a);
1ca74648 1803 return nullify_end(ctx);
ebe9383c
RH
1804}
1805
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* No nullification pending: just redirect the next insn queue
           entry; LINK (if any) receives the return address.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        /* Nullification pending: bracket the branch so the nullified
           path falls through to the sequential successor below.  */
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            /* Branch nullifies its delay slot and we may skip it.  */
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        /* Nullified path: continue sequentially.  */
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1842
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches. */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    cond_prep(cond);
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches. */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified. */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1909
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* No nullification pending: install DEST as the next insn
           queue entry.  */
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                /* Skip the nullified delay slot entirely: jump
                   straight to DEST and exit the TB.  */
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution. */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IOAQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B. */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        /* Conditional indirect branch: select between the sequential
           successor and DEST with movcond, avoiding a TB exit.  */
        cond_prep(&ctx->null_cond);
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            /* LINK is only written when the branch is actually taken.  */
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch. */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}
1992
/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *        IAOQ_Next{30..31} <- GR[b]{30..31};
 *    else
 *        IAOQ_Next{30..31} <- IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.  The privilege
 * level lives in the low 2 bits of the offset.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease. */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase. */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        /* Clamp: take the numerically larger (less privileged) of the
           current level and the level encoded in the target.  */
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
2021
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions and implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    /* Dispatch on the (word-aligned) offset within page zero.  */
    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif
7ad439df 2086
deee69a1 2087static bool trans_nop(DisasContext *ctx, arg_nop *a)
b2167459
RH
2088{
2089 cond_free(&ctx->null_cond);
31234768 2090 return true;
b2167459
RH
2091}
2092
40f9f908 2093static bool trans_break(DisasContext *ctx, arg_break *a)
98a9cb79 2094{
31234768 2095 return gen_excp_iir(ctx, EXCP_BREAK);
98a9cb79
RH
2096}
2097
e36f27ef 2098static bool trans_sync(DisasContext *ctx, arg_sync *a)
98a9cb79
RH
2099{
2100 /* No point in nullifying the memory barrier. */
2101 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2102
2103 cond_free(&ctx->null_cond);
31234768 2104 return true;
98a9cb79
RH
2105}
2106
c603e14a 2107static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
98a9cb79 2108{
c603e14a 2109 unsigned rt = a->t;
eaa3783b
RH
2110 TCGv_reg tmp = dest_gpr(ctx, rt);
2111 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
98a9cb79
RH
2112 save_gpr(ctx, rt, tmp);
2113
2114 cond_free(&ctx->null_cond);
31234768 2115 return true;
98a9cb79
RH
2116}
2117
c603e14a 2118static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
98a9cb79 2119{
c603e14a
RH
2120 unsigned rt = a->t;
2121 unsigned rs = a->sp;
33423472
RH
2122 TCGv_i64 t0 = tcg_temp_new_i64();
2123 TCGv_reg t1 = tcg_temp_new();
98a9cb79 2124
33423472
RH
2125 load_spr(ctx, t0, rs);
2126 tcg_gen_shri_i64(t0, t0, 32);
2127 tcg_gen_trunc_i64_reg(t1, t0);
2128
2129 save_gpr(ctx, rt, t1);
2130 tcg_temp_free(t1);
2131 tcg_temp_free_i64(t0);
98a9cb79
RH
2132
2133 cond_free(&ctx->null_cond);
31234768 2134 return true;
98a9cb79
RH
2135}
2136
c603e14a 2137static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
98a9cb79 2138{
c603e14a
RH
2139 unsigned rt = a->t;
2140 unsigned ctl = a->r;
eaa3783b 2141 TCGv_reg tmp;
98a9cb79
RH
2142
2143 switch (ctl) {
35136a77 2144 case CR_SAR:
98a9cb79 2145#ifdef TARGET_HPPA64
c603e14a 2146 if (a->e == 0) {
98a9cb79
RH
2147 /* MFSAR without ,W masks low 5 bits. */
2148 tmp = dest_gpr(ctx, rt);
eaa3783b 2149 tcg_gen_andi_reg(tmp, cpu_sar, 31);
98a9cb79 2150 save_gpr(ctx, rt, tmp);
35136a77 2151 goto done;
98a9cb79
RH
2152 }
2153#endif
2154 save_gpr(ctx, rt, cpu_sar);
35136a77
RH
2155 goto done;
2156 case CR_IT: /* Interval Timer */
2157 /* FIXME: Respect PSW_S bit. */
2158 nullify_over(ctx);
98a9cb79 2159 tmp = dest_gpr(ctx, rt);
84b41e65 2160 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
49c29d6c
RH
2161 gen_io_start();
2162 gen_helper_read_interval_timer(tmp);
2163 gen_io_end();
31234768 2164 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
49c29d6c
RH
2165 } else {
2166 gen_helper_read_interval_timer(tmp);
49c29d6c 2167 }
98a9cb79 2168 save_gpr(ctx, rt, tmp);
31234768 2169 return nullify_end(ctx);
98a9cb79 2170 case 26:
98a9cb79 2171 case 27:
98a9cb79
RH
2172 break;
2173 default:
2174 /* All other control registers are privileged. */
35136a77
RH
2175 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2176 break;
98a9cb79
RH
2177 }
2178
35136a77
RH
2179 tmp = get_temp(ctx);
2180 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2181 save_gpr(ctx, rt, tmp);
2182
2183 done:
98a9cb79 2184 cond_free(&ctx->null_cond);
31234768 2185 return true;
98a9cb79
RH
2186}
2187
c603e14a 2188static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
33423472 2189{
c603e14a
RH
2190 unsigned rr = a->r;
2191 unsigned rs = a->sp;
33423472
RH
2192 TCGv_i64 t64;
2193
2194 if (rs >= 5) {
2195 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2196 }
2197 nullify_over(ctx);
2198
2199 t64 = tcg_temp_new_i64();
2200 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2201 tcg_gen_shli_i64(t64, t64, 32);
2202
2203 if (rs >= 4) {
2204 tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
494737b7 2205 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
33423472
RH
2206 } else {
2207 tcg_gen_mov_i64(cpu_sr[rs], t64);
2208 }
2209 tcg_temp_free_i64(t64);
2210
31234768 2211 return nullify_end(ctx);
33423472
RH
2212}
2213
c603e14a 2214static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
98a9cb79 2215{
c603e14a
RH
2216 unsigned ctl = a->t;
2217 TCGv_reg reg = load_gpr(ctx, a->r);
eaa3783b 2218 TCGv_reg tmp;
98a9cb79 2219
35136a77 2220 if (ctl == CR_SAR) {
98a9cb79 2221 tmp = tcg_temp_new();
35136a77 2222 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
98a9cb79
RH
2223 save_or_nullify(ctx, cpu_sar, tmp);
2224 tcg_temp_free(tmp);
35136a77
RH
2225
2226 cond_free(&ctx->null_cond);
31234768 2227 return true;
98a9cb79
RH
2228 }
2229
35136a77
RH
2230 /* All other control registers are privileged or read-only. */
2231 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2232
c603e14a 2233#ifndef CONFIG_USER_ONLY
35136a77
RH
2234 nullify_over(ctx);
2235 switch (ctl) {
2236 case CR_IT:
49c29d6c 2237 gen_helper_write_interval_timer(cpu_env, reg);
35136a77 2238 break;
4f5f2548
RH
2239 case CR_EIRR:
2240 gen_helper_write_eirr(cpu_env, reg);
2241 break;
2242 case CR_EIEM:
2243 gen_helper_write_eiem(cpu_env, reg);
31234768 2244 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
4f5f2548
RH
2245 break;
2246
35136a77
RH
2247 case CR_IIASQ:
2248 case CR_IIAOQ:
2249 /* FIXME: Respect PSW_Q bit */
2250 /* The write advances the queue and stores to the back element. */
2251 tmp = get_temp(ctx);
2252 tcg_gen_ld_reg(tmp, cpu_env,
2253 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2254 tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2255 tcg_gen_st_reg(reg, cpu_env,
2256 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2257 break;
2258
d5de20bd
SS
2259 case CR_PID1:
2260 case CR_PID2:
2261 case CR_PID3:
2262 case CR_PID4:
2263 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2264#ifndef CONFIG_USER_ONLY
2265 gen_helper_change_prot_id(cpu_env);
2266#endif
2267 break;
2268
35136a77
RH
2269 default:
2270 tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
2271 break;
2272 }
31234768 2273 return nullify_end(ctx);
4f5f2548 2274#endif
98a9cb79
RH
2275}
2276
c603e14a 2277static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
98a9cb79 2278{
eaa3783b 2279 TCGv_reg tmp = tcg_temp_new();
98a9cb79 2280
c603e14a 2281 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
eaa3783b 2282 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
98a9cb79
RH
2283 save_or_nullify(ctx, cpu_sar, tmp);
2284 tcg_temp_free(tmp);
2285
2286 cond_free(&ctx->null_cond);
31234768 2287 return true;
98a9cb79
RH
2288}
2289
e36f27ef 2290static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
98a9cb79 2291{
e36f27ef 2292 TCGv_reg dest = dest_gpr(ctx, a->t);
98a9cb79 2293
2330504c
HD
2294#ifdef CONFIG_USER_ONLY
2295 /* We don't implement space registers in user mode. */
eaa3783b 2296 tcg_gen_movi_reg(dest, 0);
2330504c 2297#else
2330504c
HD
2298 TCGv_i64 t0 = tcg_temp_new_i64();
2299
e36f27ef 2300 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2330504c
HD
2301 tcg_gen_shri_i64(t0, t0, 32);
2302 tcg_gen_trunc_i64_reg(dest, t0);
2303
2304 tcg_temp_free_i64(t0);
2305#endif
e36f27ef 2306 save_gpr(ctx, a->t, dest);
98a9cb79
RH
2307
2308 cond_free(&ctx->null_cond);
31234768 2309 return true;
98a9cb79
RH
2310}
2311
e36f27ef 2312static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
e1b5a5ed 2313{
e36f27ef
RH
2314 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2315#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2316 TCGv_reg tmp;
2317
e1b5a5ed
RH
2318 nullify_over(ctx);
2319
2320 tmp = get_temp(ctx);
2321 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
e36f27ef 2322 tcg_gen_andi_reg(tmp, tmp, ~a->i);
e1b5a5ed 2323 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
e36f27ef 2324 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2325
2326 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
31234768
RH
2327 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2328 return nullify_end(ctx);
e36f27ef 2329#endif
e1b5a5ed
RH
2330}
2331
e36f27ef 2332static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
e1b5a5ed 2333{
e36f27ef
RH
2334 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2335#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2336 TCGv_reg tmp;
2337
e1b5a5ed
RH
2338 nullify_over(ctx);
2339
2340 tmp = get_temp(ctx);
2341 tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
e36f27ef 2342 tcg_gen_ori_reg(tmp, tmp, a->i);
e1b5a5ed 2343 gen_helper_swap_system_mask(tmp, cpu_env, tmp);
e36f27ef 2344 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2345
2346 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
31234768
RH
2347 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2348 return nullify_end(ctx);
e36f27ef 2349#endif
e1b5a5ed
RH
2350}
2351
c603e14a 2352static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
e1b5a5ed 2353{
e1b5a5ed 2354 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
c603e14a
RH
2355#ifndef CONFIG_USER_ONLY
2356 TCGv_reg tmp, reg;
e1b5a5ed
RH
2357 nullify_over(ctx);
2358
c603e14a 2359 reg = load_gpr(ctx, a->r);
e1b5a5ed
RH
2360 tmp = get_temp(ctx);
2361 gen_helper_swap_system_mask(tmp, cpu_env, reg);
2362
2363 /* Exit the TB to recognize new interrupts. */
31234768
RH
2364 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2365 return nullify_end(ctx);
c603e14a 2366#endif
e1b5a5ed 2367}
f49b3537 2368
e36f27ef 2369static bool do_rfi(DisasContext *ctx, bool rfi_r)
f49b3537 2370{
f49b3537 2371 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2372#ifndef CONFIG_USER_ONLY
f49b3537
RH
2373 nullify_over(ctx);
2374
e36f27ef 2375 if (rfi_r) {
f49b3537
RH
2376 gen_helper_rfi_r(cpu_env);
2377 } else {
2378 gen_helper_rfi(cpu_env);
2379 }
31234768 2380 /* Exit the TB to recognize new interrupts. */
f49b3537
RH
2381 if (ctx->base.singlestep_enabled) {
2382 gen_excp_1(EXCP_DEBUG);
2383 } else {
07ea28b4 2384 tcg_gen_exit_tb(NULL, 0);
f49b3537 2385 }
31234768 2386 ctx->base.is_jmp = DISAS_NORETURN;
f49b3537 2387
31234768 2388 return nullify_end(ctx);
e36f27ef
RH
2389#endif
2390}
2391
2392static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2393{
2394 return do_rfi(ctx, false);
2395}
2396
2397static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2398{
2399 return do_rfi(ctx, true);
f49b3537 2400}
6210db05 2401
96927adb
RH
2402static bool trans_halt(DisasContext *ctx, arg_halt *a)
2403{
2404 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2405#ifndef CONFIG_USER_ONLY
96927adb
RH
2406 nullify_over(ctx);
2407 gen_helper_halt(cpu_env);
2408 ctx->base.is_jmp = DISAS_NORETURN;
2409 return nullify_end(ctx);
2410#endif
2411}
2412
2413static bool trans_reset(DisasContext *ctx, arg_reset *a)
6210db05
HD
2414{
2415 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
96927adb 2416#ifndef CONFIG_USER_ONLY
6210db05 2417 nullify_over(ctx);
96927adb 2418 gen_helper_reset(cpu_env);
31234768
RH
2419 ctx->base.is_jmp = DISAS_NORETURN;
2420 return nullify_end(ctx);
96927adb 2421#endif
6210db05 2422}
e1b5a5ed 2423
deee69a1 2424static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
98a9cb79 2425{
deee69a1
RH
2426 if (a->m) {
2427 TCGv_reg dest = dest_gpr(ctx, a->b);
2428 TCGv_reg src1 = load_gpr(ctx, a->b);
2429 TCGv_reg src2 = load_gpr(ctx, a->x);
98a9cb79 2430
deee69a1
RH
2431 /* The only thing we need to do is the base register modification. */
2432 tcg_gen_add_reg(dest, src1, src2);
2433 save_gpr(ctx, a->b, dest);
2434 }
98a9cb79 2435 cond_free(&ctx->null_cond);
31234768 2436 return true;
98a9cb79
RH
2437}
2438
deee69a1 2439static bool trans_probe(DisasContext *ctx, arg_probe *a)
98a9cb79 2440{
86f8d05f 2441 TCGv_reg dest, ofs;
eed14219 2442 TCGv_i32 level, want;
86f8d05f 2443 TCGv_tl addr;
98a9cb79
RH
2444
2445 nullify_over(ctx);
2446
deee69a1
RH
2447 dest = dest_gpr(ctx, a->t);
2448 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
eed14219 2449
deee69a1
RH
2450 if (a->imm) {
2451 level = tcg_const_i32(a->ri);
98a9cb79 2452 } else {
eed14219 2453 level = tcg_temp_new_i32();
deee69a1 2454 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
eed14219 2455 tcg_gen_andi_i32(level, level, 3);
98a9cb79 2456 }
deee69a1 2457 want = tcg_const_i32(a->write ? PAGE_WRITE : PAGE_READ);
eed14219
RH
2458
2459 gen_helper_probe(dest, cpu_env, addr, level, want);
2460
2461 tcg_temp_free_i32(want);
2462 tcg_temp_free_i32(level);
2463
deee69a1 2464 save_gpr(ctx, a->t, dest);
31234768 2465 return nullify_end(ctx);
98a9cb79
RH
2466}
2467
deee69a1 2468static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
8d6ae7fb 2469{
deee69a1
RH
2470 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2471#ifndef CONFIG_USER_ONLY
8d6ae7fb
RH
2472 TCGv_tl addr;
2473 TCGv_reg ofs, reg;
2474
8d6ae7fb
RH
2475 nullify_over(ctx);
2476
deee69a1
RH
2477 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2478 reg = load_gpr(ctx, a->r);
2479 if (a->addr) {
8d6ae7fb
RH
2480 gen_helper_itlba(cpu_env, addr, reg);
2481 } else {
2482 gen_helper_itlbp(cpu_env, addr, reg);
2483 }
2484
2485 /* Exit TB for ITLB change if mmu is enabled. This *should* not be
2486 the case, since the OS TLB fill handler runs with mmu disabled. */
deee69a1 2487 if (!a->data && (ctx->tb_flags & PSW_C)) {
31234768
RH
2488 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2489 }
2490 return nullify_end(ctx);
deee69a1 2491#endif
8d6ae7fb 2492}
63300a00 2493
deee69a1 2494static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
63300a00 2495{
deee69a1
RH
2496 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2497#ifndef CONFIG_USER_ONLY
63300a00
RH
2498 TCGv_tl addr;
2499 TCGv_reg ofs;
2500
63300a00
RH
2501 nullify_over(ctx);
2502
deee69a1
RH
2503 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2504 if (a->m) {
2505 save_gpr(ctx, a->b, ofs);
63300a00 2506 }
deee69a1 2507 if (a->local) {
63300a00
RH
2508 gen_helper_ptlbe(cpu_env);
2509 } else {
2510 gen_helper_ptlb(cpu_env, addr);
2511 }
2512
2513 /* Exit TB for TLB change if mmu is enabled. */
deee69a1 2514 if (!a->data && (ctx->tb_flags & PSW_C)) {
31234768
RH
2515 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2516 }
2517 return nullify_end(ctx);
deee69a1 2518#endif
63300a00 2519}
2dfcca9f 2520
deee69a1 2521static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2dfcca9f 2522{
deee69a1
RH
2523 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2524#ifndef CONFIG_USER_ONLY
2dfcca9f
RH
2525 TCGv_tl vaddr;
2526 TCGv_reg ofs, paddr;
2527
2dfcca9f
RH
2528 nullify_over(ctx);
2529
deee69a1 2530 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2dfcca9f
RH
2531
2532 paddr = tcg_temp_new();
2533 gen_helper_lpa(paddr, cpu_env, vaddr);
2534
2535 /* Note that physical address result overrides base modification. */
deee69a1
RH
2536 if (a->m) {
2537 save_gpr(ctx, a->b, ofs);
2dfcca9f 2538 }
deee69a1 2539 save_gpr(ctx, a->t, paddr);
2dfcca9f
RH
2540 tcg_temp_free(paddr);
2541
31234768 2542 return nullify_end(ctx);
deee69a1 2543#endif
2dfcca9f 2544}
43a97b81 2545
deee69a1 2546static bool trans_lci(DisasContext *ctx, arg_lci *a)
43a97b81 2547{
43a97b81
RH
2548 TCGv_reg ci;
2549
2550 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2551
2552 /* The Coherence Index is an implementation-defined function of the
2553 physical address. Two addresses with the same CI have a coherent
2554 view of the cache. Our implementation is to return 0 for all,
2555 since the entire address space is coherent. */
2556 ci = tcg_const_reg(0);
deee69a1 2557 save_gpr(ctx, a->t, ci);
43a97b81
RH
2558 tcg_temp_free(ci);
2559
31234768
RH
2560 cond_free(&ctx->null_cond);
2561 return true;
43a97b81 2562}
98a9cb79 2563
0c982a28 2564static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2565{
0c982a28
RH
2566 return do_add_reg(ctx, a, false, false, false, false);
2567}
b2167459 2568
0c982a28
RH
2569static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2570{
2571 return do_add_reg(ctx, a, true, false, false, false);
2572}
b2167459 2573
0c982a28
RH
2574static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2575{
2576 return do_add_reg(ctx, a, false, true, false, false);
b2167459
RH
2577}
2578
0c982a28 2579static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2580{
0c982a28
RH
2581 return do_add_reg(ctx, a, false, false, false, true);
2582}
b2167459 2583
0c982a28
RH
2584static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2585{
2586 return do_add_reg(ctx, a, false, true, false, true);
2587}
b2167459 2588
0c982a28
RH
2589static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2590{
2591 return do_sub_reg(ctx, a, false, false, false);
b2167459
RH
2592}
2593
0c982a28 2594static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2595{
0c982a28
RH
2596 return do_sub_reg(ctx, a, true, false, false);
2597}
b2167459 2598
0c982a28
RH
2599static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2600{
2601 return do_sub_reg(ctx, a, false, false, true);
b2167459
RH
2602}
2603
0c982a28 2604static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2605{
0c982a28
RH
2606 return do_sub_reg(ctx, a, true, false, true);
2607}
2608
2609static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2610{
2611 return do_sub_reg(ctx, a, false, true, false);
2612}
2613
2614static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2615{
2616 return do_sub_reg(ctx, a, true, true, false);
2617}
2618
2619static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2620{
2621 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2622}
2623
2624static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2625{
2626 return do_log_reg(ctx, a, tcg_gen_and_reg);
2627}
2628
2629static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2630{
2631 if (a->cf == 0) {
2632 unsigned r2 = a->r2;
2633 unsigned r1 = a->r1;
2634 unsigned rt = a->t;
b2167459 2635
7aee8189
RH
2636 if (rt == 0) { /* NOP */
2637 cond_free(&ctx->null_cond);
2638 return true;
2639 }
2640 if (r2 == 0) { /* COPY */
2641 if (r1 == 0) {
2642 TCGv_reg dest = dest_gpr(ctx, rt);
2643 tcg_gen_movi_reg(dest, 0);
2644 save_gpr(ctx, rt, dest);
2645 } else {
2646 save_gpr(ctx, rt, cpu_gr[r1]);
2647 }
2648 cond_free(&ctx->null_cond);
2649 return true;
2650 }
2651#ifndef CONFIG_USER_ONLY
2652 /* These are QEMU extensions and are nops in the real architecture:
2653 *
2654 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2655 * or %r31,%r31,%r31 -- death loop; offline cpu
2656 * currently implemented as idle.
2657 */
2658 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2659 TCGv_i32 tmp;
2660
2661 /* No need to check for supervisor, as userland can only pause
2662 until the next timer interrupt. */
2663 nullify_over(ctx);
2664
2665 /* Advance the instruction queue. */
2666 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2667 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2668 nullify_set(ctx, 0);
2669
2670 /* Tell the qemu main loop to halt until this cpu has work. */
2671 tmp = tcg_const_i32(1);
2672 tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
2673 offsetof(CPUState, halted));
2674 tcg_temp_free_i32(tmp);
2675 gen_excp_1(EXCP_HALTED);
2676 ctx->base.is_jmp = DISAS_NORETURN;
2677
2678 return nullify_end(ctx);
2679 }
2680#endif
b2167459 2681 }
0c982a28
RH
2682 return do_log_reg(ctx, a, tcg_gen_or_reg);
2683}
7aee8189 2684
0c982a28
RH
2685static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2686{
2687 return do_log_reg(ctx, a, tcg_gen_xor_reg);
b2167459
RH
2688}
2689
0c982a28 2690static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2691{
eaa3783b 2692 TCGv_reg tcg_r1, tcg_r2;
b2167459 2693
0c982a28 2694 if (a->cf) {
b2167459
RH
2695 nullify_over(ctx);
2696 }
0c982a28
RH
2697 tcg_r1 = load_gpr(ctx, a->r1);
2698 tcg_r2 = load_gpr(ctx, a->r2);
2699 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
31234768 2700 return nullify_end(ctx);
b2167459
RH
2701}
2702
0c982a28 2703static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2704{
eaa3783b 2705 TCGv_reg tcg_r1, tcg_r2;
b2167459 2706
0c982a28 2707 if (a->cf) {
b2167459
RH
2708 nullify_over(ctx);
2709 }
0c982a28
RH
2710 tcg_r1 = load_gpr(ctx, a->r1);
2711 tcg_r2 = load_gpr(ctx, a->r2);
2712 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
31234768 2713 return nullify_end(ctx);
b2167459
RH
2714}
2715
0c982a28 2716static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
b2167459 2717{
eaa3783b 2718 TCGv_reg tcg_r1, tcg_r2, tmp;
b2167459 2719
0c982a28 2720 if (a->cf) {
b2167459
RH
2721 nullify_over(ctx);
2722 }
0c982a28
RH
2723 tcg_r1 = load_gpr(ctx, a->r1);
2724 tcg_r2 = load_gpr(ctx, a->r2);
b2167459 2725 tmp = get_temp(ctx);
eaa3783b 2726 tcg_gen_not_reg(tmp, tcg_r2);
0c982a28 2727 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
31234768 2728 return nullify_end(ctx);
b2167459
RH
2729}
2730
0c982a28
RH
2731static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2732{
2733 return do_uaddcm(ctx, a, false);
2734}
2735
2736static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2737{
2738 return do_uaddcm(ctx, a, true);
2739}
2740
2741static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
b2167459 2742{
eaa3783b 2743 TCGv_reg tmp;
b2167459
RH
2744
2745 nullify_over(ctx);
2746
2747 tmp = get_temp(ctx);
eaa3783b 2748 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
b2167459 2749 if (!is_i) {
eaa3783b 2750 tcg_gen_not_reg(tmp, tmp);
b2167459 2751 }
eaa3783b
RH
2752 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2753 tcg_gen_muli_reg(tmp, tmp, 6);
60e29463 2754 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
31234768 2755 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
31234768 2756 return nullify_end(ctx);
b2167459
RH
2757}
2758
0c982a28
RH
2759static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2760{
2761 return do_dcor(ctx, a, false);
2762}
2763
2764static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2765{
2766 return do_dcor(ctx, a, true);
2767}
2768
2769static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2770{
eaa3783b 2771 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
b2167459
RH
2772
2773 nullify_over(ctx);
2774
0c982a28
RH
2775 in1 = load_gpr(ctx, a->r1);
2776 in2 = load_gpr(ctx, a->r2);
b2167459
RH
2777
2778 add1 = tcg_temp_new();
2779 add2 = tcg_temp_new();
2780 addc = tcg_temp_new();
2781 dest = tcg_temp_new();
eaa3783b 2782 zero = tcg_const_reg(0);
b2167459
RH
2783
2784 /* Form R1 << 1 | PSW[CB]{8}. */
eaa3783b
RH
2785 tcg_gen_add_reg(add1, in1, in1);
2786 tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);
b2167459
RH
2787
2788 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2789 carry{8} requires that we subtract via + ~R2 + 1, as described in
2790 the manual. By extracting and masking V, we can produce the
2791 proper inputs to the addition without movcond. */
eaa3783b
RH
2792 tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
2793 tcg_gen_xor_reg(add2, in2, addc);
2794 tcg_gen_andi_reg(addc, addc, 1);
b2167459
RH
2795 /* ??? This is only correct for 32-bit. */
2796 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2797 tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
2798
2799 tcg_temp_free(addc);
2800 tcg_temp_free(zero);
2801
2802 /* Write back the result register. */
0c982a28 2803 save_gpr(ctx, a->t, dest);
b2167459
RH
2804
2805 /* Write back PSW[CB]. */
eaa3783b
RH
2806 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2807 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
b2167459
RH
2808
2809 /* Write back PSW[V] for the division step. */
eaa3783b
RH
2810 tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
2811 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
b2167459
RH
2812
2813 /* Install the new nullification. */
0c982a28 2814 if (a->cf) {
eaa3783b 2815 TCGv_reg sv = NULL;
b47a4a02 2816 if (cond_need_sv(a->cf >> 1)) {
b2167459
RH
2817 /* ??? The lshift is supposed to contribute to overflow. */
2818 sv = do_add_sv(ctx, dest, add1, add2);
2819 }
0c982a28 2820 ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
b2167459
RH
2821 }
2822
2823 tcg_temp_free(add1);
2824 tcg_temp_free(add2);
2825 tcg_temp_free(dest);
2826
31234768 2827 return nullify_end(ctx);
b2167459
RH
2828}
2829
0588e061 2830static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
b2167459 2831{
0588e061
RH
2832 return do_add_imm(ctx, a, false, false);
2833}
b2167459 2834
0588e061
RH
2835static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2836{
2837 return do_add_imm(ctx, a, true, false);
b2167459
RH
2838}
2839
0588e061 2840static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
b2167459 2841{
0588e061
RH
2842 return do_add_imm(ctx, a, false, true);
2843}
b2167459 2844
0588e061
RH
2845static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2846{
2847 return do_add_imm(ctx, a, true, true);
2848}
b2167459 2849
0588e061
RH
2850static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2851{
2852 return do_sub_imm(ctx, a, false);
2853}
b2167459 2854
0588e061
RH
2855static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2856{
2857 return do_sub_imm(ctx, a, true);
b2167459
RH
2858}
2859
0588e061 2860static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
b2167459 2861{
eaa3783b 2862 TCGv_reg tcg_im, tcg_r2;
b2167459 2863
0588e061 2864 if (a->cf) {
b2167459
RH
2865 nullify_over(ctx);
2866 }
2867
0588e061
RH
2868 tcg_im = load_const(ctx, a->i);
2869 tcg_r2 = load_gpr(ctx, a->r);
2870 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
b2167459 2871
31234768 2872 return nullify_end(ctx);
b2167459
RH
2873}
2874
1cd012a5 2875static bool trans_ld(DisasContext *ctx, arg_ldst *a)
96d6407f 2876{
1cd012a5
RH
2877 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
2878 a->disp, a->sp, a->m, a->size | MO_TE);
96d6407f
RH
2879}
2880
1cd012a5 2881static bool trans_st(DisasContext *ctx, arg_ldst *a)
96d6407f 2882{
1cd012a5
RH
2883 assert(a->x == 0 && a->scale == 0);
2884 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
96d6407f
RH
2885}
2886
1cd012a5 2887static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
96d6407f 2888{
1cd012a5 2889 TCGMemOp mop = MO_TEUL | MO_ALIGN_16 | a->size;
86f8d05f
RH
2890 TCGv_reg zero, dest, ofs;
2891 TCGv_tl addr;
96d6407f
RH
2892
2893 nullify_over(ctx);
2894
1cd012a5 2895 if (a->m) {
86f8d05f
RH
2896 /* Base register modification. Make sure if RT == RB,
2897 we see the result of the load. */
96d6407f
RH
2898 dest = get_temp(ctx);
2899 } else {
1cd012a5 2900 dest = dest_gpr(ctx, a->t);
96d6407f
RH
2901 }
2902
1cd012a5
RH
2903 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2904 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
eaa3783b 2905 zero = tcg_const_reg(0);
86f8d05f 2906 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
1cd012a5
RH
2907 if (a->m) {
2908 save_gpr(ctx, a->b, ofs);
96d6407f 2909 }
1cd012a5 2910 save_gpr(ctx, a->t, dest);
96d6407f 2911
31234768 2912 return nullify_end(ctx);
96d6407f
RH
2913}
2914
1cd012a5 2915static bool trans_stby(DisasContext *ctx, arg_stby *a)
96d6407f 2916{
86f8d05f
RH
2917 TCGv_reg ofs, val;
2918 TCGv_tl addr;
96d6407f
RH
2919
2920 nullify_over(ctx);
2921
1cd012a5 2922 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
86f8d05f 2923 ctx->mmu_idx == MMU_PHYS_IDX);
1cd012a5
RH
2924 val = load_gpr(ctx, a->r);
2925 if (a->a) {
f9f46db4
EC
2926 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2927 gen_helper_stby_e_parallel(cpu_env, addr, val);
2928 } else {
2929 gen_helper_stby_e(cpu_env, addr, val);
2930 }
96d6407f 2931 } else {
f9f46db4
EC
2932 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
2933 gen_helper_stby_b_parallel(cpu_env, addr, val);
2934 } else {
2935 gen_helper_stby_b(cpu_env, addr, val);
2936 }
96d6407f 2937 }
1cd012a5 2938 if (a->m) {
86f8d05f 2939 tcg_gen_andi_reg(ofs, ofs, ~3);
1cd012a5 2940 save_gpr(ctx, a->b, ofs);
96d6407f 2941 }
96d6407f 2942
31234768 2943 return nullify_end(ctx);
96d6407f
RH
2944}
2945
1cd012a5 2946static bool trans_lda(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2947{
2948 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2949
2950 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2951 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2952 trans_ld(ctx, a);
d0a851cc 2953 ctx->mmu_idx = hold_mmu_idx;
31234768 2954 return true;
d0a851cc
RH
2955}
2956
1cd012a5 2957static bool trans_sta(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2958{
2959 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2960
2961 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2962 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2963 trans_st(ctx, a);
d0a851cc 2964 ctx->mmu_idx = hold_mmu_idx;
31234768 2965 return true;
d0a851cc 2966}
95412a61 2967
0588e061 2968static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
b2167459 2969{
0588e061 2970 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459 2971
0588e061
RH
2972 tcg_gen_movi_reg(tcg_rt, a->i);
2973 save_gpr(ctx, a->t, tcg_rt);
b2167459 2974 cond_free(&ctx->null_cond);
31234768 2975 return true;
b2167459
RH
2976}
2977
0588e061 2978static bool trans_addil(DisasContext *ctx, arg_addil *a)
b2167459 2979{
0588e061 2980 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
eaa3783b 2981 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
b2167459 2982
0588e061 2983 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
b2167459
RH
2984 save_gpr(ctx, 1, tcg_r1);
2985 cond_free(&ctx->null_cond);
31234768 2986 return true;
b2167459
RH
2987}
2988
0588e061 2989static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
b2167459 2990{
0588e061 2991 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459
RH
2992
2993 /* Special case rb == 0, for the LDI pseudo-op.
2994 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
0588e061
RH
2995 if (a->b == 0) {
2996 tcg_gen_movi_reg(tcg_rt, a->i);
b2167459 2997 } else {
0588e061 2998 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
b2167459 2999 }
0588e061 3000 save_gpr(ctx, a->t, tcg_rt);
b2167459 3001 cond_free(&ctx->null_cond);
31234768 3002 return true;
b2167459
RH
3003}
3004
01afb7be
RH
/*
 * Common expander for COMPB/COMPIB: subtract in2 (GR[r]) from in1,
 * derive the branch condition c/f from the difference, and emit a
 * conditional branch to disp with nullification flag n.
 * The difference itself is discarded (no GPR is written).
 */
static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_reg(dest, in1, in2);

    /* Signed overflow of the subtraction is only computed for the
       conditions that actually consume it.  */
    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* c selects the condition, f its negation (c * 2 + f indexes both).  */
    cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
3024
/* COMPB: compare-and-branch, register operand form.  */
static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}
98cd9ca7 3030
01afb7be
RH
/* COMPIB: compare-and-branch, immediate operand form.  */
static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
}
3036
/*
 * Common expander for ADDB/ADDIB: GR[r] = in1 + GR[r], then branch on
 * condition c/f to disp with nullification flag n.  Unlike do_cmpb,
 * the sum is written back to GR[r].
 */
static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv, cb_msb;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new();
    sv = NULL;
    cb_msb = NULL;

    if (cond_need_cb(c)) {
        /* The condition consumes the carry: compute the add with a
           zero-extended add2 so the carry-out lands in cb_msb.  */
        cb_msb = get_temp(ctx);
        tcg_gen_movi_reg(cb_msb, 0);
        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
    } else {
        tcg_gen_add_reg(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        /* Signed-overflow flag for conditions that require it.  */
        sv = do_add_sv(ctx, dest, in1, in2);
    }

    cond = do_cond(c * 2 + f, dest, cb_msb, sv);
    save_gpr(ctx, r, dest);
    tcg_temp_free(dest);
    return do_cbranch(ctx, disp, n, &cond);
}
3064
01afb7be
RH
/* ADDB: add-and-branch, register operand form.  */
static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}
3070
/* ADDIB: add-and-branch, immediate operand form.  */
static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
}
3076
/*
 * BB (bit position in %sar): shift the selected bit up to the sign
 * position and branch on its value.  a->c selects branch-on-clear (GE,
 * sign bit 0) vs branch-on-set (LT, sign bit 1).
 */
static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, a->r);
    tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3092
01afb7be
RH
/* BB with an immediate bit position a->p; same scheme as trans_bb_sar.  */
static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, a->r);
    /* Move bit a->p (big-endian numbering) into the sign position.  */
    tcg_gen_shli_reg(tmp, tcg_r, a->p);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3108
/*
 * MOVB: copy GR[r1] into GR[r2], then branch on a shift/extract/deposit
 * style condition evaluated on the moved value.
 */
static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* GR 0 always reads as zero.  */
        tcg_gen_movi_reg(dest, 0);
    } else {
        tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
    }

    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3126
/* MOVIB: as MOVB but the source is an immediate.  */
static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_reg(dest, a->i);

    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3140
/*
 * SHRPW (variable shift): shift the 64-bit pair r1:r2 right by %sar and
 * keep the low word in GR[t].  Three strategies depending on operands:
 * r1 == 0 is a plain 32-bit shift, r1 == r2 is a 32-bit rotate, and the
 * general case concatenates both words and shifts the i64.
 */
static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
{
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    if (a->r1 == 0) {
        tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
        tcg_gen_shr_reg(dest, dest, cpu_sar);
    } else if (a->r1 == a->r2) {
        /* Same register in both halves: the shift is a rotate.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else {
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
        tcg_gen_extu_reg_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_reg(dest, t);

        tcg_temp_free_i64(t);
        tcg_temp_free_i64(s);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3180
/*
 * SHRPW (immediate shift count): as trans_shrpw_sar but the shift
 * amount is fixed at translate time, so the general case can use
 * extract + deposit instead of a 64-bit shift.
 */
static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
{
    unsigned sa = 31 - a->cpos;   /* convert big-endian bit pos to shift */
    TCGv_reg dest, t2;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == a->r2) {
        /* Both halves identical: a 32-bit rotate.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else if (a->r1 == 0) {
        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
    } else {
        /* Low bits from r2, high bits filled from r1.  */
        TCGv_reg t0 = tcg_temp_new();
        tcg_gen_extract_reg(t0, t2, sa, 32 - sa);
        tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa);
        tcg_temp_free(t0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3215
/*
 * EXTRW (variable position from %sar): extract a len-bit field from
 * GR[r] into GR[t], sign- or zero-extended according to a->se.
 */
static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
{
    unsigned len = 32 - a->clen;
    TCGv_reg dest, src, tmp;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
    if (a->se) {
        /* Signed extract: arithmetic shift, then sign-extend the field.  */
        tcg_gen_sar_reg(dest, src, tmp);
        tcg_gen_sextract_reg(dest, dest, 0, len);
    } else {
        tcg_gen_shr_reg(dest, src, tmp);
        tcg_gen_extract_reg(dest, dest, 0, len);
    }
    tcg_temp_free(tmp);
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3248
/*
 * EXTRW (immediate position): extract a fixed len-bit field starting at
 * bit cpos from GR[r] into GR[t], signed or unsigned per a->se.
 */
static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
{
    unsigned len = 32 - a->clen;
    unsigned cpos = 31 - a->pos;   /* big-endian bit pos -> LSB offset */
    TCGv_reg dest, src;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_reg(dest, src, cpos, len);
    } else {
        tcg_gen_extract_reg(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3275
/*
 * DEPWI (immediate value, immediate position): deposit the immediate
 * a->i into a len-bit field of GR[t].  With a->nz the rest of the
 * target is preserved; otherwise it is cleared.  Since everything is
 * known at translate time, the deposit is folded into and/or masks.
 */
static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
{
    unsigned len = 32 - a->clen;
    target_sreg mask0, mask1;
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not extend past bit 31.  */
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    mask0 = deposit64(0, a->cpos, len, a->i);    /* bits to set */
    mask1 = deposit64(-1, a->cpos, len, a->i);   /* bits to keep */

    if (a->nz) {
        TCGv_reg src = load_gpr(ctx, a->t);
        if (mask1 != -1) {
            tcg_gen_andi_reg(dest, src, mask1);
            src = dest;
        }
        tcg_gen_ori_reg(dest, src, mask0);
    } else {
        /* Zero background: result is just the deposited constant.  */
        tcg_gen_movi_reg(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3312
/*
 * DEPW (register value, immediate position): deposit a len-bit field of
 * GR[r] into GR[t] at bit cpos.  a->nz selects merging with the old
 * contents of GR[t] versus depositing over zero.
 */
static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;   /* background register, 0 => zeros */
    unsigned len = 32 - a->clen;
    TCGv_reg dest, val;

    if (a->c) {
        nullify_over(ctx);
    }
    /* Clamp the field so it does not extend past bit 31.  */
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
3342
30878590
RH
/*
 * Common expander for DEPW/DEPWI with a variable position taken from
 * %sar: build a field mask, shift mask and value into place, and merge
 * into the background (old GR[rt] when nz, zero otherwise).
 */
static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
                        unsigned nz, unsigned clen, TCGv_reg val)
{
    unsigned rs = nz ? rt : 0;        /* background register, 0 => zeros */
    unsigned len = 32 - clen;
    TCGv_reg mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    if (c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);

    /* msb + (msb - 1) is a mask of len one-bits.  */
    mask = tcg_const_reg(msb + (msb - 1));
    tcg_gen_and_reg(tmp, val, mask);
    if (rs) {
        /* Clear the field in the background, then OR in the new bits.  */
        tcg_gen_shl_reg(mask, mask, shift);
        tcg_gen_shl_reg(tmp, tmp, shift);
        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
        tcg_gen_or_reg(dest, dest, tmp);
    } else {
        tcg_gen_shl_reg(dest, tmp, shift);
    }
    tcg_temp_free(shift);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx);
}
3384
30878590
RH
/* DEPW, variable position: value comes from a register.  */
static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
{
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
}
3389
/* DEPWI, variable position: value is an immediate.  */
static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
{
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
}
0b1347d2 3394
/*
 * BE / BE,L: branch external — branch to GR[b] + disp in space SR[sp].
 * User-only builds have no spaces and reduce to an ordinary (possibly
 * privilege-adjusted) indirect branch; system builds also update the
 * front/back space queue registers.
 */
static bool trans_be(DisasContext *ctx, arg_be *a)
{
    TCGv_reg tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (a->b == 0) {
        return do_dbranch(ctx, a->disp, a->l, a->n);
    }
#else
    nullify_over(ctx);
#endif

    tmp = get_temp(ctx);
    tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
    /* Apply privilege adjustment to the computed target.  */
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, a->l, a->n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, a->sp);
    if (a->l) {
        /* BE,L: save the return offset in GR 31 and space in SR 0.  */
        copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (a->n && use_nullify_skip(ctx)) {
        /* Delay slot is nullified: jump straight past it.  */
        tcg_gen_mov_reg(cpu_iaoq_f, tmp);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        /* Normal case: advance the queue, back entry is the target.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        tcg_gen_mov_reg(cpu_iaoq_b, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, a->n);
    }
    tcg_temp_free_i64(new_spc);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
3449
/* B,L: PC-relative branch, optionally saving the return link in GR[l].  */
static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}
3454
/*
 * B,GATE: gateway branch, the mechanism by which code may raise its
 * privilege level.  On system builds the privilege promotion is taken
 * from the access-rights type of the gateway page.
 */
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    target_ureg dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = ctx->cs->env_ptr;
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            /* Encode the new (higher) privilege in the low two bits.  */
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        /* Code translation disabled: gateway promotes to most privileged.  */
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        /* Save the link, stamping it with the current privilege level.  */
        TCGv_reg tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_reg(tmp, tmp, -4);
        }
        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}
3509
/* BLR: branch to PC + 8 + GR[x] * 8, link in GR[l].  */
static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_reg tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level.  */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}
3523
/* BV: vectored branch to GR[b] + GR[x] * 8 (privilege-checked).  */
static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_reg dest;

    if (a->x == 0) {
        /* No index: branch directly to GR[b].  */
        dest = load_gpr(ctx, a->b);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}
3538
/*
 * BVE: branch vectored external — indirect branch through GR[b] that
 * may also switch spaces.  System builds compute the new space from
 * the target offset and update the space queue.
 */
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    /* Advance the queue: back entry becomes front.  */
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
3565
1ca74648
RH
/*
 * Float class 0
 */

/* FCPY (single): plain register copy, no FP status involvement.  */
static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

/* FCPY (double): plain register copy, no FP status involvement.  */
static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}
3589
1ca74648 3590static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3591{
1ca74648 3592 tcg_gen_andi_i32(dst, src, INT32_MAX);
ebe9383c
RH
3593}
3594
1ca74648 3595static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3596{
1ca74648 3597 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
ebe9383c
RH
3598}
3599
1ca74648 3600static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
ebe9383c 3601{
1ca74648 3602 tcg_gen_andi_i64(dst, src, INT64_MAX);
ebe9383c
RH
3603}
3604
1ca74648 3605static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3606{
1ca74648 3607 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
ebe9383c
RH
3608}
3609
1ca74648 3610static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3611{
1ca74648 3612 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
ebe9383c
RH
3613}
3614
1ca74648 3615static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3616{
1ca74648 3617 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
ebe9383c
RH
3618}
3619
1ca74648 3620static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3621{
1ca74648 3622 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
ebe9383c
RH
3623}
3624
1ca74648 3625static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3626{
1ca74648 3627 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
ebe9383c
RH
3628}
3629
1ca74648 3630static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3631{
1ca74648 3632 tcg_gen_xori_i32(dst, src, INT32_MIN);
ebe9383c
RH
3633}
3634
1ca74648 3635static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3636{
1ca74648 3637 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
ebe9383c
RH
3638}
3639
3640static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3641{
3642 tcg_gen_xori_i64(dst, src, INT64_MIN);
3643}
3644
1ca74648
RH
3645static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3646{
3647 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3648}
3649
3650static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c
RH
3651{
3652 tcg_gen_ori_i32(dst, src, INT32_MIN);
3653}
3654
1ca74648
RH
3655static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3656{
3657 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3658}
3659
ebe9383c
RH
3660static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3661{
3662 tcg_gen_ori_i64(dst, src, INT64_MIN);
3663}
3664
1ca74648
RH
3665static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3666{
3667 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3668}
3669
/*
 * Float class 1
 *
 * Conversions.  Each trans_* binds the appropriate helper to the
 * do_fop_* expander matching its operand widths (w = 32-bit word,
 * d = double, q maps to the "dw" doubleword helpers).  The "_t_"
 * variants use the truncating helpers, the "_u" names the unsigned
 * ones.
 */

/* Float <-> float precision conversion.  */
static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

/* Signed integer -> float.  */
static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

/* Float -> signed integer.  */
static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

/* Float -> signed integer, truncating.  */
static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

/* Unsigned integer -> float.  */
static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

/* Float -> unsigned integer.  */
static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

/* Float -> unsigned integer, truncating.  */
static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}
3803
/*
 * Float class 2
 */

/* FCMP (single): the helper compares and records the result in FP
   status; y and c select the comparison condition and queue slot.  */
static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_const_i32(a->y);
    tc = tcg_const_i32(a->c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i32(ta);
    tcg_temp_free_i32(tb);
    tcg_temp_free_i32(ty);
    tcg_temp_free_i32(tc);

    return nullify_end(ctx);
}
3828
1ca74648 3829static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
ebe9383c 3830{
ebe9383c
RH
3831 TCGv_i64 ta, tb;
3832 TCGv_i32 tc, ty;
3833
3834 nullify_over(ctx);
3835
1ca74648
RH
3836 ta = load_frd0(a->r1);
3837 tb = load_frd0(a->r2);
3838 ty = tcg_const_i32(a->y);
3839 tc = tcg_const_i32(a->c);
ebe9383c
RH
3840
3841 gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
3842
3843 tcg_temp_free_i64(ta);
3844 tcg_temp_free_i64(tb);
3845 tcg_temp_free_i32(ty);
3846 tcg_temp_free_i32(tc);
3847
31234768 3848 return nullify_end(ctx);
ebe9383c
RH
3849}
3850
1ca74648 3851static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
ebe9383c 3852{
eaa3783b 3853 TCGv_reg t;
ebe9383c
RH
3854
3855 nullify_over(ctx);
3856
1ca74648 3857 t = get_temp(ctx);
eaa3783b 3858 tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
ebe9383c 3859
1ca74648
RH
3860 if (a->y == 1) {
3861 int mask;
3862 bool inv = false;
3863
3864 switch (a->c) {
3865 case 0: /* simple */
3866 tcg_gen_andi_reg(t, t, 0x4000000);
3867 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3868 goto done;
3869 case 2: /* rej */
3870 inv = true;
3871 /* fallthru */
3872 case 1: /* acc */
3873 mask = 0x43ff800;
3874 break;
3875 case 6: /* rej8 */
3876 inv = true;
3877 /* fallthru */
3878 case 5: /* acc8 */
3879 mask = 0x43f8000;
3880 break;
3881 case 9: /* acc6 */
3882 mask = 0x43e0000;
3883 break;
3884 case 13: /* acc4 */
3885 mask = 0x4380000;
3886 break;
3887 case 17: /* acc2 */
3888 mask = 0x4200000;
3889 break;
3890 default:
3891 gen_illegal(ctx);
3892 return true;
3893 }
3894 if (inv) {
3895 TCGv_reg c = load_const(ctx, mask);
3896 tcg_gen_or_reg(t, t, c);
3897 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3898 } else {
3899 tcg_gen_andi_reg(t, t, mask);
3900 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3901 }
3902 } else {
3903 unsigned cbit = (a->y ^ 1) - 1;
3904
3905 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3906 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3907 tcg_temp_free(t);
3908 }
3909
3910 done:
31234768 3911 return nullify_end(ctx);
ebe9383c
RH
3912}
3913
1ca74648
RH
/*
 * Float class 3
 * (These take arg_fclass3; the "Float class 2" header belongs to the
 * fcmp/ftest group above.)
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}
3957
/* XMPYU: 32x32 -> 64 unsigned multiply of two single FP registers,
   result written to a double FP register.  Pure integer op, no helper.  */
static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);
    tcg_temp_free_i64(x);
    tcg_temp_free_i64(y);

    return nullify_end(ctx);
}
3973
ebe9383c
RH
/* Convert the fmpyadd single-precision register encodings to standard.
   Bit 4 of the 5-bit encoding selects the upper bank (+32); the low
   four bits index within the bank, which starts at register 16.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    return 16 + ((r & 16) << 1) + (r & 15);
}
3979
/*
 * FMPYADD/FMPYSUB (single): tm = rm1 * rm2 in parallel with
 * ta = ta +/- ra, after remapping all five register encodings.
 */
static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}
3996
b1e2af57
RH
static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

/* FMPYADD/FMPYSUB (double): no register remapping needed.  */
static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}
4027
/* FMPYFADD (single): fused multiply-add t = rm1 * rm2 + ra3, or the
   negated form when a->neg is set.  */
static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
    }

    tcg_temp_free_i32(y);
    tcg_temp_free_i32(z);
    save_frw_i32(a->t, x);
    tcg_temp_free_i32(x);
    return nullify_end(ctx);
}
4049
/* FMPYFADD (double): as trans_fmpyfadd_f with 64-bit operands.  */
static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
    }

    tcg_temp_free_i64(y);
    tcg_temp_free_i64(z);
    save_frd(a->t, x);
    tcg_temp_free_i64(x);
    return nullify_end(ctx);
}
4071
15da177b
SS
4072static bool trans_diag(DisasContext *ctx, arg_diag *a)
4073{
4074 qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
4075 cond_free(&ctx->null_cond);
4076 return true;
4077}
4078
/*
 * Translator hook: initialize the DisasContext for one TB.
 * Establishes privilege level, MMU index, and the front/back halves
 * of the HPPA instruction-address queue (IAOQ_F / IAOQ_B).
 */
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    /* User emulation runs at a fixed privilege; the privilege level is
       carried in the low bits of the queue addresses.  */
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
#else
    /* System emulation: privilege from TB flags; without PSW_D (data
       translation enabled) fall back to physical addressing.  */
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    /* A zero offset encodes an unknown back-of-queue (e.g. after an
       indirect branch); -1 marks it dynamic for the code below.  */
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    /* Reset the per-insn temporary pools (freed in translate_insn).  */
    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}
61766fe9 4116
51b061fb
RH
4117static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4118{
4119 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4120
3d68ee7b 4121 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
51b061fb
RH
4122 ctx->null_cond = cond_make_f();
4123 ctx->psw_n_nonzero = false;
494737b7 4124 if (ctx->tb_flags & PSW_N) {
51b061fb
RH
4125 ctx->null_cond.c = TCG_COND_ALWAYS;
4126 ctx->psw_n_nonzero = true;
129e9cc3 4127 }
51b061fb
RH
4128 ctx->null_lab = NULL;
4129}
129e9cc3 4130
51b061fb
RH
4131static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4132{
4133 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4134
51b061fb
RH
4135 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4136}
4137
4138static bool hppa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
4139 const CPUBreakpoint *bp)
4140{
4141 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4142
31234768 4143 gen_excp(ctx, EXCP_DEBUG);
c301f34e 4144 ctx->base.pc_next += 4;
51b061fb
RH
4145 return true;
4146}
4147
/*
 * Translator hook: translate exactly one insn, maintaining the two-entry
 * HPPA instruction-address queue (IAOQ) and the nullification state.
 */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    /* The first page is a gateway page under user emulation; handle the
       special syscall entry points there instead of decoding.  */
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute. */
        uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch. */
        if (ctx->iaoq_b == -1) {
            /* Back of queue is dynamic: materialize next = iaoq_b + 4.  */
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Insn is statically nullified: skip decode, clear the
               condition so the next insn executes.  */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated. */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Both targets are static and nullification is decided:
               chain directly to the next TB.  */
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
        return;
    }
    if (ctx->iaoq_f == -1) {
        /* Front of queue became dynamic: shift the hardware queue
           registers along and stop with an updated IAQ.  */
        tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
        nullify_save(ctx);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
    } else if (ctx->iaoq_b == -1) {
        /* Only the back is dynamic; keep its runtime value current.  */
        tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
    }
}
4237
/*
 * Translator hook: emit the TB epilogue according to how translation
 * stopped, flushing the IAQ and nullification state where still stale.
 */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        /* The TB already ended with an exception or goto_tb.  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        /* Queue registers do not yet reflect ctx; write them back.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            /* Force a return to the main loop rather than chaining.  */
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
61766fe9 4266
51b061fb
RH
/*
 * Translator hook: log the guest disassembly for this TB.
 * Under user emulation the gateway-page entry points have no real
 * code to disassemble, so they are described symbolically instead.
 */
static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        qemu_log("IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        qemu_log("IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    qemu_log("IN: %s\n", lookup_symbol(pc));
    log_target_disas(cs, pc, dcbase->tb->size);
}
4291
/* Callback table wiring the HPPA front end into the generic
   translator_loop (see gen_intermediate_code below).  */
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .breakpoint_check   = hppa_tr_breakpoint_check,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};
4301
4302void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
4303
4304{
4305 DisasContext ctx;
4306 translator_loop(&hppa_tr_ops, &ctx.base, cs, tb);
61766fe9
RH
4307}
4308
4309void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
4310 target_ulong *data)
4311{
4312 env->iaoq_f = data[0];
86f8d05f 4313 if (data[1] != (target_ureg)-1) {
61766fe9
RH
4314 env->iaoq_b = data[1];
4315 }
4316 /* Since we were executing the instruction at IAOQ_F, and took some
4317 sort of action that provoked the cpu_restore_state, we can infer
4318 that the instruction was not nullified. */
4319 env->psw_n = 0;
4320}
This page took 0.730709 seconds and 4 git commands to generate.