#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
-#if UINTPTR_MAX == UINT32_MAX
-# define tcg_gen_trunc_reg_ptr(p, r) \
- tcg_gen_trunc_i64_i32(TCGV_PTR_TO_NAT(p), r)
-#else
-# define tcg_gen_trunc_reg_ptr(p, r) \
- tcg_gen_mov_i64(TCGV_PTR_TO_NAT(p), r)
-#endif
+#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
-#if UINTPTR_MAX == UINT32_MAX
-# define tcg_gen_trunc_reg_ptr(p, r) \
- tcg_gen_mov_i32(TCGV_PTR_TO_NAT(p), r)
-#else
-# define tcg_gen_trunc_reg_ptr(p, r) \
- tcg_gen_extu_i32_i64(TCGV_PTR_TO_NAT(p), r)
-#endif
+#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */
typedef struct DisasCond {
target_ureg iaoq_n;
TCGv_reg iaoq_n_var;
- int ntemps;
- TCGv_reg temps[8];
+ int ntempr, ntempl;
+ TCGv_reg tempr[8];
+ TCGv_tl templ[4];
DisasCond null_cond;
TCGLabel *null_lab;
uint32_t insn;
+ uint32_t tb_flags;
int mmu_idx;
int privilege;
bool psw_n_nonzero;
/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
+static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
+static TCGv_i64 cpu_iasq_f;
+static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
};
/* SR[4-7] are not global registers so that we can index them. */
- static const char sr_names[4][4] = {
- "sr0", "sr1", "sr2", "sr3"
+ static const char sr_names[5][4] = {
+ "sr0", "sr1", "sr2", "sr3", "srH"
};
int i;
offsetof(CPUHPPAState, sr[i]),
sr_names[i]);
}
+ cpu_srH = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, sr[4]),
+ sr_names[4]);
for (i = 0; i < ARRAY_SIZE(vars); ++i) {
const GlobalVar *v = &vars[i];
*v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
}
+
+ cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, iasq_f),
+ "iasq_f");
+ cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUHPPAState, iasq_b),
+ "iasq_b");
}
static DisasCond cond_make_f(void)
static TCGv_reg get_temp(DisasContext *ctx)
{
- unsigned i = ctx->ntemps++;
- g_assert(i < ARRAY_SIZE(ctx->temps));
- return ctx->temps[i] = tcg_temp_new();
+ unsigned i = ctx->ntempr++;
+ g_assert(i < ARRAY_SIZE(ctx->tempr));
+ return ctx->tempr[i] = tcg_temp_new();
}
+#ifndef CONFIG_USER_ONLY
+static TCGv_tl get_temp_tl(DisasContext *ctx)
+{
+ unsigned i = ctx->ntempl++;
+ g_assert(i < ARRAY_SIZE(ctx->templ));
+ return ctx->templ[i] = tcg_temp_new_tl();
+}
+#endif
+
static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
TCGv_reg t = get_temp(ctx);
#else
if (reg < 4) {
tcg_gen_mov_i64(dest, cpu_sr[reg]);
+ } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
+ tcg_gen_mov_i64(dest, cpu_srH);
} else {
tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
}
return DISAS_NEXT;
}
+#ifndef CONFIG_USER_ONLY
+/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
+ from the top 2 bits of the base register. There are a few system
+ instructions that have a 3-bit space specifier, for which SR0 is
+ not special. To handle this, pass ~SP. */
+static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
+{
+ TCGv_ptr ptr;
+ TCGv_reg tmp;
+ TCGv_i64 spc;
+
+ if (sp != 0) {
+ if (sp < 0) {
+ sp = ~sp;
+ }
+ spc = get_temp_tl(ctx);
+ load_spr(ctx, spc, sp);
+ return spc;
+ }
+ if (ctx->tb_flags & TB_FLAG_SR_SAME) {
+ return cpu_srH;
+ }
+
+ ptr = tcg_temp_new_ptr();
+ tmp = tcg_temp_new();
+ spc = get_temp_tl(ctx);
+
+ tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
+ tcg_gen_andi_reg(tmp, tmp, 030);
+ tcg_gen_trunc_reg_ptr(ptr, tmp);
+ tcg_temp_free(tmp);
+
+ tcg_gen_add_ptr(ptr, ptr, cpu_env);
+ tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
+ tcg_temp_free_ptr(ptr);
+
+ return spc;
+}
+#endif
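/* Illustration (sketch only, not part of the patch): for sp == 0, the
 * shri/andi pair above computes the byte offset of one of SR4..SR7,
 * selected by the top two bits of the base address.  A plain-C model,
 * assuming <stdint.h> and TARGET_REGISTER_BITS == 32:
 */
static inline uint64_t space_select_model(const uint64_t sr[8], uint32_t base)
{
    /* (base >> 27) & 030 is the byte offset of this entry within sr[].  */
    return sr[4 + (base >> 30)];    /* top 2 bits pick SR4..SR7 */
}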
+
+static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
+ unsigned rb, unsigned rx, int scale, target_sreg disp,
+ unsigned sp, int modify, bool is_phys)
+{
+ TCGv_reg base = load_gpr(ctx, rb);
+ TCGv_reg ofs;
+
+ /* Note that RX is mutually exclusive with DISP. */
+ if (rx) {
+ ofs = get_temp(ctx);
+ tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
+ tcg_gen_add_reg(ofs, ofs, base);
+ } else if (disp || modify) {
+ ofs = get_temp(ctx);
+ tcg_gen_addi_reg(ofs, base, disp);
+ } else {
+ ofs = base;
+ }
+
+ *pofs = ofs;
+#ifdef CONFIG_USER_ONLY
+ *pgva = (modify <= 0 ? ofs : base);
+#else
+ TCGv_tl addr = get_temp_tl(ctx);
+ tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
+ if (ctx->tb_flags & PSW_W) {
+ tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
+ }
+ if (!is_phys) {
+ tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
+ }
+ *pgva = addr;
+#endif
+}
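/* Illustration (sketch only, not part of the patch): the system-mode path
 * of form_gva as plain C, assuming <stdint.h>/<stdbool.h> and that space
 * register values are stored pre-shifted into the high 32 bits, as
 * trans_ldsid's shri-by-32 below also assumes:
 */
static inline uint64_t form_gva_model(uint64_t space, uint64_t ofs,
                                      bool psw_w, bool is_phys)
{
    if (psw_w) {
        ofs &= 0x3fffffffffffffffull;   /* W-mode drops the top two bits */
    }
    return is_phys ? ofs : ofs | space; /* fold in the space register */
}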
+
/* Emit a memory load. The modify parameter should be
* < 0 for pre-modify,
* > 0 for post-modify,
* = 0 for no base register update.
*/
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
unsigned rx, int scale, target_sreg disp,
- int modify, TCGMemOp mop)
+ unsigned sp, int modify, TCGMemOp mop)
{
- TCGv_reg addr, base;
+ TCGv_reg ofs;
+ TCGv_tl addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- addr = tcg_temp_new();
- base = load_gpr(ctx, rb);
-
- /* Note that RX is mutually exclusive with DISP. */
- if (rx) {
- tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
- tcg_gen_add_reg(addr, addr, base);
- } else {
- tcg_gen_addi_reg(addr, base, disp);
- }
-
- if (modify == 0) {
- tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
- } else {
- tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
- ctx->mmu_idx, mop);
- save_gpr(ctx, rb, addr);
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
}
- tcg_temp_free(addr);
}
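/* Illustration (sketch only, not part of the patch): the modify protocol
 * shared by these load/store helpers, as plain C assuming <stdint.h>.
 * modify < 0 is pre-modify (access the updated address), modify > 0 is
 * post-modify (access the old base); either way the updated offset is
 * written back to the base register.
 */
static inline uint32_t modify_model(uint32_t *rb, int32_t disp, int modify)
{
    uint32_t ofs = *rb + disp;
    uint32_t ea = (modify <= 0 ? ofs : *rb);  /* effective address used */
    if (modify) {
        *rb = ofs;                            /* base register write-back */
    }
    return ea;
}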
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
unsigned rx, int scale, target_sreg disp,
- int modify, TCGMemOp mop)
+ unsigned sp, int modify, TCGMemOp mop)
{
- TCGv_reg addr, base;
+ TCGv_reg ofs;
+ TCGv_tl addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- addr = tcg_temp_new();
- base = load_gpr(ctx, rb);
-
- /* Note that RX is mutually exclusive with DISP. */
- if (rx) {
- tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
- tcg_gen_add_reg(addr, addr, base);
- } else {
- tcg_gen_addi_reg(addr, base, disp);
- }
-
- if (modify == 0) {
- tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
- } else {
- tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
- ctx->mmu_idx, mop);
- save_gpr(ctx, rb, addr);
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
}
- tcg_temp_free(addr);
}
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
unsigned rx, int scale, target_sreg disp,
- int modify, TCGMemOp mop)
+ unsigned sp, int modify, TCGMemOp mop)
{
- TCGv_reg addr, base;
+ TCGv_reg ofs;
+ TCGv_tl addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- addr = tcg_temp_new();
- base = load_gpr(ctx, rb);
-
- /* Note that RX is mutually exclusive with DISP. */
- if (rx) {
- tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
- tcg_gen_add_reg(addr, addr, base);
- } else {
- tcg_gen_addi_reg(addr, base, disp);
- }
-
- tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), ctx->mmu_idx, mop);
-
- if (modify != 0) {
- save_gpr(ctx, rb, addr);
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
}
- tcg_temp_free(addr);
}
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
unsigned rx, int scale, target_sreg disp,
- int modify, TCGMemOp mop)
+ unsigned sp, int modify, TCGMemOp mop)
{
- TCGv_reg addr, base;
+ TCGv_reg ofs;
+ TCGv_tl addr;
/* Caller uses nullify_over/nullify_end. */
assert(ctx->null_cond.c == TCG_COND_NEVER);
- addr = tcg_temp_new();
- base = load_gpr(ctx, rb);
-
- /* Note that RX is mutually exclusive with DISP. */
- if (rx) {
- tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
- tcg_gen_add_reg(addr, addr, base);
- } else {
- tcg_gen_addi_reg(addr, base, disp);
- }
-
- tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), ctx->mmu_idx, mop);
-
- if (modify != 0) {
- save_gpr(ctx, rb, addr);
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
+ if (modify) {
+ save_gpr(ctx, rb, ofs);
}
- tcg_temp_free(addr);
}
#if TARGET_REGISTER_BITS == 64
static DisasJumpType do_load(DisasContext *ctx, unsigned rt, unsigned rb,
unsigned rx, int scale, target_sreg disp,
- int modify, TCGMemOp mop)
+ unsigned sp, int modify, TCGMemOp mop)
{
TCGv_reg dest;
/* Make sure if RT == RB, we see the result of the load. */
dest = get_temp(ctx);
}
- do_load_reg(ctx, dest, rb, rx, scale, disp, modify, mop);
+ do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
save_gpr(ctx, rt, dest);
return nullify_end(ctx, DISAS_NEXT);
static DisasJumpType do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
unsigned rx, int scale, target_sreg disp,
- int modify)
+ unsigned sp, int modify)
{
TCGv_i32 tmp;
nullify_over(ctx);
tmp = tcg_temp_new_i32();
- do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
+ do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
save_frw_i32(rt, tmp);
tcg_temp_free_i32(tmp);
static DisasJumpType do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
unsigned rx, int scale, target_sreg disp,
- int modify)
+ unsigned sp, int modify)
{
TCGv_i64 tmp;
nullify_over(ctx);
tmp = tcg_temp_new_i64();
- do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
+ do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
save_frd(rt, tmp);
tcg_temp_free_i64(tmp);
}
static DisasJumpType do_store(DisasContext *ctx, unsigned rt, unsigned rb,
- target_sreg disp, int modify, TCGMemOp mop)
+ target_sreg disp, unsigned sp,
+ int modify, TCGMemOp mop)
{
nullify_over(ctx);
- do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
+ do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
return nullify_end(ctx, DISAS_NEXT);
}
static DisasJumpType do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
unsigned rx, int scale, target_sreg disp,
- int modify)
+ unsigned sp, int modify)
{
TCGv_i32 tmp;
nullify_over(ctx);
tmp = load_frw_i32(rt);
- do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
+ do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
tcg_temp_free_i32(tmp);
return nullify_end(ctx, DISAS_NEXT);
static DisasJumpType do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
unsigned rx, int scale, target_sreg disp,
- int modify)
+ unsigned sp, int modify)
{
TCGv_i64 tmp;
nullify_over(ctx);
tmp = load_frd(rt);
- do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
+ do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
tcg_temp_free_i64(tmp);
return nullify_end(ctx, DISAS_NEXT);
ctx->null_lab = NULL;
}
nullify_set(ctx, n);
+ if (ctx->iaoq_n == -1) {
+ /* The temporary iaoq_n_var died at the branch above.
+ Regenerate it here instead of saving it. */
+ tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
+ }
gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
}
}
next = get_temp(ctx);
tcg_gen_mov_reg(next, dest);
- ctx->iaoq_n = -1;
- ctx->iaoq_n_var = next;
if (is_n) {
+ if (use_nullify_skip(ctx)) {
+ tcg_gen_mov_reg(cpu_iaoq_f, next);
+ tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
+ nullify_set(ctx, 0);
+ return DISAS_IAQ_N_UPDATED;
+ }
ctx->null_cond.c = TCG_COND_ALWAYS;
}
+ ctx->iaoq_n = -1;
+ ctx->iaoq_n_var = next;
} else if (is_n && use_nullify_skip(ctx)) {
/* The (conditional) branch, B, nullifies the next insn, N,
and we're allowed to skip execution of N (no single-step or
return DISAS_NEXT;
}
+/* Implement
+ * if (IAOQ_Front{30..31} < GR[b]{30..31})
+ * IAOQ_Next{30..31} ← GR[b]{30..31};
+ * else
+ * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
+ * which keeps the privilege level from being increased.
+ */
+static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
+{
+ TCGv_reg dest;
+ switch (ctx->privilege) {
+ case 0:
+ /* Privilege 0 is maximum and is allowed to decrease. */
+ return offset;
+ case 3:
+ /* Privilege 3 is minimum and is never allowed to increase. */
+ dest = get_temp(ctx);
+ tcg_gen_ori_reg(dest, offset, 3);
+ break;
+ default:
+ dest = get_temp(ctx);
+ tcg_gen_andi_reg(dest, offset, -4);
+ tcg_gen_ori_reg(dest, dest, ctx->privilege);
+ tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
+ break;
+ }
+ return dest;
+}
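/* Illustration (sketch only, not part of the patch): the movcond above is
 * an unsigned maximum, as plain C assuming <stdint.h>.  Since a larger
 * 2-bit field means *less* privilege, taking the maximum guarantees the
 * branch target never gains privilege:
 */
static inline uint32_t ibranch_priv_model(uint32_t offset, uint32_t priv)
{
    uint32_t nominal = (offset & -4u) | priv;  /* current privilege level */
    return nominal > offset ? nominal : offset;
}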
+
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
Therefore normal read or write is supposed to fail, but specific
goto do_sigill;
}
- switch (ctx->iaoq_f) {
+ switch (ctx->iaoq_f & -4) {
case 0x00: /* Null pointer call */
gen_excp_1(EXCP_IMP);
return DISAS_NORETURN;
case 0xe0: /* SET_THREAD_POINTER */
tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
- tcg_gen_mov_reg(cpu_iaoq_f, cpu_gr[31]);
+ tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
return DISAS_IAQ_N_UPDATED;
unsigned rt = extract32(insn, 0, 5);
unsigned ctl = extract32(insn, 21, 5);
TCGv_reg tmp;
+ DisasJumpType ret;
switch (ctl) {
case CR_SAR:
/* FIXME: Respect PSW_S bit. */
nullify_over(ctx);
tmp = dest_gpr(ctx, rt);
- tcg_gen_movi_reg(tmp, 0); /* FIXME */
+ if (ctx->base.tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ gen_helper_read_interval_timer(tmp);
+ gen_io_end();
+ ret = DISAS_IAQ_N_STALE;
+ } else {
+ gen_helper_read_interval_timer(tmp);
+ ret = DISAS_NEXT;
+ }
save_gpr(ctx, rt, tmp);
- break;
+ return nullify_end(ctx, ret);
case 26:
case 27:
break;
if (rs >= 4) {
tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
+ ctx->tb_flags &= ~TB_FLAG_SR_SAME;
} else {
tcg_gen_mov_i64(cpu_sr[rs], t64);
}
/* All other control registers are privileged or read-only. */
CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
+ DisasJumpType ret = DISAS_NEXT;
+
nullify_over(ctx);
switch (ctl) {
case CR_IT:
- /* ??? modify interval timer offset */
+ gen_helper_write_interval_timer(cpu_env, reg);
+ break;
+ case CR_EIRR:
+ gen_helper_write_eirr(cpu_env, reg);
+ break;
+ case CR_EIEM:
+ gen_helper_write_eiem(cpu_env, reg);
+ ret = DISAS_IAQ_N_STALE_EXIT;
break;
case CR_IIASQ:
tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
break;
}
- return nullify_end(ctx, DISAS_NEXT);
+ return nullify_end(ctx, ret);
+#endif
}
static DisasJumpType trans_mtsarcm(DisasContext *ctx, uint32_t insn,
unsigned rt = extract32(insn, 0, 5);
TCGv_reg dest = dest_gpr(ctx, rt);
- /* Since we don't implement space registers, this returns zero. */
+#ifdef CONFIG_USER_ONLY
+ /* We don't implement space registers in user mode. */
tcg_gen_movi_reg(dest, 0);
+#else
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned sp = extract32(insn, 14, 2);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(t0, space_select(ctx, sp, load_gpr(ctx, rb)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_trunc_i64_reg(dest, t0);
+
+ tcg_temp_free_i64(t0);
+#endif
save_gpr(ctx, rt, dest);
cond_free(&ctx->null_cond);
/* Exit the TB to recognize new interrupts. */
return nullify_end(ctx, DISAS_NORETURN);
}
+
+static DisasJumpType gen_hlt(DisasContext *ctx, int reset)
+{
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+ if (reset) {
+ gen_helper_reset(cpu_env);
+ } else {
+ gen_helper_halt(cpu_env);
+ }
+ return nullify_end(ctx, DISAS_NORETURN);
+}
#endif /* !CONFIG_USER_ONLY */
static const DisasInsn table_system[] = {
{ 0x000014a0u, 0xffffffe0u, trans_mfia },
{ 0x000004a0u, 0xffff1fe0u, trans_mfsp },
{ 0x000008a0u, 0xfc1fbfe0u, trans_mfctl },
- { 0x00000400u, 0xffffffffu, trans_sync },
+ { 0x00000400u, 0xffffffffu, trans_sync }, /* sync */
+ { 0x00100400u, 0xffffffffu, trans_sync }, /* syncdma */
{ 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
#ifndef CONFIG_USER_ONLY
{ 0x00000e60u, 0xfc00ffe0u, trans_rsm },
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
+ unsigned sp = extract32(insn, 14, 2);
+ unsigned rr = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
unsigned is_write = extract32(insn, 6, 1);
- TCGv_reg dest;
+ unsigned is_imm = extract32(insn, 13, 1);
+ TCGv_reg dest, ofs;
+ TCGv_i32 level, want;
+ TCGv_tl addr;
nullify_over(ctx);
- /* ??? Do something with priv level operand. */
dest = dest_gpr(ctx, rt);
- if (is_write) {
- gen_helper_probe_w(dest, load_gpr(ctx, rb));
+ form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
+
+ if (is_imm) {
+ level = tcg_const_i32(extract32(insn, 16, 2));
} else {
- gen_helper_probe_r(dest, load_gpr(ctx, rb));
+ level = tcg_temp_new_i32();
+ tcg_gen_trunc_reg_i32(level, load_gpr(ctx, rr));
+ tcg_gen_andi_i32(level, level, 3);
}
+ want = tcg_const_i32(is_write ? PAGE_WRITE : PAGE_READ);
+
+ gen_helper_probe(dest, cpu_env, addr, level, want);
+
+ tcg_temp_free_i32(want);
+ tcg_temp_free_i32(level);
+
save_gpr(ctx, rt, dest);
return nullify_end(ctx, DISAS_NEXT);
}
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType trans_ixtlbx(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned sp;
+ unsigned rr = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned is_data = insn & 0x1000;
+ unsigned is_addr = insn & 0x40;
+ TCGv_tl addr;
+ TCGv_reg ofs, reg;
+
+ if (is_data) {
+ sp = extract32(insn, 14, 2);
+ } else {
+ sp = ~assemble_sr3(insn);
+ }
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
+ reg = load_gpr(ctx, rr);
+ if (is_addr) {
+ gen_helper_itlba(cpu_env, addr, reg);
+ } else {
+ gen_helper_itlbp(cpu_env, addr, reg);
+ }
+
+ /* Exit TB for ITLB change if mmu is enabled. This *should* not be
+ the case, since the OS TLB fill handler runs with mmu disabled. */
+ return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
+ ? DISAS_IAQ_N_STALE : DISAS_NEXT);
+}
+
+static DisasJumpType trans_pxtlbx(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned m = extract32(insn, 5, 1);
+ unsigned sp;
+ unsigned rx = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned is_data = insn & 0x1000;
+ unsigned is_local = insn & 0x40;
+ TCGv_tl addr;
+ TCGv_reg ofs;
+
+ if (is_data) {
+ sp = extract32(insn, 14, 2);
+ } else {
+ sp = ~assemble_sr3(insn);
+ }
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ form_gva(ctx, &addr, &ofs, rb, rx, 0, 0, sp, m, false);
+ if (m) {
+ save_gpr(ctx, rb, ofs);
+ }
+ if (is_local) {
+ gen_helper_ptlbe(cpu_env);
+ } else {
+ gen_helper_ptlb(cpu_env, addr);
+ }
+
+ /* Exit TB for TLB change if mmu is enabled. */
+ return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
+ ? DISAS_IAQ_N_STALE : DISAS_NEXT);
+}
+
+static DisasJumpType trans_lpa(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned m = extract32(insn, 5, 1);
+ unsigned sp = extract32(insn, 14, 2);
+ unsigned rx = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ TCGv_tl vaddr;
+ TCGv_reg ofs, paddr;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ form_gva(ctx, &vaddr, &ofs, rb, rx, 0, 0, sp, m, false);
+
+ paddr = tcg_temp_new();
+ gen_helper_lpa(paddr, cpu_env, vaddr);
+
+ /* Note that physical address result overrides base modification. */
+ if (m) {
+ save_gpr(ctx, rb, ofs);
+ }
+ save_gpr(ctx, rt, paddr);
+ tcg_temp_free(paddr);
+
+ return nullify_end(ctx, DISAS_NEXT);
+}
+
+static DisasJumpType trans_lci(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv_reg ci;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+
+ /* The Coherence Index is an implementation-defined function of the
+ physical address. Two addresses with the same CI have a coherent
+ view of the cache. Our implementation is to return 0 for all,
+ since the entire address space is coherent. */
+ ci = tcg_const_reg(0);
+ save_gpr(ctx, rt, ci);
+ tcg_temp_free(ci);
+
+ return DISAS_NEXT;
+}
+#endif /* !CONFIG_USER_ONLY */
+
static const DisasInsn table_mem_mgmt[] = {
{ 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
{ 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
{ 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
{ 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
{ 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
+#ifndef CONFIG_USER_ONLY
+ { 0x04000000u, 0xfc001fffu, trans_ixtlbx }, /* iitlbp */
+ { 0x04000040u, 0xfc001fffu, trans_ixtlbx }, /* iitlba */
+ { 0x04001000u, 0xfc001fffu, trans_ixtlbx }, /* idtlbp */
+ { 0x04001040u, 0xfc001fffu, trans_ixtlbx }, /* idtlba */
+ { 0x04000200u, 0xfc001fdfu, trans_pxtlbx }, /* pitlb */
+ { 0x04000240u, 0xfc001fdfu, trans_pxtlbx }, /* pitlbe */
+ { 0x04001200u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlb */
+ { 0x04001240u, 0xfc001fdfu, trans_pxtlbx }, /* pdtlbe */
+ { 0x04001340u, 0xfc003fc0u, trans_lpa },
+ { 0x04001300u, 0xfc003fe0u, trans_lci },
+#endif
};
static DisasJumpType trans_add(DisasContext *ctx, uint32_t insn,
return nullify_end(ctx, DISAS_NEXT);
}
+#ifndef CONFIG_USER_ONLY
+/* These are QEMU extensions and are nops in the real architecture:
+ *
+ * or %r10,%r10,%r10 -- idle loop; wait for interrupt
+ * or %r31,%r31,%r31 -- death loop; offline cpu
+ * currently implemented as idle.
+ */
+static DisasJumpType trans_pause(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ TCGv_i32 tmp;
+
+ /* No need to check for supervisor, as userland can only pause
+ until the next timer interrupt. */
+ nullify_over(ctx);
+
+ /* Advance the instruction queue. */
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
+ nullify_set(ctx, 0);
+
+ /* Tell the qemu main loop to halt until this cpu has work. */
+ tmp = tcg_const_i32(1);
+ tcg_gen_st_i32(tmp, cpu_env, -offsetof(HPPACPU, env) +
+ offsetof(CPUState, halted));
+ tcg_temp_free_i32(tmp);
+ gen_excp_1(EXCP_HALTED);
+
+ return nullify_end(ctx, DISAS_NORETURN);
+}
+#endif
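/* Illustration (sketch only, not part of the patch): the negative offsetof
 * in the store above works because cpu_env points at the env member of
 * HPPACPU, whose first member is its CPUState.  Plain-C model of the same
 * address arithmetic, assuming QEMU's struct layout and <stddef.h>:
 */
static inline uint32_t *halted_addr_model(CPUHPPAState *env)
{
    HPPACPU *cpu = (HPPACPU *)((char *)env - offsetof(HPPACPU, env));
    return &cpu->parent_obj.halted;   /* CPUState is the first member */
}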
+
static const DisasInsn table_arith_log[] = {
{ 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
{ 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
+#ifndef CONFIG_USER_ONLY
+ { 0x094a024au, 0xffffffffu, trans_pause }, /* or r10,r10,r10 */
+ { 0x0bff025fu, 0xffffffffu, trans_pause }, /* or r31,r31,r31 */
+#endif
{ 0x08000000u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_andc_reg },
{ 0x08000200u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_and_reg },
{ 0x08000240u, 0xfc000fe0u, trans_log, .f.ttt = tcg_gen_or_reg },
unsigned m = extract32(insn, 5, 1);
unsigned sz = extract32(insn, 6, 2);
unsigned a = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
int disp = low_sextract(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
int modify = (m ? (a ? -1 : 1) : 0);
TCGMemOp mop = MO_TE | sz;
- return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
+ return do_load(ctx, rt, rb, 0, 0, disp, sp, modify, mop);
}
static DisasJumpType trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
unsigned m = extract32(insn, 5, 1);
unsigned sz = extract32(insn, 6, 2);
unsigned u = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
TCGMemOp mop = MO_TE | sz;
- return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
+ return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, sp, m, mop);
}
static DisasJumpType trans_st_idx_i(DisasContext *ctx, uint32_t insn,
unsigned m = extract32(insn, 5, 1);
unsigned sz = extract32(insn, 6, 2);
unsigned a = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rr = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
int modify = (m ? (a ? -1 : 1) : 0);
TCGMemOp mop = MO_TE | sz;
- return do_store(ctx, rr, rb, disp, modify, mop);
+ return do_store(ctx, rr, rb, disp, sp, modify, mop);
}
static DisasJumpType trans_ldcw(DisasContext *ctx, uint32_t insn,
unsigned m = extract32(insn, 5, 1);
unsigned i = extract32(insn, 12, 1);
unsigned au = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
- TCGv_reg zero, addr, base, dest;
+ TCGv_reg zero, dest, ofs;
+ TCGv_tl addr;
int modify, disp = 0, scale = 0;
nullify_over(ctx);
- /* ??? Share more code with do_load and do_load_{32,64}. */
-
if (i) {
modify = (m ? (au ? -1 : 1) : 0);
disp = low_sextract(rx, 0, 5);
}
}
if (modify) {
- /* Base register modification. Make sure if RT == RB, we see
- the result of the load. */
+ /* Base register modification. Make sure if RT == RB,
+ we see the result of the load. */
dest = get_temp(ctx);
} else {
dest = dest_gpr(ctx, rt);
}
- addr = tcg_temp_new();
- base = load_gpr(ctx, rb);
- if (rx) {
- tcg_gen_shli_reg(addr, cpu_gr[rx], scale);
- tcg_gen_add_reg(addr, addr, base);
- } else {
- tcg_gen_addi_reg(addr, base, disp);
- }
-
+ form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
+ ctx->mmu_idx == MMU_PHYS_IDX);
zero = tcg_const_reg(0);
- tcg_gen_atomic_xchg_reg(dest, (modify <= 0 ? addr : base),
- zero, ctx->mmu_idx, mop);
+ tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
if (modify) {
- save_gpr(ctx, rb, addr);
+ save_gpr(ctx, rb, ofs);
}
save_gpr(ctx, rt, dest);
target_sreg disp = low_sextract(insn, 0, 5);
unsigned m = extract32(insn, 5, 1);
unsigned a = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rt = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
- TCGv_reg addr, val;
+ TCGv_reg ofs, val;
+ TCGv_tl addr;
nullify_over(ctx);
- addr = tcg_temp_new();
- if (m || disp == 0) {
- tcg_gen_mov_reg(addr, load_gpr(ctx, rb));
- } else {
- tcg_gen_addi_reg(addr, load_gpr(ctx, rb), disp);
- }
+ form_gva(ctx, &addr, &ofs, rb, 0, 0, disp, sp, m,
+ ctx->mmu_idx == MMU_PHYS_IDX);
val = load_gpr(ctx, rt);
-
if (a) {
if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
gen_helper_stby_e_parallel(cpu_env, addr, val);
}
if (m) {
- tcg_gen_addi_reg(addr, addr, disp);
- tcg_gen_andi_reg(addr, addr, ~3);
- save_gpr(ctx, rb, addr);
+ tcg_gen_andi_reg(ofs, ofs, ~3);
+ save_gpr(ctx, rb, ofs);
}
- tcg_temp_free(addr);
return nullify_end(ctx, DISAS_NEXT);
}
+#ifndef CONFIG_USER_ONLY
+static DisasJumpType trans_ldwa_idx_i(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ int hold_mmu_idx = ctx->mmu_idx;
+ DisasJumpType ret;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+
+ /* ??? needs fixing for hppa64 -- ldda does not follow the same
+ format wrt the sub-opcode in bits 6:9. */
+ ctx->mmu_idx = MMU_PHYS_IDX;
+ ret = trans_ld_idx_i(ctx, insn, di);
+ ctx->mmu_idx = hold_mmu_idx;
+ return ret;
+}
+
+static DisasJumpType trans_ldwa_idx_x(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ int hold_mmu_idx = ctx->mmu_idx;
+ DisasJumpType ret;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+
+ /* ??? needs fixing for hppa64 -- ldda does not follow the same
+ format wrt the sub-opcode in bits 6:9. */
+ ctx->mmu_idx = MMU_PHYS_IDX;
+ ret = trans_ld_idx_x(ctx, insn, di);
+ ctx->mmu_idx = hold_mmu_idx;
+ return ret;
+}
+
+static DisasJumpType trans_stwa_idx_i(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ int hold_mmu_idx = ctx->mmu_idx;
+ DisasJumpType ret;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+
+ /* ??? needs fixing for hppa64 -- ldda does not follow the same
+ format wrt the sub-opcode in bits 6:9. */
+ ctx->mmu_idx = MMU_PHYS_IDX;
+ ret = trans_st_idx_i(ctx, insn, di);
+ ctx->mmu_idx = hold_mmu_idx;
+ return ret;
+}
+#endif
+
static const DisasInsn table_index_mem[] = {
{ 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
{ 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
{ 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
{ 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
{ 0x0c001300u, 0xfc0013c0, trans_stby },
+#ifndef CONFIG_USER_ONLY
+ { 0x0c000180u, 0xfc00d3c0, trans_ldwa_idx_x }, /* LDWA, rx */
+ { 0x0c001180u, 0xfc00d3c0, trans_ldwa_idx_i }, /* LDWA, im */
+ { 0x0c001380u, 0xfc00d3c0, trans_stwa_idx_i }, /* STWA, im */
+#endif
};
static DisasJumpType trans_ldil(DisasContext *ctx, uint32_t insn)
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
+ unsigned sp = extract32(insn, 14, 2);
target_sreg i = assemble_16(insn);
- return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
+ return do_load(ctx, rt, rb, 0, 0, i, sp,
+ is_mod ? (i < 0 ? -1 : 1) : 0, mop);
}
static DisasJumpType trans_load_w(DisasContext *ctx, uint32_t insn)
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
+ unsigned sp = extract32(insn, 14, 2);
target_sreg i = assemble_16a(insn);
unsigned ext2 = extract32(insn, 1, 2);
case 0:
case 1:
/* FLDW without modification. */
- return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
+ return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
case 2:
/* LDW with modification. Note that the sign of I selects
post-dec vs pre-inc. */
- return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
+ return do_load(ctx, rt, rb, 0, 0, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
default:
return gen_illegal(ctx);
}
target_sreg i = assemble_16a(insn);
unsigned t1 = extract32(insn, 1, 1);
unsigned a = extract32(insn, 2, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned t0 = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
/* FLDW with modification. */
- return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
+ return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
}
static DisasJumpType trans_store(DisasContext *ctx, uint32_t insn,
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
+ unsigned sp = extract32(insn, 14, 2);
target_sreg i = assemble_16(insn);
- return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
+ return do_store(ctx, rt, rb, i, sp, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
}
static DisasJumpType trans_store_w(DisasContext *ctx, uint32_t insn)
{
unsigned rb = extract32(insn, 21, 5);
unsigned rt = extract32(insn, 16, 5);
+ unsigned sp = extract32(insn, 14, 2);
target_sreg i = assemble_16a(insn);
unsigned ext2 = extract32(insn, 1, 2);
case 0:
case 1:
/* FSTW without modification. */
- return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
+ return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, sp, 0);
case 2:
- /* LDW with modification. */
- return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
+ /* STW with modification. */
+ return do_store(ctx, rt, rb, i, sp, (i < 0 ? 1 : -1), MO_TEUL);
default:
return gen_illegal(ctx);
}
target_sreg i = assemble_16a(insn);
unsigned t1 = extract32(insn, 1, 1);
unsigned a = extract32(insn, 2, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned t0 = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
/* FSTW with modification. */
- return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
+ return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, sp, (a ? -1 : 1));
}
static DisasJumpType trans_copr_w(DisasContext *ctx, uint32_t insn)
/* unsigned cc = extract32(insn, 10, 2); */
unsigned i = extract32(insn, 12, 1);
unsigned ua = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
unsigned rt = t1 * 32 + t0;
switch (ext3) {
case 0: /* FLDW */
- return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
+ return do_floadw(ctx, rt, rb, rx, scale, disp, sp, modify);
case 4: /* FSTW */
- return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
+ return do_fstorew(ctx, rt, rb, rx, scale, disp, sp, modify);
}
return gen_illegal(ctx);
}
/* unsigned cc = extract32(insn, 10, 2); */
unsigned i = extract32(insn, 12, 1);
unsigned ua = extract32(insn, 13, 1);
+ unsigned sp = extract32(insn, 14, 2);
unsigned rx = extract32(insn, 16, 5);
unsigned rb = extract32(insn, 21, 5);
int modify = (m ? (ua ? -1 : 1) : 0);
switch (ext4) {
case 0: /* FLDD */
- return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
+ return do_floadd(ctx, rt, rb, rx, scale, disp, sp, modify);
case 8: /* FSTD */
- return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
+ return do_fstored(ctx, rt, rb, rx, scale, disp, sp, modify);
default:
return gen_illegal(ctx);
}
unsigned n = extract32(insn, 1, 1);
unsigned b = extract32(insn, 21, 5);
target_sreg disp = assemble_17(insn);
+ TCGv_reg tmp;
- /* unsigned s = low_uextract(insn, 13, 3); */
+#ifdef CONFIG_USER_ONLY
/* ??? It seems like there should be a good way of using
"be disp(sr2, r0)", the canonical gateway entry mechanism
to our advantage. But that appears to be inconvenient to
manage along side branch delay slots. Therefore we handle
entry into the gateway page via absolute address. */
-
/* Since we don't implement spaces, just branch. Do notice the special
case of "be disp(*,r0)" using a direct branch to disp, so that we can
goto_tb to the TB containing the syscall. */
if (b == 0) {
return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
+ }
+#else
+ int sp = assemble_sr3(insn);
+ nullify_over(ctx);
+#endif
+
+ tmp = get_temp(ctx);
+ tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
+ tmp = do_ibranch_priv(ctx, tmp);
+
+#ifdef CONFIG_USER_ONLY
+ return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
+#else
+ TCGv_i64 new_spc = tcg_temp_new_i64();
+
+ load_spr(ctx, new_spc, sp);
+ if (is_l) {
+ copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
+ tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
+ }
+ if (n && use_nullify_skip(ctx)) {
+ tcg_gen_mov_reg(cpu_iaoq_f, tmp);
+ tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
+ tcg_gen_mov_i64(cpu_iasq_f, new_spc);
+ tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
} else {
- TCGv_reg tmp = get_temp(ctx);
- tcg_gen_addi_reg(tmp, load_gpr(ctx, b), disp);
- return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ if (ctx->iaoq_b == -1) {
+ tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
+ }
+ tcg_gen_mov_reg(cpu_iaoq_b, tmp);
+ tcg_gen_mov_i64(cpu_iasq_b, new_spc);
+ nullify_set(ctx, n);
}
+ tcg_temp_free_i64(new_spc);
+ tcg_gen_lookup_and_goto_ptr();
+ return nullify_end(ctx, DISAS_NORETURN);
+#endif
}
static DisasJumpType trans_bl(DisasContext *ctx, uint32_t insn,
return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
}
+static DisasJumpType trans_b_gate(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned n = extract32(insn, 1, 1);
+ unsigned link = extract32(insn, 21, 5);
+ target_sreg disp = assemble_17(insn);
+ target_ureg dest = iaoq_dest(ctx, disp);
+
+ /* Make sure the caller hasn't done something weird with the queue.
+ * ??? This is not quite the same as the PSW[B] bit, which would be
+ * expensive to track. Real hardware will trap for
+ * b gateway
+ * b gateway+4 (in delay slot of first branch)
+ * However, checking for a non-sequential instruction queue *will*
+ * diagnose the security hole
+ * b gateway
+ * b evil
+ * in which instructions at evil would run with increased privs.
+ */
+ if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
+ return gen_illegal(ctx);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (ctx->tb_flags & PSW_C) {
+ CPUHPPAState *env = ctx->cs->env_ptr;
+ int type = hppa_artype_for_page(env, ctx->base.pc_next);
+ /* If we could not find a TLB entry, then we need to generate an
+ ITLB miss exception so the kernel will provide it.
+ The resulting TLB fill operation will invalidate this TB and
+ we will re-translate, at which point we *will* be able to find
+ the TLB entry and determine if this is in fact a gateway page. */
+ if (type < 0) {
+ return gen_excp(ctx, EXCP_ITLB_MISS);
+ }
+ /* No change for non-gateway pages or for priv decrease. */
+ if (type >= 4 && type - 4 < ctx->privilege) {
+ dest = deposit32(dest, 0, 2, type - 4);
+ }
+ } else {
+ dest &= -4; /* priv = 0 */
+ }
+#endif
+
+ return do_dbranch(ctx, dest, link, n);
+}
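/* Illustration (sketch only, not part of the patch): the promotion rule in
 * plain C, assuming <stdint.h>.  Page access-rights types 4..7 denote
 * gateways to privilege 0..3; the low two bits of the target are replaced
 * only when that is an actual privilege increase:
 */
static inline uint32_t gateway_priv_model(uint32_t dest, int type, int priv)
{
    if (type >= 4 && type - 4 < priv) {
        dest = (dest & ~3u) | (type - 4);  /* the deposit32 above */
    }
    return dest;
}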
+
static DisasJumpType trans_bl_long(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
tcg_gen_shli_reg(tmp, load_gpr(ctx, rx), 3);
tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
+ /* The computation here never changes privilege level. */
return do_ibranch(ctx, tmp, link, n);
}
tcg_gen_shli_reg(dest, load_gpr(ctx, rx), 3);
tcg_gen_add_reg(dest, dest, load_gpr(ctx, rb));
}
+ dest = do_ibranch_priv(ctx, dest);
return do_ibranch(ctx, dest, 0, n);
}
unsigned n = extract32(insn, 1, 1);
unsigned rb = extract32(insn, 21, 5);
unsigned link = extract32(insn, 13, 1) ? 2 : 0;
+ TCGv_reg dest;
- return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
+#ifdef CONFIG_USER_ONLY
+ dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
+ return do_ibranch(ctx, dest, link, n);
+#else
+ nullify_over(ctx);
+ dest = do_ibranch_priv(ctx, load_gpr(ctx, rb));
+
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
+ if (ctx->iaoq_b == -1) {
+ tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
+ }
+ copy_iaoq_entry(cpu_iaoq_b, -1, dest);
+ tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
+ if (link) {
+ copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ }
+ nullify_set(ctx, n);
+ tcg_gen_lookup_and_goto_ptr();
+ return nullify_end(ctx, DISAS_NORETURN);
+#endif
}
static const DisasInsn table_branch[] = {
{ 0xe8004000u, 0xfc00fffdu, trans_blr },
{ 0xe800c000u, 0xfc00fffdu, trans_bv },
{ 0xe800d000u, 0xfc00dffcu, trans_bve },
+ { 0xe8002000u, 0xfc00e000u, trans_b_gate },
};
static DisasJumpType trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
/* floating point class one */
/* float/float */
{ 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
- { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
+ { 0x38002200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_d },
/* int/float */
- { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
+ { 0x38008200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_w_s },
{ 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
{ 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
{ 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
/* float/int */
- { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
+ { 0x38010200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_w },
{ 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
{ 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
{ 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
/* float/int truncate */
- { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
+ { 0x38018200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_w },
{ 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
{ 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
{ 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
/* uint/float */
- { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
+ { 0x38028200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_uw_s },
{ 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
{ 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
{ 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
/* float/uint */
- { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
+ { 0x38030200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_s_uw },
{ 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
{ 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
{ 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
/* float/uint truncate */
- { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
+ { 0x38038200, 0xfc1ffe20, FOP_WEW = gen_helper_fcnv_t_s_uw },
{ 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
{ 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
{ 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
return table[i].trans(ctx, insn, &table[i]);
}
}
+ qemu_log_mask(LOG_UNIMP, "UNIMP insn %08x @ " TARGET_FMT_lx "\n",
+ insn, ctx->base.pc_next);
return gen_illegal(ctx);
}
case 0x15: /* unassigned */
case 0x1D: /* unassigned */
case 0x37: /* unassigned */
- case 0x3F: /* unassigned */
+ break;
+ case 0x3F:
+#ifndef CONFIG_USER_ONLY
+ /* Unassigned, but use as system-halt. */
+ if (insn == 0xfffdead0) {
+ return gen_hlt(ctx, 0); /* halt system */
+ }
+ if (insn == 0xfffdead1) {
+ return gen_hlt(ctx, 1); /* reset system */
+ }
+#endif
+ break;
default:
break;
}
int bound;
ctx->cs = cs;
+ ctx->tb_flags = ctx->base.tb->flags;
#ifdef CONFIG_USER_ONLY
ctx->privilege = MMU_USER_IDX;
ctx->mmu_idx = MMU_USER_IDX;
+ ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
+ ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
#else
- ctx->privilege = ctx->base.pc_first & 3;
- ctx->mmu_idx = (ctx->base.tb->flags & PSW_D
- ? ctx->privilege : MMU_PHYS_IDX);
-#endif
- ctx->iaoq_f = ctx->base.pc_first;
- ctx->iaoq_b = ctx->base.tb->cs_base;
- ctx->base.pc_first &= -4;
+ ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
+ ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
+
+ /* Recover the IAOQ values from the GVA + PRIV. */
+ uint64_t cs_base = ctx->base.tb->cs_base;
+ uint64_t iasq_f = cs_base & ~0xffffffffull;
+ int32_t diff = cs_base;
+ ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
+ ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
+#endif
ctx->iaoq_n = -1;
ctx->iaoq_n_var = NULL;
bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
bound = MIN(max_insns, bound);
- ctx->ntemps = 0;
- memset(ctx->temps, 0, sizeof(ctx->temps));
+ ctx->ntempr = 0;
+ ctx->ntempl = 0;
+ memset(ctx->tempr, 0, sizeof(ctx->tempr));
+ memset(ctx->templ, 0, sizeof(ctx->templ));
return bound;
}
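/* Illustration (sketch only, not part of the patch): a hypothetical encoder
 * matching the IAOQ recovery above, assuming cpu_get_tb_cpu_state packs
 * iasq_f into the high 32 bits of cs_base and the f->b offset, when the
 * back address is predictable, into the low 32 bits
 * (<stdint.h>/<stdbool.h>):
 */
static inline uint64_t cs_base_model(uint64_t iasq_f, uint32_t iaoq_f,
                                     uint32_t iaoq_b, bool b_known)
{
    uint64_t cs_base = iasq_f & ~0xffffffffull;
    return b_known ? cs_base | (uint32_t)(iaoq_b - iaoq_f) : cs_base;
}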
/* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
ctx->null_cond = cond_make_f();
ctx->psw_n_nonzero = false;
- if (ctx->base.tb->flags & PSW_N) {
+ if (ctx->tb_flags & PSW_N) {
ctx->null_cond.c = TCG_COND_ALWAYS;
ctx->psw_n_nonzero = true;
}
DisasContext *ctx = container_of(dcbase, DisasContext, base);
ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG);
- ctx->base.pc_next = (ctx->iaoq_f & -4) + 4;
+ ctx->base.pc_next += 4;
return true;
}
/* Execute one insn. */
#ifdef CONFIG_USER_ONLY
- if (ctx->iaoq_f < TARGET_PAGE_SIZE) {
+ if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
ret = do_page_zero(ctx);
assert(ret != DISAS_NEXT);
} else
{
/* Always fetch the insn, even if nullified, so that we check
the page permissions for execute. */
- uint32_t insn = cpu_ldl_code(env, ctx->iaoq_f & -4);
+ uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);
/* Set up the IA queue for the next insn.
This will be overwritten by a branch. */
}
/* Free any temporaries allocated. */
- for (i = 0, n = ctx->ntemps; i < n; ++i) {
- tcg_temp_free(ctx->temps[i]);
- ctx->temps[i] = NULL;
+ for (i = 0, n = ctx->ntempr; i < n; ++i) {
+ tcg_temp_free(ctx->tempr[i]);
+ ctx->tempr[i] = NULL;
+ }
+ for (i = 0, n = ctx->ntempl; i < n; ++i) {
+ tcg_temp_free_tl(ctx->templ[i]);
+ ctx->templ[i] = NULL;
}
- ctx->ntemps = 0;
+ ctx->ntempr = 0;
+ ctx->ntempl = 0;
/* Advance the insn queue. Note that this check also detects
a priority change within the instruction queue. */
if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
- if (ctx->null_cond.c == TCG_COND_NEVER
- || ctx->null_cond.c == TCG_COND_ALWAYS) {
+ if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
+ && use_goto_tb(ctx, ctx->iaoq_b)
+ && (ctx->null_cond.c == TCG_COND_NEVER
+ || ctx->null_cond.c == TCG_COND_ALWAYS)) {
nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
ret = DISAS_NORETURN;
} else {
ret = DISAS_IAQ_N_STALE;
- }
+ }
}
ctx->iaoq_f = ctx->iaoq_b;
ctx->iaoq_b = ctx->iaoq_n;
ctx->base.is_jmp = ret;
+ ctx->base.pc_next += 4;
if (ret == DISAS_NORETURN || ret == DISAS_IAQ_N_UPDATED) {
return;
if (ctx->iaoq_f == -1) {
tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
+#endif
nullify_save(ctx);
ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
} else if (ctx->iaoq_b == -1) {
default:
g_assert_not_reached();
}
-
- /* We don't actually use this during normal translation,
- but we should interact with the generic main loop. */
- ctx->base.pc_next = ctx->base.pc_first + 4 * ctx->base.num_insns;
}
static void hppa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
- target_ureg pc = dcbase->pc_first;
+ target_ulong pc = dcbase->pc_first;
#ifdef CONFIG_USER_ONLY
switch (pc) {
target_ulong *data)
{
env->iaoq_f = data[0];
- if (data[1] != -1) {
+ if (data[1] != (target_ureg)-1) {
env->iaoq_b = data[1];
}
/* Since we were executing the instruction at IAOQ_F, and took some