+#ifndef CONFIG_USER_ONLY
+/* Translate an Insert TLB instruction (IITLBA/IITLBP/IDTLBA/IDTLBP):
+   install the address or protection half of an I- or D-TLB entry via
+   a runtime helper.  Most-privileged only; nullification-aware.  */
+static DisasJumpType trans_ixtlbx(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned sp;
+ unsigned rr = extract32(insn, 16, 5); /* GR holding the entry data */
+ unsigned rb = extract32(insn, 21, 5); /* base register of the VA */
+ unsigned is_data = insn & 0x1000; /* set: data TLB; clear: insn TLB */
+ unsigned is_addr = insn & 0x40; /* set: address half; clear: protection */
+ TCGv_tl addr;
+ TCGv_reg ofs, reg;
+
+ if (is_data) {
+ /* Data-side form carries a 2-bit space field.  */
+ sp = extract32(insn, 14, 2);
+ } else {
+ /* Insn-side form carries a 3-bit SR field; the bitwise complement
+    presumably flags an explicit space register for form_gva /
+    space_select -- NOTE(review): confirm against that helper.  */
+ sp = ~assemble_sr3(insn);
+ }
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ /* No index reg, no scale, no displacement; rb is not modified.  */
+ form_gva(ctx, &addr, &ofs, rb, 0, 0, 0, sp, 0, false);
+ reg = load_gpr(ctx, rr);
+ if (is_addr) {
+ gen_helper_itlba(cpu_env, addr, reg);
+ } else {
+ gen_helper_itlbp(cpu_env, addr, reg);
+ }
+
+ /* Exit TB for ITLB change if mmu is enabled. This *should* not be
+ the case, since the OS TLB fill handler runs with mmu disabled. */
+ return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
+ ? DISAS_IAQ_N_STALE : DISAS_NEXT);
+}
+
+/* Translate a Purge TLB instruction (PITLB/PDTLB and the "local"
+   variants): remove translation(s) from the I- or D-TLB via a runtime
+   helper.  Most-privileged only; nullification-aware.  */
+static DisasJumpType trans_pxtlbx(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned m = extract32(insn, 5, 1); /* base-modification bit */
+ unsigned sp;
+ unsigned rx = extract32(insn, 16, 5); /* index register */
+ unsigned rb = extract32(insn, 21, 5); /* base register */
+ unsigned is_data = insn & 0x1000; /* set: data TLB; clear: insn TLB */
+ unsigned is_local = insn & 0x40; /* "local" purge variant */
+ TCGv_tl addr;
+ TCGv_reg ofs;
+
+ if (is_data) {
+ /* Data-side form carries a 2-bit space field.  */
+ sp = extract32(insn, 14, 2);
+ } else {
+ /* Insn-side form carries a 3-bit SR field, stored complemented --
+    presumably to flag an explicit space register for form_gva;
+    NOTE(review): confirm against form_gva/space_select.  */
+ sp = ~assemble_sr3(insn);
+ }
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ form_gva(ctx, &addr, &ofs, rb, rx, 0, 0, sp, m, false);
+ if (m) {
+ /* Write the post-modified offset back into the base register.  */
+ save_gpr(ctx, rb, ofs);
+ }
+ if (is_local) {
+ /* The local variant takes no address -- apparently implemented as
+    a purge of the whole TLB; NOTE(review): confirm in the helper.  */
+ gen_helper_ptlbe(cpu_env);
+ } else {
+ gen_helper_ptlb(cpu_env, addr);
+ }
+
+ /* Exit TB for TLB change if mmu is enabled. */
+ return nullify_end(ctx, !is_data && (ctx->tb_flags & PSW_C)
+ ? DISAS_IAQ_N_STALE : DISAS_NEXT);
+}
+
+/* Translate LPA (Load Physical Address): translate the formed virtual
+   address through a runtime helper and deposit the physical address in
+   GR rt.  Most-privileged only; nullification-aware.  */
+static DisasJumpType trans_lpa(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5); /* destination register */
+ unsigned m = extract32(insn, 5, 1); /* base-modification bit */
+ unsigned sp = extract32(insn, 14, 2); /* 2-bit space field */
+ unsigned rx = extract32(insn, 16, 5); /* index register */
+ unsigned rb = extract32(insn, 21, 5); /* base register */
+ TCGv_tl vaddr;
+ TCGv_reg ofs, paddr;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+ nullify_over(ctx);
+
+ form_gva(ctx, &vaddr, &ofs, rb, rx, 0, 0, sp, m, false);
+
+ paddr = tcg_temp_new();
+ gen_helper_lpa(paddr, cpu_env, vaddr);
+
+ /* Note that physical address result overrides base modification. */
+ if (m) {
+ save_gpr(ctx, rb, ofs);
+ }
+ save_gpr(ctx, rt, paddr); /* stored last, so rt == rb yields paddr */
+ tcg_temp_free(paddr);
+
+ return nullify_end(ctx, DISAS_NEXT);
+}
+
+/* Translate LCI (Load Coherence Index) into GR rt.  Most-privileged
+   only.  No nullify_over/nullify_end pair here, unlike the sibling
+   translators -- NOTE(review): presumably safe because only a constant
+   is stored and nothing can fault; confirm.  */
+static DisasJumpType trans_lci(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5); /* destination register */
+ TCGv_reg ci;
+
+ CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
+
+ /* The Coherence Index is an implementation-defined function of the
+ physical address. Two addresses with the same CI have a coherent
+ view of the cache. Our implementation is to return 0 for all,
+ since the entire address space is coherent. */
+ ci = tcg_const_reg(0);
+ save_gpr(ctx, rt, ci);
+ tcg_temp_free(ci);
+
+ return DISAS_NEXT;
+}
+#endif /* !CONFIG_USER_ONLY */
+