diff --git a/target-tricore/translate.c b/target-tricore/translate.c
index 74db70d7588b58821e4e1a853fecd520fe64b8ed..65abf453f051e3868bb89be4d5e23624df6827b7 100644
--- a/target-tricore/translate.c
+++ b/target-tricore/translate.c
@@ -115,6 +115,10 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f,
     tcg_temp_free_i32(helper_tmp);                                \
     } while (0)
 
+#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
+#define EA_B_ABSOLUT(con) (((con & 0xf00000) << 8) | \
+                           ((con & 0x0fffff) << 1))
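+/* The two macros above scatter their argument into a 32-bit effective
+   address: bits [17:14] of the 18-bit ABS field go to EA[31:28] and bits
+   [13:0] to EA[13:0]; bits [23:20] of the 24-bit B-format field go to
+   EA[31:28] and bits [19:0], shifted left by one, to EA[20:1]. */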
+
 /* Functions for load/save to/from memory */
 
 static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
@@ -135,6 +139,157 @@ static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
     tcg_temp_free(temp);
 }
 
+static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
+{
+    TCGv_i64 temp = tcg_temp_new_i64();
+
+    tcg_gen_concat_i32_i64(temp, rl, rh);
+    tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);
+
+    tcg_temp_free_i64(temp);
+}
+
+static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
+                                DisasContext *ctx)
+{
+    TCGv temp = tcg_temp_new();
+    tcg_gen_addi_tl(temp, base, con);
+    gen_st_2regs_64(rh, rl, temp, ctx);
+    tcg_temp_free(temp);
+}
+
+static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
+{
+    TCGv_i64 temp = tcg_temp_new_i64();
+
+    tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
+    /* write back to two 32 bit regs */
+    tcg_gen_extr_i64_i32(rl, rh, temp);
+
+    tcg_temp_free_i64(temp);
+}
+
+static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
+                                DisasContext *ctx)
+{
+    TCGv temp = tcg_temp_new();
+    tcg_gen_addi_tl(temp, base, con);
+    gen_ld_2regs_64(rh, rl, temp, ctx);
+    tcg_temp_free(temp);
+}
+
+static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
+                           TCGMemOp mop)
+{
+    TCGv temp = tcg_temp_new();
+    tcg_gen_addi_tl(temp, r2, off);
+    tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
+    tcg_gen_mov_tl(r2, temp);
+    tcg_temp_free(temp);
+}
+
+static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
+                           TCGMemOp mop)
+{
+    TCGv temp = tcg_temp_new();
+    tcg_gen_addi_tl(temp, r2, off);
+    tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
+    tcg_gen_mov_tl(r2, temp);
+    tcg_temp_free(temp);
+}
+
+/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
+static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
+{
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+
+    /* temp = M(EA, word) */
+    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+    /* temp = temp & ~E[a][63:32] */
+    tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
+    /* temp2 = (E[a][31:0] & E[a][63:32]); */
+    tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
+    /* temp = temp | temp2; */
+    tcg_gen_or_tl(temp, temp, temp2);
+    /* M(EA, word) = temp; */
+    tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+/* tmp = M(EA, word);
+   M(EA, word) = D[a];
+   D[a] = tmp[31:0];*/
+static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
+{
+    TCGv temp = tcg_temp_new();
+
+    tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+    tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
+    tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+
+    tcg_temp_free(temp);
+}
+
+/* We generate loads and stores to core special function registers (CSFRs)
+   through the functions gen_mfcr and gen_mtcr. To handle access permissions,
+   we use the three macros R, A and E, which allow read-only, all, and
+   Endinit-protected access. These macros also specify in which ISA version
+   the CSFR was introduced. */
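+/* Note that csfr.def is included twice below: once in gen_mfcr with the read
+   expansions of R/A/E and once in gen_mtcr with the write expansions. */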
+#define R(ADDRESS, REG, FEATURE)                                         \
+    case ADDRESS:                                                        \
+        if (tricore_feature(env, FEATURE)) {                             \
+            tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
+        }                                                                \
+        break;
+#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
+#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
+static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset)
+{
+    /* since we're caching PSW make this a special case */
+    if (offset == 0xfe04) {
+        gen_helper_psw_read(ret, cpu_env);
+    } else {
+        switch (offset) {
+#include "csfr.def"
+        }
+    }
+}
+#undef R
+#undef A
+#undef E
+
+#define R(ADDRESS, REG, FEATURE) /* don't generate writes to read-only regs,
+                                    since no exception occurs */
+#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)                \
+    case ADDRESS:                                                        \
+        if (tricore_feature(env, FEATURE)) {                             \
+            tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG));  \
+        }                                                                \
+        break;
+/* Endinit-protected registers
+   TODO: Since the Endinit bit is in a register of a not-yet-implemented
+         watchdog device, we handle Endinit-protected registers like
+         all-access registers for now. */
+#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
+static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1,
+                            int32_t offset)
+{
+    if (ctx->hflags & TRICORE_HFLAG_SM) {
+        /* since we're caching PSW make this a special case */
+        if (offset == 0xfe04) {
+            gen_helper_psw_write(cpu_env, r1);
+        } else {
+            switch (offset) {
+#include "csfr.def"
+            }
+        }
+    } else {
+        /* generate privilege trap */
+    }
+}
+
 /* Functions for arithmetic instructions  */
 
 static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
@@ -161,12 +316,337 @@ static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
     tcg_temp_free(t0);
 }
 
+/* ret = r2 + (r1 * r3); */
+static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+
+    tcg_gen_ext_i32_i64(t1, r1);
+    tcg_gen_ext_i32_i64(t2, r2);
+    tcg_gen_ext_i32_i64(t3, r3);
+
+    tcg_gen_mul_i64(t1, t1, t3);
+    tcg_gen_add_i64(t1, t2, t1);
+
+    tcg_gen_trunc_i64_i32(ret, t1);
+    /* calc V
+       t1 > 0x7fffffff */
+    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
+    /* t1 < -0x80000000 */
+    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
+    tcg_gen_or_i64(t2, t2, t3);
+    tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
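+    /* AV (advance overflow) = result[31] ^ result[30]; the add and xor
+       below leave it in bit 31 of cpu_PSW_AV */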
+    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    tcg_temp_free_i64(t3);
+}
+
+static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_madd32_d(ret, r1, r2, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void
+gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+             TCGv r3)
+{
+    TCGv t1 = tcg_temp_new();
+    TCGv t2 = tcg_temp_new();
+    TCGv t3 = tcg_temp_new();
+    TCGv t4 = tcg_temp_new();
+
+    tcg_gen_muls2_tl(t1, t2, r1, r3);
+    /* only the add can overflow */
+    tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
+    /* calc V bit */
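+    /* V is set if r2_high and t2 have the same sign but t4's sign differs,
+       i.e. the 64-bit signed add overflowed */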
+    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
+    tcg_gen_xor_tl(t1, r2_high, t2);
+    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
+    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+    /* write back the result */
+    tcg_gen_mov_tl(ret_low, t3);
+    tcg_gen_mov_tl(ret_high, t4);
+
+    tcg_temp_free(t1);
+    tcg_temp_free(t2);
+    tcg_temp_free(t3);
+    tcg_temp_free(t4);
+}
+
+static inline void
+gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+              TCGv r3)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+
+    tcg_gen_extu_i32_i64(t1, r1);
+    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
+    tcg_gen_extu_i32_i64(t3, r3);
+
+    tcg_gen_mul_i64(t1, t1, t3);
+    tcg_gen_add_i64(t2, t2, t1);
+    /* write back result */
+    tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
+    /* only the add can overflow, and it did if t2 < t1;
+       calc V bit */
+    tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
+    tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    tcg_temp_free_i64(t3);
+}
+
+static inline void
+gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+              int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void
+gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+               int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+    tcg_temp_free(temp);
+}
+
+/* ret = r2 - (r1 * r3); */
+static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+
+    tcg_gen_ext_i32_i64(t1, r1);
+    tcg_gen_ext_i32_i64(t2, r2);
+    tcg_gen_ext_i32_i64(t3, r3);
+
+    tcg_gen_mul_i64(t1, t1, t3);
+    tcg_gen_sub_i64(t1, t2, t1);
+
+    tcg_gen_trunc_i64_i32(ret, t1);
+    /* calc V
+       t1 > 0x7fffffff */
+    tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
+    /* result < -0x80000000 */
+    tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
+    tcg_gen_or_i64(t2, t2, t3);
+    tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+    tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    tcg_temp_free_i64(t3);
+}
+
+static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_msub32_d(ret, r1, r2, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void
+gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+             TCGv r3)
+{
+    TCGv t1 = tcg_temp_new();
+    TCGv t2 = tcg_temp_new();
+    TCGv t3 = tcg_temp_new();
+    TCGv t4 = tcg_temp_new();
+
+    tcg_gen_muls2_tl(t1, t2, r1, r3);
+    /* only the sub can overflow */
+    tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
+    /* calc V bit */
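+    /* V is set if r2_high and t2 have different signs and t4's sign differs
+       from r2_high's, i.e. the 64-bit signed subtraction overflowed */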
+    tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
+    tcg_gen_xor_tl(t1, r2_high, t2);
+    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
+    tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+    /* write back the result */
+    tcg_gen_mov_tl(ret_low, t3);
+    tcg_gen_mov_tl(ret_high, t4);
+
+    tcg_temp_free(t1);
+    tcg_temp_free(t2);
+    tcg_temp_free(t3);
+    tcg_temp_free(t4);
+}
+
+static inline void
+gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+              int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+              TCGv r3)
+{
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+    TCGv_i64 t3 = tcg_temp_new_i64();
+
+    tcg_gen_extu_i32_i64(t1, r1);
+    tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
+    tcg_gen_extu_i32_i64(t3, r3);
+
+    tcg_gen_mul_i64(t1, t1, t3);
+    tcg_gen_sub_i64(t3, t2, t1);
+    tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
+    /* calc V bit; only the sub can overflow, and it did if t1 > t2 */
+    tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
+    tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+    tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+    tcg_temp_free_i64(t1);
+    tcg_temp_free_i64(t2);
+    tcg_temp_free_i64(t3);
+}
+
+static inline void
+gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+               int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+    tcg_temp_free(temp);
+}
+
 static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
 {
     TCGv temp = tcg_const_i32(r2);
     gen_add_d(ret, r1, temp);
     tcg_temp_free(temp);
 }
+/* calculate the carry bit too */
+static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
+{
+    TCGv t0    = tcg_temp_new_i32();
+    TCGv result = tcg_temp_new_i32();
+
+    tcg_gen_movi_tl(t0, 0);
+    /* Addition and set C/V/SV bits */
+    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
+    /* calc V bit */
+    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+    tcg_gen_xor_tl(t0, r1, r2);
+    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, result, result);
+    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+    /* write back result */
+    tcg_gen_mov_tl(ret, result);
+
+    tcg_temp_free(result);
+    tcg_temp_free(t0);
+}
+
+static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_add_CC(ret, r1, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
+{
+    TCGv carry = tcg_temp_new_i32();
+    TCGv t0    = tcg_temp_new_i32();
+    TCGv result = tcg_temp_new_i32();
+
+    tcg_gen_movi_tl(t0, 0);
+    tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
+    /* Addition, carry and set C/V/SV bits */
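+    /* the two add2 steps accumulate the carry-outs of (r1 + carry-in) and
+       of (+ r2) into cpu_PSW_C; at most one of them can be set, so the
+       final value is the carry of r1 + r2 + carry-in */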
+    tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
+    tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
+    /* calc V bit */
+    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+    tcg_gen_xor_tl(t0, r1, r2);
+    tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+    /* Calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV/SAV bits */
+    tcg_gen_add_tl(cpu_PSW_AV, result, result);
+    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+    /* calc SAV */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+    /* write back result */
+    tcg_gen_mov_tl(ret, result);
+
+    tcg_temp_free(result);
+    tcg_temp_free(t0);
+    tcg_temp_free(carry);
+}
+
+static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_addc_CC(ret, r1, temp);
+    tcg_temp_free(temp);
+}
 
 static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
                                 TCGv r4)
@@ -239,6 +719,49 @@ static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
     tcg_temp_free(result);
 }
 
+static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
+{
+    TCGv temp = tcg_temp_new_i32();
+    TCGv result = tcg_temp_new_i32();
+
+    tcg_gen_sub_tl(result, r1, r2);
+    tcg_gen_sub_tl(temp, r2, r1);
+    tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);
+
+    /* calc V bit */
+    tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+    tcg_gen_xor_tl(temp, result, r2);
+    tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
+    tcg_gen_xor_tl(temp, r1, r2);
+    tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+    /* calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV bit */
+    tcg_gen_add_tl(cpu_PSW_AV, result, result);
+    tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+    /* calc SAV bit */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+    /* write back result */
+    tcg_gen_mov_tl(ret, result);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(result);
+}
+
+static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_absdif(ret, r1, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
+    tcg_temp_free(temp);
+}
+
 static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
 {
     TCGv high = tcg_temp_new();
@@ -262,9 +785,177 @@ static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
     tcg_temp_free(low);
 }
 
-static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
+static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
 {
-    if (shift_count == -32) {
+    TCGv temp = tcg_const_i32(con);
+    gen_mul_i32s(ret, r1, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+{
+    tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
+    /* clear V bit */
+    tcg_gen_movi_tl(cpu_PSW_V, 0);
+    /* calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV bit */
+    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+    /* calc SAV bit */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+}
+
+static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
+                                int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_mul_i64s(ret_low, ret_high, r1, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+{
+    tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
+    /* clear V bit */
+    tcg_gen_movi_tl(cpu_PSW_V, 0);
+    /* calc SV bit */
+    tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+    /* Calc AV bit */
+    tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+    tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+    /* calc SAV bit */
+    tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+}
+
+static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
+                                int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_mul_i64u(ret_low, ret_high, r1, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_helper_mul_ssov(ret, cpu_env, r1, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_helper_mul_suov(ret, cpu_env, r1, temp);
+    tcg_temp_free(temp);
+}
+
+/* e.g. gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
+static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void
+gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+              int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+    gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, temp);
+    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+               int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+    gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, temp);
+    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(temp64);
+}
+
+static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+              int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+    gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, temp);
+    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+               int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    TCGv_i64 temp64 = tcg_temp_new_i64();
+    tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+    gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, temp);
+    tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+    tcg_temp_free(temp);
+    tcg_temp_free_i64(temp64);
+}
+
+static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
+{
+    TCGv sat_neg = tcg_const_i32(low);
+    TCGv temp = tcg_const_i32(up);
+
+    /* sat_neg = (arg < low ) ? low : arg; */
+    tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);
+
+    /* ret = (sat_neg > up ) ? up  : sat_neg; */
+    tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);
+
+    tcg_temp_free(sat_neg);
+    tcg_temp_free(temp);
+}
+
+static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
+{
+    TCGv temp = tcg_const_i32(up);
+    /* sat_neg = (arg > up ) ? up : arg; */
+    tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
+    tcg_temp_free(temp);
+}
+
+static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
+{
+    if (shift_count == -32) {
         tcg_gen_movi_tl(ret, 0);
     } else if (shift_count >= 0) {
         tcg_gen_shli_tl(ret, r1, shift_count);
@@ -273,6 +964,27 @@ static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
     }
 }
 
+static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
+{
+    TCGv temp_low, temp_high;
+
+    if (shiftcount == -16) {
+        tcg_gen_movi_tl(ret, 0);
+    } else {
+        temp_high = tcg_temp_new();
+        temp_low = tcg_temp_new();
+
+        tcg_gen_andi_tl(temp_low, r1, 0xffff);
+        tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
+        gen_shi(temp_low, temp_low, shiftcount);
+        gen_shi(ret, temp_high, shiftcount);
+        tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);
+
+        tcg_temp_free(temp_low);
+        tcg_temp_free(temp_high);
+    }
+}
+
 static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
 {
     uint32_t msk, msk_start;
@@ -332,16 +1044,246 @@ static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
     tcg_temp_free(t_0);
 }
 
+static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
+{
+    gen_helper_sha_ssov(ret, cpu_env, r1, r2);
+}
+
+static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_shas(ret, r1, temp);
+    tcg_temp_free(temp);
+}
+
+static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
+{
+    TCGv low, high;
+
+    if (shift_count == 0) {
+        tcg_gen_mov_tl(ret, r1);
+    } else if (shift_count > 0) {
+        low = tcg_temp_new();
+        high = tcg_temp_new();
+
+        tcg_gen_andi_tl(high, r1, 0xffff0000);
+        tcg_gen_shli_tl(low, r1, shift_count);
+        tcg_gen_shli_tl(ret, high, shift_count);
+        tcg_gen_deposit_tl(ret, ret, low, 0, 16);
+
+        tcg_temp_free(low);
+        tcg_temp_free(high);
+    } else {
+        low = tcg_temp_new();
+
+        tcg_gen_ext16s_tl(low, r1);
+        tcg_gen_sari_tl(low, low, -shift_count);
+        tcg_gen_sari_tl(ret, r1, -shift_count);
+        tcg_gen_deposit_tl(ret, ret, low, 0, 16);
+
+        tcg_temp_free(low);
+    }
+}
+
+/* ret = {ret[30:0], (r1 cond r2)}; */
+static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
+{
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+
+    tcg_gen_shli_tl(temp, ret, 1);
+    tcg_gen_setcond_tl(cond, temp2, r1, r2);
+    tcg_gen_or_tl(ret, temp, temp2);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_sh_cond(cond, ret, r1, temp);
+    tcg_temp_free(temp);
+}
+
 static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
 {
     gen_helper_add_ssov(ret, cpu_env, r1, r2);
 }
 
+static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_helper_add_ssov(ret, cpu_env, r1, temp);
+    tcg_temp_free(temp);
+}
+
+static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_helper_add_suov(ret, cpu_env, r1, temp);
+    tcg_temp_free(temp);
+}
+
 static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
 {
     gen_helper_sub_ssov(ret, cpu_env, r1, r2);
 }
 
+static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
+{
+    gen_helper_sub_suov(ret, cpu_env, r1, r2);
+}
+
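+/* ret[0] = ret[0] op2 (r1[pos1] op1 r2[pos2]); the other bits of ret are
+   left unchanged */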
+static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
+                               int pos1, int pos2,
+                               void(*op1)(TCGv, TCGv, TCGv),
+                               void(*op2)(TCGv, TCGv, TCGv))
+{
+    TCGv temp1, temp2;
+
+    temp1 = tcg_temp_new();
+    temp2 = tcg_temp_new();
+
+    tcg_gen_shri_tl(temp2, r2, pos2);
+    tcg_gen_shri_tl(temp1, r1, pos1);
+
+    (*op1)(temp1, temp1, temp2);
+    (*op2)(temp1, ret, temp1);
+
+    tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);
+
+    tcg_temp_free(temp1);
+    tcg_temp_free(temp2);
+}
+
+/* ret = r1[pos1] op1 r2[pos2]; */
+static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
+                               int pos1, int pos2,
+                               void(*op1)(TCGv, TCGv, TCGv))
+{
+    TCGv temp1, temp2;
+
+    temp1 = tcg_temp_new();
+    temp2 = tcg_temp_new();
+
+    tcg_gen_shri_tl(temp2, r2, pos2);
+    tcg_gen_shri_tl(temp1, r1, pos1);
+
+    (*op1)(ret, temp1, temp2);
+
+    tcg_gen_andi_tl(ret, ret, 0x1);
+
+    tcg_temp_free(temp1);
+    tcg_temp_free(temp2);
+}
+
+static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
+                                         void(*op)(TCGv, TCGv, TCGv))
+{
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+    /* temp = (arg1 cond arg2) */
+    tcg_gen_setcond_tl(cond, temp, r1, r2);
+    /* temp2 = ret[0] */
+    tcg_gen_andi_tl(temp2, ret, 0x1);
+    /* temp = temp op temp2 */
+    (*op)(temp, temp, temp2);
+    /* ret = {ret[31:1], temp} */
+    tcg_gen_deposit_tl(ret, ret, temp, 0, 1);
+
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+static inline void
+gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
+                       void(*op)(TCGv, TCGv, TCGv))
+{
+    TCGv temp = tcg_const_i32(con);
+    gen_accumulating_cond(cond, ret, r1, temp, op);
+    tcg_temp_free(temp);
+}
+
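+/* ret = 1 if any byte of r1 equals the corresponding byte of con,
+   otherwise 0 */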
+static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv b0 = tcg_temp_new();
+    TCGv b1 = tcg_temp_new();
+    TCGv b2 = tcg_temp_new();
+    TCGv b3 = tcg_temp_new();
+
+    /* byte 0 */
+    tcg_gen_andi_tl(b0, r1, 0xff);
+    tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);
+
+    /* byte 1 */
+    tcg_gen_andi_tl(b1, r1, 0xff00);
+    tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);
+
+    /* byte 2 */
+    tcg_gen_andi_tl(b2, r1, 0xff0000);
+    tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);
+
+    /* byte 3 */
+    tcg_gen_andi_tl(b3, r1, 0xff000000);
+    tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);
+
+    /* combine them */
+    tcg_gen_or_tl(ret, b0, b1);
+    tcg_gen_or_tl(ret, ret, b2);
+    tcg_gen_or_tl(ret, ret, b3);
+
+    tcg_temp_free(b0);
+    tcg_temp_free(b1);
+    tcg_temp_free(b2);
+    tcg_temp_free(b3);
+}
+
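+/* ret = 1 if either halfword of r1 equals the corresponding halfword of con,
+   otherwise 0 */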
+static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
+{
+    TCGv h0 = tcg_temp_new();
+    TCGv h1 = tcg_temp_new();
+
+    /* halfword 0 */
+    tcg_gen_andi_tl(h0, r1, 0xffff);
+    tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);
+
+    /* halfword 1 */
+    tcg_gen_andi_tl(h1, r1, 0xffff0000);
+    tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);
+
+    /* combine them */
+    tcg_gen_or_tl(ret, h0, h1);
+
+    tcg_temp_free(h0);
+    tcg_temp_free(h1);
+}
+
+/* mask = ((1 << width) - 1) << pos;
+   ret = (r1 & ~mask) | ((r2 << pos) & mask); */
+static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
+{
+    TCGv mask = tcg_temp_new();
+    TCGv temp = tcg_temp_new();
+    TCGv temp2 = tcg_temp_new();
+
+    tcg_gen_movi_tl(mask, 1);
+    tcg_gen_shl_tl(mask, mask, width);
+    tcg_gen_subi_tl(mask, mask, 1);
+    tcg_gen_shl_tl(mask, mask, pos);
+
+    tcg_gen_shl_tl(temp, r2, pos);
+    tcg_gen_and_tl(temp, temp, mask);
+    tcg_gen_andc_tl(temp2, r1, mask);
+    tcg_gen_or_tl(ret, temp, temp2);
+
+    tcg_temp_free(mask);
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
 /* helpers for generating program flow micro-ops */
 
 static inline void gen_save_pc(target_ulong pc)
@@ -403,7 +1345,8 @@ static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
 static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
                                int r2 , int32_t constant , int32_t offset)
 {
-    TCGv temp;
+    TCGv temp, temp2;
+    int n;
 
     switch (opc) {
 /* SB-format jumps */
@@ -411,6 +1354,7 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
     case OPC1_32_B_J:
         gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
         break;
+    case OPC1_32_B_CALL:
     case OPC1_16_SB_CALL:
         gen_helper_1arg(call, ctx->next_pc);
         gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
@@ -477,6 +1421,157 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
     case OPC1_16_SBR_LOOP:
         gen_loop(ctx, r1, offset * 2 - 32);
         break;
+/* SR-format jumps */
+    case OPC1_16_SR_JI:
+        tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
+        tcg_gen_exit_tb(0);
+        break;
+    case OPC2_16_SR_RET:
+        gen_helper_ret(cpu_env);
+        tcg_gen_exit_tb(0);
+        break;
+/* B-format */
+    case OPC1_32_B_CALLA:
+        gen_helper_1arg(call, ctx->next_pc);
+        gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
+        break;
+    case OPC1_32_B_JLA:
+        tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
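+        /* fall through */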
+    case OPC1_32_B_JA:
+        gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
+        break;
+    case OPC1_32_B_JL:
+        tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
+        gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
+        break;
+/* BRC format */
+    case OPCM_32_BRC_EQ_NEQ:
+        if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JEQ) {
+            gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], constant, offset);
+        } else {
+            gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], constant, offset);
+        }
+        break;
+    case OPCM_32_BRC_GE:
+        if (MASK_OP_BRC_OP2(ctx->opcode) == OP2_32_BRC_JGE) {
+            gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], constant, offset);
+        } else {
+            constant = MASK_OP_BRC_CONST4(ctx->opcode);
+            gen_branch_condi(ctx, TCG_COND_GEU, cpu_gpr_d[r1], constant,
+                             offset);
+        }
+        break;
+    case OPCM_32_BRC_JLT:
+        if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JLT) {
+            gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], constant, offset);
+        } else {
+            constant = MASK_OP_BRC_CONST4(ctx->opcode);
+            gen_branch_condi(ctx, TCG_COND_LTU, cpu_gpr_d[r1], constant,
+                             offset);
+        }
+        break;
+    case OPCM_32_BRC_JNE:
+        temp = tcg_temp_new();
+        if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) {
+            tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+            /* subi is unconditional */
+            tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+            gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
+        } else {
+            tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+            /* addi is unconditional */
+            tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+            gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
+        }
+        tcg_temp_free(temp);
+        break;
+/* BRN format */
+    case OPCM_32_BRN_JTT:
+        n = MASK_OP_BRN_N(ctx->opcode);
+
+        temp = tcg_temp_new();
+        tcg_gen_andi_tl(temp, cpu_gpr_d[r1], (1 << n));
+
+        if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) {
+            gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
+        } else {
+            gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
+        }
+        tcg_temp_free(temp);
+        break;
+/* BRR format */
+    case OPCM_32_BRR_EQ_NEQ:
+        if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ) {
+            gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                            offset);
+        } else {
+            gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                            offset);
+        }
+        break;
+    case OPCM_32_BRR_ADDR_EQ_NEQ:
+        if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ_A) {
+            gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_a[r1], cpu_gpr_a[r2],
+                            offset);
+        } else {
+            gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_a[r1], cpu_gpr_a[r2],
+                            offset);
+        }
+        break;
+    case OPCM_32_BRR_GE:
+        if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JGE) {
+            gen_branch_cond(ctx, TCG_COND_GE, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                            offset);
+        } else {
+            gen_branch_cond(ctx, TCG_COND_GEU, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                            offset);
+        }
+        break;
+    case OPCM_32_BRR_JLT:
+        if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JLT) {
+            gen_branch_cond(ctx, TCG_COND_LT, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                            offset);
+        } else {
+            gen_branch_cond(ctx, TCG_COND_LTU, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                            offset);
+        }
+        break;
+    case OPCM_32_BRR_LOOP:
+        if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_LOOP) {
+            gen_loop(ctx, r1, offset * 2);
+        } else {
+            /* OPC2_32_BRR_LOOPU */
+            gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
+        }
+        break;
+    case OPCM_32_BRR_JNE:
+        temp = tcg_temp_new();
+        temp2 = tcg_temp_new();
+        if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNED) {
+            tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+            /* also save r2, in case of r1 == r2, so r2 is not decremented */
+            tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
+            /* subi is unconditional */
+            tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+            gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
+        } else {
+            tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+            /* also save r2, in case of r1 == r2, so r2 is not decremented */
+            tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
+            /* addi is unconditional */
+            tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+            gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
+        }
+        tcg_temp_free(temp);
+        tcg_temp_free(temp2);
+        break;
+    case OPCM_32_BRR_JNZ:
+        if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) {
+            gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
+        } else {
+            gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
+        }
+        break;
     default:
         printf("Branch Error at %x\n", ctx->pc);
     }
@@ -680,19 +1775,197 @@ static void decode_ssr_opc(DisasContext *ctx, int op1)
     }
 }
 
-static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
+static void decode_sc_opc(DisasContext *ctx, int op1)
 {
-    int op1;
-    int r1, r2;
     int32_t const16;
-    int32_t address;
-    TCGv temp;
 
-    op1 = MASK_OP_MAJOR(ctx->opcode);
+    const16 = MASK_OP_SC_CONST8(ctx->opcode);
 
-    /* handle ADDSC.A opcode only being 6 bit long */
-    if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) {
-        op1 = OPC1_16_SRRS_ADDSC_A;
+    switch (op1) {
+    case OPC1_16_SC_AND:
+        tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
+        break;
+    case OPC1_16_SC_BISR:
+        gen_helper_1arg(bisr, const16 & 0xff);
+        break;
+    case OPC1_16_SC_LD_A:
+        gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+        break;
+    case OPC1_16_SC_LD_W:
+        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+        break;
+    case OPC1_16_SC_MOV:
+        tcg_gen_movi_tl(cpu_gpr_d[15], const16);
+        break;
+    case OPC1_16_SC_OR:
+        tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
+        break;
+    case OPC1_16_SC_ST_A:
+        gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+        break;
+    case OPC1_16_SC_ST_W:
+        gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+        break;
+    case OPC1_16_SC_SUB_A:
+        tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16);
+        break;
+    }
+}
+
+static void decode_slr_opc(DisasContext *ctx, int op1)
+{
+    int r1, r2;
+
+    r1 = MASK_OP_SLR_D(ctx->opcode);
+    r2 = MASK_OP_SLR_S2(ctx->opcode);
+
+    switch (op1) {
+/* SLR-format */
+    case OPC1_16_SLR_LD_A:
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+        break;
+    case OPC1_16_SLR_LD_A_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+        break;
+    case OPC1_16_SLR_LD_BU:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+        break;
+    case OPC1_16_SLR_LD_BU_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
+        break;
+    case OPC1_16_SLR_LD_H:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+        break;
+    case OPC1_16_SLR_LD_H_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
+        break;
+    case OPC1_16_SLR_LD_W:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+        break;
+    case OPC1_16_SLR_LD_W_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+        break;
+    }
+}
+
+static void decode_sro_opc(DisasContext *ctx, int op1)
+{
+    int r2;
+    int32_t address;
+
+    r2 = MASK_OP_SRO_S2(ctx->opcode);
+    address = MASK_OP_SRO_OFF4(ctx->opcode);
+
+/* SRO-format */
+    switch (op1) {
+    case OPC1_16_SRO_LD_A:
+        gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+        break;
+    case OPC1_16_SRO_LD_BU:
+        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
+        break;
+    case OPC1_16_SRO_LD_H:
+        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
+        break;
+    case OPC1_16_SRO_LD_W:
+        gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+        break;
+    case OPC1_16_SRO_ST_A:
+        gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+        break;
+    case OPC1_16_SRO_ST_B:
+        gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
+        break;
+    case OPC1_16_SRO_ST_H:
+        gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
+        break;
+    case OPC1_16_SRO_ST_W:
+        gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+        break;
+    }
+}
+
+static void decode_sr_system(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    op2 = MASK_OP_SR_OP2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_16_SR_NOP:
+        break;
+    case OPC2_16_SR_RET:
+        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
+        break;
+    case OPC2_16_SR_RFE:
+        gen_helper_rfe(cpu_env);
+        tcg_gen_exit_tb(0);
+        ctx->bstate = BS_BRANCH;
+        break;
+    case OPC2_16_SR_DEBUG:
+        /* raise EXCP_DEBUG */
+        break;
+    }
+}
+
+static void decode_sr_accu(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    uint32_t r1;
+    TCGv temp;
+
+    r1 = MASK_OP_SR_S1D(ctx->opcode);
+    op2 = MASK_OP_SR_OP2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_16_SR_RSUB:
+        /* overflow only if r1 = -0x80000000 */
+        temp = tcg_const_i32(-0x80000000);
+        /* calc V bit */
+        tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp);
+        tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+        /* calc SV bit */
+        tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+        /* sub */
+        tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
+        /* calc av */
+        tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
+        tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
+        /* calc sav */
+        tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+        tcg_temp_free(temp);
+        break;
+    case OPC2_16_SR_SAT_B:
+        gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80);
+        break;
+    case OPC2_16_SR_SAT_BU:
+        gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xff);
+        break;
+    case OPC2_16_SR_SAT_H:
+        gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7fff, -0x8000);
+        break;
+    case OPC2_16_SR_SAT_HU:
+        gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xffff);
+        break;
+    }
+}
+
+static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
+{
+    int op1;
+    int r1, r2;
+    int32_t const16;
+    int32_t address;
+    TCGv temp;
+
+    op1 = MASK_OP_MAJOR(ctx->opcode);
+
+    /* handle ADDSC.A opcode only being 6 bit long */
+    if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) {
+        op1 = OPC1_16_SRRS_ADDSC_A;
     }
 
     switch (op1) {
@@ -816,11 +2089,1945 @@ static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
         address = MASK_OP_SBR_DISP4(ctx->opcode);
         gen_compute_branch(ctx, op1, r1, 0, 0, address);
         break;
+/* SC-format */
+    case OPC1_16_SC_AND:
+    case OPC1_16_SC_BISR:
+    case OPC1_16_SC_LD_A:
+    case OPC1_16_SC_LD_W:
+    case OPC1_16_SC_MOV:
+    case OPC1_16_SC_OR:
+    case OPC1_16_SC_ST_A:
+    case OPC1_16_SC_ST_W:
+    case OPC1_16_SC_SUB_A:
+        decode_sc_opc(ctx, op1);
+        break;
+/* SLR-format */
+    case OPC1_16_SLR_LD_A:
+    case OPC1_16_SLR_LD_A_POSTINC:
+    case OPC1_16_SLR_LD_BU:
+    case OPC1_16_SLR_LD_BU_POSTINC:
+    case OPC1_16_SLR_LD_H:
+    case OPC1_16_SLR_LD_H_POSTINC:
+    case OPC1_16_SLR_LD_W:
+    case OPC1_16_SLR_LD_W_POSTINC:
+        decode_slr_opc(ctx, op1);
+        break;
+/* SRO-format */
+    case OPC1_16_SRO_LD_A:
+    case OPC1_16_SRO_LD_BU:
+    case OPC1_16_SRO_LD_H:
+    case OPC1_16_SRO_LD_W:
+    case OPC1_16_SRO_ST_A:
+    case OPC1_16_SRO_ST_B:
+    case OPC1_16_SRO_ST_H:
+    case OPC1_16_SRO_ST_W:
+        decode_sro_opc(ctx, op1);
+        break;
+/* SSRO-format */
+    case OPC1_16_SSRO_ST_A:
+        r1 = MASK_OP_SSRO_S1(ctx->opcode);
+        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+        gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
+        break;
+    case OPC1_16_SSRO_ST_B:
+        r1 = MASK_OP_SSRO_S1(ctx->opcode);
+        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
+        break;
+    case OPC1_16_SSRO_ST_H:
+        r1 = MASK_OP_SSRO_S1(ctx->opcode);
+        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
+        break;
+    case OPC1_16_SSRO_ST_W:
+        r1 = MASK_OP_SSRO_S1(ctx->opcode);
+        const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
+        break;
+/* SR-format */
+    case OPCM_16_SR_SYSTEM:
+        decode_sr_system(env, ctx);
+        break;
+    case OPCM_16_SR_ACCU:
+        decode_sr_accu(env, ctx);
+        break;
+    case OPC1_16_SR_JI:
+        r1 = MASK_OP_SR_S1D(ctx->opcode);
+        gen_compute_branch(ctx, op1, r1, 0, 0, 0);
+        break;
+    case OPC1_16_SR_NOT:
+        r1 = MASK_OP_SR_S1D(ctx->opcode);
+        tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
+        break;
     }
 }
 
-static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
+/*
+ * 32 bit instructions
+ */
+
+/* ABS-format */
+static void decode_abs_ldw(CPUTriCoreState *env, DisasContext *ctx)
+{
+    int32_t op2;
+    int32_t r1;
+    uint32_t address;
+    TCGv temp;
+
+    r1 = MASK_OP_ABS_S1D(ctx->opcode);
+    address = MASK_OP_ABS_OFF18(ctx->opcode);
+    op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+    temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+    switch (op2) {
+    case OPC2_32_ABS_LD_A:
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
+        break;
+    case OPC2_32_ABS_LD_D:
+        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+        break;
+    case OPC2_32_ABS_LD_DA:
+        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+        break;
+    case OPC2_32_ABS_LD_W:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
+        break;
+    }
+
+    tcg_temp_free(temp);
+}
+
+static void decode_abs_ldb(CPUTriCoreState *env, DisasContext *ctx)
+{
+    int32_t op2;
+    int32_t r1;
+    uint32_t address;
+    TCGv temp;
+
+    r1 = MASK_OP_ABS_S1D(ctx->opcode);
+    address = MASK_OP_ABS_OFF18(ctx->opcode);
+    op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+    temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+    switch (op2) {
+    case OPC2_32_ABS_LD_B:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB);
+        break;
+    case OPC2_32_ABS_LD_BU:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
+        break;
+    case OPC2_32_ABS_LD_H:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW);
+        break;
+    case OPC2_32_ABS_LD_HU:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+        break;
+    }
+
+    tcg_temp_free(temp);
+}
+
+static void decode_abs_ldst_swap(CPUTriCoreState *env, DisasContext *ctx)
+{
+    int32_t op2;
+    int32_t r1;
+    uint32_t address;
+    TCGv temp;
+
+    r1 = MASK_OP_ABS_S1D(ctx->opcode);
+    address = MASK_OP_ABS_OFF18(ctx->opcode);
+    op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+    temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+    switch (op2) {
+    case OPC2_32_ABS_LDMST:
+        gen_ldmst(ctx, r1, temp);
+        break;
+    case OPC2_32_ABS_SWAP_W:
+        gen_swap(ctx, r1, temp);
+        break;
+    }
+
+    tcg_temp_free(temp);
+}
+
+static void decode_abs_ldst_context(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int32_t off18;
+
+    off18 = MASK_OP_ABS_OFF18(ctx->opcode);
+    op2   = MASK_OP_ABS_OP2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_ABS_LDLCX:
+        gen_helper_1arg(ldlcx, EA_ABS_FORMAT(off18));
+        break;
+    case OPC2_32_ABS_LDUCX:
+        gen_helper_1arg(lducx, EA_ABS_FORMAT(off18));
+        break;
+    case OPC2_32_ABS_STLCX:
+        gen_helper_1arg(stlcx, EA_ABS_FORMAT(off18));
+        break;
+    case OPC2_32_ABS_STUCX:
+        gen_helper_1arg(stucx, EA_ABS_FORMAT(off18));
+        break;
+    }
+}
+
+static void decode_abs_store(CPUTriCoreState *env, DisasContext *ctx)
+{
+    int32_t op2;
+    int32_t r1;
+    uint32_t address;
+    TCGv temp;
+
+    r1 = MASK_OP_ABS_S1D(ctx->opcode);
+    address = MASK_OP_ABS_OFF18(ctx->opcode);
+    op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+    temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+    switch (op2) {
+    case OPC2_32_ABS_ST_A:
+        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
+        break;
+    case OPC2_32_ABS_ST_D:
+        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+        break;
+    case OPC2_32_ABS_ST_DA:
+        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+        break;
+    case OPC2_32_ABS_ST_W:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
+        break;
+
+    }
+    tcg_temp_free(temp);
+}
+
+static void decode_abs_storeb_h(CPUTriCoreState *env, DisasContext *ctx)
+{
+    int32_t op2;
+    int32_t r1;
+    uint32_t address;
+    TCGv temp;
+
+    r1 = MASK_OP_ABS_S1D(ctx->opcode);
+    address = MASK_OP_ABS_OFF18(ctx->opcode);
+    op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+    temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+    switch (op2) {
+    case OPC2_32_ABS_ST_B:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
+        break;
+    case OPC2_32_ABS_ST_H:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+        break;
+    }
+    tcg_temp_free(temp);
+}
+
+/* Bit-format */
+
+static void decode_bit_andacc(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2, r3;
+    int pos1, pos2;
+
+    r1 = MASK_OP_BIT_S1(ctx->opcode);
+    r2 = MASK_OP_BIT_S2(ctx->opcode);
+    r3 = MASK_OP_BIT_D(ctx->opcode);
+    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+    op2 = MASK_OP_BIT_OP2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_BIT_AND_AND_T:
+        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl);
+        break;
+    case OPC2_32_BIT_AND_ANDN_T:
+        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
+        break;
+    case OPC2_32_BIT_AND_NOR_T:
+        if (TCG_TARGET_HAS_andc_i32) {
+            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                        pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
+        } else {
+            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                        pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_and_tl);
+        }
+        break;
+    case OPC2_32_BIT_AND_OR_T:
+        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl);
+        break;
+    }
+}
+
+static void decode_bit_logical_t(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2, r3;
+    int pos1, pos2;
+    r1 = MASK_OP_BIT_S1(ctx->opcode);
+    r2 = MASK_OP_BIT_S2(ctx->opcode);
+    r3 = MASK_OP_BIT_D(ctx->opcode);
+    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+    op2 = MASK_OP_BIT_OP2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_BIT_AND_T:
+        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_and_tl);
+        break;
+    case OPC2_32_BIT_ANDN_T:
+        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_andc_tl);
+        break;
+    case OPC2_32_BIT_NOR_T:
+        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_nor_tl);
+        break;
+    case OPC2_32_BIT_OR_T:
+        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_or_tl);
+        break;
+    }
+}
+
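+/* D[r3] = D[r1] with bit pos1 replaced by D[r2][pos2]
+   (inverted for OPC2_32_BIT_INSN_T) */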
+static void decode_bit_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2, r3;
+    int pos1, pos2;
+    TCGv temp;
+    op2 = MASK_OP_BIT_OP2(ctx->opcode);
+    r1 = MASK_OP_BIT_S1(ctx->opcode);
+    r2 = MASK_OP_BIT_S2(ctx->opcode);
+    r3 = MASK_OP_BIT_D(ctx->opcode);
+    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+    temp = tcg_temp_new();
+
+    tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2);
+    if (op2 == OPC2_32_BIT_INSN_T) {
+        tcg_gen_not_tl(temp, temp);
+    }
+    tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
+    tcg_temp_free(temp);
+}
+
+static void decode_bit_logical_t2(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+
+    int r1, r2, r3;
+    int pos1, pos2;
+
+    op2 = MASK_OP_BIT_OP2(ctx->opcode);
+    r1 = MASK_OP_BIT_S1(ctx->opcode);
+    r2 = MASK_OP_BIT_S2(ctx->opcode);
+    r3 = MASK_OP_BIT_D(ctx->opcode);
+    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_BIT_NAND_T:
+        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_nand_tl);
+        break;
+    case OPC2_32_BIT_ORN_T:
+        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_orc_tl);
+        break;
+    case OPC2_32_BIT_XNOR_T:
+        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_eqv_tl);
+        break;
+    case OPC2_32_BIT_XOR_T:
+        gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_xor_tl);
+        break;
+    }
+}
+
+static void decode_bit_orand(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+
+    int r1, r2, r3;
+    int pos1, pos2;
+
+    op2 = MASK_OP_BIT_OP2(ctx->opcode);
+    r1 = MASK_OP_BIT_S1(ctx->opcode);
+    r2 = MASK_OP_BIT_S2(ctx->opcode);
+    r3 = MASK_OP_BIT_D(ctx->opcode);
+    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_BIT_OR_AND_T:
+        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl);
+        break;
+    case OPC2_32_BIT_OR_ANDN_T:
+        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
+        break;
+    case OPC2_32_BIT_OR_NOR_T:
+        if (TCG_TARGET_HAS_orc_i32) {
+            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                        pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
+        } else {
+            gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                        pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl);
+        }
+        break;
+    case OPC2_32_BIT_OR_OR_T:
+        gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl);
+        break;
+    }
+}
+
+static void decode_bit_sh_logic1(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2, r3;
+    int pos1, pos2;
+    TCGv temp;
+
+    op2 = MASK_OP_BIT_OP2(ctx->opcode);
+    r1 = MASK_OP_BIT_S1(ctx->opcode);
+    r2 = MASK_OP_BIT_S2(ctx->opcode);
+    r3 = MASK_OP_BIT_D(ctx->opcode);
+    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+    temp = tcg_temp_new();
+
+    switch (op2) {
+    case OPC2_32_BIT_SH_AND_T:
+        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_and_tl);
+        break;
+    case OPC2_32_BIT_SH_ANDN_T:
+        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_andc_tl);
+        break;
+    case OPC2_32_BIT_SH_NOR_T:
+        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_nor_tl);
+        break;
+    case OPC2_32_BIT_SH_OR_T:
+        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_or_tl);
+        break;
+    }
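+    /* the SH variants accumulate: D[c] is shifted left by one and the
+       freshly computed bit lands in the vacated LSB */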
+    tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
+    tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+    tcg_temp_free(temp);
+}
+
+static void decode_bit_sh_logic2(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2, r3;
+    int pos1, pos2;
+    TCGv temp;
+
+    op2 = MASK_OP_BIT_OP2(ctx->opcode);
+    r1 = MASK_OP_BIT_S1(ctx->opcode);
+    r2 = MASK_OP_BIT_S2(ctx->opcode);
+    r3 = MASK_OP_BIT_D(ctx->opcode);
+    pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+    pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+    temp = tcg_temp_new();
+
+    switch (op2) {
+    case OPC2_32_BIT_SH_NAND_T:
+        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_nand_tl);
+        break;
+    case OPC2_32_BIT_SH_ORN_T:
+        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_orc_tl);
+        break;
+    case OPC2_32_BIT_SH_XNOR_T:
+        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_eqv_tl);
+        break;
+    case OPC2_32_BIT_SH_XOR_T:
+        gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+                    pos1, pos2, &tcg_gen_xor_tl);
+        break;
+    }
+    tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
+    tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+    tcg_temp_free(temp);
+}
+
+/* BO-format */
+
+static void decode_bo_addrmode_post_pre_base(CPUTriCoreState *env,
+                                             DisasContext *ctx)
 {
+    uint32_t op2;
+    uint32_t off10;
+    int32_t r1, r2;
+    TCGv temp;
+
+    r1 = MASK_OP_BO_S1D(ctx->opcode);
+    r2 = MASK_OP_BO_S2(ctx->opcode);
+    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+    op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_BO_CACHEA_WI_SHORTOFF:
+    case OPC2_32_BO_CACHEA_W_SHORTOFF:
+    case OPC2_32_BO_CACHEA_I_SHORTOFF:
+        /* instruction to access the cache */
+        break;
+    case OPC2_32_BO_CACHEA_WI_POSTINC:
+    case OPC2_32_BO_CACHEA_W_POSTINC:
+    case OPC2_32_BO_CACHEA_I_POSTINC:
+        /* instruction to access the cache, but we still need to handle
+           the addressing mode */
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_CACHEA_WI_PREINC:
+    case OPC2_32_BO_CACHEA_W_PREINC:
+    case OPC2_32_BO_CACHEA_I_PREINC:
+        /* instruction to access the cache, but we still need to handle
+           the addressing mode */
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_CACHEI_WI_SHORTOFF:
+    case OPC2_32_BO_CACHEI_W_SHORTOFF:
+        /* TODO: Raise illegal opcode trap,
+                 if !tricore_feature(TRICORE_FEATURE_131) */
+        break;
+    case OPC2_32_BO_CACHEI_W_POSTINC:
+    case OPC2_32_BO_CACHEI_WI_POSTINC:
+        if (tricore_feature(env, TRICORE_FEATURE_131)) {
+            tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        } /* TODO: else raise illegal opcode trap */
+        break;
+    case OPC2_32_BO_CACHEI_W_PREINC:
+    case OPC2_32_BO_CACHEI_WI_PREINC:
+        if (tricore_feature(env, TRICORE_FEATURE_131)) {
+            tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        } /* TODO: else raise illegal opcode trap */
+        break;
+    case OPC2_32_BO_ST_A_SHORTOFF:
+        gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
+        break;
+    case OPC2_32_BO_ST_A_POSTINC:
+        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_LESL);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_ST_A_PREINC:
+        gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
+        break;
+    case OPC2_32_BO_ST_B_SHORTOFF:
+        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+        break;
+    case OPC2_32_BO_ST_B_POSTINC:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_UB);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_ST_B_PREINC:
+        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+        break;
+    case OPC2_32_BO_ST_D_SHORTOFF:
+        gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+                            off10, ctx);
+        break;
+    case OPC2_32_BO_ST_D_POSTINC:
+        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_ST_D_PREINC:
+        temp = tcg_temp_new();
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+        tcg_temp_free(temp);
+        break;
+    case OPC2_32_BO_ST_DA_SHORTOFF:
+        gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+                            off10, ctx);
+        break;
+    case OPC2_32_BO_ST_DA_POSTINC:
+        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_ST_DA_PREINC:
+        temp = tcg_temp_new();
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+        tcg_temp_free(temp);
+        break;
+    case OPC2_32_BO_ST_H_SHORTOFF:
+        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+        break;
+    case OPC2_32_BO_ST_H_POSTINC:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_LEUW);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_ST_H_PREINC:
+        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+        break;
+    case OPC2_32_BO_ST_Q_SHORTOFF:
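+        /* ST.Q stores the upper half-word of D[a] */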
+        temp = tcg_temp_new();
+        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+        gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
+        tcg_temp_free(temp);
+        break;
+    case OPC2_32_BO_ST_Q_POSTINC:
+        temp = tcg_temp_new();
+        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+        tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_LEUW);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        tcg_temp_free(temp);
+        break;
+    case OPC2_32_BO_ST_Q_PREINC:
+        temp = tcg_temp_new();
+        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+        gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
+        tcg_temp_free(temp);
+        break;
+    case OPC2_32_BO_ST_W_SHORTOFF:
+        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+        break;
+    case OPC2_32_BO_ST_W_POSTINC:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_LEUL);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_ST_W_PREINC:
+        gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+        break;
+    }
+}
+
+static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState *env,
+                                                   DisasContext *ctx)
+{
+    uint32_t op2;
+    uint32_t off10;
+    int32_t r1, r2;
+    TCGv temp, temp2, temp3;
+
+    r1 = MASK_OP_BO_S1D(ctx->opcode);
+    r2 = MASK_OP_BO_S2(ctx->opcode);
+    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+    op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+    temp = tcg_temp_new();
+    temp2 = tcg_temp_new();
+    temp3 = tcg_const_i32(off10);
+
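+    /* both modes address EA = A[b] + index, with the index kept in the
+       low half-word of A[b+1]; the helpers then advance that index
+       (bit-reversed resp. modulo the buffer length in the high half) */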
+    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
+    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+
+    switch (op2) {
+    case OPC2_32_BO_CACHEA_WI_BR:
+    case OPC2_32_BO_CACHEA_W_BR:
+    case OPC2_32_BO_CACHEA_I_BR:
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_CACHEA_WI_CIRC:
+    case OPC2_32_BO_CACHEA_W_CIRC:
+    case OPC2_32_BO_CACHEA_I_CIRC:
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_ST_A_BR:
+        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_ST_A_CIRC:
+        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_ST_B_BR:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_ST_B_CIRC:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_ST_D_BR:
+        gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_ST_D_CIRC:
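+        /* the second word of E[a] goes to (index + 4) modulo the buffer
+           length from A[b+1][31:16], so it wraps inside the buffer */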
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+        tcg_gen_addi_tl(temp, temp, 4);
+        tcg_gen_rem_tl(temp, temp, temp2);
+        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_ST_DA_BR:
+        gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_ST_DA_CIRC:
+        tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+        tcg_gen_addi_tl(temp, temp, 4);
+        tcg_gen_rem_tl(temp, temp, temp2);
+        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+        tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_ST_H_BR:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_ST_H_CIRC:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_ST_Q_BR:
+        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+        tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_ST_Q_CIRC:
+        tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+        tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_ST_W_BR:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_ST_W_CIRC:
+        tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    }
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free(temp3);
+}
+
+static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState *env,
+                                                DisasContext *ctx)
+{
+    uint32_t op2;
+    uint32_t off10;
+    int32_t r1, r2;
+    TCGv temp;
+
+    r1 = MASK_OP_BO_S1D(ctx->opcode);
+    r2 = MASK_OP_BO_S2(ctx->opcode);
+    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+    op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_BO_LD_A_SHORTOFF:
+        gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+        break;
+    case OPC2_32_BO_LD_A_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_LEUL);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_LD_A_PREINC:
+        gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+        break;
+    case OPC2_32_BO_LD_B_SHORTOFF:
+        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
+        break;
+    case OPC2_32_BO_LD_B_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_SB);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_LD_B_PREINC:
+        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
+        break;
+    case OPC2_32_BO_LD_BU_SHORTOFF:
+        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+        break;
+    case OPC2_32_BO_LD_BU_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_UB);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_LD_BU_PREINC:
+        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+        break;
+    case OPC2_32_BO_LD_D_SHORTOFF:
+        gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+                            off10, ctx);
+        break;
+    case OPC2_32_BO_LD_D_POSTINC:
+        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_LD_D_PREINC:
+        temp = tcg_temp_new();
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+        tcg_temp_free(temp);
+        break;
+    case OPC2_32_BO_LD_DA_SHORTOFF:
+        gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+                            off10, ctx);
+        break;
+    case OPC2_32_BO_LD_DA_POSTINC:
+        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_LD_DA_PREINC:
+        temp = tcg_temp_new();
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+        tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+        tcg_temp_free(temp);
+        break;
+    case OPC2_32_BO_LD_H_SHORTOFF:
+        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
+        break;
+    case OPC2_32_BO_LD_H_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_LESW);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_LD_H_PREINC:
+        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
+        break;
+    case OPC2_32_BO_LD_HU_SHORTOFF:
+        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+        break;
+    case OPC2_32_BO_LD_HU_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_LEUW);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_LD_HU_PREINC:
+        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+        break;
+    case OPC2_32_BO_LD_Q_SHORTOFF:
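+        /* LD.Q loads a half-word into the upper half of D[a]; the lower
+           half is cleared */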
+        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+        break;
+    case OPC2_32_BO_LD_Q_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_LEUW);
+        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_LD_Q_PREINC:
+        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+        break;
+    case OPC2_32_BO_LD_W_SHORTOFF:
+        gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+        break;
+    case OPC2_32_BO_LD_W_POSTINC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+                           MO_LEUL);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_LD_W_PREINC:
+        gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+        break;
+    }
+}
+
+static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState *env,
+                                                DisasContext *ctx)
+{
+    uint32_t op2;
+    uint32_t off10;
+    int r1, r2;
+
+    TCGv temp, temp2, temp3;
+
+    r1 = MASK_OP_BO_S1D(ctx->opcode);
+    r2 = MASK_OP_BO_S2(ctx->opcode);
+    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+    op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+    temp = tcg_temp_new();
+    temp2 = tcg_temp_new();
+    temp3 = tcg_const_i32(off10);
+
+    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
+    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+
+    switch (op2) {
+    case OPC2_32_BO_LD_A_BR:
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_LD_A_CIRC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_LD_B_BR:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_LD_B_CIRC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_LD_BU_BR:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_LD_BU_CIRC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_LD_D_BR:
+        gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_LD_D_CIRC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+        tcg_gen_addi_tl(temp, temp, 4);
+        tcg_gen_rem_tl(temp, temp, temp2);
+        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_LD_DA_BR:
+        gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_LD_DA_CIRC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+        tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+        tcg_gen_addi_tl(temp, temp, 4);
+        tcg_gen_rem_tl(temp, temp, temp2);
+        tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_LD_H_BR:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_LD_H_CIRC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_LD_HU_BR:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_LD_HU_CIRC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_LD_Q_BR:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_LD_Q_CIRC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_LD_W_BR:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_LD_W_CIRC:
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    }
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free(temp3);
+}
+
+static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState *env,
+                                                   DisasContext *ctx)
+{
+    uint32_t op2;
+    uint32_t off10;
+    int r1, r2;
+
+    TCGv temp, temp2;
+
+    r1 = MASK_OP_BO_S1D(ctx->opcode);
+    r2 = MASK_OP_BO_S2(ctx->opcode);
+    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+    op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+    temp = tcg_temp_new();
+    temp2 = tcg_temp_new();
+
+    switch (op2) {
+    case OPC2_32_BO_LDLCX_SHORTOFF:
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+        gen_helper_ldlcx(cpu_env, temp);
+        break;
+    case OPC2_32_BO_LDMST_SHORTOFF:
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+        gen_ldmst(ctx, r1, temp);
+        break;
+    case OPC2_32_BO_LDMST_POSTINC:
+        gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_LDMST_PREINC:
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
+        break;
+    case OPC2_32_BO_LDUCX_SHORTOFF:
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+        gen_helper_lducx(cpu_env, temp);
+        break;
+    case OPC2_32_BO_LEA_SHORTOFF:
+        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_STLCX_SHORTOFF:
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+        gen_helper_stlcx(cpu_env, temp);
+        break;
+    case OPC2_32_BO_STUCX_SHORTOFF:
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+        gen_helper_stucx(cpu_env, temp);
+        break;
+    case OPC2_32_BO_SWAP_W_SHORTOFF:
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+        gen_swap(ctx, r1, temp);
+        break;
+    case OPC2_32_BO_SWAP_W_POSTINC:
+        gen_swap(ctx, r1, cpu_gpr_a[r2]);
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        break;
+    case OPC2_32_BO_SWAP_W_PREINC:
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+        gen_swap(ctx, r1, cpu_gpr_a[r2]);
+        break;
+    }
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState *env,
+                                                         DisasContext *ctx)
+{
+    uint32_t op2;
+    uint32_t off10;
+    int r1, r2;
+
+    TCGv temp, temp2, temp3;
+
+    r1 = MASK_OP_BO_S1D(ctx->opcode);
+    r2 = MASK_OP_BO_S2(ctx->opcode);
+    off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+    op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+    temp = tcg_temp_new();
+    temp2 = tcg_temp_new();
+    temp3 = tcg_const_i32(off10);
+
+    tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
+    tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+
+    switch (op2) {
+    case OPC2_32_BO_LDMST_BR:
+        gen_ldmst(ctx, r1, temp2);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_LDMST_CIRC:
+        gen_ldmst(ctx, r1, temp2);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    case OPC2_32_BO_SWAP_W_BR:
+        gen_swap(ctx, r1, temp2);
+        gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+        break;
+    case OPC2_32_BO_SWAP_W_CIRC:
+        gen_swap(ctx, r1, temp2);
+        gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+        break;
+    }
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+    tcg_temp_free(temp3);
+}
+
+static void decode_bol_opc(CPUTriCoreState *env, DisasContext *ctx, int32_t op1)
+{
+    int r1, r2;
+    int32_t address;
+    TCGv temp;
+
+    r1 = MASK_OP_BOL_S1D(ctx->opcode);
+    r2 = MASK_OP_BOL_S2(ctx->opcode);
+    address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode);
+
+    switch (op1) {
+    case OPC1_32_BOL_LD_A_LONGOFF:
+        temp = tcg_temp_new();
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
+        tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
+        tcg_temp_free(temp);
+        break;
+    case OPC1_32_BOL_LD_W_LONFOFF:
+        temp = tcg_temp_new();
+        tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
+        tcg_temp_free(temp);
+        break;
+    case OPC1_32_BOL_LEA_LONGOFF:
+        tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
+        break;
+    case OPC1_32_BOL_ST_A_LONGOFF:
+        if (tricore_feature(env, TRICORE_FEATURE_16)) {
+            gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], address, MO_LEUL);
+        } else {
+            /* raise illegal opcode trap */
+        }
+        break;
+    case OPC1_32_BOL_ST_W_LONGOFF:
+        gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUL);
+        break;
+    }
+}
+
+/* RC format */
+static void decode_rc_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2;
+    int32_t const9;
+    TCGv temp;
+
+    r2 = MASK_OP_RC_D(ctx->opcode);
+    r1 = MASK_OP_RC_S1(ctx->opcode);
+    const9 = MASK_OP_RC_CONST9(ctx->opcode);
+    op2 = MASK_OP_RC_OP2(ctx->opcode);
+
+    temp = tcg_temp_new();
+
+    switch (op2) {
+    case OPC2_32_RC_AND:
+        tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_ANDN:
+        tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
+        break;
+    case OPC2_32_RC_NAND:
+        tcg_gen_movi_tl(temp, const9);
+        tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
+        break;
+    case OPC2_32_RC_NOR:
+        tcg_gen_movi_tl(temp, const9);
+        tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
+        break;
+    case OPC2_32_RC_OR:
+        tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_ORN:
+        tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
+        break;
+    case OPC2_32_RC_SH:
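+        /* the shift count is a signed 6 bit field; a negative count means
+           a shift to the right */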
+        const9 = sextract32(const9, 0, 6);
+        gen_shi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_SH_H:
+        const9 = sextract32(const9, 0, 5);
+        gen_sh_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_SHA:
+        const9 = sextract32(const9, 0, 6);
+        gen_shaci(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_SHA_H:
+        const9 = sextract32(const9, 0, 5);
+        gen_sha_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_SHAS:
+        gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_XNOR:
+        tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]);
+        break;
+    case OPC2_32_RC_XOR:
+        tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    }
+    tcg_temp_free(temp);
+}
+
+static void decode_rc_accumulator(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2;
+    int16_t const9;
+
+    TCGv temp;
+
+    r2 = MASK_OP_RC_D(ctx->opcode);
+    r1 = MASK_OP_RC_S1(ctx->opcode);
+    const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);
+
+    op2 = MASK_OP_RC_OP2(ctx->opcode);
+
+    temp = tcg_temp_new();
+
+    switch (op2) {
+    case OPC2_32_RC_ABSDIF:
+        gen_absdifi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_ABSDIFS:
+        gen_absdifsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_ADD:
+        gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_ADDC:
+        gen_addci_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_ADDS:
+        gen_addsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_ADDS_U:
+        gen_addsui(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_ADDX:
+        gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_AND_EQ:
+        gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_and_tl);
+        break;
+    case OPC2_32_RC_AND_GE:
+        gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_and_tl);
+        break;
+    case OPC2_32_RC_AND_GE_U:
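+        /* the unsigned compares use const9 zero-extended */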
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_and_tl);
+        break;
+    case OPC2_32_RC_AND_LT:
+        gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_and_tl);
+        break;
+    case OPC2_32_RC_AND_LT_U:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_and_tl);
+        break;
+    case OPC2_32_RC_AND_NE:
+        gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_and_tl);
+        break;
+    case OPC2_32_RC_EQ:
+        tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_EQANY_B:
+        gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_EQANY_H:
+        gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_GE:
+        tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_GE_U:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_LT:
+        tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_LT_U:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_MAX:
+        tcg_gen_movi_tl(temp, const9);
+        tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+                           cpu_gpr_d[r1], temp);
+        break;
+    case OPC2_32_RC_MAX_U:
+        tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
+        tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+                           cpu_gpr_d[r1], temp);
+        break;
+    case OPC2_32_RC_MIN:
+        tcg_gen_movi_tl(temp, const9);
+        tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+                           cpu_gpr_d[r1], temp);
+        break;
+    case OPC2_32_RC_MIN_U:
+        tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
+        tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+                           cpu_gpr_d[r1], temp);
+        break;
+    case OPC2_32_RC_NE:
+        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_OR_EQ:
+        gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_or_tl);
+        break;
+    case OPC2_32_RC_OR_GE:
+        gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_or_tl);
+        break;
+    case OPC2_32_RC_OR_GE_U:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_or_tl);
+        break;
+    case OPC2_32_RC_OR_LT:
+        gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_or_tl);
+        break;
+    case OPC2_32_RC_OR_LT_U:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_or_tl);
+        break;
+    case OPC2_32_RC_OR_NE:
+        gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_or_tl);
+        break;
+    case OPC2_32_RC_RSUB:
+        tcg_gen_movi_tl(temp, const9);
+        gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
+        break;
+    case OPC2_32_RC_RSUBS:
+        tcg_gen_movi_tl(temp, const9);
+        gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
+        break;
+    case OPC2_32_RC_RSUBS_U:
+        tcg_gen_movi_tl(temp, const9);
+        gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
+        break;
+    case OPC2_32_RC_SH_EQ:
+        gen_sh_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_SH_GE:
+        gen_sh_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_SH_GE_U:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        gen_sh_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_SH_LT:
+        gen_sh_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_SH_LT_U:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        gen_sh_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_SH_NE:
+        gen_sh_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_XOR_EQ:
+        gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_xor_tl);
+        break;
+    case OPC2_32_RC_XOR_GE:
+        gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_xor_tl);
+        break;
+    case OPC2_32_RC_XOR_GE_U:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_xor_tl);
+        break;
+    case OPC2_32_RC_XOR_LT:
+        gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_xor_tl);
+        break;
+    case OPC2_32_RC_XOR_LT_U:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_xor_tl);
+        break;
+    case OPC2_32_RC_XOR_NE:
+        gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+                               const9, &tcg_gen_xor_tl);
+        break;
+    }
+    tcg_temp_free(temp);
+}
+
+static void decode_rc_serviceroutine(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    uint32_t const9;
+
+    op2 = MASK_OP_RC_OP2(ctx->opcode);
+    const9 = MASK_OP_RC_CONST9(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_RC_BISR:
+        gen_helper_1arg(bisr, const9);
+        break;
+    case OPC2_32_RC_SYSCALL:
+        /* TODO: Add exception generation */
+        break;
+    }
+}
+
+static void decode_rc_mul(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2;
+    int16_t const9;
+
+    r2 = MASK_OP_RC_D(ctx->opcode);
+    r1 = MASK_OP_RC_S1(ctx->opcode);
+    const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);
+
+    op2 = MASK_OP_RC_OP2(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_RC_MUL_32:
+        gen_muli_i32s(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_MUL_64:
+        gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_MULS_32:
+        gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_MUL_U_64:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
+        break;
+    case OPC2_32_RC_MULS_U_32:
+        const9 = MASK_OP_RC_CONST9(ctx->opcode);
+        gen_mulsui_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+        break;
+    }
+}
+
+/* RCPW format */
+static void decode_rcpw_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r2;
+    int32_t pos, width, const4;
+
+    TCGv temp;
+
+    op2    = MASK_OP_RCPW_OP2(ctx->opcode);
+    r1     = MASK_OP_RCPW_S1(ctx->opcode);
+    r2     = MASK_OP_RCPW_D(ctx->opcode);
+    const4 = MASK_OP_RCPW_CONST4(ctx->opcode);
+    width  = MASK_OP_RCPW_WIDTH(ctx->opcode);
+    pos    = MASK_OP_RCPW_POS(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_RCPW_IMASK:
+        /* if pos + width > 32 undefined result */
+        if (pos + width <= 32) {
+            tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos);
+            tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos));
+        }
+        break;
+    case OPC2_32_RCPW_INSERT:
+        /* if pos + width > 32 undefined result */
+        if (pos + width <= 32) {
+            temp = tcg_const_i32(const4);
+            tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
+            tcg_temp_free(temp);
+        }
+        break;
+    }
+}
+
+/* RCRW format */
+
+static void decode_rcrw_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r3, r4;
+    int32_t width, const4;
+
+    TCGv temp, temp2, temp3;
+
+    op2    = MASK_OP_RCRW_OP2(ctx->opcode);
+    r1     = MASK_OP_RCRW_S1(ctx->opcode);
+    r3     = MASK_OP_RCRW_S3(ctx->opcode);
+    r4     = MASK_OP_RCRW_D(ctx->opcode);
+    width  = MASK_OP_RCRW_WIDTH(ctx->opcode);
+    const4 = MASK_OP_RCRW_CONST4(ctx->opcode);
+
+    temp = tcg_temp_new();
+    temp2 = tcg_temp_new();
+
+    switch (op2) {
+    case OPC2_32_RCRW_IMASK:
+        tcg_gen_andi_tl(temp, cpu_gpr_d[r4], 0x1f);
+        tcg_gen_movi_tl(temp2, (1 << width) - 1);
+        tcg_gen_shl_tl(cpu_gpr_d[r3 + 1], temp2, temp);
+        tcg_gen_movi_tl(temp2, const4);
+        tcg_gen_shl_tl(cpu_gpr_d[r3], temp2, temp);
+        break;
+    case OPC2_32_RCRW_INSERT:
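+        /* insert a constant value with constant width at a position taken
+           from D[d][4:0] at runtime */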
+        temp3 = tcg_temp_new();
+
+        tcg_gen_movi_tl(temp, width);
+        tcg_gen_movi_tl(temp2, const4);
+        tcg_gen_andi_tl(temp3, cpu_gpr_d[r4], 0x1f);
+        gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp2, temp, temp3);
+
+        tcg_temp_free(temp3);
+        break;
+    }
+    tcg_temp_free(temp);
+    tcg_temp_free(temp2);
+}
+
+/* RCR format */
+
+static void decode_rcr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r3, r4;
+    int32_t const9;
+
+    TCGv temp, temp2;
+
+    op2 = MASK_OP_RCR_OP2(ctx->opcode);
+    r1 = MASK_OP_RCR_S1(ctx->opcode);
+    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
+    r3 = MASK_OP_RCR_S3(ctx->opcode);
+    r4 = MASK_OP_RCR_D(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_RCR_CADD:
+        gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
+                      cpu_gpr_d[r4]);
+        break;
+    case OPC2_32_RCR_CADDN:
+        gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
+                      cpu_gpr_d[r4]);
+        break;
+    case OPC2_32_RCR_SEL:
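+        /* select: result = (condition register != 0) ? D[a] : const9 */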
+        temp = tcg_const_i32(0);
+        temp2 = tcg_const_i32(const9);
+        tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r4], temp,
+                           cpu_gpr_d[r1], temp2);
+        tcg_temp_free(temp);
+        tcg_temp_free(temp2);
+        break;
+    case OPC2_32_RCR_SELN:
+        temp = tcg_const_i32(0);
+        temp2 = tcg_const_i32(const9);
+        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r4], temp,
+                           cpu_gpr_d[r1], temp2);
+        tcg_temp_free(temp);
+        tcg_temp_free(temp2);
+        break;
+    }
+}
+
+static void decode_rcr_madd(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r3, r4;
+    int32_t const9;
+
+    op2 = MASK_OP_RCR_OP2(ctx->opcode);
+    r1 = MASK_OP_RCR_S1(ctx->opcode);
+    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
+    r3 = MASK_OP_RCR_S3(ctx->opcode);
+    r4 = MASK_OP_RCR_D(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_RCR_MADD_32:
+        gen_maddi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+        break;
+    case OPC2_32_RCR_MADD_64:
+        gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+        break;
+    case OPC2_32_RCR_MADDS_32:
+        gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+        break;
+    case OPC2_32_RCR_MADDS_64:
+        gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+        break;
+    case OPC2_32_RCR_MADD_U_64:
+        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+        gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+        break;
+    case OPC2_32_RCR_MADDS_U_32:
+        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+        gen_maddsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+        break;
+    case OPC2_32_RCR_MADDS_U_64:
+        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+        gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+        break;
+    }
+}
+
+static void decode_rcr_msub(CPUTriCoreState *env, DisasContext *ctx)
+{
+    uint32_t op2;
+    int r1, r3, r4;
+    int32_t const9;
+
+    op2 = MASK_OP_RCR_OP2(ctx->opcode);
+    r1 = MASK_OP_RCR_S1(ctx->opcode);
+    const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
+    r3 = MASK_OP_RCR_S3(ctx->opcode);
+    r4 = MASK_OP_RCR_D(ctx->opcode);
+
+    switch (op2) {
+    case OPC2_32_RCR_MSUB_32:
+        gen_msubi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+        break;
+    case OPC2_32_RCR_MSUB_64:
+        gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+        break;
+    case OPC2_32_RCR_MSUBS_32:
+        gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+        break;
+    case OPC2_32_RCR_MSUBS_64:
+        gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+        break;
+    case OPC2_32_RCR_MSUB_U_64:
+        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+        gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+        break;
+    case OPC2_32_RCR_MSUBS_U_32:
+        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+        gen_msubsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+        break;
+    case OPC2_32_RCR_MSUBS_U_64:
+        const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+        gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+                       cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+        break;
+    }
+}
+
+/* RLC format */
+
+static void decode_rlc_opc(CPUTriCoreState *env, DisasContext *ctx,
+                           uint32_t op1)
+{
+    int32_t const16;
+    int r1, r2;
+
+    const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode);
+    r1      = MASK_OP_RLC_S1(ctx->opcode);
+    r2      = MASK_OP_RLC_D(ctx->opcode);
+
+    switch (op1) {
+    case OPC1_32_RLC_ADDI:
+        gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const16);
+        break;
+    case OPC1_32_RLC_ADDIH:
+        gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
+        break;
+    case OPC1_32_RLC_ADDIH_A:
+        tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
+        break;
+    case OPC1_32_RLC_MFCR:
+        gen_mfcr(env, cpu_gpr_d[r2], const16);
+        break;
+    case OPC1_32_RLC_MOV:
+        tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+        break;
+    case OPC1_32_RLC_MOV_U:
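+        /* MOV.U takes const16 zero-extended */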
+        const16 = MASK_OP_RLC_CONST16(ctx->opcode);
+        tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+        break;
+    case OPC1_32_RLC_MOV_H:
+        tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
+        break;
+    case OPC1_32_RLC_MOVH_A:
+        tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
+        break;
+    case OPC1_32_RLC_MTCR:
+        gen_mtcr(env, ctx, cpu_gpr_d[r2], const16);
+        break;
+    }
+}
+
+static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
+{
+    int op1;
+    int32_t r1, r2, r3;
+    int32_t address, const16;
+    int8_t b, const4;
+    int32_t bpos;
+    TCGv temp, temp2, temp3;
+
+    op1 = MASK_OP_MAJOR(ctx->opcode);
+
+    /* handle the JNZ.T opcode being only 6 bits long */
+    if (unlikely((op1 & 0x3f) == OPCM_32_BRN_JTT)) {
+        op1 = OPCM_32_BRN_JTT;
+    }
+
+    switch (op1) {
+/* ABS-format */
+    case OPCM_32_ABS_LDW:
+        decode_abs_ldw(env, ctx);
+        break;
+    case OPCM_32_ABS_LDB:
+        decode_abs_ldb(env, ctx);
+        break;
+    case OPCM_32_ABS_LDMST_SWAP:
+        decode_abs_ldst_swap(env, ctx);
+        break;
+    case OPCM_32_ABS_LDST_CONTEXT:
+        decode_abs_ldst_context(env, ctx);
+        break;
+    case OPCM_32_ABS_STORE:
+        decode_abs_store(env, ctx);
+        break;
+    case OPCM_32_ABS_STOREB_H:
+        decode_abs_storeb_h(env, ctx);
+        break;
+    case OPC1_32_ABS_STOREQ:
+        address = MASK_OP_ABS_OFF18(ctx->opcode);
+        r1 = MASK_OP_ABS_S1D(ctx->opcode);
+        temp = tcg_const_i32(EA_ABS_FORMAT(address));
+        temp2 = tcg_temp_new();
+
+        tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
+        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
+
+        tcg_temp_free(temp2);
+        tcg_temp_free(temp);
+        break;
+    case OPC1_32_ABS_LD_Q:
+        address = MASK_OP_ABS_OFF18(ctx->opcode);
+        r1 = MASK_OP_ABS_S1D(ctx->opcode);
+        temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+        tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+        tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+
+        tcg_temp_free(temp);
+        break;
+    case OPC1_32_ABS_LEA:
+        address = MASK_OP_ABS_OFF18(ctx->opcode);
+        r1 = MASK_OP_ABS_S1D(ctx->opcode);
+        tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
+        break;
+/* ABSB-format */
+    case OPC1_32_ABSB_ST_T:
+        address = MASK_OP_ABS_OFF18(ctx->opcode);
+        b = MASK_OP_ABSB_B(ctx->opcode);
+        bpos = MASK_OP_ABSB_BPOS(ctx->opcode);
+
+        temp = tcg_const_i32(EA_ABS_FORMAT(address));
+        temp2 = tcg_temp_new();
+
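+        /* read-modify-write: clear bit bpos of the byte at EA, then OR in
+           the single bit value b */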
+        tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
+        tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
+        tcg_gen_ori_tl(temp2, temp2, (b << bpos));
+        tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
+
+        tcg_temp_free(temp);
+        tcg_temp_free(temp2);
+        break;
+/* B-format */
+    case OPC1_32_B_CALL:
+    case OPC1_32_B_CALLA:
+    case OPC1_32_B_J:
+    case OPC1_32_B_JA:
+    case OPC1_32_B_JL:
+    case OPC1_32_B_JLA:
+        address = MASK_OP_B_DISP24(ctx->opcode);
+        gen_compute_branch(ctx, op1, 0, 0, 0, address);
+        break;
+/* Bit-format */
+    case OPCM_32_BIT_ANDACC:
+        decode_bit_andacc(env, ctx);
+        break;
+    case OPCM_32_BIT_LOGICAL_T1:
+        decode_bit_logical_t(env, ctx);
+        break;
+    case OPCM_32_BIT_INSERT:
+        decode_bit_insert(env, ctx);
+        break;
+    case OPCM_32_BIT_LOGICAL_T2:
+        decode_bit_logical_t2(env, ctx);
+        break;
+    case OPCM_32_BIT_ORAND:
+        decode_bit_orand(env, ctx);
+        break;
+    case OPCM_32_BIT_SH_LOGIC1:
+        decode_bit_sh_logic1(env, ctx);
+        break;
+    case OPCM_32_BIT_SH_LOGIC2:
+        decode_bit_sh_logic2(env, ctx);
+        break;
+    /* BO Format */
+    case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
+        decode_bo_addrmode_post_pre_base(env, ctx);
+        break;
+    case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
+        decode_bo_addrmode_bitreverse_circular(env, ctx);
+        break;
+    case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
+        decode_bo_addrmode_ld_post_pre_base(env, ctx);
+        break;
+    case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
+        decode_bo_addrmode_ld_bitreverse_circular(env, ctx);
+        break;
+    case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
+        decode_bo_addrmode_stctx_post_pre_base(env, ctx);
+        break;
+    case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
+        decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx);
+        break;
+/* BOL-format */
+    case OPC1_32_BOL_LD_A_LONGOFF:
+    case OPC1_32_BOL_LD_W_LONFOFF:
+    case OPC1_32_BOL_LEA_LONGOFF:
+    case OPC1_32_BOL_ST_W_LONGOFF:
+    case OPC1_32_BOL_ST_A_LONGOFF:
+        decode_bol_opc(env, ctx, op1);
+        break;
+/* BRC Format */
+    case OPCM_32_BRC_EQ_NEQ:
+    case OPCM_32_BRC_GE:
+    case OPCM_32_BRC_JLT:
+    case OPCM_32_BRC_JNE:
+        const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
+        address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
+        r1 = MASK_OP_BRC_S1(ctx->opcode);
+        gen_compute_branch(ctx, op1, r1, 0, const4, address);
+        break;
+/* BRN Format */
+    case OPCM_32_BRN_JTT:
+        address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
+        r1 = MASK_OP_BRN_S1(ctx->opcode);
+        gen_compute_branch(ctx, op1, r1, 0, 0, address);
+        break;
+/* BRR Format */
+    case OPCM_32_BRR_EQ_NEQ:
+    case OPCM_32_BRR_ADDR_EQ_NEQ:
+    case OPCM_32_BRR_GE:
+    case OPCM_32_BRR_JLT:
+    case OPCM_32_BRR_JNE:
+    case OPCM_32_BRR_JNZ:
+    case OPCM_32_BRR_LOOP:
+        address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
+        r2 = MASK_OP_BRR_S2(ctx->opcode);
+        r1 = MASK_OP_BRR_S1(ctx->opcode);
+        gen_compute_branch(ctx, op1, r1, r2, 0, address);
+        break;
+/* RC Format */
+    case OPCM_32_RC_LOGICAL_SHIFT:
+        decode_rc_logical_shift(env, ctx);
+        break;
+    case OPCM_32_RC_ACCUMULATOR:
+        decode_rc_accumulator(env, ctx);
+        break;
+    case OPCM_32_RC_SERVICEROUTINE:
+        decode_rc_serviceroutine(env, ctx);
+        break;
+    case OPCM_32_RC_MUL:
+        decode_rc_mul(env, ctx);
+        break;
+/* RCPW Format */
+    case OPCM_32_RCPW_MASK_INSERT:
+        decode_rcpw_insert(env, ctx);
+        break;
+/* RCRR Format */
+    case OPC1_32_RCRR_INSERT:
+        r1 = MASK_OP_RCRR_S1(ctx->opcode);
+        r2 = MASK_OP_RCRR_S3(ctx->opcode);
+        r3 = MASK_OP_RCRR_D(ctx->opcode);
+        const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
+        temp = tcg_const_i32(const16);
+        temp2 = tcg_temp_new(); /* width*/
+        temp3 = tcg_temp_new(); /* pos */
+
+        tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
+        tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
+
+        gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);
+
+        tcg_temp_free(temp);
+        tcg_temp_free(temp2);
+        tcg_temp_free(temp3);
+        break;
+/* RCRW Format */
+    case OPCM_32_RCRW_MASK_INSERT:
+        decode_rcrw_insert(env, ctx);
+        break;
+/* RCR Format */
+    case OPCM_32_RCR_COND_SELECT:
+        decode_rcr_cond_select(env, ctx);
+        break;
+    case OPCM_32_RCR_MADD:
+        decode_rcr_madd(env, ctx);
+        break;
+    case OPCM_32_RCR_MSUB:
+        decode_rcr_msub(env, ctx);
+        break;
+/* RLC Format */
+    case OPC1_32_RLC_ADDI:
+    case OPC1_32_RLC_ADDIH:
+    case OPC1_32_RLC_ADDIH_A:
+    case OPC1_32_RLC_MFCR:
+    case OPC1_32_RLC_MOV:
+    case OPC1_32_RLC_MOV_U:
+    case OPC1_32_RLC_MOV_H:
+    case OPC1_32_RLC_MOVH_A:
+    case OPC1_32_RLC_MTCR:
+        decode_rlc_opc(env, ctx, op1);
+        break;
+    }
 }
 
 static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)