-/* CC optimization */
-
-/* Instead of computing the condition codes after each s390x instruction,
- * QEMU just stores the result (called CC_DST), the type of operation
- * (called CC_OP) and whatever operands are needed (CC_SRC and possibly
- * CC_VR). When the condition codes are needed, they can be calculated
- * from this information. Condition codes are not generated at all if
- * they are only needed for conditional branches.
- */
-enum cc_op {
- CC_OP_CONST0 = 0, /* CC is 0 */
- CC_OP_CONST1, /* CC is 1 */
- CC_OP_CONST2, /* CC is 2 */
- CC_OP_CONST3, /* CC is 3 */
-
- CC_OP_DYNAMIC, /* CC calculation defined by env->cc_op */
- CC_OP_STATIC, /* CC value is env->cc_op */
-
- CC_OP_NZ, /* env->cc_dst != 0 */
- CC_OP_LTGT_32, /* signed less/greater than (32bit) */
- CC_OP_LTGT_64, /* signed less/greater than (64bit) */
- CC_OP_LTUGTU_32, /* unsigned less/greater than (32bit) */
- CC_OP_LTUGTU_64, /* unsigned less/greater than (64bit) */
- CC_OP_LTGT0_32, /* signed less/greater than 0 (32bit) */
- CC_OP_LTGT0_64, /* signed less/greater than 0 (64bit) */
-
- CC_OP_ADD_64, /* overflow on add (64bit) */
- CC_OP_ADDU_64, /* overflow on unsigned add (64bit) */
- CC_OP_ADDC_64, /* overflow on unsigned add-carry (64bit) */
- CC_OP_SUB_64, /* overflow on subtraction (64bit) */
- CC_OP_SUBU_64, /* overflow on unsigned subtraction (64bit) */
- CC_OP_SUBB_64, /* overflow on unsigned sub-borrow (64bit) */
- CC_OP_ABS_64, /* sign eval on abs (64bit) */
- CC_OP_NABS_64, /* sign eval on nabs (64bit) */
-
- CC_OP_ADD_32, /* overflow on add (32bit) */
- CC_OP_ADDU_32, /* overflow on unsigned add (32bit) */
- CC_OP_ADDC_32, /* overflow on unsigned add-carry (32bit) */
- CC_OP_SUB_32, /* overflow on subtraction (32bit) */
- CC_OP_SUBU_32, /* overflow on unsigned subtraction (32bit) */
- CC_OP_SUBB_32, /* overflow on unsigned sub-borrow (32bit) */
- CC_OP_ABS_32, /* sign eval on abs (32bit) */
- CC_OP_NABS_32, /* sign eval on nabs (32bit) */
-
- CC_OP_COMP_32, /* complement (32bit) */
- CC_OP_COMP_64, /* complement (64bit) */
-
- CC_OP_TM_32, /* test under mask (32bit) */
- CC_OP_TM_64, /* test under mask (64bit) */
-
- CC_OP_NZ_F32, /* FP dst != 0 (32bit) */
- CC_OP_NZ_F64, /* FP dst != 0 (64bit) */
- CC_OP_NZ_F128, /* FP dst != 0 (128bit) */
-
- CC_OP_ICM, /* insert characters under mask */
- CC_OP_SLA_32, /* Calculate shift left signed (32bit) */
- CC_OP_SLA_64, /* Calculate shift left signed (64bit) */
- CC_OP_FLOGR, /* find leftmost one */
- CC_OP_MAX
-};
-
-static const char *cc_names[] = {
- [CC_OP_CONST0] = "CC_OP_CONST0",
- [CC_OP_CONST1] = "CC_OP_CONST1",
- [CC_OP_CONST2] = "CC_OP_CONST2",
- [CC_OP_CONST3] = "CC_OP_CONST3",
- [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC",
- [CC_OP_STATIC] = "CC_OP_STATIC",
- [CC_OP_NZ] = "CC_OP_NZ",
- [CC_OP_LTGT_32] = "CC_OP_LTGT_32",
- [CC_OP_LTGT_64] = "CC_OP_LTGT_64",
- [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
- [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
- [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32",
- [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64",
- [CC_OP_ADD_64] = "CC_OP_ADD_64",
- [CC_OP_ADDU_64] = "CC_OP_ADDU_64",
- [CC_OP_ADDC_64] = "CC_OP_ADDC_64",
- [CC_OP_SUB_64] = "CC_OP_SUB_64",
- [CC_OP_SUBU_64] = "CC_OP_SUBU_64",
- [CC_OP_SUBB_64] = "CC_OP_SUBB_64",
- [CC_OP_ABS_64] = "CC_OP_ABS_64",
- [CC_OP_NABS_64] = "CC_OP_NABS_64",
- [CC_OP_ADD_32] = "CC_OP_ADD_32",
- [CC_OP_ADDU_32] = "CC_OP_ADDU_32",
- [CC_OP_ADDC_32] = "CC_OP_ADDC_32",
- [CC_OP_SUB_32] = "CC_OP_SUB_32",
- [CC_OP_SUBU_32] = "CC_OP_SUBU_32",
- [CC_OP_SUBB_32] = "CC_OP_SUBB_32",
- [CC_OP_ABS_32] = "CC_OP_ABS_32",
- [CC_OP_NABS_32] = "CC_OP_NABS_32",
- [CC_OP_COMP_32] = "CC_OP_COMP_32",
- [CC_OP_COMP_64] = "CC_OP_COMP_64",
- [CC_OP_TM_32] = "CC_OP_TM_32",
- [CC_OP_TM_64] = "CC_OP_TM_64",
- [CC_OP_NZ_F32] = "CC_OP_NZ_F32",
- [CC_OP_NZ_F64] = "CC_OP_NZ_F64",
- [CC_OP_NZ_F128] = "CC_OP_NZ_F128",
- [CC_OP_ICM] = "CC_OP_ICM",
- [CC_OP_SLA_32] = "CC_OP_SLA_32",
- [CC_OP_SLA_64] = "CC_OP_SLA_64",
- [CC_OP_FLOGR] = "CC_OP_FLOGR",
-};
-
-static inline const char *cc_name(int cc_op)
-{
- return cc_names[cc_op];
-}
-
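
Note on the removed comment above: the lazy scheme it describes defers the CC computation until something actually consumes the condition code. As a rough, illustrative sketch only (the function name calc_cc_sketch and the handful of cases shown are hypothetical simplifications, not the target's real helper code), the recomputation step amounts to a switch over the stored cc_op:

#include <stdint.h>

/* Illustrative sketch: recompute the condition code from the saved
 * operation type and operands at the point where it is actually needed. */
static uint32_t calc_cc_sketch(enum cc_op op, uint64_t src, uint64_t dst)
{
    switch (op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* CC was already folded to a constant at translation time. */
        return op - CC_OP_CONST0;
    case CC_OP_NZ:
        /* Result zero -> CC 0, non-zero -> CC 1. */
        return dst != 0;
    case CC_OP_LTGT_64:
        /* Signed compare: 0 = equal, 1 = first operand low, 2 = high. */
        return src == dst ? 0 : ((int64_t)src < (int64_t)dst ? 1 : 2);
    default:
        /* The remaining operations follow the same pattern. */
        return 0;
    }
}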