case INDEX_op_shr_i32:
return (uint32_t)x >> (y & 31);
- case INDEX_op_trunc_shr_i64_i32:
case INDEX_op_shr_i64:
return (uint64_t)x >> (y & 63);
CASE_OP_32_64(ext16u):
return (uint16_t)x;
+ case INDEX_op_ext_i32_i64:
case INDEX_op_ext32s_i64:
return (int32_t)x;
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
case INDEX_op_ext32u_i64:
return (uint32_t)x;
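+ /* extrh_i64_i32 extracts the high 32 bits of a 64-bit value. */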
+ case INDEX_op_extrh_i64_i32:
+ return (uint64_t)x >> 32;
+
case INDEX_op_muluh_i32:
return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
case INDEX_op_mulsh_i32:
return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

mask = temps[args[1]].mask & mask;
break;
+ case INDEX_op_ext_i32_i64:
+ if ((temps[args[1]].mask & 0x80000000) != 0) {
+ break;
+ }
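+ /* The sign bit of the source is known zero, so sign- and
+    zero-extension give the same result: fall through. */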
+ case INDEX_op_extu_i32_i64:
+ /* We do not compute affected as it is a size changing op. */
+ mask = (uint32_t)temps[args[1]].mask;
+ break;
+
CASE_OP_32_64(andc):
/* Known-zeros does not imply known-ones. Therefore unless
args[2] is constant, we can't infer anything from it. */

}
break;
- case INDEX_op_trunc_shr_i64_i32:
- mask = (uint64_t)temps[args[1]].mask >> args[2];
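+ /* Only the low (extrl) or high (extrh) half of the source's
+    mask can be set in the 32-bit result. */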
+ case INDEX_op_extrl_i64_i32:
+ mask = (uint32_t)temps[args[1]].mask;
+ break;
+ case INDEX_op_extrh_i64_i32:
+ mask = (uint64_t)temps[args[1]].mask >> 32;
break;
CASE_OP_32_64(shl):
CASE_OP_32_64(ext16u):
case INDEX_op_ext32s_i64:
case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
+ case INDEX_op_extrh_i64_i32:
if (temp_is_const(args[1])) {
tmp = do_constant_folding(opc, temps[args[1]].val, 0);
tcg_opt_gen_movi(s, op, args, args[0], tmp);
break;
}
goto do_default;
- case INDEX_op_trunc_shr_i64_i32:
- if (temp_is_const(args[1])) {
- tmp = do_constant_folding(opc, temps[args[1]].val, args[2]);
- tcg_opt_gen_movi(s, op, args, args[0], tmp);
- break;
- }
- goto do_default;
-
CASE_OP_32_64(add):
CASE_OP_32_64(sub):
CASE_OP_32_64(mul):