@@ -114,7 +114,7 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_qemu_st8_i32 0

-#define TCG_TARGET_HAS_extr_i64_i32 1
+#define TCG_TARGET_HAS_extr_i64_i32 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_rot_i64 0
@@ -529,11 +529,6 @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
tcg_out_ext32u(s, rd, rs);
}

-static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
-{
- tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
-}
-
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
return false;
@@ -1429,9 +1424,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_divu_i64:
c = ARITH_UDIVX;
goto gen_arith;
- case INDEX_op_extrh_i64_i32:
- tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
- break;

case INDEX_op_brcond_i64:
tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
@@ -1483,7 +1475,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
default:
g_assert_not_reached();
}
@@ -1515,8 +1506,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
- case INDEX_op_extrl_i64_i32:
- case INDEX_op_extrh_i64_i32:
case INDEX_op_qemu_ld_a32_i32:
case INDEX_op_qemu_ld_a64_i32:
case INDEX_op_qemu_ld_a32_i64:
Since a59a29312660 ("tcg/sparc64: Remove sparc32plus constraints")
we no longer distinguish registers with 32 vs 64 bits.
Therefore we can remove support for the backend-specific
type change opcodes.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
Based-on: 20230822175127.1173698-1-richard.henderson@linaro.org
("tcg: Unify TCG_TARGET_HAS_extr[lh]_i64_i32")
---
 tcg/sparc64/tcg-target.h     |  2 +-
 tcg/sparc64/tcg-target.c.inc | 11 -----------
 2 files changed, 1 insertion(+), 12 deletions(-)
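For context, here is an illustrative C sketch (not part of the patch;
the sketch_* helper names are made up for exposition) of what the two
removed opcodes compute on a 64-bit value. On sparc64 a 32-bit value
already lives in a 64-bit register, so the low half was just a register
move (tcg_out_mov in the deleted tcg_out_extrl_i64_i32 above) and the
high half a logical shift right by 32 (SHIFT_SRLX above):

#include <stdint.h>

/* extrl_i64_i32: extract the low 32 bits of a 64-bit value.
 * The deleted backend code emitted a plain register move for this. */
static uint32_t sketch_extrl_i64_i32(uint64_t x)
{
    return (uint32_t)x;
}

/* extrh_i64_i32: extract the high 32 bits of a 64-bit value.
 * The deleted backend code emitted SRLX by 32 for this. */
static uint32_t sketch_extrh_i64_i32(uint64_t x)
{
    return (uint32_t)(x >> 32);
}

With TCG_TARGET_HAS_extr_i64_i32 now 0, the common TCG code expands
these operations itself (a move for the low half, a 64-bit shift right
by 32 for the high half), so the backend no longer needs dedicated
implementations.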