diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -39,6 +39,8 @@ DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT)

DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)

+DEF(add, 1, 2, 0, TCG_OPF_INT)
+
DEF(setcond_i32, 1, 2, 1, 0)
DEF(negsetcond_i32, 1, 2, 1, 0)
DEF(movcond_i32, 1, 4, 1, 0)
@@ -52,7 +54,6 @@ DEF(st8_i32, 0, 2, 1, 0)
DEF(st16_i32, 0, 2, 1, 0)
DEF(st_i32, 0, 2, 1, 0)
/* arith */
-DEF(add_i32, 1, 2, 0, 0)
DEF(sub_i32, 1, 2, 0, 0)
DEF(mul_i32, 1, 2, 0, 0)
DEF(div_i32, 1, 2, 0, 0)
@@ -115,7 +116,6 @@ DEF(st16_i64, 0, 2, 1, 0)
DEF(st32_i64, 0, 2, 1, 0)
DEF(st_i64, 0, 2, 1, 0)
/* arith */
-DEF(add_i64, 1, 2, 0, 0)
DEF(sub_i64, 1, 2, 0, 0)
DEF(mul_i64, 1, 2, 0, 0)
DEF(div_i64, 1, 2, 0, 0)
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -1940,7 +1940,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
NEXT_INSN;
switch (ctx->opcode & 0xf00f) {
case 0x300c: /* add Rm,Rn */
- op_opc = INDEX_op_add_i32;
+ op_opc = INDEX_op_add;
goto do_reg_op;
case 0x2009: /* and Rm,Rn */
op_opc = INDEX_op_and_i32;
@@ -1984,7 +1984,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
if (op_dst != B11_8 || mv_src >= 0) {
goto fail;
}
- op_opc = INDEX_op_add_i32;
+ op_opc = INDEX_op_add;
op_arg = tcg_constant_i32(B7_0s);
break;

@@ -2087,7 +2087,7 @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
ctx->memidx, ld_mop);
break;

- case INDEX_op_add_i32:
+ case INDEX_op_add:
if (op_dst != st_src) {
goto fail;
}
diff --git a/tcg/optimize.c b/tcg/optimize.c
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -412,7 +412,7 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
uint64_t l64, h64;

switch (op) {
- CASE_OP_32_64(add):
+ case INDEX_op_add:
return x + y;

CASE_OP_32_64(sub):
@@ -2246,7 +2246,7 @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
break;
}
if (convert) {
- TCGOpcode add_opc, xor_opc, neg_opc;
+ TCGOpcode xor_opc, neg_opc;

if (!inv && !neg) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
@@ -2254,12 +2254,10 @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)

switch (ctx->type) {
case TCG_TYPE_I32:
- add_opc = INDEX_op_add_i32;
neg_opc = INDEX_op_neg_i32;
xor_opc = INDEX_op_xor_i32;
break;
case TCG_TYPE_I64:
- add_opc = INDEX_op_add_i64;
neg_opc = INDEX_op_neg_i64;
xor_opc = INDEX_op_xor_i64;
break;
@@ -2270,7 +2268,7 @@ static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
if (!inv) {
op->opc = neg_opc;
} else if (neg) {
- op->opc = add_opc;
+ op->opc = INDEX_op_add;
op->args[2] = arg_new_constant(ctx, -1);
} else {
op->opc = xor_opc;
@@ -2635,8 +2633,7 @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
if (arg_is_const(op->args[2])) {
uint64_t val = arg_info(op->args[2])->val;

- op->opc = (ctx->type == TCG_TYPE_I32
- ? INDEX_op_add_i32 : INDEX_op_add_i64);
+ op->opc = INDEX_op_add;
op->args[2] = arg_new_constant(ctx, -val);
}
return finish_folding(ctx, op);
@@ -2827,7 +2824,7 @@ void tcg_optimize(TCGContext *s)
* Sorted alphabetically by opcode as much as possible.
*/
switch (opc) {
- CASE_OP_32_64(add):
+ case INDEX_op_add:
done = fold_add(&ctx, op);
break;
case INDEX_op_add_vec:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -362,7 +362,7 @@ void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg)

void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
- tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2);
+ tcg_gen_op3_i32(INDEX_op_add, ret, arg1, arg2);
}

void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
@@ -1555,7 +1555,7 @@ void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset)
void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_REG_BITS == 64) {
- tcg_gen_op3_i64(INDEX_op_add_i64, ret, arg1, arg2);
+ tcg_gen_op3_i64(INDEX_op_add, ret, arg1, arg2);
} else {
tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1),
TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2));
diff --git a/tcg/tcg.c b/tcg/tcg.c
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -991,8 +991,7 @@ static const TCGOutOp outop_notreached = {

static const TCGOutOp * const all_outop[NB_OPS] = {
[0 ... NB_OPS - 1] = &outop_notreached,
- OUTOP(INDEX_op_add_i32, TCGOutOpBinary, outop_add),
- OUTOP(INDEX_op_add_i64, TCGOutOpBinary, outop_add),
+ OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
};

#undef OUTOP
@@ -2206,6 +2205,7 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_qemu_st_a64_i128:
return TCG_TARGET_HAS_qemu_ldst_i128;

+ case INDEX_op_add:
case INDEX_op_mov:
return has_type;

@@ -2220,7 +2220,6 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
- case INDEX_op_add_i32:
case INDEX_op_sub_i32:
case INDEX_op_neg_i32:
case INDEX_op_mul_i32:
@@ -2304,7 +2303,6 @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
- case INDEX_op_add_i64:
case INDEX_op_sub_i64:
case INDEX_op_neg_i64:
case INDEX_op_mul_i64:
@@ -4016,14 +4014,12 @@ liveness_pass_1(TCGContext *s)
break;

case INDEX_op_add2_i32:
- opc_new = INDEX_op_add_i32;
+ case INDEX_op_add2_i64:
+ opc_new = INDEX_op_add;
goto do_addsub2;
case INDEX_op_sub2_i32:
opc_new = INDEX_op_sub_i32;
goto do_addsub2;
- case INDEX_op_add2_i64:
- opc_new = INDEX_op_add_i64;
- goto do_addsub2;
case INDEX_op_sub2_i64:
opc_new = INDEX_op_sub_i64;
do_addsub2:
@@ -5430,8 +5426,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
tcg_out_extrl_i64_i32(s, new_args[0], new_args[1]);
break;

- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
+ case INDEX_op_add:
{
const TCGOutOpBinary *out =
container_of(all_outop[op->opc], TCGOutOpBinary, base);
diff --git a/tcg/tci.c b/tcg/tci.c
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -533,7 +533,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
/* Arithmetic operations (mixed 32/64 bit). */
- CASE_32_64(add)
+ case INDEX_op_add:
tci_args_rrr(insn, &r0, &r1, &r2);
regs[r0] = regs[r1] + regs[r2];
break;
@@ -1138,8 +1138,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
op_name, str_r(r0), str_r(r1));
break;

- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
+ case INDEX_op_add:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -475,9 +475,7 @@ static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
stack_bounds_check(base, offset);
if (offset != sextract32(offset, 0, 16)) {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
- tcg_out_op_rrr(s, (TCG_TARGET_REG_BITS == 32
- ? INDEX_op_add_i32 : INDEX_op_add_i64),
- TCG_REG_TMP, TCG_REG_TMP, base);
+ tcg_out_op_rrr(s, INDEX_op_add, TCG_REG_TMP, TCG_REG_TMP, base);
base = TCG_REG_TMP;
offset = 0;
}
@@ -666,7 +664,7 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
static void tgen_add(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2)
{
- tcg_out_op_rrr(s, glue(INDEX_op_add_i,TCG_TARGET_REG_BITS), a0, a1, a2);
+ tcg_out_op_rrr(s, INDEX_op_add, a0, a1, a2);
}

static const TCGOutOpBinary outop_add = {

Rely on TCGOP_TYPE instead of opcodes specific to each type.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  4 ++--
 target/sh4/translate.c   |  6 +++---
 tcg/optimize.c           | 13 +++++--------
 tcg/tcg-op.c             |  4 ++--
 tcg/tcg.c                | 15 +++++----------
 tcg/tci.c                |  5 ++---
 tcg/tci/tcg-target.c.inc |  6 ++----
 7 files changed, 21 insertions(+), 32 deletions(-)
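
A note on the pattern, not part of the patch: the merge works because the
operand width now travels with the op rather than with the opcode, so
consumers read TCGOP_TYPE(op) (ctx->type in the optimizer) instead of
matching _i32/_i64 opcode pairs.  A minimal sketch of that idea, assuming
QEMU's "tcg/tcg.h" for TCGType and TCG_TYPE_I32; the helper name
fold_add_sketch is hypothetical:

    #include "tcg/tcg.h"    /* TCGType, TCG_TYPE_I32 */

    /*
     * Fold a unified add: do the arithmetic once at 64 bits, then
     * truncate if the op was generated as a 32-bit operation.  The
     * width is a property of the op, not of the opcode name.
     */
    static uint64_t fold_add_sketch(TCGType type, uint64_t x, uint64_t y)
    {
        uint64_t r = x + y;         /* identical computation for both widths */

        if (type == TCG_TYPE_I32) {
            r = (uint32_t)r;        /* keep only the low 32 bits */
        }
        return r;
    }

The same property is what lets fold_sub above turn "x - c" into "x + (-c)"
with a bare INDEX_op_add, where the removed code had to pick
INDEX_op_add_i32 or INDEX_op_add_i64 from ctx->type.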