--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -46,6 +46,7 @@ DEF(eqv, 1, 2, 0, TCG_OPF_INT)
DEF(nand, 1, 2, 0, TCG_OPF_INT)
DEF(neg, 1, 1, 0, TCG_OPF_INT)
DEF(nor, 1, 2, 0, TCG_OPF_INT)
+DEF(not, 1, 1, 0, TCG_OPF_INT)
DEF(or, 1, 2, 0, TCG_OPF_INT)
DEF(orc, 1, 2, 0, TCG_OPF_INT)
DEF(sub, 1, 2, 0, TCG_OPF_INT)
@@ -95,7 +96,6 @@ DEF(setcond2_i32, 1, 4, 1, 0)
DEF(bswap16_i32, 1, 1, 1, 0)
DEF(bswap32_i32, 1, 1, 1, 0)
-DEF(not_i32, 1, 1, 0, 0)
DEF(clz_i32, 1, 2, 0, 0)
DEF(ctz_i32, 1, 2, 0, 0)
DEF(ctpop_i32, 1, 1, 0, 0)
@@ -144,7 +144,6 @@ DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(bswap16_i64, 1, 1, 1, 0)
DEF(bswap32_i64, 1, 1, 1, 0)
DEF(bswap64_i64, 1, 1, 1, 0)
-DEF(not_i64, 1, 1, 0, 0)
DEF(clz_i64, 1, 2, 0, 0)
DEF(ctz_i64, 1, 2, 0, 0)
DEF(ctpop_i64, 1, 1, 0, 0)
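[Annotation: tcg-opc.h is an X-macro table, so moving `not` from the per-width
sections up to the TCG_OPF_INT section changes every expansion site at once.
A minimal sketch of the pattern, with hypothetical fields rather than QEMU's
real DEF() arguments:

#include <stdio.h>

#define OP_TABLE \
    DEF(neg, 1, 1) \
    DEF(not, 1, 1) \
    DEF(or,  1, 2)

/* First expansion: build the opcode enum. */
enum {
#define DEF(name, nout, nin) INDEX_op_##name,
    OP_TABLE
#undef DEF
    NB_OPS
};

/* Second expansion: build a parallel name table from the same lines. */
static const char *op_names[NB_OPS] = {
#define DEF(name, nout, nin) [INDEX_op_##name] = #name,
    OP_TABLE
#undef DEF
};

int main(void)
{
    printf("%s\n", op_names[INDEX_op_not]);  /* prints "not" */
    return 0;
}
]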
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -463,7 +463,8 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
case INDEX_op_rotl_i64:
return rol64(x, y & 63);
- CASE_OP_32_64_VEC(not):
+ case INDEX_op_not:
+ case INDEX_op_not_vec:
return ~x;
case INDEX_op_neg:
@@ -1088,12 +1089,9 @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
switch (ctx->type) {
case TCG_TYPE_I32:
- not_op = INDEX_op_not_i32;
- have_not = tcg_op_supported(INDEX_op_not_i32, TCG_TYPE_I32, 0);
- break;
case TCG_TYPE_I64:
- not_op = INDEX_op_not_i64;
- have_not = tcg_op_supported(INDEX_op_not_i64, TCG_TYPE_I64, 0);
+ not_op = INDEX_op_not;
+ have_not = tcg_op_supported(INDEX_op_not, ctx->type, 0);
break;
case TCG_TYPE_V64:
case TCG_TYPE_V128:
@@ -2972,7 +2970,8 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_nor_vec:
done = fold_nor(&ctx, op);
break;
- CASE_OP_32_64_VEC(not):
+ case INDEX_op_not:
+ case INDEX_op_not_vec:
done = fold_not(&ctx, op);
break;
case INDEX_op_or:
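[Annotation: the optimize.c cases can merge because a single 64-bit evaluation
of ~x serves both widths, assuming (as the surrounding folding code does) that
the 32-bit consumer truncates the folded constant to its low half. A minimal
model of that assumption, not the real folding entry point:

#include <assert.h>
#include <stdint.h>

/* Sketch: fold 'not' on a constant held in a 64-bit container. */
static uint64_t fold_not_const(uint64_t x)
{
    return ~x;
}

int main(void)
{
    /* A 64-bit user consumes the full result... */
    assert(fold_not_const(0) == UINT64_MAX);
    /* ...while a 32-bit user truncates, so the high bits produced
       by the 64-bit ~ are harmless. */
    assert((uint32_t)fold_not_const(0xffff0000ffff0000ull) == 0x0000ffffu);
    return 0;
}
]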
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -462,9 +462,9 @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
} else if (arg2 == -1 &&
- tcg_op_supported(INDEX_op_not_i32, TCG_TYPE_I32, 0)) {
+ tcg_op_supported(INDEX_op_not, TCG_TYPE_I32, 0)) {
/* Don't recurse with tcg_gen_not_i32. */
- tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1);
+ tcg_gen_op2_i32(INDEX_op_not, ret, arg1);
} else {
tcg_gen_xor_i32(ret, arg1, tcg_constant_i32(arg2));
}
@@ -472,8 +472,8 @@ void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
{
- if (tcg_op_supported(INDEX_op_not_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg);
+ if (tcg_op_supported(INDEX_op_not, TCG_TYPE_I32, 0)) {
+ tcg_gen_op2_i32(INDEX_op_not, ret, arg);
} else {
tcg_gen_xori_i32(ret, arg, -1);
}
@@ -1764,9 +1764,9 @@ void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
if (arg2 == 0) {
tcg_gen_mov_i64(ret, arg1);
} else if (arg2 == -1 &&
- tcg_op_supported(INDEX_op_not_i64, TCG_TYPE_I64, 0)) {
+ tcg_op_supported(INDEX_op_not, TCG_TYPE_I64, 0)) {
/* Don't recurse with tcg_gen_not_i64. */
- tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1);
+ tcg_gen_op2_i64(INDEX_op_not, ret, arg1);
} else {
tcg_gen_xor_i64(ret, arg1, tcg_constant_i64(arg2));
}
@@ -2254,8 +2254,8 @@ void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg)
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg));
tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg));
- } else if (tcg_op_supported(INDEX_op_not_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg);
+ } else if (tcg_op_supported(INDEX_op_not, TCG_TYPE_I64, 0)) {
+ tcg_gen_op2_i64(INDEX_op_not, ret, arg);
} else {
tcg_gen_xori_i64(ret, arg, -1);
}
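[Annotation: the tcg-op.c fallback path is unchanged in substance: when the
backend lacks a not instruction, the expansion is still xor with all-ones.
The identity it relies on, checked standalone:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* ~x == x ^ -1 at any width; this is what the
       tcg_gen_xori_*(ret, arg, -1) fallback depends on. */
    uint32_t x32 = 0x12345678u;
    uint64_t x64 = 0x0123456789abcdefull;
    assert(~x32 == (x32 ^ (uint32_t)-1));
    assert(~x64 == (x64 ^ (uint64_t)-1));
    return 0;
}
]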
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1011,8 +1011,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
OUTOP(INDEX_op_nor, TCGOutOpBinary, outop_nor),
- OUTOP(INDEX_op_not_i32, TCGOutOpUnary, outop_not),
- OUTOP(INDEX_op_not_i64, TCGOutOpUnary, outop_not),
+ OUTOP(INDEX_op_not, TCGOutOpUnary, outop_not),
OUTOP(INDEX_op_or, TCGOutOpBinary, outop_or),
OUTOP(INDEX_op_orc, TCGOutOpBinary, outop_orc),
OUTOP(INDEX_op_sub, TCGOutOpSubtract, outop_sub),
@@ -5454,8 +5453,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
break;
case INDEX_op_neg:
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
+ case INDEX_op_not:
{
const TCGOutOpUnary *out =
container_of(all_outop[op->opc], TCGOutOpUnary, base);
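[Annotation: in tcg.c the unified INDEX_op_not now maps to a single outop_not
table entry, and the allocator recovers the typed emitter with container_of().
A self-contained sketch of that idiom, using the usual definition rather than
QEMU's exact macro:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int kind; };
struct unary_op {
    struct base base;
    void (*emit)(void);
};

static void emit_not(void) { puts("emit not"); }

int main(void)
{
    struct unary_op op = { .base = { .kind = 1 }, .emit = emit_not };
    struct base *b = &op.base;            /* what the table stores */
    /* Recover the enclosing struct from the embedded member. */
    struct unary_op *u = container_of(b, struct unary_op, base);
    u->emit();
    return 0;
}
]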
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -581,6 +581,10 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_args_rr(insn, &r0, &r1);
regs[r0] = -regs[r1];
break;
+ case INDEX_op_not:
+ tci_args_rr(insn, &r0, &r1);
+ regs[r0] = ~regs[r1];
+ break;
/* Arithmetic operations (32 bit). */
@@ -705,10 +709,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
regs[r0] = bswap32(regs[r1]);
break;
#endif
- CASE_32_64(not)
- tci_args_rr(insn, &r0, &r1);
- regs[r0] = ~regs[r1];
- break;
#if TCG_TARGET_REG_BITS == 64
/* Load/store operations (64 bit). */
@@ -1109,6 +1109,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_mov:
case INDEX_op_neg:
+ case INDEX_op_not:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
case INDEX_op_bswap16_i32:
@@ -1116,8 +1117,6 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
case INDEX_op_bswap32_i32:
case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
case INDEX_op_ctpop_i32:
case INDEX_op_ctpop_i64:
tci_args_rr(insn, &r0, &r1);
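[Annotation: in tci.c a single width-agnostic case replaces the old
CASE_32_64(not) pair, because bitwise complement computes the same bit
pattern at either register width. A toy dispatch loop (hypothetical opcodes,
not the real TCI encoding) shows the shape:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum { OP_NOT, OP_HALT };   /* hypothetical toy opcodes */

int main(void)
{
    uint64_t regs[2] = { 0, 0x00000000ffffffffull };
    int prog[] = { OP_NOT, OP_HALT };

    for (int pc = 0; ; pc++) {
        switch (prog[pc]) {
        case OP_NOT:
            regs[0] = ~regs[1];   /* width-agnostic, as in the patch */
            break;
        case OP_HALT:
            printf("%016" PRIx64 "\n", regs[0]);  /* ffffffff00000000 */
            return 0;
        }
    }
}
]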
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -761,7 +761,7 @@ static const TCGOutOpUnary outop_neg = {
static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
{
- tcg_out_op_rr(s, glue(INDEX_op_not_i,TCG_TARGET_REG_BITS), a0, a1);
+ tcg_out_op_rr(s, INDEX_op_not, a0, a1);
}
static const TCGOutOpUnary outop_not = {
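[Annotation: the removed backend line used token pasting to pick the opcode
name at build time; with a single INDEX_op_not that indirection disappears.
How glue() builds an identifier, in miniature (assuming the usual two-level
paste macro, not QEMU's headers):

#include <stdio.h>

#define xglue(a, b) a##b
#define glue(a, b)  xglue(a, b)   /* two levels so REG_BITS expands first */

#define REG_BITS 64
enum { width_32 = 32, width_64 = 64 };

int main(void)
{
    /* glue(width_, REG_BITS) pastes into the identifier width_64. */
    printf("%d\n", glue(width_, REG_BITS));
    return 0;
}
]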
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           | 13 ++++++-------
 tcg/tcg-op.c             | 16 ++++++++--------
 tcg/tcg.c                |  6 ++----
 tcg/tci.c                | 11 +++++------
 tcg/tci/tcg-target.c.inc |  2 +-
 6 files changed, 23 insertions(+), 28 deletions(-)