--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -53,6 +53,7 @@ DEF(eqv, 1, 2, 0, TCG_OPF_INT)
DEF(mul, 1, 2, 0, TCG_OPF_INT)
DEF(muls2, 2, 2, 0, TCG_OPF_INT)
DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
+DEF(mulu2, 2, 2, 0, TCG_OPF_INT)
DEF(muluh, 1, 2, 0, TCG_OPF_INT)
DEF(nand, 1, 2, 0, TCG_OPF_INT)
DEF(neg, 1, 1, 0, TCG_OPF_INT)
@@ -92,7 +93,6 @@ DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(add2_i32, 2, 4, 0, 0)
DEF(sub2_i32, 2, 4, 0, 0)
-DEF(mulu2_i32, 2, 2, 0, 0)
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
DEF(setcond2_i32, 1, 4, 1, 0)
@@ -133,7 +133,6 @@ DEF(bswap64_i64, 1, 1, 1, 0)
DEF(add2_i64, 2, 4, 0, 0)
DEF(sub2_i64, 2, 4, 0, 0)
-DEF(mulu2_i64, 2, 2, 0, 0)
#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
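
For readers new to tcg-opc.h: each entry is DEF(name, nb_oargs, nb_iargs, nb_cargs, flags), so the changed lines read as follows (descriptive comment only; the macro signature is from the header, the reading of TCG_OPF_INT is as used by this series):

    /* DEF(name, nb_oargs, nb_iargs, nb_cargs, flags)
     *
     * The new entry declares mulu2 with 2 outputs (low, high), 2 inputs,
     * 0 constant args, and TCG_OPF_INT, i.e. valid for both TCG_TYPE_I32
     * and TCG_TYPE_I64.  The operation's type now travels with the op
     * rather than being baked into the opcode name, which is what lets
     * mulu2_i32/mulu2_i64 above collapse into a single opcode.
     */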
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -2057,13 +2057,14 @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
TCGOp *op2;
switch (op->opc) {
- case INDEX_op_mulu2_i32:
- l = (uint64_t)(uint32_t)a * (uint32_t)b;
- h = (int32_t)(l >> 32);
- l = (int32_t)l;
- break;
- case INDEX_op_mulu2_i64:
- mulu64(&l, &h, a, b);
+ case INDEX_op_mulu2:
+ if (ctx->type == TCG_TYPE_I32) {
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
+ h = (int32_t)(l >> 32);
+ l = (int32_t)l;
+ } else {
+ mulu64(&l, &h, a, b);
+ }
break;
case INDEX_op_muls2:
if (ctx->type == TCG_TYPE_I32) {
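
The I32 arm computes the full 64-bit product of the two 32-bit constants, then sign-extends each half, matching how the optimizer canonicalizes 32-bit constants held in 64-bit slots. A standalone check of that arithmetic (a hypothetical test, not part of the patch):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t a = 0xffffffff, b = 2;
        uint64_t l = (uint64_t)(uint32_t)a * (uint32_t)b;   /* 0x1fffffffe */
        uint64_t h = (int32_t)(l >> 32);    /* high half: 1 */
        l = (int32_t)l;                     /* low half, sign-extended */
        printf("h=%" PRIx64 " l=%" PRIx64 "\n", h, l);
        /* prints: h=1 l=fffffffffffffffe */
        return 0;
    }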
@@ -2963,7 +2964,7 @@ void tcg_optimize(TCGContext *s)
done = fold_mul_highpart(&ctx, op);
break;
case INDEX_op_muls2:
- CASE_OP_32_64(mulu2):
+ case INDEX_op_mulu2:
done = fold_multiply2(&ctx, op);
break;
case INDEX_op_nand:
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -1138,8 +1138,8 @@ void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2)
{
- if (tcg_op_supported(INDEX_op_mulu2_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I32, 0)) {
+ tcg_gen_op4_i32(INDEX_op_mulu2, rl, rh, arg1, arg2);
} else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I32, 0)) {
TCGv_i32 t = tcg_temp_ebb_new_i32();
tcg_gen_op3_i32(INDEX_op_mul, t, arg1, arg2);
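
When the backend lacks mulu2 but provides the unsigned high-part multiply, the branch above emits mul into a temporary for the low half and muluh for the high half (the temporary lets rl alias arg1/arg2). The two halves, in plain C (a sketch of the semantics with hypothetical helpers, not the TCG code):

    #include <stdint.h>

    static uint32_t mul_lo32(uint32_t a, uint32_t b)
    {
        return a * b;                                   /* INDEX_op_mul   */
    }

    static uint32_t mul_hi32(uint32_t a, uint32_t b)
    {
        return (uint32_t)(((uint64_t)a * b) >> 32);     /* INDEX_op_muluh */
    }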
@@ -2861,8 +2861,8 @@ void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
{
- if (tcg_op_supported(INDEX_op_mulu2_i64, TCG_TYPE_I64, 0)) {
- tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
+ if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I64, 0)) {
+ tcg_gen_op4_i64(INDEX_op_mulu2, rl, rh, arg1, arg2);
} else if (tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
TCGv_i64 t = tcg_temp_ebb_new_i64();
tcg_gen_op3_i64(INDEX_op_mul, t, arg1, arg2);
@@ -2888,7 +2888,7 @@ void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2)
tcg_gen_op3_i64(INDEX_op_mulsh, rh, arg1, arg2);
tcg_gen_mov_i64(rl, t);
tcg_temp_free_i64(t);
- } else if (tcg_op_supported(INDEX_op_mulu2_i64, TCG_TYPE_I64, 0) ||
+ } else if (tcg_op_supported(INDEX_op_mulu2, TCG_TYPE_I64, 0) ||
tcg_op_supported(INDEX_op_muluh, TCG_TYPE_I64, 0)) {
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
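
This fallback performs an unsigned double-word multiply and then corrects the high half for signedness: subtract b from it when a is negative and a when b is negative; the low half is identical for signed and unsigned multiplication. The identity in isolation (a hypothetical helper, not the TCG code):

    #include <stdint.h>

    /* hi_s = hi_u - (a < 0 ? b : 0) - (b < 0 ? a : 0), modulo 2^64 */
    static uint64_t mulsh64(uint64_t a, uint64_t b, uint64_t hi_u)
    {
        uint64_t t2 = ((int64_t)a >> 63) & b;   /* b if a is negative */
        uint64_t t3 = ((int64_t)b >> 63) & a;   /* a if b is negative */
        return hi_u - t2 - t3;
    }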
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1043,8 +1043,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
- OUTOP(INDEX_op_mulu2_i32, TCGOutOpMul2, outop_mulu2),
- OUTOP(INDEX_op_mulu2_i64, TCGOutOpMul2, outop_mulu2),
+ OUTOP(INDEX_op_mulu2, TCGOutOpMul2, outop_mulu2),
OUTOP(INDEX_op_muluh, TCGOutOpBinary, outop_muluh),
OUTOP(INDEX_op_nand, TCGOutOpBinary, outop_nand),
OUTOP(INDEX_op_neg, TCGOutOpUnary, outop_neg),
@@ -4009,8 +4008,7 @@ liveness_pass_1(TCGContext *s)
opc_new = INDEX_op_mul;
opc_new2 = INDEX_op_mulsh;
goto do_mul2;
- case INDEX_op_mulu2_i32:
- case INDEX_op_mulu2_i64:
+ case INDEX_op_mulu2:
opc_new = INDEX_op_mul;
opc_new2 = INDEX_op_muluh;
do_mul2:
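
For context on do_mul2: when liveness finds only one half of the double-word result live, the op is narrowed using the opcode pair selected above, roughly (descriptive comment, not patch code):

    /* mulu2 lo, hi, a, b   with hi dead  ->  mul   lo, a, b   (opc_new)
     * mulu2 lo, hi, a, b   with lo dead  ->  muluh hi, a, b   (opc_new2)
     * and likewise mul/mulsh for the muls2 case.
     */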
@@ -5462,8 +5460,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
break;
case INDEX_op_muls2:
- case INDEX_op_mulu2_i32:
- case INDEX_op_mulu2_i64:
+ case INDEX_op_mulu2:
{
const TCGOutOpMul2 *out =
container_of(all_outop[op->opc], TCGOutOpMul2, base);
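
The container_of recovers the mul2-specific emitter from the generic base pointer stored in all_outop[]. The idiom, reduced to essentials (a generic sketch with made-up type names, not QEMU's definitions):

    #include <stddef.h>

    typedef struct { int flags; } OutOpBase;
    typedef struct {
        OutOpBase base;                        /* embedded, as in TCGOutOpMul2 */
        void (*out_rrrr)(int, int, int, int);
    } OutOpMul2;

    /* Given OutOpBase *p that points at some OutOpMul2's .base member,
     * step back to the enclosing struct:
     *   OutOpMul2 *m = (OutOpMul2 *)((char *)p - offsetof(OutOpMul2, base));
     */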
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -590,8 +590,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
#endif
break;
- case INDEX_op_mulu2_i32:
- case INDEX_op_mulu2_i64:
+ case INDEX_op_mulu2:
tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
#if TCG_TARGET_REG_BITS == 32
tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
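
On a 32-bit TCI host the product is formed in a 64-bit temporary and then split across the two destination registers; a worked value for that split (an illustrative test, not patch code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t r2 = 0xffffffffu, r3 = 0xffffffffu;
        uint64_t tmp64 = (uint64_t)r2 * r3;     /* 0xfffffffe00000001 */
        uint32_t lo = (uint32_t)tmp64;          /* 0x00000001 */
        uint32_t hi = (uint32_t)(tmp64 >> 32);  /* 0xfffffffe */
        assert(lo == 0x00000001u && hi == 0xfffffffeu);
        return 0;
    }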
@@ -1092,8 +1091,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
break;
case INDEX_op_muls2:
- case INDEX_op_mulu2_i32:
- case INDEX_op_mulu2_i64:
+ case INDEX_op_mulu2:
tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
op_name, str_r(r0), str_r(r1),
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -599,7 +599,7 @@ Multiword arithmetic support
formed from two single-word arguments, and the double-word output *t0*
is returned in two single-word outputs.
- * - mulu2_i32/i64 *t0_low*, *t0_high*, *t1*, *t2*
+ * - mulu2 *t0_low*, *t0_high*, *t1*, *t2*
- | Similar to mul, except two unsigned inputs *t1* and *t2* yielding the full
double-word product *t0*. The latter is returned in two single-word outputs.
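
To make the documented semantics concrete: with 32-bit inputs t1 = 0x80000000 and t2 = 4, the full product is 0x200000000, so mulu2 yields t0_high = 2 and t0_low = 0, where a plain mul would return only the truncated low word 0.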
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -728,8 +728,7 @@ static const TCGOutOpBinary outop_mulsh = {
static void tgen_mulu2(TCGContext *s, TCGType type,
TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
{
- tcg_out_op_rrrr(s, glue(INDEX_op_mulu2_i,TCG_TARGET_REG_BITS),
- a0, a1, a2, a3);
+ tcg_out_op_rrrr(s, INDEX_op_mulu2, a0, a1, a2, a3);
}
static const TCGOutOpMul2 outop_mulu2 = {
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           | 17 +++++++++--------
 tcg/tcg-op.c             | 10 +++++-----
 tcg/tcg.c                |  9 +++------
 tcg/tci.c                |  6 ++----
 docs/devel/tcg-ops.rst   |  2 +-
 tcg/tci/tcg-target.c.inc |  3 +--
 7 files changed, 22 insertions(+), 28 deletions(-)