| Message ID | 20250415192515.232910-43-richard.henderson@linaro.org |
|---|---|
| State | New |
| Series | tcg: Convert to TCGOutOp structures |
On 4/15/25 12:23, Richard Henderson wrote:
> Rename to INDEX_op_divs to emphasize signed inputs, mirroring
> INDEX_op_divu_*.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
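For readers who have not worked with the opcode table before: every DEF() line in include/tcg/tcg-opc.h becomes one enumerator of TCGOpcode, so dropping the div_i32/div_i64 pair and adding a single divs entry is exactly what creates the INDEX_op_divs used throughout the patch below. A simplified sketch of that expansion (the real definition lives in include/tcg/tcg.h):

```c
/* Simplified sketch of how include/tcg/tcg-opc.h is consumed.
 * Each DEF(name, oargs, iargs, cargs, flags) line expands to one
 * INDEX_op_<name> enumerator; for a TCG_OPF_INT op such as divs,
 * the operand width is carried by the op's TCGType rather than
 * being baked into the opcode name. */
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg/tcg-opc.h"
#undef DEF
    NB_OPS,
} TCGOpcode;
```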
Rename to INDEX_op_divs to emphasize signed inputs, mirroring
INDEX_op_divu_*.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h    |  3 +--
 tcg/optimize.c           | 12 +++++++-----
 tcg/tcg-op.c             | 16 ++++++++--------
 tcg/tcg.c                |  6 ++----
 tcg/tci.c                |  5 ++---
 docs/devel/tcg-ops.rst   |  2 +-
 tcg/tci/tcg-target.c.inc |  2 +-
 7 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index a9d7938a52..6d4edd0b16 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -42,6 +42,7 @@ DEF(mov, 1, 1, 0, TCG_OPF_INT | TCG_OPF_NOT_PRESENT)
 DEF(add, 1, 2, 0, TCG_OPF_INT)
 DEF(and, 1, 2, 0, TCG_OPF_INT)
 DEF(andc, 1, 2, 0, TCG_OPF_INT)
+DEF(divs, 1, 2, 0, TCG_OPF_INT)
 DEF(eqv, 1, 2, 0, TCG_OPF_INT)
 DEF(mul, 1, 2, 0, TCG_OPF_INT)
 DEF(mulsh, 1, 2, 0, TCG_OPF_INT)
@@ -68,7 +69,6 @@ DEF(st8_i32, 0, 2, 1, 0)
 DEF(st16_i32, 0, 2, 1, 0)
 DEF(st_i32, 0, 2, 1, 0)
 /* arith */
-DEF(div_i32, 1, 2, 0, 0)
 DEF(divu_i32, 1, 2, 0, 0)
 DEF(rem_i32, 1, 2, 0, 0)
 DEF(remu_i32, 1, 2, 0, 0)
@@ -116,7 +116,6 @@ DEF(st16_i64, 0, 2, 1, 0)
 DEF(st32_i64, 0, 2, 1, 0)
 DEF(st_i64, 0, 2, 1, 0)
 /* arith */
-DEF(div_i64, 1, 2, 0, 0)
 DEF(divu_i64, 1, 2, 0, 0)
 DEF(rem_i64, 1, 2, 0, 0)
 DEF(remu_i64, 1, 2, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index f8d9a4d90e..127d0f9390 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -544,13 +544,15 @@ static uint64_t do_constant_folding_2(TCGOpcode op, TCGType type,
         muls64(&l64, &h64, x, y);
         return h64;
 
-    case INDEX_op_div_i32:
+    case INDEX_op_divs:
         /* Avoid crashing on divide by zero, otherwise undefined. */
-        return (int32_t)x / ((int32_t)y ? : 1);
+        if (type == TCG_TYPE_I32) {
+            return (int32_t)x / ((int32_t)y ? : 1);
+        }
+        return (int64_t)x / ((int64_t)y ? : 1);
+
     case INDEX_op_divu_i32:
         return (uint32_t)x / ((uint32_t)y ? : 1);
-    case INDEX_op_div_i64:
-        return (int64_t)x / ((int64_t)y ? : 1);
     case INDEX_op_divu_i64:
         return (uint64_t)x / ((uint64_t)y ? : 1);
 
@@ -2893,7 +2895,7 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(deposit):
             done = fold_deposit(&ctx, op);
             break;
-        CASE_OP_32_64(div):
+        case INDEX_op_divs:
         CASE_OP_32_64(divu):
             done = fold_divide(&ctx, op);
             break;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 69e50f968f..9dba520d40 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -601,8 +601,8 @@ void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
 
 void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
-    if (tcg_op_supported(INDEX_op_div_i32, TCG_TYPE_I32, 0)) {
-        tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
+        tcg_gen_op3_i32(INDEX_op_divs, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div2_i32) {
         TCGv_i32 t0 = tcg_temp_ebb_new_i32();
         tcg_gen_sari_i32(t0, arg1, 31);
@@ -617,9 +617,9 @@ void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
 {
     if (TCG_TARGET_HAS_rem_i32) {
         tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_div_i32, TCG_TYPE_I32, 0)) {
+    } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I32, 0)) {
         TCGv_i32 t0 = tcg_temp_ebb_new_i32();
-        tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2);
+        tcg_gen_op3_i32(INDEX_op_divs, t0, arg1, arg2);
         tcg_gen_mul_i32(t0, t0, arg2);
         tcg_gen_sub_i32(ret, arg1, t0);
         tcg_temp_free_i32(t0);
@@ -1969,8 +1969,8 @@ void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
 
 void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
-    if (tcg_op_supported(INDEX_op_div_i64, TCG_TYPE_I64, 0)) {
-        tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2);
+    if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
+        tcg_gen_op3_i64(INDEX_op_divs, ret, arg1, arg2);
     } else if (TCG_TARGET_HAS_div2_i64) {
         TCGv_i64 t0 = tcg_temp_ebb_new_i64();
         tcg_gen_sari_i64(t0, arg1, 63);
@@ -1985,9 +1985,9 @@ void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
 {
     if (TCG_TARGET_HAS_rem_i64) {
         tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2);
-    } else if (tcg_op_supported(INDEX_op_div_i64, TCG_TYPE_I64, 0)) {
+    } else if (tcg_op_supported(INDEX_op_divs, TCG_TYPE_I64, 0)) {
         TCGv_i64 t0 = tcg_temp_ebb_new_i64();
-        tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_divs, t0, arg1, arg2);
         tcg_gen_mul_i64(t0, t0, arg2);
         tcg_gen_sub_i64(ret, arg1, t0);
         tcg_temp_free_i64(t0);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 52a8842cd3..0edac806e7 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1020,8 +1020,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
     OUTOP(INDEX_op_add, TCGOutOpBinary, outop_add),
     OUTOP(INDEX_op_and, TCGOutOpBinary, outop_and),
     OUTOP(INDEX_op_andc, TCGOutOpBinary, outop_andc),
-    OUTOP(INDEX_op_div_i32, TCGOutOpBinary, outop_divs),
-    OUTOP(INDEX_op_div_i64, TCGOutOpBinary, outop_divs),
+    OUTOP(INDEX_op_divs, TCGOutOpBinary, outop_divs),
     OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
     OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
     OUTOP(INDEX_op_mulsh, TCGOutOpBinary, outop_mulsh),
@@ -5414,8 +5413,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
     case INDEX_op_add:
     case INDEX_op_and:
     case INDEX_op_andc:
-    case INDEX_op_div_i32:
-    case INDEX_op_div_i64:
+    case INDEX_op_divs:
     case INDEX_op_eqv:
     case INDEX_op_mul:
     case INDEX_op_mulsh:
diff --git a/tcg/tci.c b/tcg/tci.c
index 4ecbb2d335..4b3ca53bc5 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -720,7 +720,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
 
         /* Arithmetic operations (64 bit). */
 
-        case INDEX_op_div_i64:
+        case INDEX_op_divs:
             tci_args_rrr(insn, &r0, &r1, &r2);
             regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
             break;
@@ -1071,6 +1071,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_add:
     case INDEX_op_and:
     case INDEX_op_andc:
+    case INDEX_op_divs:
     case INDEX_op_eqv:
     case INDEX_op_mul:
     case INDEX_op_nand:
@@ -1079,8 +1080,6 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     case INDEX_op_orc:
     case INDEX_op_sub:
     case INDEX_op_xor:
-    case INDEX_op_div_i32:
-    case INDEX_op_div_i64:
     case INDEX_op_rem_i32:
     case INDEX_op_rem_i64:
     case INDEX_op_divu_i32:
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index fe922d1dac..a833b3b7b2 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -277,7 +277,7 @@ Arithmetic
 
      - | *t0* = *t1* * *t2*
 
-   * - div_i32/i64 *t0*, *t1*, *t2*
+   * - divs *t0*, *t1*, *t2*
 
      - | *t0* = *t1* / *t2* (signed)
        | Undefined behavior if division by zero or overflow.
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index c8e86a3253..4a556e2ce7 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -651,7 +651,7 @@ static void tgen_divs(TCGContext *s, TCGType type,
 {
     TCGOpcode opc = (type == TCG_TYPE_I32
                      ? INDEX_op_tci_divs32
-                     : INDEX_op_div_i64);
+                     : INDEX_op_divs);
     tcg_out_op_rrr(s, opc, a0, a1, a2);
 }
 
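Since both widths now funnel into one case label in do_constant_folding_2(), the fold branches on the op's TCGType instead of the opcode. Here is that fold modeled as a standalone function (hypothetical name, not QEMU code; the GNU `?:` shorthand from the patch is written out as a plain ternary):

```c
#include <stdint.h>

/* Standalone model of the INDEX_op_divs constant fold.  A zero
 * divisor is replaced by 1: the guest result is undefined in that
 * case anyway, and the guard only keeps the translator itself from
 * faulting while folding.  Signed overflow (INT*_MIN / -1) remains
 * undefined, as the comment in the patch notes. */
static uint64_t fold_divs_model(int is_i32, uint64_t x, uint64_t y)
{
    if (is_i32) {
        return (int32_t)x / ((int32_t)y ? (int32_t)y : 1);
    }
    return (int64_t)x / ((int64_t)y ? (int64_t)y : 1);
}
```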
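The tcg-op.c hunks also preserve the fallback used when a backend lacks a remainder opcode but does implement divs: tcg_gen_rem_i32/i64 synthesize the remainder as t0 = a / b; t0 *= b; ret = a - t0. A minimal standalone check of that identity (hypothetical test harness, not QEMU code; with C's truncating division the result matches C's % operator, the sign following the dividend):

```c
#include <assert.h>
#include <stdint.h>

/* The rem-via-div expansion from tcg_gen_rem_i32(), step by step. */
static int32_t rem_via_div(int32_t a, int32_t b)
{
    int32_t t0 = a / b;     /* INDEX_op_divs   */
    t0 = t0 * b;            /* tcg_gen_mul_i32 */
    return a - t0;          /* tcg_gen_sub_i32 */
}

int main(void)
{
    assert(rem_via_div(7, 3) == 7 % 3);     /* ==  1 */
    assert(rem_via_div(-7, 3) == -7 % 3);   /* == -1 */
    assert(rem_via_div(7, -3) == 7 % -3);   /* ==  1 */
    return 0;
}
```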