[v3,34/48] tcg/optimize: Split out fold_to_not

Message ID 20211021210539.825582-35-richard.henderson@linaro.org
State New
Series tcg: optimize redundant sign extensions

Commit Message

Richard Henderson Oct. 21, 2021, 9:05 p.m. UTC
Split out the conditional conversion from a more complex logical
operation to a simple NOT.  Create a couple more helpers to make
this easy for the outer-most logical operations.
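
For reference, the identities these folds target can be checked in isolation.
The sketch below is illustrative only (not part of the patch; the helper names
in comments refer to the patch, the check function itself is made up) and just
asserts the bitwise rewrites to NOT that fold_xi_to_not/fold_ix_to_not are
meant to recognize:

    #include <assert.h>
    #include <stdint.h>

    /* A constant 0 or -1 on one side of these operations reduces the
     * whole operation to NOT of the other operand. */
    static void check_not_identities(uint64_t x)
    {
        assert((x ^ ~0ull) == ~x);      /* xor(x, -1)  -> not(x) */
        assert(~(x & ~0ull) == ~x);     /* nand(x, -1) -> not(x) */
        assert(~(x | 0) == ~x);         /* nor(x, 0)   -> not(x) */
        assert(~(x ^ 0) == ~x);         /* eqv(x, 0)   -> not(x) */
        assert((~0ull & ~x) == ~x);     /* andc(-1, x) -> not(x) */
        assert((0 | ~x) == ~x);         /* orc(0, x)   -> not(x) */
    }

    int main(void)
    {
        check_not_identities(0x1234abcd5678ef90ull);
        return 0;
    }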

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 tcg/optimize.c | 154 +++++++++++++++++++++++++++----------------------
 1 file changed, 86 insertions(+), 68 deletions(-)

-- 
2.25.1

Comments

Luis Fernando Fujita Pires Oct. 25, 2021, 2:17 p.m. UTC | #1
From: Richard Henderson <richard.henderson@linaro.org>

> Split out the conditional conversion from a more complex logical operation to a
> simple NOT.  Create a couple more helpers to make this easy for the outer-most
> logical operations.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  tcg/optimize.c | 154 +++++++++++++++++++++++++++----------------------
>  1 file changed, 86 insertions(+), 68 deletions(-)

>  static bool fold_eqv(OptContext *ctx, TCGOp *op)  {
> -    return fold_const2(ctx, op);
> +    if (fold_const2(ctx, op) ||
> +        fold_xi_to_not(ctx, op, 0)) {

Should be fold_ix_to_not (not fold_xi_to_not).

>  static bool fold_orc(OptContext *ctx, TCGOp *op)  {
> -    return fold_const2(ctx, op);
> +    if (fold_const2(ctx, op) ||
> +        fold_xi_to_not(ctx, op, 0)) {

Same here.

--
Luis Pires
Instituto de Pesquisas ELDORADO
Richard Henderson Oct. 25, 2021, 5:31 p.m. UTC | #2
On 10/25/21 7:17 AM, Luis Fernando Fujita Pires wrote:
> From: Richard Henderson <richard.henderson@linaro.org>
> 
>> Split out the conditional conversion from a more complex logical operation to a
>> simple NOT.  Create a couple more helpers to make this easy for the outer-most
>> logical operations.
>>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>   tcg/optimize.c | 154 +++++++++++++++++++++++++++----------------------
>>   1 file changed, 86 insertions(+), 68 deletions(-)
> 
>>   static bool fold_eqv(OptContext *ctx, TCGOp *op)  {
>> -    return fold_const2(ctx, op);
>> +    if (fold_const2(ctx, op) ||
>> +        fold_xi_to_not(ctx, op, 0)) {
> 
> Should be fold_ix_to_not (not fold_xi_to_not).


No, because for eqv we expect the second operand to be the constant -- eqv is commutative.
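
(Roughly why checking only the second operand suffices here -- a simplified
sketch with a made-up helper name, assuming the surrounding tcg/optimize.c
definitions of TCGOp, TCGArg and arg_is_const; the actual canonicalization is
done elsewhere in the optimizer before the fold_* helpers run:)

    /* For a commutative opcode, a constant first operand is swapped into
     * the second slot, so fold_xi_to_not() only has to test args[2]. */
    static void canonicalize_commutative_sketch(TCGOp *op)
    {
        if (arg_is_const(op->args[1]) && !arg_is_const(op->args[2])) {
            TCGArg tmp = op->args[1];
            op->args[1] = op->args[2];
            op->args[2] = tmp;
        }
    }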

> 
>>   static bool fold_orc(OptContext *ctx, TCGOp *op)  {
>> -    return fold_const2(ctx, op);
>> +    if (fold_const2(ctx, op) ||
>> +        fold_xi_to_not(ctx, op, 0)) {
But for orc you are correct.  Thanks.


r~
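
Concretely, the orc case settled above comes down to orc(a, b) == a | ~b: a
zero first operand reduces to NOT of the second operand, while a zero second
operand yields all-ones instead, which is why fold_ix_to_not is the right
helper for orc. A quick standalone check (illustrative only, not part of the
thread):

    #include <assert.h>
    #include <stdint.h>

    /* orc(a, b) == a | ~b */
    static void check_orc(uint64_t x)
    {
        assert((0 | ~x) == ~x);         /* orc(0, x) -> not(x): foldable    */
        assert((x | ~0ull) == ~0ull);   /* orc(x, 0) -> all-ones, not a NOT */
    }

    int main(void)
    {
        check_orc(0x1234abcd5678ef90ull);
        return 0;
    }
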
Luis Fernando Fujita Pires Oct. 25, 2021, 6:13 p.m. UTC | #3
From: Richard Henderson <richard.henderson@linaro.org>

> >>   static bool fold_eqv(OptContext *ctx, TCGOp *op)  {
> >> -    return fold_const2(ctx, op);
> >> +    if (fold_const2(ctx, op) ||
> >> +        fold_xi_to_not(ctx, op, 0)) {
> >
> > Should be fold_ix_to_not (not fold_xi_to_not).
> 
> No, because for eqv we expect the second operand to be the constant -- eqv is
> commutative.


Ah, got it! The previous code was wrong, and I failed to notice that eqv would've had its arguments swapped to have the constant as second.

--
Luis Pires
Instituto de Pesquisas ELDORADO
Richard Henderson Oct. 25, 2021, 6:35 p.m. UTC | #4
On 10/25/21 11:13 AM, Luis Fernando Fujita Pires wrote:
> From: Richard Henderson <richard.henderson@linaro.org>
> 
>>>>    static bool fold_eqv(OptContext *ctx, TCGOp *op)  {
>>>> -    return fold_const2(ctx, op);
>>>> +    if (fold_const2(ctx, op) ||
>>>> +        fold_xi_to_not(ctx, op, 0)) {
>>>
>>> Should be fold_ix_to_not (not fold_xi_to_not).
>>
>> No, because for eqv we expect the second operand to be the constant -- eqv is
>> commutative.
> 
> Ah, got it! The previous code was wrong, and I failed to notice that eqv would've had its arguments swapped to have the constant as second.


Ah!  I failed to notice that the previous code was wrong.  ;-)


r~

Patch

diff --git a/tcg/optimize.c b/tcg/optimize.c
index c8b6afc745..71b4c3edb4 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -694,6 +694,52 @@  static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+/*
+ * Convert @op to NOT, if NOT is supported by the host.
+ * Return true if the conversion is successful, which will still
+ * indicate that the processing is complete.
+ */
+static bool fold_not(OptContext *ctx, TCGOp *op);
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
+{
+    TCGOpcode not_op;
+    bool have_not;
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        not_op = INDEX_op_not_i32;
+        have_not = TCG_TARGET_HAS_not_i32;
+        break;
+    case TCG_TYPE_I64:
+        not_op = INDEX_op_not_i64;
+        have_not = TCG_TARGET_HAS_not_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        not_op = INDEX_op_not_vec;
+        have_not = TCG_TARGET_HAS_not_vec;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (have_not) {
+        op->opc = not_op;
+        op->args[1] = op->args[idx];
+        return fold_not(ctx, op);
+    }
+    return false;
+}
+
+/* If the binary operation has first argument @i, fold to NOT. */
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+        return fold_to_not(ctx, op, 2);
+    }
+    return false;
+}
+
 /* If the binary operation has second argument @i, fold to @i. */
 static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -703,6 +749,15 @@  static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
     return false;
 }
 
+/* If the binary operation has second argument @i, fold to NOT. */
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+        return fold_to_not(ctx, op, 1);
+    }
+    return false;
+}
+
 /* If the binary operation has both arguments equal, fold to @i. */
 static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -781,7 +836,8 @@  static bool fold_and(OptContext *ctx, TCGOp *op)
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_ix_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -982,7 +1038,11 @@  static bool fold_dup2(OptContext *ctx, TCGOp *op)
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_extract(OptContext *ctx, TCGOp *op)
@@ -1120,7 +1180,11 @@  static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, -1)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
@@ -1130,12 +1194,22 @@  static bool fold_neg(OptContext *ctx, TCGOp *op)
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_not(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    /* Because of fold_to_not, we want to always return true, via finish. */
+    finish_folding(ctx, op);
+    return true;
 }
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
@@ -1149,7 +1223,11 @@  static bool fold_or(OptContext *ctx, TCGOp *op)
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
@@ -1280,7 +1358,8 @@  static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -1434,67 +1513,6 @@  void tcg_optimize(TCGContext *s)
                 }
             }
             break;
-        CASE_OP_32_64_VEC(xor):
-        CASE_OP_32_64(nand):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == -1) {
-                i = 1;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64(nor):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == 0) {
-                i = 1;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64_VEC(andc):
-            if (!arg_is_const(op->args[2])
-                && arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == -1) {
-                i = 2;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64_VEC(orc):
-        CASE_OP_32_64(eqv):
-            if (!arg_is_const(op->args[2])
-                && arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == 0) {
-                i = 2;
-                goto try_not;
-            }
-            break;
-        try_not:
-            {
-                TCGOpcode not_op;
-                bool have_not;
-
-                switch (ctx.type) {
-                default:
-                    not_op = INDEX_op_not_vec;
-                    have_not = TCG_TARGET_HAS_not_vec;
-                    break;
-                case TCG_TYPE_I64:
-                    not_op = INDEX_op_not_i64;
-                    have_not = TCG_TARGET_HAS_not_i64;
-                    break;
-                case TCG_TYPE_I32:
-                    not_op = INDEX_op_not_i32;
-                    have_not = TCG_TARGET_HAS_not_i32;
-                    break;
-                }
-                if (!have_not) {
-                    break;
-                }
-                op->opc = not_op;
-                reset_temp(op->args[0]);
-                op->args[1] = op->args[i];
-                continue;
-            }
         default:
             break;
         }