[RFC,PR40921] Convert x + (-y * z * z) into x - y * z * z

Message ID 56CFC02F.2070801@linaro.org
State New

Commit Message

Kugan Vivekanandarajah Feb. 26, 2016, 3:02 a.m. UTC
Hi,

This is an attempt to fix the missed optimization x + (-y * z * z)
=> x - y * z * z reported in PR40921.
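
For reference, here is the transformation at source level (a sketch only;
the function names below are illustrative and not part of the patch):

    /* Before: the negate feeds a chain of multiplies, so the negation
       is currently emitted explicitly.  */
    double before (double x, double y, double z)
    {
      return x + (-y * z * z);
    }

    /* After: the negate is folded into the addition, which becomes
       a subtraction.  */
    double after (double x, double y, double z)
    {
      return x - y * z * z;
    }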

Regression tested and bootstrapped on x86_64-linux-gnu with no new 
regressions.

Is this OK for next stage1?

Thanks,
Kugan


gcc/ChangeLog:

2016-02-26  Kugan Vivekanandarajah  <kuganv@linaro.org>

	PR middle-end/40921
	* tree-ssa-reassoc.c (propagate_neg_to_sub_or_add): New.
	(reassociate_bb): Call propagate_neg_to_sub_or_add.


gcc/testsuite/ChangeLog:

2016-02-26  Kugan Vivekanandarajah  <kuganv@linaro.org>

	PR middle-end/40921
	* gcc.dg/tree-ssa/pr40921.c: New test.

Patch

diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr40921.c b/gcc/testsuite/gcc.dg/tree-ssa/pr40921.c
index e69de29..6a3529b 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/pr40921.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr40921.c
@@ -0,0 +1,11 @@ 
+
+/* PR middle-end/40921.  */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-reassoc1 -fno-rounding-math" } */
+
+double foo (double x, double y, double z)
+{
+    return x + (-y * z*z);
+}
+
+/* { dg-final { scan-tree-dump-times "= -" 0 "reassoc1" } } */
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index e54700e..f99635b 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -4784,6 +4784,78 @@  transform_stmt_to_multiply (gimple_stmt_iterator *gsi, gimple *stmt,
     }
 }
 
+/* Propagate NEGATE_EXPR to MINUS_EXPR/PLUS_EXPR when the negated
+   expression is multiplied and used in MINUS_EXPR/PLUS_EXPR.  */
+static void
+propagate_neg_to_sub_or_add (gimple_stmt_iterator *gsi, gimple *stmt)
+{
+  tree lhs = gimple_assign_lhs (stmt);
+  tree rhs1, rhs2, mult_lhs;
+  gimple *use_stmt;
+  gimple *use_stmt2;
+  use_operand_p use;
+  enum tree_code code;
+  gassign *g;
+
+  /* Note that -frounding-math should disable the proposed
+     optimization.  */
+  if (flag_rounding_math)
+    return;
+
+  if (!single_imm_use (lhs, &use, &use_stmt))
+    return;
+
+  if (!is_gimple_assign (use_stmt))
+    return;
+
+  code = gimple_assign_rhs_code (use_stmt);
+  if (code != MULT_EXPR)
+    return;
+  mult_lhs = gimple_assign_lhs (use_stmt);
+  while (code == MULT_EXPR)
+    {
+      if (!single_imm_use (mult_lhs, &use, &use_stmt2))
+	break;
+      if (!is_gimple_assign (use_stmt2))
+	break;
+      code = gimple_assign_rhs_code (use_stmt2);
+      mult_lhs = gimple_assign_lhs (use_stmt2);
+      use_stmt = use_stmt2;
+    }
+
+  if (code != PLUS_EXPR
+      && code != MINUS_EXPR)
+    return;
+
+  lhs = gimple_assign_lhs (use_stmt);
+  rhs1 = gimple_assign_rhs1 (use_stmt);
+  rhs2 = gimple_assign_rhs2 (use_stmt);
+
+  if (rhs1 == USE_FROM_PTR (use))
+    {
+      if (code == MINUS_EXPR)
+	return;
+      std::swap (rhs1, rhs2);
+      code = MINUS_EXPR;
+    }
+  else
+    {
+      if (code == PLUS_EXPR)
+	code = MINUS_EXPR;
+      else
+	code = PLUS_EXPR;
+    }
+
+  g = gimple_build_assign (lhs, code, rhs1, rhs2);
+  gimple_stmt_iterator gsi2 = gsi_for_stmt (use_stmt);
+  gsi_replace (&gsi2, g, true);
+
+  lhs = gimple_assign_lhs (stmt);
+  rhs1 = gimple_assign_rhs1 (stmt);
+  g = gimple_build_assign (lhs, SSA_NAME, rhs1);
+  gsi_replace (gsi, g, true);
+}
+
 /* Reassociate expressions in basic block BB and its post-dominator as
    children.
 
@@ -4809,6 +4881,11 @@  reassociate_bb (basic_block bb)
 	{
 	  tree lhs, rhs1, rhs2;
 	  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
+	  if (rhs_code == NEGATE_EXPR)
+	    {
+	      propagate_neg_to_sub_or_add (&gsi, stmt);
+	      continue;
+	    }
 
 	  /* If this is not a gimple binary expression, there is
 	     nothing for us to do with it.  */
@@ -4884,6 +4961,7 @@  reassociate_bb (basic_block bb)
 	      if (rhs_code == MULT_EXPR)
 		attempt_builtin_copysign (&ops);
 
+
 	      if (reassoc_insert_powi_p
 		  && rhs_code == MULT_EXPR
 		  && flag_unsafe_math_optimizations)
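
Not part of the patch, but for illustration: a sketch of the single-use
chain that propagate_neg_to_sub_or_add walks, written out with explicit
temporaries (names are illustrative only).  The loop follows the negate
through the MULT_EXPRs until it reaches the PLUS_EXPR/MINUS_EXPR and
then rewrites that statement:

    double chain (double x, double y, double z)
    {
      double n  = -y;       /* NEGATE_EXPR, single use              */
      double m1 = n * z;    /* MULT_EXPR, single use                */
      double m2 = m1 * z;   /* MULT_EXPR, single use                */
      return x + m2;        /* PLUS_EXPR -> rewritten to x - y*z*z  */
    }

    /* Operand order matters: if the negated product is the first
       operand of a PLUS_EXPR, the operands are swapped and the code
       becomes MINUS_EXPR; if it is the first operand of a MINUS_EXPR,
       as in (-y * z * z) - x, the function gives up.  */
    double swapped (double x, double y, double z)
    {
      return (-y * z * z) + x;   /* should be handled by the swap path */
    }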