[051/nnn] poly_int: emit_group_load/store

Message ID 87r2ttlqze.fsf@linaro.org
State New

Commit Message

Richard Sandiford Oct. 23, 2017, 5:21 p.m. UTC
This patch changes the sizes passed to emit_group_load and
emit_group_store from int to poly_int64.
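
To illustrate the new idiom (a sketch distilled from the hunks below,
not extra patch content): a negative size used to encode "unknown",
and the fixed-width overrun test becomes a may/must predicate that is
well defined even when the operands contain a runtime vector-length
term.

    /* Before: ssize < 0 meant "unknown", and the overrun check was a
       plain integer comparison.  */
    if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
      ...

    /* After: known_size_p tests for a known size, may_gt asks whether
       the fragment could overrun for some runtime vector length, and
       ordered_p asserts that the two sizes are comparable at all.  */
    gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
    if (known_size_p (ssize) && may_gt (bytepos + bytelen, ssize))
      ...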


2017-10-23  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* expr.h (emit_group_load, emit_group_load_into_temps)
	(emit_group_store): Take the size as a poly_int64 rather than an int.
	* expr.c (emit_group_load_1, emit_group_load): Likewise.
	(emit_group_load_into_temps, emit_group_store): Likewise.

Comments

Jeff Law Dec. 5, 2017, 11:26 p.m. UTC | #1
On 10/23/2017 11:21 AM, Richard Sandiford wrote:
> This patch changes the sizes passed to emit_group_load and
> emit_group_store from int to poly_int64.
>
>
> 2017-10-23  Richard Sandiford  <richard.sandiford@linaro.org>
> 	    Alan Hayward  <alan.hayward@arm.com>
> 	    David Sherwood  <david.sherwood@arm.com>
>
> gcc/
> 	* expr.h (emit_group_load, emit_group_load_into_temps)
> 	(emit_group_store): Take the size as a poly_int64 rather than an int.
> 	* expr.c (emit_group_load_1, emit_group_load): Likewise.
> 	(emit_group_load_into_temps, emit_group_store): Likewise.
>
OK.
jeff

Patch

Index: gcc/expr.h
===================================================================
--- gcc/expr.h	2017-10-23 17:18:56.434286222 +0100
+++ gcc/expr.h	2017-10-23 17:20:49.571719793 +0100
@@ -128,10 +128,10 @@  extern rtx gen_group_rtx (rtx);
 
 /* Load a BLKmode value into non-consecutive registers represented by a
    PARALLEL.  */
-extern void emit_group_load (rtx, rtx, tree, int);
+extern void emit_group_load (rtx, rtx, tree, poly_int64);
 
 /* Similarly, but load into new temporaries.  */
-extern rtx emit_group_load_into_temps (rtx, rtx, tree, int);
+extern rtx emit_group_load_into_temps (rtx, rtx, tree, poly_int64);
 
 /* Move a non-consecutive group of registers represented by a PARALLEL into
    a non-consecutive group of registers represented by a PARALLEL.  */
@@ -142,7 +142,7 @@  extern rtx emit_group_move_into_temps (r
 
 /* Store a BLKmode value from non-consecutive registers represented by a
    PARALLEL.  */
-extern void emit_group_store (rtx, rtx, tree, int);
+extern void emit_group_store (rtx, rtx, tree, poly_int64);
 
 extern rtx maybe_emit_group_store (rtx, tree);
 
Index: gcc/expr.c
===================================================================
--- gcc/expr.c	2017-10-23 17:18:57.860160878 +0100
+++ gcc/expr.c	2017-10-23 17:20:49.571719793 +0100
@@ -2095,7 +2095,8 @@  gen_group_rtx (rtx orig)
    into corresponding XEXP (XVECEXP (DST, 0, i), 0) element.  */
 
 static void
-emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
+emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
+		   poly_int64 ssize)
 {
   rtx src;
   int start, i;
@@ -2134,12 +2135,16 @@  emit_group_load_1 (rtx *tmps, rtx dst, r
   for (i = start; i < XVECLEN (dst, 0); i++)
     {
       machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
-      HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
-      unsigned int bytelen = GET_MODE_SIZE (mode);
-      int shift = 0;
-
-      /* Handle trailing fragments that run over the size of the struct.  */
-      if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
+      poly_int64 bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
+      poly_int64 bytelen = GET_MODE_SIZE (mode);
+      poly_int64 shift = 0;
+
+      /* Handle trailing fragments that run over the size of the struct.
+	 It's the target's responsibility to make sure that the fragment
+	 cannot be strictly smaller in some cases and strictly larger
+	 in others.  */
+      gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
+      if (known_size_p (ssize) && may_gt (bytepos + bytelen, ssize))
 	{
 	  /* Arrange to shift the fragment to where it belongs.
 	     extract_bit_field loads to the lsb of the reg.  */
@@ -2153,7 +2158,7 @@  emit_group_load_1 (rtx *tmps, rtx dst, r
 	      )
 	    shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
 	  bytelen = ssize - bytepos;
-	  gcc_assert (bytelen > 0);
+	  gcc_assert (may_gt (bytelen, 0));
 	}
 
       /* If we won't be loading directly from memory, protect the real source
@@ -2177,33 +2182,34 @@  emit_group_load_1 (rtx *tmps, rtx dst, r
       if (MEM_P (src)
 	  && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
 	      || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
-	  && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
-	  && bytelen == GET_MODE_SIZE (mode))
+	  && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
+	  && must_eq (bytelen, GET_MODE_SIZE (mode)))
 	{
 	  tmps[i] = gen_reg_rtx (mode);
 	  emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
 	}
       else if (COMPLEX_MODE_P (mode)
 	       && GET_MODE (src) == mode
-	       && bytelen == GET_MODE_SIZE (mode))
+	       && must_eq (bytelen, GET_MODE_SIZE (mode)))
 	/* Let emit_move_complex do the bulk of the work.  */
 	tmps[i] = src;
       else if (GET_CODE (src) == CONCAT)
 	{
-	  unsigned int slen = GET_MODE_SIZE (GET_MODE (src));
-	  unsigned int slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
-	  unsigned int elt = bytepos / slen0;
-	  unsigned int subpos = bytepos % slen0;
+	  poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
+	  poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
+	  unsigned int elt;
+	  poly_int64 subpos;
 
-	  if (subpos + bytelen <= slen0)
+	  if (can_div_trunc_p (bytepos, slen0, &elt, &subpos)
+	      && must_le (subpos + bytelen, slen0))
 	    {
 	      /* The following assumes that the concatenated objects all
 		 have the same size.  In this case, a simple calculation
 		 can be used to determine the object and the bit field
 		 to be extracted.  */
 	      tmps[i] = XEXP (src, elt);
-	      if (subpos != 0
-		  || subpos + bytelen != slen0
+	      if (maybe_nonzero (subpos)
+		  || may_ne (subpos + bytelen, slen0)
 		  || (!CONSTANT_P (tmps[i])
 		      && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)))
 		tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
@@ -2215,7 +2221,7 @@  emit_group_load_1 (rtx *tmps, rtx dst, r
 	    {
 	      rtx mem;
 
-	      gcc_assert (!bytepos);
+	      gcc_assert (known_zero (bytepos));
 	      mem = assign_stack_temp (GET_MODE (src), slen);
 	      emit_move_insn (mem, src);
 	      tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
@@ -2234,23 +2240,21 @@  emit_group_load_1 (rtx *tmps, rtx dst, r
 
 	  mem = assign_stack_temp (GET_MODE (src), slen);
 	  emit_move_insn (mem, src);
-	  tmps[i] = adjust_address (mem, mode, (int) bytepos);
+	  tmps[i] = adjust_address (mem, mode, bytepos);
 	}
       else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
                && XVECLEN (dst, 0) > 1)
         tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos);
       else if (CONSTANT_P (src))
 	{
-	  HOST_WIDE_INT len = (HOST_WIDE_INT) bytelen;
-
-	  if (len == ssize)
+	  if (must_eq (bytelen, ssize))
 	    tmps[i] = src;
 	  else
 	    {
 	      rtx first, second;
 
 	      /* TODO: const_wide_int can have sizes other than this...  */
-	      gcc_assert (2 * len == ssize);
+	      gcc_assert (must_eq (2 * bytelen, ssize));
 	      split_double (src, &first, &second);
 	      if (i)
 		tmps[i] = second;
@@ -2265,7 +2269,7 @@  emit_group_load_1 (rtx *tmps, rtx dst, r
 				     bytepos * BITS_PER_UNIT, 1, NULL_RTX,
 				     mode, mode, false, NULL);
 
-      if (shift)
+      if (maybe_nonzero (shift))
 	tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
 				shift, tmps[i], 0);
     }
@@ -2277,7 +2281,7 @@  emit_group_load_1 (rtx *tmps, rtx dst, r
    if not known.  */
 
 void
-emit_group_load (rtx dst, rtx src, tree type, int ssize)
+emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
 {
   rtx *tmps;
   int i;
@@ -2300,7 +2304,7 @@  emit_group_load (rtx dst, rtx src, tree
    in the right place.  */
 
 rtx
-emit_group_load_into_temps (rtx parallel, rtx src, tree type, int ssize)
+emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
 {
   rtvec vec;
   int i;
@@ -2371,7 +2375,8 @@  emit_group_move_into_temps (rtx src)
    known.  */
 
 void
-emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, int ssize)
+emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
+		  poly_int64 ssize)
 {
   rtx *tmps, dst;
   int start, finish, i;
@@ -2502,24 +2507,28 @@  emit_group_store (rtx orig_dst, rtx src,
   /* Process the pieces.  */
   for (i = start; i < finish; i++)
     {
-      HOST_WIDE_INT bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
+      poly_int64 bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
       machine_mode mode = GET_MODE (tmps[i]);
-      unsigned int bytelen = GET_MODE_SIZE (mode);
-      unsigned int adj_bytelen;
+      poly_int64 bytelen = GET_MODE_SIZE (mode);
+      poly_uint64 adj_bytelen;
       rtx dest = dst;
 
-      /* Handle trailing fragments that run over the size of the struct.  */
-      if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
+      /* Handle trailing fragments that run over the size of the struct.
+	 It's the target's responsibility to make sure that the fragment
+	 cannot be strictly smaller in some cases and strictly larger
+	 in others.  */
+      gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
+      if (known_size_p (ssize) && may_gt (bytepos + bytelen, ssize))
 	adj_bytelen = ssize - bytepos;
       else
 	adj_bytelen = bytelen;
 
       if (GET_CODE (dst) == CONCAT)
 	{
-	  if (bytepos + adj_bytelen
-	      <= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0))))
+	  if (must_le (bytepos + adj_bytelen,
+		       GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
 	    dest = XEXP (dst, 0);
-	  else if (bytepos >= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0))))
+	  else if (must_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
 	    {
 	      bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
 	      dest = XEXP (dst, 1);
@@ -2529,7 +2538,7 @@  emit_group_store (rtx orig_dst, rtx src,
 	      machine_mode dest_mode = GET_MODE (dest);
 	      machine_mode tmp_mode = GET_MODE (tmps[i]);
 
-	      gcc_assert (bytepos == 0 && XVECLEN (src, 0));
+	      gcc_assert (known_zero (bytepos) && XVECLEN (src, 0));
 
 	      if (GET_MODE_ALIGNMENT (dest_mode)
 		  >= GET_MODE_ALIGNMENT (tmp_mode))
@@ -2554,7 +2563,7 @@  emit_group_store (rtx orig_dst, rtx src,
 	}
 
       /* Handle trailing fragments that run over the size of the struct.  */
-      if (ssize >= 0 && bytepos + (HOST_WIDE_INT) bytelen > ssize)
+      if (known_size_p (ssize) && may_gt (bytepos + bytelen, ssize))
 	{
 	  /* store_bit_field always takes its value from the lsb.
 	     Move the fragment to the lsb if it's not already there.  */
@@ -2567,7 +2576,7 @@  emit_group_store (rtx orig_dst, rtx src,
 #endif
 	      )
 	    {
-	      int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
+	      poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
 	      tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
 				      shift, tmps[i], 0);
 	    }
@@ -2583,8 +2592,9 @@  emit_group_store (rtx orig_dst, rtx src,
       else if (MEM_P (dest)
 	       && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
 		   || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
-	       && bytepos * BITS_PER_UNIT % GET_MODE_ALIGNMENT (mode) == 0
-	       && bytelen == GET_MODE_SIZE (mode))
+	       && multiple_p (bytepos * BITS_PER_UNIT,
+			      GET_MODE_ALIGNMENT (mode))
+	       && must_eq (bytelen, GET_MODE_SIZE (mode)))
 	emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
 
       else
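
A usage note (the call below is illustrative, not from the patch):
existing callers that pass int_size_in_bytes (type) are unaffected,
since HOST_WIDE_INT converts implicitly to poly_int64.  What the new
signature allows is passing a size that is only known at runtime, such
as the size of a variable-length vector mode:

    /* Illustrative only: dst is a PARALLEL describing the destination
       registers and src holds a possibly variable-length value, so its
       mode size is a poly_int64 rather than a compile-time constant.  */
    emit_group_load (dst, src, type, GET_MODE_SIZE (GET_MODE (src)));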