[AArch64] Generalise aarch64_simd_valid_immediate for SVE

Message ID: 87372spw8r.fsf@linaro.org
State: New
Series: [AArch64] Generalise aarch64_simd_valid_immediate for SVE

Commit Message

Richard Sandiford Jan. 26, 2018, 1:47 p.m. UTC
The current aarch64_simd_valid_immediate code predates the move
to the new CONST_VECTOR representation, so for variable-length SVE
it only handles duplicates of single elements, rather than duplicates
of repeating patterns.
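
As a rough standalone model of how the new representation encodes a
duplicated vector (the struct and names below are made up for
illustration; the real accessors are CONST_VECTOR_NPATTERNS,
CONST_VECTOR_NELTS_PER_PATTERN and CONST_VECTOR_ENCODED_ELT):

#include <stdio.h>

/* A duplicated CONST_VECTOR stores only the leading pattern;
   nelts_per_pattern == 1 marks a pure duplicate.  */
struct encoded_vec
{
  unsigned int npatterns;
  unsigned int nelts_per_pattern;
  int encoded[2];
};

/* For a duplicate, element I of the (possibly variable-length)
   vector is just encoded element I % npatterns.  */
static int
dup_elt (const struct encoded_vec *v, unsigned int i)
{
  return v->encoded[i % v->npatterns];
}

int
main (void)
{
  /* {1, 2, 1, 2, ...}: a duplicate of a two-element pattern, which
     the old code rejected and this patch accepts.  */
  struct encoded_vec v = { 2, 1, { 1, 2 } };
  for (unsigned int i = 0; i < 8; i++)
    printf ("%d ", dup_elt (&v, i));
  printf ("\n");
  return 0;
}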

This patch removes the restriction.  It means that the validity
of a duplicated constant depends only on the bit pattern, not on
the mode used to represent it.
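
That claim is easy to sanity-check on a little-endian host with an
ordinary C program (nothing GCC-specific below): a duplicate of the
byte pattern {1, 2} and a duplicate of the 16-bit value 0x0201 have
identical bit patterns, so the same immediate encoding should serve
both.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  uint8_t  as_bytes[8]  = { 1, 2, 1, 2, 1, 2, 1, 2 };
  uint16_t as_halves[4] = { 0x0201, 0x0201, 0x0201, 0x0201 };
  printf ("same bit pattern: %s\n",
	  memcmp (as_bytes, as_halves, sizeof as_bytes) == 0
	  ? "yes" : "no");
  return 0;
}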

The patch is needed by a later big-endian fix.

Tested on aarch64_be-elf and aarch64-linux-gnu.  OK to install?

Richard


2018-01-26  Richard Sandiford  <richard.sandiford@linaro.org>

gcc/
	* config/aarch64/aarch64.c (aarch64_simd_valid_immediate): Handle
	all CONST_VECTOR_DUPLICATE_P vectors, not just those with a single
	duplicated element.

Comments

James Greenhalgh Feb. 1, 2018, 10:55 a.m. UTC | #1
On Fri, Jan 26, 2018 at 01:47:48PM +0000, Richard Sandiford wrote:
> The current aarch64_simd_valid_immediate code predates the move
> to the new CONST_VECTOR representation, so for variable-length SVE
> it only handles duplicates of single elements, rather than duplicates
> of repeating patterns.
>
> This patch removes the restriction.  It means that the validity
> of a duplicated constant depends only on the bit pattern, not on
> the mode used to represent it.
>
> The patch is needed by a later big-endian fix.
>
> Tested on aarch64_be-elf and aarch64-linux-gnu.  OK to install?

OK.

Thanks,
James


Patch

Index: gcc/config/aarch64/aarch64.c
===================================================================
--- gcc/config/aarch64/aarch64.c	2018-01-26 13:32:54.240529011 +0000
+++ gcc/config/aarch64/aarch64.c	2018-01-26 13:46:00.955822193 +0000
@@ -13164,10 +13164,11 @@  aarch64_simd_valid_immediate (rtx op, si
     return false;
 
   scalar_mode elt_mode = GET_MODE_INNER (mode);
-  rtx elt = NULL, base, step;
+  rtx base, step;
   unsigned int n_elts;
-  if (const_vec_duplicate_p (op, &elt))
-    n_elts = 1;
+  if (GET_CODE (op) == CONST_VECTOR
+      && CONST_VECTOR_DUPLICATE_P (op))
+    n_elts = CONST_VECTOR_NPATTERNS (op);
   else if ((vec_flags & VEC_SVE_DATA)
 	   && const_vec_series_p (op, &base, &step))
     {
@@ -13192,14 +13193,17 @@  aarch64_simd_valid_immediate (rtx op, si
 	    || op == CONSTM1_RTX (mode));
 
   scalar_float_mode elt_float_mode;
-  if (elt
-      && is_a <scalar_float_mode> (elt_mode, &elt_float_mode)
-      && (aarch64_float_const_zero_rtx_p (elt)
-	  || aarch64_float_const_representable_p (elt)))
+  if (n_elts == 1
+      && is_a <scalar_float_mode> (elt_mode, &elt_float_mode))
     {
-      if (info)
-	*info = simd_immediate_info (elt_float_mode, elt);
-      return true;
+      rtx elt = CONST_VECTOR_ENCODED_ELT (op, 0);
+      if (aarch64_float_const_zero_rtx_p (elt)
+	  || aarch64_float_const_representable_p (elt))
+	{
+	  if (info)
+	    *info = simd_immediate_info (elt_float_mode, elt);
+	  return true;
+	}
     }
 
   unsigned int elt_size = GET_MODE_SIZE (elt_mode);
@@ -13214,11 +13218,11 @@  aarch64_simd_valid_immediate (rtx op, si
   bytes.reserve (n_elts * elt_size);
   for (unsigned int i = 0; i < n_elts; i++)
     {
-      if (!elt || n_elts != 1)
-	/* The vector is provided in gcc endian-neutral fashion.
-	   For aarch64_be, it must be laid out in the vector register
-	   in reverse order.  */
-	elt = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? (n_elts - 1 - i) : i);
+      /* The vector is provided in gcc endian-neutral fashion.
+	 For aarch64_be Advanced SIMD, it must be laid out in the vector
+	 register in reverse order.  */
+      bool swap_p = ((vec_flags & VEC_ADVSIMD) != 0 && BYTES_BIG_ENDIAN);
+      rtx elt = CONST_VECTOR_ELT (op, swap_p ? (n_elts - 1 - i) : i);
 
       if (elt_mode != elt_int_mode)
 	elt = gen_lowpart (elt_int_mode, elt);
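
For reference, here is a rough standalone sketch of what the rewritten
loop builds (the byte-splitting itself happens in surrounding code not
shown in the hunk; the element values and sizes below are invented):
each selected element is split into bytes, least significant first,
and on big-endian Advanced SIMD the elements are visited in reverse
order.

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint16_t elts[] = { 0x0102, 0x0304 };	/* encoded pattern elements */
  unsigned int n_elts = 2, elt_size = 2;
  int swap_p = 0;			/* 1 for big-endian AdvSIMD */
  uint8_t bytes[4];

  for (unsigned int i = 0; i < n_elts; i++)
    {
      uint16_t elt = elts[swap_p ? n_elts - 1 - i : i];
      /* Split the element into bytes, least significant first.  */
      for (unsigned int b = 0; b < elt_size; b++)
	bytes[i * elt_size + b] = (elt >> (b * 8)) & 0xff;
    }

  for (unsigned int i = 0; i < n_elts * elt_size; i++)
    printf ("%02x ", bytes[i]);
  printf ("\n");
  return 0;
}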