[07/nn,AArch64] Pass number of units to aarch64_reverse_mask

Message ID 8760b0btz8.fsf@linaro.org
State New
Series [07/nn,AArch64] Pass number of units to aarch64_reverse_mask

Commit Message

Richard Sandiford Oct. 27, 2017, 1:28 p.m. UTC
This patch passes the number of units to aarch64_reverse_mask,
which avoids a to_constant () once GET_MODE_NUNITS is variable.
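
[Editor's note: for context, here is a sketch of how the whole function
might look after the change.  The hunk below shows only the top of
aarch64_reverse_mask; the loop body here is an assumption based on the
usual big-endian lane-reversal construction, not part of this patch.]

rtx
aarch64_reverse_mask (machine_mode mode, unsigned int nunits)
{
  /* We have to reverse each vector because we don't have
     a permuted load that can reverse-load according to ABI rules.  */
  rtx mask;
  rtvec v = rtvec_alloc (16);
  unsigned int i, j;
  unsigned int usize = GET_MODE_UNIT_SIZE (mode);

  gcc_assert (BYTES_BIG_ENDIAN);
  gcc_assert (AARCH64_VALID_SIMD_QREG_MODE (mode));

  /* Assumed construction: lane I of the result reads from lane
     NUNITS - I - 1 of the input; bytes within a lane keep their
     order.  */
  for (i = 0; i < nunits; i++)
    for (j = 0; j < usize; j++)
      RTVEC_ELT (v, i * usize + j)
	= GEN_INT (GET_MODE_SIZE (mode) - ((i + 1) * usize) + j);
  mask = gen_rtx_CONST_VECTOR (V16QImode, v);
  return force_reg (V16QImode, mask);
}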


2017-10-26  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* config/aarch64/aarch64-protos.h (aarch64_reverse_mask): Take
	the number of units too.
	* config/aarch64/aarch64.c (aarch64_reverse_mask): Likewise.
	* config/aarch64/aarch64-simd.md (vec_load_lanesoi<mode>)
	(vec_store_lanesoi<mode>, vec_load_lanesci<mode>)
	(vec_store_lanesci<mode>, vec_load_lanesxi<mode>)
	(vec_store_lanesxi<mode>): Update accordingly.
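
[Editor's note: the <nunits> references in the aarch64-simd.md hunks
are a mode attribute (defined in aarch64-iterators.md) that expands to
the lane count of each mode, so every instantiation of the expanders
passes a compile-time constant.  For example, in the V4SI instantiation
of vec_load_lanesoi, <nunits> expands to 4 and the big-endian arm
becomes, in effect (illustrative only; the generator function names
follow the usual <mode> suffixing):

  rtx tmp = gen_reg_rtx (OImode);
  rtx mask = aarch64_reverse_mask (V4SImode, 4);
  emit_insn (gen_aarch64_simd_ld2v4si (tmp, operands[1]));
  emit_insn (gen_aarch64_rev_reglistoi (operands[0], tmp, mask));]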

Comments

James Greenhalgh Nov. 2, 2017, 9:55 a.m. UTC | #1
On Fri, Oct 27, 2017 at 02:28:27PM +0100, Richard Sandiford wrote:
> This patch passes the number of units to aarch64_reverse_mask,
> which avoids a to_constant () once GET_MODE_NUNITS is variable.

OK

Reviewed-by: James Greenhalgh <james.greenhalgh@arm.com>

Thanks,
James

Patch

Index: gcc/config/aarch64/aarch64-protos.h
===================================================================
--- gcc/config/aarch64/aarch64-protos.h	2017-10-27 14:12:00.601693018 +0100
+++ gcc/config/aarch64/aarch64-protos.h	2017-10-27 14:12:04.192082112 +0100
@@ -365,7 +365,7 @@  bool aarch64_mask_and_shift_for_ubfiz_p
 bool aarch64_zero_extend_const_eq (machine_mode, rtx, machine_mode, rtx);
 bool aarch64_move_imm (HOST_WIDE_INT, machine_mode);
 bool aarch64_mov_operand_p (rtx, machine_mode);
-rtx aarch64_reverse_mask (machine_mode);
+rtx aarch64_reverse_mask (machine_mode, unsigned int);
 bool aarch64_offset_7bit_signed_scaled_p (machine_mode, HOST_WIDE_INT);
 char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
 char *aarch64_output_simd_mov_immediate (rtx, unsigned,
Index: gcc/config/aarch64/aarch64.c
===================================================================
--- gcc/config/aarch64/aarch64.c	2017-10-27 14:12:00.603550436 +0100
+++ gcc/config/aarch64/aarch64.c	2017-10-27 14:12:04.193939530 +0100
@@ -13945,16 +13945,18 @@  aarch64_vectorize_vec_perm_const_ok (mac
   return ret;
 }
 
+/* Generate a byte permute mask for a register of mode MODE,
+   which has NUNITS units.  */
+
 rtx
-aarch64_reverse_mask (machine_mode mode)
+aarch64_reverse_mask (machine_mode mode, unsigned int nunits)
 {
   /* We have to reverse each vector because we dont have
      a permuted load that can reverse-load according to ABI rules.  */
   rtx mask;
   rtvec v = rtvec_alloc (16);
-  int i, j;
-  int nunits = GET_MODE_NUNITS (mode);
-  int usize = GET_MODE_UNIT_SIZE (mode);
+  unsigned int i, j;
+  unsigned int usize = GET_MODE_UNIT_SIZE (mode);
 
   gcc_assert (BYTES_BIG_ENDIAN);
   gcc_assert (AARCH64_VALID_SIMD_QREG_MODE (mode));
Index: gcc/config/aarch64/aarch64-simd.md
===================================================================
--- gcc/config/aarch64/aarch64-simd.md	2017-10-27 14:12:00.602621727 +0100
+++ gcc/config/aarch64/aarch64-simd.md	2017-10-27 14:12:04.193010821 +0100
@@ -4632,7 +4632,7 @@  (define_expand "vec_load_lanesoi<mode>"
   if (BYTES_BIG_ENDIAN)
     {
       rtx tmp = gen_reg_rtx (OImode);
-      rtx mask = aarch64_reverse_mask (<MODE>mode);
+      rtx mask = aarch64_reverse_mask (<MODE>mode, <nunits>);
       emit_insn (gen_aarch64_simd_ld2<mode> (tmp, operands[1]));
       emit_insn (gen_aarch64_rev_reglistoi (operands[0], tmp, mask));
     }
@@ -4676,7 +4676,7 @@  (define_expand "vec_store_lanesoi<mode>"
   if (BYTES_BIG_ENDIAN)
     {
       rtx tmp = gen_reg_rtx (OImode);
-      rtx mask = aarch64_reverse_mask (<MODE>mode);
+      rtx mask = aarch64_reverse_mask (<MODE>mode, <nunits>);
       emit_insn (gen_aarch64_rev_reglistoi (tmp, operands[1], mask));
       emit_insn (gen_aarch64_simd_st2<mode> (operands[0], tmp));
     }
@@ -4730,7 +4730,7 @@  (define_expand "vec_load_lanesci<mode>"
   if (BYTES_BIG_ENDIAN)
     {
       rtx tmp = gen_reg_rtx (CImode);
-      rtx mask = aarch64_reverse_mask (<MODE>mode);
+      rtx mask = aarch64_reverse_mask (<MODE>mode, <nunits>);
       emit_insn (gen_aarch64_simd_ld3<mode> (tmp, operands[1]));
       emit_insn (gen_aarch64_rev_reglistci (operands[0], tmp, mask));
     }
@@ -4774,7 +4774,7 @@  (define_expand "vec_store_lanesci<mode>"
   if (BYTES_BIG_ENDIAN)
     {
       rtx tmp = gen_reg_rtx (CImode);
-      rtx mask = aarch64_reverse_mask (<MODE>mode);
+      rtx mask = aarch64_reverse_mask (<MODE>mode, <nunits>);
       emit_insn (gen_aarch64_rev_reglistci (tmp, operands[1], mask));
       emit_insn (gen_aarch64_simd_st3<mode> (operands[0], tmp));
     }
@@ -4828,7 +4828,7 @@  (define_expand "vec_load_lanesxi<mode>"
   if (BYTES_BIG_ENDIAN)
     {
       rtx tmp = gen_reg_rtx (XImode);
-      rtx mask = aarch64_reverse_mask (<MODE>mode);
+      rtx mask = aarch64_reverse_mask (<MODE>mode, <nunits>);
       emit_insn (gen_aarch64_simd_ld4<mode> (tmp, operands[1]));
       emit_insn (gen_aarch64_rev_reglistxi (operands[0], tmp, mask));
     }
@@ -4872,7 +4872,7 @@  (define_expand "vec_store_lanesxi<mode>"
   if (BYTES_BIG_ENDIAN)
     {
       rtx tmp = gen_reg_rtx (XImode);
-      rtx mask = aarch64_reverse_mask (<MODE>mode);
+      rtx mask = aarch64_reverse_mask (<MODE>mode, <nunits>);
       emit_insn (gen_aarch64_rev_reglistxi (tmp, operands[1], mask));
       emit_insn (gen_aarch64_simd_st4<mode> (operands[0], tmp));
     }
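
[Editor's note: to make the mask concrete, under the construction
sketched above (an assumption, since the loop body is outside this
hunk), aarch64_reverse_mask (V4SImode, 4) would build a V16QImode
constant with these byte indices, reversing the four 32-bit lanes while
keeping the bytes within each lane in order (v4si_reverse_sel is just
an illustrative name):

  /* nunits = 4, usize = 4: lane i of the result reads lane 3 - i
     of the input.  */
  static const unsigned char v4si_reverse_sel[16] =
    { 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3 };]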