diff mbox series

[2/4,AArch64] Testsuite markup for SVE

Message ID 871slfntff.fsf@linaro.org
State New
Headers show
Series Add SVE support | expand

Commit Message

Richard Sandiford Nov. 3, 2017, 5:49 p.m. UTC
This patch adds new target selectors for SVE and updates existing
selectors accordingly.  It also XFAILs some tests that don't yet
work for some SVE modes; most of these go away with follow-on
vectorisation enhancements.


2017-11-03  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/testsuite/
	* lib/target-supports.exp (check_effective_target_aarch64_sve)
	(aarch64_sve_bits, check_effective_target_aarch64_sve_hw)
	(aarch64_sve_hw_bits, check_effective_target_aarch64_sve256_hw):
	New procedures.
	(check_effective_target_vect_perm): Handle SVE.
	(check_effective_target_vect_perm_byte): Likewise.
	(check_effective_target_vect_perm_short): Likewise.
	(check_effective_target_vect_widen_sum_hi_to_si_pattern): Likewise.
	(check_effective_target_vect_widen_mult_qi_to_hi): Likewise.
	(check_effective_target_vect_widen_mult_hi_to_si): Likewise.
	(check_effective_target_vect_element_align_preferred): Likewise.
	(check_effective_target_vect_align_stack_vars): Likewise.
	(check_effective_target_vect_load_lanes): Likewise.
	(check_effective_target_vect_masked_store): Likewise.
	(available_vector_sizes): Use aarch64_sve_bits for SVE.
	* gcc.dg/vect/tree-vect.h (VECTOR_BITS): Define appropriately
	for SVE.
	* gcc.dg/tree-ssa/ssa-dom-cse-2.c: Add SVE XFAIL.
	* gcc.dg/vect/bb-slp-pr69907.c: Likewise.
	* gcc.dg/vect/no-vfa-vect-depend-2.c: Likewise.
	* gcc.dg/vect/no-vfa-vect-depend-3.c: Likewise.
	* gcc.dg/vect/slp-23.c: Likewise.
	* gcc.dg/vect/slp-25.c: Likewise.
	* gcc.dg/vect/slp-perm-5.c: Likewise.
	* gcc.dg/vect/slp-perm-6.c: Likewise.
	* gcc.dg/vect/slp-perm-9.c: Likewise.
	* gcc.dg/vect/slp-reduc-3.c: Likewise.
	* gcc.dg/vect/vect-114.c: Likewise.
	* gcc.dg/vect/vect-119.c: Likewise.
	* gcc.dg/vect/vect-cselim-1.c: Likewise.
	* gcc.dg/vect/vect-live-slp-1.c: Likewise.
	* gcc.dg/vect/vect-live-slp-2.c: Likewise.
	* gcc.dg/vect/vect-live-slp-3.c: Likewise.
	* gcc.dg/vect/vect-mult-const-pattern-1.c: Likewise.
	* gcc.dg/vect/vect-mult-const-pattern-2.c: Likewise.
	* gcc.dg/vect/vect-over-widen-1-big-array.c: Likewise.
	* gcc.dg/vect/vect-over-widen-1.c: Likewise.
	* gcc.dg/vect/vect-over-widen-3-big-array.c: Likewise.
	* gcc.dg/vect/vect-over-widen-4-big-array.c: Likewise.
	* gcc.dg/vect/vect-over-widen-4.c: Likewise.

Comments

James Greenhalgh Jan. 6, 2018, 5:58 p.m. UTC | #1
On Fri, Nov 03, 2017 at 05:49:56PM +0000, Richard Sandiford wrote:
> This patch adds new target selectors for SVE and updates existing

> selectors accordingly.  It also XFAILs some tests that don't yet

> work for some SVE modes; most of these go away with follow-on

> vectorisation enhancements.


OK.

Thanks,
James
diff mbox series

Patch

Index: gcc/testsuite/lib/target-supports.exp
===================================================================
--- gcc/testsuite/lib/target-supports.exp	2017-11-03 17:22:13.533564036 +0000
+++ gcc/testsuite/lib/target-supports.exp	2017-11-03 17:24:09.475993817 +0000
@@ -3350,6 +3350,35 @@  proc check_effective_target_aarch64_litt
     }]
 }
 
+# Return 1 if this is an AArch64 target supporting SVE.
+proc check_effective_target_aarch64_sve { } {
+    if { ![istarget aarch64*-*-*] } {
+	return 0
+    }
+    return [check_no_compiler_messages aarch64_sve assembly {
+	#if !defined (__ARM_FEATURE_SVE)
+	#error FOO
+	#endif
+    }]
+}
+
+# Return the size in bits of an SVE vector, or 0 if the size is variable.
+proc aarch64_sve_bits { } {
+    return [check_cached_effective_target aarch64_sve_bits {
+	global tool
+
+	set src dummy[pid].c
+	set f [open $src "w"]
+	puts $f "int bits = __ARM_FEATURE_SVE_BITS;"
+	close $f
+	set output [${tool}_target_compile $src "" preprocess ""]
+	file delete $src
+
+	regsub {.*bits = ([^;]*);.*} $output {\1} bits
+	expr { $bits }
+    }]
+}
+
 # Return 1 if this is a compiler supporting ARC atomic operations
 proc check_effective_target_arc_atomic { } {
     return [check_no_compiler_messages arc_atomic assembly {
@@ -4275,6 +4304,49 @@  proc check_effective_target_arm_neon_hw
     } [add_options_for_arm_neon ""]]
 }
 
+# Return true if this is an AArch64 target that can run SVE code.
+
+proc check_effective_target_aarch64_sve_hw { } {
+    if { ![istarget aarch64*-*-*] } {
+	return 0
+    }
+    return [check_runtime aarch64_sve_hw_available {
+	int
+	main (void)
+	{
+	  asm volatile ("ptrue p0.b");
+	  return 0;
+	}
+    }]
+}
+
+# Return true if this is an AArch64 target that can run SVE code and
+# if its SVE vectors have exactly BITS bits.
+
+proc aarch64_sve_hw_bits { bits } {
+    if { ![check_effective_target_aarch64_sve_hw] } {
+	return 0
+    }
+    return [check_runtime aarch64_sve${bits}_hw [subst {
+	int
+	main (void)
+	{
+	  int res;
+	  asm volatile ("cntd %0" : "=r" (res));
+	  if (res * 64 != $bits)
+	    __builtin_abort ();
+	  return 0;
+	}
+    }]]
+}
+
+# Return true if this is an AArch64 target that can run SVE code and
+# if its SVE vectors have exactly 256 bits.
+
+proc check_effective_target_aarch64_sve256_hw { } {
+    return [aarch64_sve_hw_bits 256]
+}
+
 proc check_effective_target_arm_neonv2_hw { } {
     return [check_runtime arm_neon_hwv2_available {
 	#include "arm_neon.h"
@@ -5531,7 +5603,8 @@  proc check_effective_target_vect_perm {
     } else {
 	set et_vect_perm_saved($et_index) 0
         if { [is-effective-target arm_neon]
-	     || [istarget aarch64*-*-*]
+	     || ([istarget aarch64*-*-*]
+		 && ![check_effective_target_vect_variable_length])
 	     || [istarget powerpc*-*-*]
              || [istarget spu-*-*]
 	     || [istarget i?86-*-*] || [istarget x86_64-*-*]
@@ -5636,7 +5709,8 @@  proc check_effective_target_vect_perm_by
         if { ([is-effective-target arm_neon]
 	      && [is-effective-target arm_little_endian])
 	     || ([istarget aarch64*-*-*]
-		 && [is-effective-target aarch64_little_endian])
+		 && [is-effective-target aarch64_little_endian]
+		 && ![check_effective_target_vect_variable_length])
 	     || [istarget powerpc*-*-*]
 	     || [istarget spu-*-*]
 	     || ([istarget mips-*.*]
@@ -5675,7 +5749,8 @@  proc check_effective_target_vect_perm_sh
         if { ([is-effective-target arm_neon]
 	      && [is-effective-target arm_little_endian])
 	     || ([istarget aarch64*-*-*]
-		 && [is-effective-target aarch64_little_endian])
+		 && [is-effective-target aarch64_little_endian]
+		 && ![check_effective_target_vect_variable_length])
 	     || [istarget powerpc*-*-*]
 	     || [istarget spu-*-*]
 	     || ([istarget mips*-*-*]
@@ -5735,7 +5810,8 @@  proc check_effective_target_vect_widen_s
     } else {
 	set et_vect_widen_sum_hi_to_si_pattern_saved($et_index) 0
         if { [istarget powerpc*-*-*]
-             || [istarget aarch64*-*-*]
+             || ([istarget aarch64*-*-*]
+		 && ![check_effective_target_aarch64_sve])
 	     || [is-effective-target arm_neon]
              || [istarget ia64-*-*] } {
 	    set et_vect_widen_sum_hi_to_si_pattern_saved($et_index) 1
@@ -5847,7 +5923,8 @@  proc check_effective_target_vect_widen_m
 	    set et_vect_widen_mult_qi_to_hi_saved($et_index) 0
 	}
         if { [istarget powerpc*-*-*]
-              || [istarget aarch64*-*-*]
+              || ([istarget aarch64*-*-*]
+		  && ![check_effective_target_aarch64_sve])
               || [is-effective-target arm_neon]
 	      || ([istarget s390*-*-*]
 		  && [check_effective_target_s390_vx]) } {
@@ -5885,7 +5962,8 @@  proc check_effective_target_vect_widen_m
         if { [istarget powerpc*-*-*]
 	     || [istarget spu-*-*]
 	     || [istarget ia64-*-*]
-	     || [istarget aarch64*-*-*]
+	     || ([istarget aarch64*-*-*]
+		 && ![check_effective_target_aarch64_sve])
 	     || [istarget i?86-*-*] || [istarget x86_64-*-*]
 	     || [is-effective-target arm_neon]
 	     || ([istarget s390*-*-*]
@@ -6347,12 +6425,16 @@  proc check_effective_target_vect_natural
 # alignment during vectorization.
 
 proc check_effective_target_vect_element_align_preferred { } {
-    return [check_effective_target_vect_variable_length]
+    return [expr { [check_effective_target_aarch64_sve]
+		   && [check_effective_target_vect_variable_length] }]
 }
 
 # Return 1 if we can align stack data to the preferred vector alignment.
 
 proc check_effective_target_vect_align_stack_vars { } {
+    if { [check_effective_target_aarch64_sve] } {
+	return [check_effective_target_vect_variable_length]
+    }
     return 1
 }
 
@@ -6424,7 +6506,8 @@  proc check_effective_target_vect_load_la
     } else {
 	set et_vect_load_lanes 0
 	if { ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok])
-	     || [istarget aarch64*-*-*] } {
+	     || ([istarget aarch64*-*-*]
+		 && ![check_effective_target_aarch64_sve]) } {
 	    set et_vect_load_lanes 1
 	}
     }
@@ -6436,7 +6519,7 @@  proc check_effective_target_vect_load_la
 # Return 1 if the target supports vector masked stores.
 
 proc check_effective_target_vect_masked_store { } {
-    return 0
+    return [check_effective_target_aarch64_sve]
 }
 
 # Return 1 if the target supports vector conditional operations, 0 otherwise.
@@ -6704,6 +6787,9 @@  foreach N {2 3 4 8} {
 proc available_vector_sizes { } {
     set result {}
     if { [istarget aarch64*-*-*] } {
+	if { [check_effective_target_aarch64_sve] } {
+	    lappend result [aarch64_sve_bits]
+	}
 	lappend result 128 64
     } elseif { [istarget arm*-*-*]
 		&& [check_effective_target_arm_neon_ok] } {
Index: gcc/testsuite/gcc.dg/vect/tree-vect.h
===================================================================
--- gcc/testsuite/gcc.dg/vect/tree-vect.h	2017-11-03 17:21:09.761094925 +0000
+++ gcc/testsuite/gcc.dg/vect/tree-vect.h	2017-11-03 17:24:09.472993942 +0000
@@ -76,4 +76,12 @@  check_vect (void)
   signal (SIGILL, SIG_DFL);
 }
 
-#define VECTOR_BITS 128
+#if defined (__ARM_FEATURE_SVE)
+#  if __ARM_FEATURE_SVE_BITS == 0
+#    define VECTOR_BITS 1024
+#  else
+#    define VECTOR_BITS __ARM_FEATURE_SVE_BITS
+#  endif
+#else
+#  define VECTOR_BITS 128
+#endif
Index: gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c
===================================================================
--- gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c	2017-02-23 19:54:09.000000000 +0000
+++ gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c	2017-11-03 17:24:09.471993983 +0000
@@ -25,4 +25,4 @@  foo ()
    but the loop reads only one element at a time, and DOM cannot resolve these.
    The same happens on powerpc depending on the SIMD support available.  */
 
-/* { dg-final { scan-tree-dump "return 28;" "optimized" { xfail { { alpha*-*-* hppa*64*-*-* } || { lp64 && { powerpc*-*-* sparc*-*-* riscv*-*-* } } } } } } */
+/* { dg-final { scan-tree-dump "return 28;" "optimized" { xfail { { alpha*-*-* hppa*64*-*-* } || { { lp64 && { powerpc*-*-* sparc*-*-* riscv*-*-* } } || aarch64_sve } } } } } */
Index: gcc/testsuite/gcc.dg/vect/bb-slp-pr69907.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/bb-slp-pr69907.c	2017-11-03 17:21:09.758095046 +0000
+++ gcc/testsuite/gcc.dg/vect/bb-slp-pr69907.c	2017-11-03 17:24:09.471993983 +0000
@@ -17,4 +17,6 @@  void foo(unsigned *p1, unsigned short *p
     p1[n] = p2[n * 2];
 }
 
-/* { dg-final { scan-tree-dump "BB vectorization with gaps at the end of a load is not supported" "slp1" } } */
+/* Disable for SVE because for long or variable-length vectors we don't
+   get an unrolled epilogue loop.  */
+/* { dg-final { scan-tree-dump "BB vectorization with gaps at the end of a load is not supported" "slp1" { target { ! aarch64_sve } } } } */
Index: gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-2.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-2.c	2015-06-02 23:53:38.000000000 +0100
+++ gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-2.c	2017-11-03 17:24:09.471993983 +0000
@@ -51,4 +51,7 @@  int main (void)
 }
 
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" {xfail { vect_no_align && { ! vect_hw_misalign } } } } } */
-/* { dg-final { scan-tree-dump-times "dependence distance negative" 1 "vect"  } } */
+/* Requires reverse for variable-length SVE, which is implemented
+   by a later patch.  Until then we report it twice, once for SVE and
+   once for 128-bit Advanced SIMD.  */
+/* { dg-final { scan-tree-dump-times "dependence distance negative" 1 "vect" { xfail { aarch64_sve && vect_variable_length } } } } */
Index: gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-3.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-3.c	2015-06-02 23:53:38.000000000 +0100
+++ gcc/testsuite/gcc.dg/vect/no-vfa-vect-depend-3.c	2017-11-03 17:24:09.472993942 +0000
@@ -183,4 +183,7 @@  int main ()
 }
 
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 4 "vect" {xfail { vect_no_align && { ! vect_hw_misalign } } } } } */
-/* { dg-final { scan-tree-dump-times "dependence distance negative" 4 "vect"  } } */
+/* f4 requires reverse for SVE, which is implemented by a later patch.
+   Until then we report it twice, once for SVE and once for 128-bit
+   Advanced SIMD.  */
+/* { dg-final { scan-tree-dump-times "dependence distance negative" 4 "vect" { xfail { aarch64_sve && vect_variable_length } } } } */
Index: gcc/testsuite/gcc.dg/vect/slp-23.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/slp-23.c	2017-11-03 17:21:09.742095692 +0000
+++ gcc/testsuite/gcc.dg/vect/slp-23.c	2017-11-03 17:24:09.472993942 +0000
@@ -107,6 +107,8 @@  int main (void)
 
 /* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { vect_strided8 && { ! { vect_no_align} } } } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { ! { vect_strided8 || vect_no_align } } } } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { ! vect_perm } } } } */
+/* We fail to vectorize the second loop with variable-length SVE but
+   fall back to 128-bit vectors, which does use SLP.  */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { ! vect_perm } xfail aarch64_sve } } } */
 /* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target vect_perm } } } */
   
Index: gcc/testsuite/gcc.dg/vect/slp-25.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/slp-25.c	2017-11-03 17:21:09.814092784 +0000
+++ gcc/testsuite/gcc.dg/vect/slp-25.c	2017-11-03 17:24:09.472993942 +0000
@@ -57,4 +57,6 @@  int main (void)
 
 /* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect"  } } */
 /* { dg-final { scan-tree-dump-times "Vectorizing an unaligned access" 0 "vect" } } */
-/* { dg-final { scan-tree-dump-times "Alignment of access forced using peeling" 2 "vect" { xfail { { ! vect_unaligned_possible } || { ! vect_natural_alignment } } } } } */
+/* Needs store_lanes for SVE, otherwise falls back to Advanced SIMD.
+   Will be fixed when SVE LOAD_LANES support is added.  */
+/* { dg-final { scan-tree-dump-times "Alignment of access forced using peeling" 2 "vect" { xfail { { { ! vect_unaligned_possible } || { ! vect_natural_alignment } } && { ! { aarch64_sve && vect_variable_length } } } } } } */
Index: gcc/testsuite/gcc.dg/vect/slp-perm-5.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/slp-perm-5.c	2017-11-03 17:21:09.792093672 +0000
+++ gcc/testsuite/gcc.dg/vect/slp-perm-5.c	2017-11-03 17:24:09.472993942 +0000
@@ -104,7 +104,9 @@  int main (int argc, const char* argv[])
 }
 
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect"  { target vect_perm } } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target { vect_perm3_int && {! vect_load_lanes } } } } } */
+/* Fails for variable-length SVE because we fall back to Advanced SIMD
+   and use LD3/ST3.  Will be fixed when SVE LOAD_LANES support is added.  */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target { vect_perm3_int && {! vect_load_lanes } } xfail { aarch64_sve && vect_variable_length } } } } */
 /* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 0 "vect" { target vect_load_lanes } } } */
 /* { dg-final { scan-tree-dump "note: Built SLP cancelled: can use load/store-lanes" "vect" { target { vect_perm3_int && vect_load_lanes } } } } */
 /* { dg-final { scan-tree-dump "LOAD_LANES" "vect" { target vect_load_lanes } } } */
Index: gcc/testsuite/gcc.dg/vect/slp-perm-6.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/slp-perm-6.c	2017-11-03 17:21:09.792093672 +0000
+++ gcc/testsuite/gcc.dg/vect/slp-perm-6.c	2017-11-03 17:24:09.472993942 +0000
@@ -103,7 +103,9 @@  int main (int argc, const char* argv[])
 }
 
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect"  { target vect_perm } } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target { vect_perm3_int && {! vect_load_lanes } } } } } */
+/* Fails for variable-length SVE because we fall back to Advanced SIMD
+   and use LD3/ST3.  Will be fixed when SVE LOAD_LANES support is added.  */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target { vect_perm3_int && {! vect_load_lanes } } xfail { aarch64_sve && vect_variable_length } } } } */
 /* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target vect_load_lanes } } } */
 /* { dg-final { scan-tree-dump "note: Built SLP cancelled: can use load/store-lanes" "vect" { target { vect_perm3_int && vect_load_lanes } } } } */
 /* { dg-final { scan-tree-dump "LOAD_LANES" "vect" { target vect_load_lanes } } } */
Index: gcc/testsuite/gcc.dg/vect/slp-perm-9.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/slp-perm-9.c	2017-11-03 17:21:09.792093672 +0000
+++ gcc/testsuite/gcc.dg/vect/slp-perm-9.c	2017-11-03 17:24:09.472993942 +0000
@@ -57,10 +57,11 @@  int main (int argc, const char* argv[])
   return 0;
 }
 
-/* { dg-final { scan-tree-dump-times "vectorized 0 loops" 2 "vect" { target { ! { vect_perm_short || vect_load_lanes } } } } } */
+/* Fails for variable-length SVE because we fall back to Advanced SIMD
+   and use LD3/ST3.  Will be fixed when SVE LOAD_LANES support is added.  */
+/* { dg-final { scan-tree-dump-times "vectorized 0 loops" 2 "vect" { target { ! { vect_perm_short || vect_load_lanes } } xfail { aarch64_sve && vect_variable_length } } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { vect_perm_short || vect_load_lanes } } } } */
 /* { dg-final { scan-tree-dump-times "permutation requires at least three vectors" 1 "vect" { target { vect_perm_short && { ! vect_perm3_short } } } } } */
 /* { dg-final { scan-tree-dump-not "permutation requires at least three vectors" "vect" { target vect_perm3_short } } } */
 /* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 0 "vect" { target { { ! vect_perm3_short } || vect_load_lanes } } } } */
 /* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { target { vect_perm3_short && { ! vect_load_lanes } } } } } */
-
Index: gcc/testsuite/gcc.dg/vect/slp-reduc-3.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/slp-reduc-3.c	2015-06-02 23:53:38.000000000 +0100
+++ gcc/testsuite/gcc.dg/vect/slp-reduc-3.c	2017-11-03 17:24:09.472993942 +0000
@@ -58,4 +58,7 @@  int main (void)
 /* The initialization loop in main also gets vectorized.  */
 /* { dg-final { scan-tree-dump-times "vect_recog_dot_prod_pattern: detected" 1 "vect" { xfail *-*-* } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 2 "vect" { target { vect_short_mult && { vect_widen_sum_hi_to_si  && vect_unpack } } } } } */ 
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { xfail { vect_widen_sum_hi_to_si_pattern ||  { ! vect_unpack } } } } } */
+/* We can't yet create the necessary SLP constant vector for variable-length
+   SVE and so fall back to Advanced SIMD.  This means that we repeat each
+   analysis note.  */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { xfail { vect_widen_sum_hi_to_si_pattern || { { ! vect_unpack } || { aarch64_sve && vect_variable_length } } } } } } */
Index: gcc/testsuite/gcc.dg/vect/vect-114.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-114.c	2015-06-02 23:53:35.000000000 +0100
+++ gcc/testsuite/gcc.dg/vect/vect-114.c	2017-11-03 17:24:09.473993900 +0000
@@ -34,6 +34,9 @@  int main (void)
   return main1 ();
 }
 
-/* { dg-final { scan-tree-dump-times "vectorized 0 loops" 1 "vect" { target { ! { vect_perm  } } } } } */
+/* Requires reverse for SVE, which is implemented by a later patch.
+   Until then we fall back to Advanced SIMD and successfully vectorize
+   the loop.  */
+/* { dg-final { scan-tree-dump-times "vectorized 0 loops" 1 "vect" { target { ! vect_perm } xfail { aarch64_sve && vect_variable_length } } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_perm } } } */
 
Index: gcc/testsuite/gcc.dg/vect/vect-119.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-119.c	2015-08-05 22:28:34.000000000 +0100
+++ gcc/testsuite/gcc.dg/vect/vect-119.c	2017-11-03 17:24:09.473993900 +0000
@@ -25,4 +25,7 @@  unsigned int foo (const unsigned int x[O
   return sum;
 }
 
-/* { dg-final { scan-tree-dump-times "Detected interleaving load of size 2" 1 "vect" } } */
+/* Requires load-lanes for SVE, which is implemented by a later patch.
+   Until then we report it twice, once for SVE and once for 128-bit
+   Advanced SIMD.  */
+/* { dg-final { scan-tree-dump-times "Detected interleaving load of size 2" 1 "vect" { xfail { aarch64_sve && vect_variable_length } } } } */
Index: gcc/testsuite/gcc.dg/vect/vect-cselim-1.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-cselim-1.c	2017-11-03 17:21:09.849091370 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-cselim-1.c	2017-11-03 17:24:09.473993900 +0000
@@ -83,4 +83,6 @@  main (void)
 }
 
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { ! vect_masked_store } xfail { { vect_no_align && { ! vect_hw_misalign } } || { ! vect_strided2 } } } } } */
-/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target { vect_masked_store } } } } */
+/* Fails for variable-length SVE because we can't yet handle the
+   interleaved load.  This is fixed by a later patch.  */
+/* { dg-final { scan-tree-dump-times "vectorized 2 loops" 1 "vect" { target vect_masked_store xfail { aarch64_sve && vect_variable_length } } } } */
Index: gcc/testsuite/gcc.dg/vect/vect-live-slp-1.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-live-slp-1.c	2016-11-22 21:16:10.000000000 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-live-slp-1.c	2017-11-03 17:24:09.473993900 +0000
@@ -69,4 +69,7 @@  main (void)
 
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 4 "vect" } } */
 /* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 4 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vec_stmt_relevant_p: stmt live but not relevant" 4 "vect" } } */
+/* We can't yet create the necessary SLP constant vector for variable-length
+   SVE and so fall back to Advanced SIMD.  This means that we repeat each
+   analysis note.  */
+/* { dg-final { scan-tree-dump-times "vec_stmt_relevant_p: stmt live but not relevant" 4 "vect" { xfail { aarch64_sve && vect_variable_length } } } }*/
Index: gcc/testsuite/gcc.dg/vect/vect-live-slp-2.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-live-slp-2.c	2016-11-22 21:16:10.000000000 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-live-slp-2.c	2017-11-03 17:24:09.473993900 +0000
@@ -63,4 +63,7 @@  main (void)
 
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 2 "vect" } } */
 /* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vec_stmt_relevant_p: stmt live but not relevant" 2 "vect" } } */
+/* We can't yet create the necessary SLP constant vector for variable-length
+   SVE and so fall back to Advanced SIMD.  This means that we repeat each
+   analysis note.  */
+/* { dg-final { scan-tree-dump-times "vec_stmt_relevant_p: stmt live but not relevant" 2 "vect" { xfail { aarch64_sve && vect_variable_length } } } } */
Index: gcc/testsuite/gcc.dg/vect/vect-live-slp-3.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-live-slp-3.c	2016-11-22 21:16:10.000000000 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-live-slp-3.c	2017-11-03 17:24:09.473993900 +0000
@@ -70,4 +70,7 @@  main (void)
 
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 4 "vect" } } */
 /* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 4 "vect" } } */
-/* { dg-final { scan-tree-dump-times "vec_stmt_relevant_p: stmt live but not relevant" 4 "vect" } } */
+/* We can't yet create the necessary SLP constant vector for variable-length
+   SVE and so fall back to Advanced SIMD.  This means that we repeat each
+   analysis note.  */
+/* { dg-final { scan-tree-dump-times "vec_stmt_relevant_p: stmt live but not relevant" 4 "vect" { xfail { aarch64_sve && vect_variable_length } } } } */
Index: gcc/testsuite/gcc.dg/vect/vect-mult-const-pattern-1.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-mult-const-pattern-1.c	2016-11-22 21:16:10.000000000 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-mult-const-pattern-1.c	2017-11-03 17:24:09.473993900 +0000
@@ -37,5 +37,5 @@  main (void)
   return 0;
 }
 
-/* { dg-final { scan-tree-dump-times "vect_recog_mult_pattern: detected" 2 "vect"  { target aarch64*-*-* } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_mult_pattern: detected" 2 "vect" { target aarch64*-*-* xfail aarch64_sve } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect"  { target aarch64*-*-* } } } */
Index: gcc/testsuite/gcc.dg/vect/vect-mult-const-pattern-2.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-mult-const-pattern-2.c	2016-11-22 21:16:10.000000000 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-mult-const-pattern-2.c	2017-11-03 17:24:09.473993900 +0000
@@ -36,5 +36,5 @@  main (void)
   return 0;
 }
 
-/* { dg-final { scan-tree-dump-times "vect_recog_mult_pattern: detected" 2 "vect"  { target aarch64*-*-* } } } */
+/* { dg-final { scan-tree-dump-times "vect_recog_mult_pattern: detected" 2 "vect"  { target aarch64*-*-* xfail aarch64_sve } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect"  { target aarch64*-*-* } } } */
Index: gcc/testsuite/gcc.dg/vect/vect-over-widen-1-big-array.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-over-widen-1-big-array.c	2016-01-13 13:48:42.000000000 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-over-widen-1-big-array.c	2017-11-03 17:24:09.473993900 +0000
@@ -59,6 +59,8 @@  int main (void)
 
 /* { dg-final { scan-tree-dump-times "vect_recog_widen_shift_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
 /* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
-/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { ! vect_widen_shift } } } } */
+/* Requires LD4 for variable-length SVE.  Until that's supported we fall
+   back to Advanced SIMD, which does have widening shifts.  */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { ! vect_widen_shift } xfail { aarch64_sve && vect_variable_length } } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
 
Index: gcc/testsuite/gcc.dg/vect/vect-over-widen-1.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-over-widen-1.c	2017-11-03 17:21:09.762094884 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-over-widen-1.c	2017-11-03 17:24:09.474993858 +0000
@@ -63,7 +63,9 @@  int main (void)
 
 /* { dg-final { scan-tree-dump-times "vect_recog_widen_shift_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
 /* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
-/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { { ! vect_sizes_32B_16B } && { ! vect_widen_shift } } } } } */
+/* Requires LD4 for variable-length SVE.  Until that's supported we fall
+   back to Advanced SIMD, which does have widening shifts.  */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { { ! vect_sizes_32B_16B } && { ! vect_widen_shift } } xfail { aarch64_sve && vect_variable_length } } } } */
 /* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 8 "vect" { target vect_sizes_32B_16B } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
 
Index: gcc/testsuite/gcc.dg/vect/vect-over-widen-3-big-array.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-over-widen-3-big-array.c	2017-11-03 17:21:09.763094844 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-over-widen-3-big-array.c	2017-11-03 17:24:09.474993858 +0000
@@ -59,7 +59,9 @@  int main (void)
   return 0;
 }
 
-/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 2 "vect" { target { ! vect_widen_shift } } } } */
+/* Requires LD4 for variable-length SVE.  Until that's supported we fall
+   back to Advanced SIMD, which does have widening shifts.  */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 2 "vect" { target { ! vect_widen_shift } xfail { aarch64_sve && vect_variable_length } } } } */
 /* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 1 "vect" { target vect_widen_shift } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
 
Index: gcc/testsuite/gcc.dg/vect/vect-over-widen-4-big-array.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-over-widen-4-big-array.c	2016-01-13 13:48:42.000000000 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-over-widen-4-big-array.c	2017-11-03 17:24:09.474993858 +0000
@@ -63,6 +63,8 @@  int main (void)
 
 /* { dg-final { scan-tree-dump-times "vect_recog_widen_shift_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
 /* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
-/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { ! vect_widen_shift } } } } */
+/* Requires LD4 for variable-length SVE.  Until that's supported we fall
+   back to Advanced SIMD, which does have widening shifts.  */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { ! vect_widen_shift } xfail { aarch64_sve && vect_variable_length } } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
 
Index: gcc/testsuite/gcc.dg/vect/vect-over-widen-4.c
===================================================================
--- gcc/testsuite/gcc.dg/vect/vect-over-widen-4.c	2017-11-03 17:21:09.763094844 +0000
+++ gcc/testsuite/gcc.dg/vect/vect-over-widen-4.c	2017-11-03 17:24:09.474993858 +0000
@@ -67,7 +67,9 @@  int main (void)
 
 /* { dg-final { scan-tree-dump-times "vect_recog_widen_shift_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
 /* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 2 "vect" { target vect_widen_shift } } } */
-/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { { ! vect_sizes_32B_16B } && { ! vect_widen_shift } } } } } */
+/* Requires LD4 for variable-length SVE.  Until that's supported we fall
+   back to Advanced SIMD, which does have widening shifts.  */
+/* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 4 "vect" { target { { ! vect_sizes_32B_16B } && { ! vect_widen_shift } } xfail { aarch64_sve && vect_variable_length } } } } */
 /* { dg-final { scan-tree-dump-times "vect_recog_over_widening_pattern: detected" 8 "vect" { target vect_sizes_32B_16B } } } */
 /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */