[SVE,ACLE] Implements ACLE svmin/svmax variants

Message ID CAELXzTNUAxY=+jnc7k2e09E8cysX4vL0TstY-qRi9+Xk=sphmg@mail.gmail.com
State New
Series [SVE,ACLE] Implements ACLE svmin/svmax variants

Commit Message

Kugan Vivekanandarajah Aug. 28, 2018, 5:52 a.m. UTC
Hi,

The attached patch implements the ACLE svmin/svmax variants and adds
the corresponding test cases. Committed to the SVE ACLE branch.
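
As a quick illustration (not part of the patch), here is a minimal usage
sketch of the new intrinsics.  It follows the forms exercised by the new
max_*.c tests; the svmin forms are assumed to be analogous, and arm_sve.h
is the usual ACLE header (the tests include test_sve_acle.h instead):

#include <arm_sve.h>

/* Hypothetical example: clamp A to the range [B, 1.0] for active lanes.  */
svfloat32_t
clamp_example (svbool_t pg, svfloat32_t a, svfloat32_t b)
{
  /* _m form: active lanes get max (a, b), inactive lanes keep a.  */
  svfloat32_t t = svmax_f32_m (pg, a, b);
  /* _n/_z form: scalar second operand, inactive lanes are zeroed.  */
  return svmin_n_f32_z (pg, t, 1.0f);
}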

Thanks,
Kugan

Patch

From 38ac9068ae7b82e6f34024ce3bb072a254f702a3 Mon Sep 17 00:00:00 2001
From: Kugan Vivekanandarajah <kugan.vivekanandarajah@linaro.org>
Date: Wed, 15 Aug 2018 15:13:05 +1000
Subject: [PATCH 1/4] initial svmax/svmin ACLE implementation

Change-Id: I7a1c63a404152563cbc52b17271f4080f07f8f16
---
 gcc/config/aarch64/aarch64-sve-builtins.c          | 104 ++++++-
 gcc/config/aarch64/aarch64-sve-builtins.def        |   2 +
 gcc/config/aarch64/aarch64-sve.md                  |  43 +--
 gcc/config/aarch64/iterators.md                    |  48 +++-
 .../gcc.target/aarch64/sve-acle/asm/max_f16.c      | 306 +++++++++++++++++++++
 .../aarch64/sve-acle/asm/max_f16_notrap.c          | 302 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/max_f32.c      | 306 +++++++++++++++++++++
 .../aarch64/sve-acle/asm/max_f32_notrap.c          | 302 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/max_f64.c      | 306 +++++++++++++++++++++
 .../aarch64/sve-acle/asm/max_f64_notrap.c          | 302 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/max_s16.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/max_s32.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/max_s64.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/max_s8.c       | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/max_u16.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/max_u32.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/max_u64.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/max_u8.c       | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_f16.c      | 306 +++++++++++++++++++++
 .../aarch64/sve-acle/asm/min_f16_notrap.c          | 302 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_f32.c      | 306 +++++++++++++++++++++
 .../aarch64/sve-acle/asm/min_f32_notrap.c          | 302 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_f64.c      | 306 +++++++++++++++++++++
 .../aarch64/sve-acle/asm/min_f64_notrap.c          | 302 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_s16.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_s32.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_s64.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_s8.c       | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_u16.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_u32.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_u64.c      | 286 +++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/min_u8.c       | 286 +++++++++++++++++++
 32 files changed, 8379 insertions(+), 42 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f16_notrap.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f32_notrap.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f64_notrap.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f16_notrap.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f32_notrap.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f64_notrap.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u8.c

diff --git a/gcc/config/aarch64/aarch64-sve-builtins.c b/gcc/config/aarch64/aarch64-sve-builtins.c
index f16bd11..38f8f78 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.c
+++ b/gcc/config/aarch64/aarch64-sve-builtins.c
@@ -165,6 +165,8 @@  struct type_suffix_info {
 
   /* True if the suffix is for an integer type.  */
   unsigned int integer_p : 1;
+  /* True if the suffix is for an unsigned type.  */
+  unsigned int unsigned_p : 1;
 };
 
 /* Static information about a set of functions.  */
@@ -348,11 +350,15 @@  public:
 
 private:
   rtx expand_add (unsigned int);
+  rtx expand_max ();
+  rtx expand_min ();
   rtx expand_ptrue ();
   rtx expand_sub (bool);
 
+  rtx expand_signed_pred_op (rtx_code, rtx_code, int);
   rtx expand_via_unpred_direct_optab (optab, unsigned int);
   rtx expand_via_pred_direct_optab (optab, unsigned int, unsigned int);
+  rtx expand_via_pred_insn (insn_code, unsigned int, unsigned int);
   rtx expand_via_pred_x_insn (insn_code, unsigned int);
 
   bool try_negating_argument (unsigned int, machine_mode);
@@ -401,9 +407,10 @@  static const char *const pred_suffixes[NUM_PREDS + 1] = {
 static const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
 #define DEF_SVE_TYPE_SUFFIX(NAME, ACLE_TYPE, BITS) \
   { "_" #NAME, VECTOR_TYPE_ ## ACLE_TYPE, BITS, BITS / BITS_PER_UNIT, \
-    #NAME[0] == 's' || #NAME[0] == 'u' },
+    #NAME[0] == 's' || #NAME[0] == 'u', \
+    #NAME[0] == 'u' },
 #include "aarch64-sve-builtins.def"
-  { "", NUM_VECTOR_TYPES, 0, 0, false }
+  { "", NUM_VECTOR_TYPES, 0, 0, false, false }
 };
 
 /* _b8 _b16 _b32 _b64.  */
@@ -800,6 +807,8 @@  arm_sve_h_builder::get_attributes (const function_instance &instance)
   switch (instance.func)
     {
     case FUNC_svadd:
+    case FUNC_svmax:
+    case FUNC_svmin:
     case FUNC_svsub:
     case FUNC_svsubr:
       if (type_suffixes[instance.types[0]].integer_p)
@@ -1222,6 +1231,8 @@  gimple_folder::fold ()
   switch (m_fi.func)
     {
     case FUNC_svadd:
+    case FUNC_svmax:
+    case FUNC_svmin:
     case FUNC_svsub:
     case FUNC_svsubr:
     case NUM_FUNCS:
@@ -1282,6 +1293,12 @@  function_expander::expand ()
     case FUNC_svadd:
       return expand_add (1);
 
+    case FUNC_svmax:
+      return expand_max ();
+
+    case FUNC_svmin:
+      return expand_min ();
+
     case FUNC_svptrue:
       return expand_ptrue ();
 
@@ -1318,6 +1335,20 @@  function_expander::expand_add (unsigned int merge_argno)
   return expand_via_pred_direct_optab (cond_add_optab, 2, merge_argno);
 }
 
+/* Expand a call to svmax.  */
+rtx
+function_expander::expand_max ()
+{
+  return expand_signed_pred_op (SMAX, UMAX, UNSPEC_COND_FMAX);
+}
+
+/* Expand a call to svmin.  */
+rtx
+function_expander::expand_min ()
+{
+  return expand_signed_pred_op (SMIN, UMIN, UNSPEC_COND_FMIN);
+}
+
 /* Expand a call to svptrue.  */
 rtx
 function_expander::expand_ptrue ()
@@ -1388,8 +1419,19 @@  function_expander::expand_via_pred_direct_optab (optab op, unsigned int nops,
 						 unsigned int merge_argno)
 {
   machine_mode mode = get_mode (0);
-  machine_mode pred_mode = get_pred_mode (0);
   insn_code icode = direct_optab_handler (op, mode);
+  return expand_via_pred_insn (icode, nops, merge_argno);
+}
+
+/* Implement the call using instruction ICODE.  The instruction takes
+   NOPS input operands (not counting the predicate and the fallback value).
+   Merging forms use argument MERGE_ARGNO as the fallback value.  */
+rtx
+function_expander::expand_via_pred_insn (insn_code icode, unsigned int nops,
+						 unsigned int merge_argno)
+{
+  machine_mode mode = get_mode (0);
+  machine_mode pred_mode = get_pred_mode (0);
 
   add_output_operand (mode);
   if (nops == 1 && m_fi.pred == PRED_m)
@@ -1433,16 +1475,60 @@  function_expander::expand_via_pred_x_insn (insn_code icode, unsigned int nops)
   /* Add a flag that indicates whether unpredicated instructions
      are allowed.  */
   rtx pred = m_ops[1].value;
-  if (FLOAT_MODE_P (mode)
-      && flag_trapping_math
-      && pred != CONST1_RTX (pred_mode))
-    add_integer_operand (SVE_FORBID_NEW_FAULTS);
-  else
-    add_integer_operand (SVE_ALLOW_NEW_FAULTS);
+  if (FLOAT_MODE_P (mode))
+    {
+      if (flag_trapping_math
+	  && pred != CONST1_RTX (pred_mode))
+	add_integer_operand (SVE_FORBID_NEW_FAULTS);
+      else
+	add_integer_operand (SVE_ALLOW_NEW_FAULTS);
+    }
 
   return generate_insn (icode);
 }
 
+/* Implement the call using an @aarch64_pred instruction for _x
+   predication and a @cond instruction for _z and _m predication.
+   The integer instructions are parameterized by an rtx_code while
+   the floating-point instructions are parameterized by an unspec code.
+   CODE_FOR_SINT is the rtx_code for signed integer operations,
+   CODE_FOR_UINT is the rtx_code for unsigned integer operations
+   and UNSPEC_COND is the unspec code for floating-point operations.  */
+rtx
+function_expander::expand_signed_pred_op (rtx_code code_for_sint,
+					  rtx_code code_for_uint,
+					  int unspec_cond)
+{
+  insn_code icode;
+
+  if (m_fi.pred == PRED_x)
+    {
+      if (type_suffixes[m_fi.types[0]].integer_p)
+	{
+	  if (type_suffixes[m_fi.types[0]].unsigned_p)
+	    icode = code_for_aarch64_pred (code_for_uint, get_mode (0));
+	  else
+	    icode = code_for_aarch64_pred (code_for_sint, get_mode (0));
+	}
+      else
+	icode = code_for_aarch64_pred (unspec_cond, get_mode (0));
+      return expand_via_pred_x_insn (icode, 2);
+    }
+  else
+    {
+      if (type_suffixes[m_fi.types[0]].integer_p)
+	{
+	  if (type_suffixes[m_fi.types[0]].unsigned_p)
+	    icode = code_for_cond (code_for_uint, get_mode (0));
+	  else
+	    icode = code_for_cond (code_for_sint, get_mode (0));
+	}
+      else
+	icode = code_for_cond (unspec_cond, get_mode (0));
+      return expand_via_pred_insn (icode, 2, 1);
+    }
+}
+
 /* Return true if argument I is a constant argument that can be negated
    at compile time, replacing it with the negated value if so.  MODE is the
    associated vector mode, but the argument could be a single element.  */
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.def b/gcc/config/aarch64/aarch64-sve-builtins.def
index fc92b7e..ae53dc7 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins.def
@@ -61,6 +61,8 @@  DEF_SVE_TYPE_SUFFIX (u64, svuint64_t, 64)
 
 /* List of functions, in alphabetical order.  */
 DEF_SVE_FUNCTION (svadd, binary_opt_n, all_data, mxz)
+DEF_SVE_FUNCTION (svmax, binary_opt_n, all_data, mxz)
+DEF_SVE_FUNCTION (svmin, binary_opt_n, all_data, mxz)
 DEF_SVE_FUNCTION (svptrue, inherent, all_pred, none)
 DEF_SVE_FUNCTION (svsub, binary_opt_n, all_data, mxz)
 DEF_SVE_FUNCTION (svsubr, binary_opt_n, all_data, mxz)
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index a7969a6..d6f8a6f 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -1749,7 +1749,7 @@ 
 )
 
 ;; Integer MIN/MAX predicated with a PTRUE.
-(define_insn "*<su><maxmin><mode>3"
+(define_insn "@aarch64_pred_<optab><mode>"
   [(set (match_operand:SVE_I 0 "register_operand" "=w, ?&w")
 	(unspec:SVE_I
 	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
@@ -1797,25 +1797,25 @@ 
   [(set (match_operand:SVE_F 0 "register_operand")
 	(unspec:SVE_F
 	  [(match_dup 3)
-	   (unspec:SVE_F [(match_operand:SVE_F 1 "register_operand")
-			  (match_operand:SVE_F 2 "register_operand")]
-			 FMAXMIN_UNS)]
-	  UNSPEC_MERGE_PTRUE))]
+	   (const_int SVE_ALLOW_NEW_FAULTS)
+	   (match_operand:SVE_F 1 "register_operand")
+	   (match_operand:SVE_F 2 "register_operand")]
+	   SVE_COND_MAXMIN))]
   "TARGET_SVE"
   {
     operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
   }
 )
 
-;; fmin/fmax predicated with a PTRUE.
-(define_insn "*<maxmin_uns><mode>3"
+;; Predicated FMIN, FMAX, FMINNM and FMAXNM.
+(define_insn "@aarch64_pred_<maxmin_uns><mode>"
   [(set (match_operand:SVE_F 0 "register_operand" "=w, ?&w")
 	(unspec:SVE_F
-	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl")
-	   (unspec:SVE_F [(match_operand:SVE_F 2 "register_operand" "%0, w")
-			  (match_operand:SVE_F 3 "register_operand" "w, w")]
-			 FMAXMIN_UNS)]
-	  UNSPEC_MERGE_PTRUE))]
+	  [(match_operand:<VPRED> 1 "register_operand" "Upl,  Upl")
+	   (match_operand:SI 4 "const_int_operand" "i, i")
+	   (match_operand:SVE_F 2 "register_operand" "%0, w")
+	   (match_operand:SVE_F 3 "register_operand" "w, w")]
+	   SVE_COND_MAXMIN))]
   "TARGET_SVE"
   "@
    <maxmin_uns_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
@@ -1824,7 +1824,7 @@ 
 )
 
 ;; Predicated integer operations with select.
-(define_expand "cond_<optab><mode>"
+(define_expand "@cond_<optab><mode>"
   [(set (match_operand:SVE_I 0 "register_operand")
 	(unspec:SVE_I
 	  [(match_operand:<VPRED> 1 "register_operand")
@@ -1836,7 +1836,7 @@ 
   "TARGET_SVE"
 )
 
-(define_expand "cond_<optab><mode>"
+(define_expand "@cond_<optab><mode>"
   [(set (match_operand:SVE_SDI 0 "register_operand")
 	(unspec:SVE_SDI
 	  [(match_operand:<VPRED> 1 "register_operand")
@@ -3040,7 +3040,7 @@ 
 )
 
 ;; Predicated floating-point operations with select.
-(define_expand "cond_<optab><mode>"
+(define_expand "@cond_<optab><mode>"
   [(set (match_operand:SVE_F 0 "register_operand")
 	(unspec:SVE_F
 	  [(match_operand:<VPRED> 1 "register_operand")
@@ -3118,19 +3118,22 @@ 
 
 ;; Predicated floating-point operations with select matching zero.
 (define_insn "*cond_<optab><mode>_z"
-  [(set (match_operand:SVE_F 0 "register_operand" "=&w,&w")
+  [(set (match_operand:SVE_F 0 "register_operand" "=&w,&w,&w")
 	(unspec:SVE_F
-	  [(match_operand:<VPRED> 1 "register_operand" "Upl,Upl")
+	  [(match_operand:<VPRED> 1 "register_operand" "Upl,Upl,Upl")
 	   (unspec:SVE_F
 	     [(match_dup 1)
 	      (match_operand 5)
-	      (match_operand:SVE_F 2 "register_operand" "0,w")
-	      (match_operand:SVE_F 3 "register_operand" "w,w")]
+	      (match_operand:SVE_F 2 "register_operand" "0,w,w")
+	      (match_operand:SVE_F 3 "register_operand" "w,0,w")]
 	     SVE_COND_FP_BINARY)
 	   (match_operand:SVE_F 4 "aarch64_simd_imm_zero")]
 	  UNSPEC_SEL))]
   "TARGET_SVE"
-  "movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+  "@
+   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op_rev>\t%0.<Vetype>, %1/m, %0.<Vetype>, %2.<Vetype>
+   movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
   [(set_attr "movprfx" "yes")]
 )
 
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index beab9c3..781ab43 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -469,8 +469,10 @@ 
     UNSPEC_COND_SUB	; Used in aarch64-sve.md.
     UNSPEC_COND_MUL	; Used in aarch64-sve.md.
     UNSPEC_COND_DIV	; Used in aarch64-sve.md.
-    UNSPEC_COND_MAX	; Used in aarch64-sve.md.
-    UNSPEC_COND_MIN	; Used in aarch64-sve.md.
+    UNSPEC_COND_FMIN	; Used in aarch64-sve.md.
+    UNSPEC_COND_FMAX	; Used in aarch64-sve.md.
+    UNSPEC_COND_FMAXNM	; Used in aarch64-sve.md.
+    UNSPEC_COND_FMINNM	; Used in aarch64-sve.md.
     UNSPEC_COND_FMLA	; Used in aarch64-sve.md.
     UNSPEC_COND_FMLS	; Used in aarch64-sve.md.
     UNSPEC_COND_FNMLA	; Used in aarch64-sve.md.
@@ -1568,7 +1570,11 @@ 
 (define_int_iterator MUL_HIGHPART [UNSPEC_SMUL_HIGHPART UNSPEC_UMUL_HIGHPART])
 
 (define_int_iterator SVE_COND_FP_BINARY [UNSPEC_COND_MUL UNSPEC_COND_DIV
-					 UNSPEC_COND_MAX UNSPEC_COND_MIN])
+					 UNSPEC_COND_FMAX UNSPEC_COND_FMIN
+					 UNSPEC_COND_FMAXNM UNSPEC_COND_FMINNM])
+
+(define_int_iterator SVE_COND_MAXMIN [UNSPEC_COND_FMAXNM UNSPEC_COND_FMINNM
+				      UNSPEC_COND_FMAX UNSPEC_COND_FMIN])
 
 (define_int_iterator SVE_COND_FP_TERNARY [UNSPEC_COND_FMLA
 					  UNSPEC_COND_FMLS
@@ -1604,8 +1610,10 @@ 
 			(UNSPEC_XORV "xor")
 			(UNSPEC_COND_MUL "mul")
 			(UNSPEC_COND_DIV "div")
-			(UNSPEC_COND_MAX "smax")
-			(UNSPEC_COND_MIN "smin")
+			(UNSPEC_COND_FMAX "smax_nan")
+			(UNSPEC_COND_FMIN "smin_nan")
+			(UNSPEC_COND_FMAXNM "smax")
+			(UNSPEC_COND_FMINNM "smin")
 			(UNSPEC_COND_FMLA "fma")
 			(UNSPEC_COND_FMLS "fnma")
 			(UNSPEC_COND_FNMLA "fnms")
@@ -1622,7 +1630,11 @@ 
 			      (UNSPEC_FMINNMV "smin")
 			      (UNSPEC_FMINV "smin_nan")
 			      (UNSPEC_FMAXNM "fmax")
-			      (UNSPEC_FMINNM "fmin")])
+			      (UNSPEC_FMINNM "fmin")
+			      (UNSPEC_COND_FMAX "fmax")
+			      (UNSPEC_COND_FMIN "fmin")
+			      (UNSPEC_COND_FMAXNM "fmaxnm")
+			      (UNSPEC_COND_FMINNM "fminnm")])
 
 (define_int_attr  maxmin_uns_op [(UNSPEC_UMAXV "umax")
 				 (UNSPEC_UMINV "umin")
@@ -1635,7 +1647,11 @@ 
 				 (UNSPEC_FMINNMV "fminnm")
 				 (UNSPEC_FMINV "fmin")
 				 (UNSPEC_FMAXNM "fmaxnm")
-				 (UNSPEC_FMINNM "fminnm")])
+				 (UNSPEC_FMINNM "fminnm")
+				 (UNSPEC_COND_FMAX "fmax")
+				 (UNSPEC_COND_FMIN "fmin")
+				 (UNSPEC_COND_FMAXNM "fmaxnm")
+				 (UNSPEC_COND_FMINNM "fminnm")])
 
 (define_int_attr bit_reduc_op [(UNSPEC_ANDV "andv")
 			       (UNSPEC_IORV "orv")
@@ -1824,13 +1840,17 @@ 
 
 (define_int_attr sve_fp_op [(UNSPEC_COND_MUL "fmul")
 			    (UNSPEC_COND_DIV "fdiv")
-			    (UNSPEC_COND_MAX "fmaxnm")
-			    (UNSPEC_COND_MIN "fminnm")])
+			    (UNSPEC_COND_FMAX "fmax")
+			    (UNSPEC_COND_FMIN "fmin")
+			    (UNSPEC_COND_FMAXNM "fmaxnm")
+			    (UNSPEC_COND_FMINNM "fminnm")])
 
 (define_int_attr sve_fp_op_rev [(UNSPEC_COND_MUL "fmul")
 			        (UNSPEC_COND_DIV "fdivr")
-			        (UNSPEC_COND_MAX "fmaxnm")
-			        (UNSPEC_COND_MIN "fminnm")])
+			        (UNSPEC_COND_FMAX "fmax")
+			        (UNSPEC_COND_FMIN "fmin")
+			        (UNSPEC_COND_FMAXNM "fmaxnm")
+			        (UNSPEC_COND_FMINNM "fminnm")])
 
 (define_int_attr sve_fmla_op [(UNSPEC_COND_FMLA "fmla")
 			      (UNSPEC_COND_FMLS "fmls")
@@ -1844,5 +1864,7 @@ 
 
 (define_int_attr commutative [(UNSPEC_COND_MUL "true")
 			      (UNSPEC_COND_DIV "false")
-			      (UNSPEC_COND_MIN "true")
-			      (UNSPEC_COND_MAX "true")])
+			      (UNSPEC_COND_FMIN "true")
+			      (UNSPEC_COND_FMAX "true")
+			      (UNSPEC_COND_FMINNM "true")
+			      (UNSPEC_COND_FMAXNM "true")])
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f16.c
new file mode 100644
index 0000000..ed39be4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f16.c
@@ -0,0 +1,306 @@ 
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_f16_m_tied1:
+**	fmax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_m_tied1, svfloat16_t,
+		z0 = svmax_f16_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_f16_m_tied2, svfloat16_t,
+		z1 = svmax_f16_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_f16_m_untied:
+**	movprfx	z0, z1
+**	fmax	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_m_untied, svfloat16_t,
+		z0 = svmax_f16_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_w0_f16_m_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_m_tied1, svfloat16_t, __fp16,
+		 z0 = svmax_n_f16_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_w0_f16_m_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0, z1
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_m_untied, svfloat16_t, __fp16,
+		 z0 = svmax_n_f16_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_h0_f16_m_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	fmax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_m_tied1, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_h0_f16_m_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1, z2
+**	fmax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_m_untied, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_f16_m_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_m_tied1, svfloat16_t,
+		z0 = svmax_n_f16_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_f16_m_untied:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_m_untied, svfloat16_t,
+		z0 = svmax_n_f16_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_f16_z_tied1:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_z_tied1, svfloat16_t,
+		z0 = svmax_f16_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_f16_z_tied2:
+**	movprfx	z1\.h, p0/z, z1\.h
+**	fmax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_z_tied2, svfloat16_t,
+		z1 = svmax_f16_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_f16_z_untied:
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmax	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_z_untied, svfloat16_t,
+		z0 = svmax_f16_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_w0_f16_z_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_z_tied1, svfloat16_t, __fp16,
+		 z0 = svmax_n_f16_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_w0_f16_z_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_z_untied, svfloat16_t, __fp16,
+		 z0 = svmax_n_f16_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_h0_f16_z_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z1\.h
+**	fmax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_z_tied1, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_h0_f16_z_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z2\.h
+**	fmax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_z_untied, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_f16_z_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_z_tied1, svfloat16_t,
+		z0 = svmax_n_f16_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_f16_z_untied:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_z_untied, svfloat16_t,
+		z0 = svmax_n_f16_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_f16_x_tied1:
+**	fmax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_x_tied1, svfloat16_t,
+		z0 = svmax_f16_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_f16_x_tied2:
+**	fmax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_x_tied2, svfloat16_t,
+		z1 = svmax_f16_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_f16_x_untied:
+**	movprfx	z2, z0
+**	fmax	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_x_untied, svfloat16_t,
+		z2 = svmax_f16_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_w0_f16_x_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_x_tied1, svfloat16_t, __fp16,
+		 z0 = svmax_n_f16_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_w0_f16_x_untied:
+**	mov	z1\.h, w0
+**	fmax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_x_untied, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_h0_f16_x_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	fmax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_x_tied1, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_h0_f16_x_untied:
+**	mov	z2\.h, h0
+**	fmax	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_x_untied, svfloat16_t, __fp16,
+		 z2 = svmax_n_f16_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_f16_x_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_x_tied1, svfloat16_t,
+		z0 = svmax_n_f16_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_f16_x_untied:
+**	fmov	z0\.h, #1.0(e\+0)?
+**	fmax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_x_untied, svfloat16_t,
+		z0 = svmax_n_f16_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
+/*
+** ptrue_max_f16_x_tied1:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fmax	z0\.h, \1/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f16_x_tied1, svfloat16_t,
+		z0 = svmax_f16_x (svptrue_b16 (), z0, z1),
+		z0 = svmax_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_max_f16_x_tied2:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fmax	z1\.h, \1/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f16_x_tied2, svfloat16_t,
+		z1 = svmax_f16_x (svptrue_b16 (), z0, z1),
+		z1 = svmax_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_max_f16_x_untied:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	movprfx	z2, z0
+**	fmax	z2\.h, \1/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f16_x_untied, svfloat16_t,
+		z2 = svmax_f16_x (svptrue_b16 (), z0, z1),
+		z2 = svmax_x (svptrue_b16 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f16_notrap.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f16_notrap.c
new file mode 100644
index 0000000..4fe78b6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f16_notrap.c
@@ -0,0 +1,302 @@ 
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+/* { dg-additional-options "-fno-trapping-math" } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_f16_m_tied1:
+**	fmax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_m_tied1, svfloat16_t,
+		z0 = svmax_f16_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_f16_m_tied2, svfloat16_t,
+		z1 = svmax_f16_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_f16_m_untied:
+**	movprfx	z0, z1
+**	fmax	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_m_untied, svfloat16_t,
+		z0 = svmax_f16_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_w0_f16_m_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_m_tied1, svfloat16_t, __fp16,
+		 z0 = svmax_n_f16_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_w0_f16_m_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0, z1
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_m_untied, svfloat16_t, __fp16,
+		 z0 = svmax_n_f16_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_h0_f16_m_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	fmax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_m_tied1, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_h0_f16_m_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1, z2
+**	fmax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_m_untied, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_f16_m_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_m_tied1, svfloat16_t,
+		z0 = svmax_n_f16_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_f16_m_untied:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_m_untied, svfloat16_t,
+		z0 = svmax_n_f16_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_f16_z_tied1:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_z_tied1, svfloat16_t,
+		z0 = svmax_f16_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_f16_z_tied2, svfloat16_t,
+		z1 = svmax_f16_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_f16_z_untied:
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmax	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_z_untied, svfloat16_t,
+		z0 = svmax_f16_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_w0_f16_z_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_z_tied1, svfloat16_t, __fp16,
+		 z0 = svmax_n_f16_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_w0_f16_z_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_z_untied, svfloat16_t, __fp16,
+		 z0 = svmax_n_f16_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_h0_f16_z_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z1\.h
+**	fmax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_z_tied1, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_h0_f16_z_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z2\.h
+**	fmax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_z_untied, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_f16_z_untied:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_z_untied, svfloat16_t,
+		z0 = svmax_n_f16_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_1_f16_z_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_z_tied1, svfloat16_t,
+		z0 = svmax_n_f16_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_f16_x_tied1:
+**	fmax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_x_tied1, svfloat16_t,
+		z0 = svmax_f16_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_f16_x_tied2:
+**	fmax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_x_tied2, svfloat16_t,
+		z1 = svmax_f16_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_f16_x_untied:
+**	movprfx	z2, z0
+**	fmax	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_f16_x_untied, svfloat16_t,
+		z2 = svmax_f16_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_w0_f16_x_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_x_tied1, svfloat16_t, __fp16,
+		 z0 = svmax_n_f16_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_w0_f16_x_untied:
+**	mov	z1\.h, w0
+**	fmax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f16_x_untied, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_h0_f16_x_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	fmax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_x_tied1, svfloat16_t, __fp16,
+		 z1 = svmax_n_f16_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_h0_f16_x_untied:
+**	mov	z2\.h, h0
+**	fmax	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_f16_x_untied, svfloat16_t, __fp16,
+		 z2 = svmax_n_f16_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_f16_x_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	fmax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_x_tied1, svfloat16_t,
+		z0 = svmax_n_f16_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_f16_x_untied:
+**	fmov	z0\.h, #1.0(e\+0)?
+**	fmax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f16_x_untied, svfloat16_t,
+		z0 = svmax_n_f16_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
+/*
+** ptrue_max_f16_x_tied1:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fmax	z0\.h, \1/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f16_x_tied1, svfloat16_t,
+		z0 = svmax_f16_x (svptrue_b16 (), z0, z1),
+		z0 = svmax_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_max_f16_x_tied2:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fmax	z1\.h, \1/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f16_x_tied2, svfloat16_t,
+		z1 = svmax_f16_x (svptrue_b16 (), z0, z1),
+		z1 = svmax_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_max_f16_x_untied:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	movprfx	z2, z0
+**	fmax	z2\.h, \1/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f16_x_untied, svfloat16_t,
+		z2 = svmax_f16_x (svptrue_b16 (), z0, z1),
+		z2 = svmax_x (svptrue_b16 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f32.c
new file mode 100644
index 0000000..b51e07c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f32.c
@@ -0,0 +1,306 @@ 
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_f32_m_tied1:
+**	fmax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_m_tied1, svfloat32_t,
+		z0 = svmax_f32_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_f32_m_tied2, svfloat32_t,
+		z1 = svmax_f32_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_f32_m_untied:
+**	movprfx	z0, z1
+**	fmax	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_m_untied, svfloat32_t,
+		z0 = svmax_f32_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_w0_f32_m_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_m_tied1, svfloat32_t, float,
+		 z0 = svmax_n_f32_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_w0_f32_m_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0, z1
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_m_untied, svfloat32_t, float,
+		 z0 = svmax_n_f32_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_s0_f32_m_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	fmax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_m_tied1, svfloat32_t, float,
+		 z1 = svmax_n_f32_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_s0_f32_m_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1, z2
+**	fmax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_m_untied, svfloat32_t, float,
+		 z1 = svmax_n_f32_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_f32_m_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_m_tied1, svfloat32_t,
+		z0 = svmax_n_f32_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_f32_m_untied:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_m_untied, svfloat32_t,
+		z0 = svmax_n_f32_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_f32_z_tied1:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_z_tied1, svfloat32_t,
+		z0 = svmax_f32_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_f32_z_tied2:
+**	movprfx	z1\.s, p0/z, z1\.s
+**	fmax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_z_tied2, svfloat32_t,
+		z1 = svmax_f32_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_f32_z_untied:
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmax	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_z_untied, svfloat32_t,
+		z0 = svmax_f32_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_w0_f32_z_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_z_tied1, svfloat32_t, float,
+		 z0 = svmax_n_f32_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_w0_f32_z_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_z_untied, svfloat32_t, float,
+		 z0 = svmax_n_f32_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_s0_f32_z_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z1\.s
+**	fmax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_z_tied1, svfloat32_t, float,
+		 z1 = svmax_n_f32_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_s0_f32_z_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z2\.s
+**	fmax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_z_untied, svfloat32_t, float,
+		 z1 = svmax_n_f32_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_f32_z_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_z_tied1, svfloat32_t,
+		z0 = svmax_n_f32_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_f32_z_untied:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_z_untied, svfloat32_t,
+		z0 = svmax_n_f32_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_f32_x_tied1:
+**	fmax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_x_tied1, svfloat32_t,
+		z0 = svmax_f32_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_f32_x_tied2:
+**	fmax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_x_tied2, svfloat32_t,
+		z1 = svmax_f32_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_f32_x_untied:
+**	movprfx	z2, z0
+**	fmax	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_x_untied, svfloat32_t,
+		z2 = svmax_f32_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_w0_f32_x_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_x_tied1, svfloat32_t, float,
+		 z0 = svmax_n_f32_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_w0_f32_x_untied:
+**	mov	z1\.s, w0
+**	fmax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_x_untied, svfloat32_t, float,
+		 z1 = svmax_n_f32_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_s0_f32_x_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	fmax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_x_tied1, svfloat32_t, float,
+		 z1 = svmax_n_f32_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_s0_f32_x_untied:
+**	mov	z2\.s, s0
+**	fmax	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_x_untied, svfloat32_t, float,
+		 z2 = svmax_n_f32_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_f32_x_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_x_tied1, svfloat32_t,
+		z0 = svmax_n_f32_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_f32_x_untied:
+**	fmov	z0\.s, #1.0(e\+0)?
+**	fmax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_x_untied, svfloat32_t,
+		z0 = svmax_n_f32_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
+/*
+** ptrue_max_f32_x_tied1:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fmax	z0\.s, \1/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f32_x_tied1, svfloat32_t,
+		z0 = svmax_f32_x (svptrue_b32 (), z0, z1),
+		z0 = svmax_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_max_f32_x_tied2:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fmax	z1\.s, \1/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f32_x_tied2, svfloat32_t,
+		z1 = svmax_f32_x (svptrue_b32 (), z0, z1),
+		z1 = svmax_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_max_f32_x_untied:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	movprfx	z2, z0
+**	fmax	z2\.s, \1/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f32_x_untied, svfloat32_t,
+		z2 = svmax_f32_x (svptrue_b32 (), z0, z1),
+		z2 = svmax_x (svptrue_b32 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f32_notrap.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f32_notrap.c
new file mode 100644
index 0000000..24c4c02
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f32_notrap.c
@@ -0,0 +1,302 @@ 
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+/* { dg-additional-options "-fno-trapping-math" } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_f32_m_tied1:
+**	fmax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_m_tied1, svfloat32_t,
+		z0 = svmax_f32_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_f32_m_tied2, svfloat32_t,
+		z1 = svmax_f32_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_f32_m_untied:
+**	movprfx	z0, z1
+**	fmax	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_m_untied, svfloat32_t,
+		z0 = svmax_f32_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_w0_f32_m_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_m_tied1, svfloat32_t, float,
+		 z0 = svmax_n_f32_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_w0_f32_m_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0, z1
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_m_untied, svfloat32_t, float,
+		 z0 = svmax_n_f32_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_s0_f32_m_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	fmax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_m_tied1, svfloat32_t, float,
+		 z1 = svmax_n_f32_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_s0_f32_m_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1, z2
+**	fmax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_m_untied, svfloat32_t, float,
+		 z1 = svmax_n_f32_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_f32_m_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_m_tied1, svfloat32_t,
+		z0 = svmax_n_f32_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_f32_m_untied:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_m_untied, svfloat32_t,
+		z0 = svmax_n_f32_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_f32_z_tied1:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_z_tied1, svfloat32_t,
+		z0 = svmax_f32_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_f32_z_tied2, svfloat32_t,
+		z1 = svmax_f32_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_f32_z_untied:
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmax	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_z_untied, svfloat32_t,
+		z0 = svmax_f32_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_w0_f32_z_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_z_tied1, svfloat32_t, float,
+		 z0 = svmax_n_f32_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_w0_f32_z_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_z_untied, svfloat32_t, float,
+		 z0 = svmax_n_f32_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_s0_f32_z_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z1\.s
+**	fmax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_z_tied1, svfloat32_t, float,
+		 z1 = svmax_n_f32_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_s0_f32_z_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z2\.s
+**	fmax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_z_untied, svfloat32_t, float,
+		 z1 = svmax_n_f32_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_f32_z_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_z_tied1, svfloat32_t,
+		z0 = svmax_n_f32_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_f32_z_untied:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_z_untied, svfloat32_t,
+		z0 = svmax_n_f32_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_f32_x_tied1:
+**	fmax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_x_tied1, svfloat32_t,
+		z0 = svmax_f32_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_f32_x_tied2:
+**	fmax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_x_tied2, svfloat32_t,
+		z1 = svmax_f32_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_f32_x_untied:
+**	movprfx	z2, z0
+**	fmax	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_f32_x_untied, svfloat32_t,
+		z2 = svmax_f32_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_w0_f32_x_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_x_tied1, svfloat32_t, float,
+		 z0 = svmax_n_f32_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_w0_f32_x_untied:
+**	mov	z1\.s, w0
+**	fmax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_f32_x_untied, svfloat32_t, float,
+		 z1 = svmax_n_f32_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_s0_f32_x_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	fmax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_x_tied1, svfloat32_t, float,
+		 z1 = svmax_n_f32_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_s0_f32_x_untied:
+**	mov	z2\.s, s0
+**	fmax	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_f32_x_untied, svfloat32_t, float,
+		 z2 = svmax_n_f32_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_f32_x_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	fmax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_x_tied1, svfloat32_t,
+		z0 = svmax_n_f32_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_f32_x_untied:
+**	fmov	z0\.s, #1.0(e\+0)?
+**	fmax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f32_x_untied, svfloat32_t,
+		z0 = svmax_n_f32_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
+/*
+** ptrue_max_f32_x_tied1:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fmax	z0\.s, \1/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f32_x_tied1, svfloat32_t,
+		z0 = svmax_f32_x (svptrue_b32 (), z0, z1),
+		z0 = svmax_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_max_f32_x_tied2:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fmax	z1\.s, \1/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f32_x_tied2, svfloat32_t,
+		z1 = svmax_f32_x (svptrue_b32 (), z0, z1),
+		z1 = svmax_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_max_f32_x_untied:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	movprfx	z2, z0
+**	fmax	z2\.s, \1/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f32_x_untied, svfloat32_t,
+		z2 = svmax_f32_x (svptrue_b32 (), z0, z1),
+		z2 = svmax_x (svptrue_b32 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f64.c
new file mode 100644
index 0000000..519b490
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f64.c
@@ -0,0 +1,306 @@ 
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_f64_m_tied1:
+**	fmax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_m_tied1, svfloat64_t,
+		z0 = svmax_f64_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_f64_m_tied2, svfloat64_t,
+		z1 = svmax_f64_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_f64_m_untied:
+**	movprfx	z0, z1
+**	fmax	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_m_untied, svfloat64_t,
+		z0 = svmax_f64_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_x0_f64_m_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_m_tied1, svfloat64_t, double,
+		 z0 = svmax_n_f64_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_x0_f64_m_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0, z1
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_m_untied, svfloat64_t, double,
+		 z0 = svmax_n_f64_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_d0_f64_m_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	fmax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_m_tied1, svfloat64_t, double,
+		 z1 = svmax_n_f64_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_d0_f64_m_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1, z2
+**	fmax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_m_untied, svfloat64_t, double,
+		 z1 = svmax_n_f64_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_f64_m_tied1:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_m_tied1, svfloat64_t,
+		z0 = svmax_n_f64_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_f64_m_untied:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_m_untied, svfloat64_t,
+		z0 = svmax_n_f64_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_f64_z_tied1:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_z_tied1, svfloat64_t,
+		z0 = svmax_f64_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_f64_z_tied2:
+**	movprfx	z1\.d, p0/z, z1\.d
+**	fmax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_z_tied2, svfloat64_t,
+		z1 = svmax_f64_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_f64_z_untied:
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmax	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_z_untied, svfloat64_t,
+		z0 = svmax_f64_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_x0_f64_z_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_z_tied1, svfloat64_t, double,
+		 z0 = svmax_n_f64_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_x0_f64_z_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_z_untied, svfloat64_t, double,
+		 z0 = svmax_n_f64_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_d0_f64_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z1\.d
+**	fmax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_z_tied1, svfloat64_t, double,
+		 z1 = svmax_n_f64_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_d0_f64_z_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z2\.d
+**	fmax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_z_untied, svfloat64_t, double,
+		 z1 = svmax_n_f64_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_f64_z_tied1:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_z_tied1, svfloat64_t,
+		z0 = svmax_n_f64_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_f64_z_untied:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_z_untied, svfloat64_t,
+		z0 = svmax_n_f64_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_f64_x_tied1:
+**	fmax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_x_tied1, svfloat64_t,
+		z0 = svmax_f64_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_f64_x_tied2:
+**	fmax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_x_tied2, svfloat64_t,
+		z1 = svmax_f64_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_f64_x_untied:
+**	movprfx	z2, z0
+**	fmax	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_x_untied, svfloat64_t,
+		z2 = svmax_f64_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_x0_f64_x_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_x_tied1, svfloat64_t, double,
+		 z0 = svmax_n_f64_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_x0_f64_x_untied:
+**	mov	z1\.d, x0
+**	fmax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_x_untied, svfloat64_t, double,
+		 z1 = svmax_n_f64_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_d0_f64_x_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	fmax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_x_tied1, svfloat64_t, double,
+		 z1 = svmax_n_f64_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_d0_f64_x_untied:
+**	mov	z2\.d, d0
+**	fmax	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_x_untied, svfloat64_t, double,
+		 z2 = svmax_n_f64_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_f64_x_tied1:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_x_tied1, svfloat64_t,
+		z0 = svmax_n_f64_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_f64_x_untied:
+**	fmov	z0\.d, #1.0(e\+0)?
+**	fmax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_x_untied, svfloat64_t,
+		z0 = svmax_n_f64_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
+/*
+** ptrue_max_f64_x_tied1:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fmax	z0\.d, \1/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f64_x_tied1, svfloat64_t,
+		z0 = svmax_f64_x (svptrue_b64 (), z0, z1),
+		z0 = svmax_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_max_f64_x_tied2:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fmax	z1\.d, \1/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f64_x_tied2, svfloat64_t,
+		z1 = svmax_f64_x (svptrue_b64 (), z0, z1),
+		z1 = svmax_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_max_f64_x_untied:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	movprfx	z2, z0
+**	fmax	z2\.d, \1/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f64_x_untied, svfloat64_t,
+		z2 = svmax_f64_x (svptrue_b64 (), z0, z1),
+		z2 = svmax_x (svptrue_b64 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f64_notrap.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f64_notrap.c
new file mode 100644
index 0000000..8219e53
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_f64_notrap.c
@@ -0,0 +1,302 @@ 
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+/* { dg-additional-options "-fno-trapping-math" } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_f64_m_tied1:
+**	fmax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_m_tied1, svfloat64_t,
+		z0 = svmax_f64_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_f64_m_tied2, svfloat64_t,
+		z1 = svmax_f64_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_f64_m_untied:
+**	movprfx	z0, z1
+**	fmax	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_m_untied, svfloat64_t,
+		z0 = svmax_f64_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_x0_f64_m_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_m_tied1, svfloat64_t, double,
+		 z0 = svmax_n_f64_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_x0_f64_m_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0, z1
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_m_untied, svfloat64_t, double,
+		 z0 = svmax_n_f64_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_d0_f64_m_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	fmax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_m_tied1, svfloat64_t, double,
+		 z1 = svmax_n_f64_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_d0_f64_m_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1, z2
+**	fmax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_m_untied, svfloat64_t, double,
+		 z1 = svmax_n_f64_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_f64_m_tied1:
+**	fmov	(z[0-9]+\.d), #1\.0(e\+0)?
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_m_tied1, svfloat64_t,
+		z0 = svmax_n_f64_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_f64_m_untied:
+**	fmov	(z[0-9]+\.d), #1\.0(e\+0)?
+**	movprfx	z0, z1
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_m_untied, svfloat64_t,
+		z0 = svmax_n_f64_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_f64_z_tied1:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_z_tied1, svfloat64_t,
+		z0 = svmax_f64_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_f64_z_tied2, svfloat64_t,
+		z1 = svmax_f64_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_f64_z_untied:
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmax	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_z_untied, svfloat64_t,
+		z0 = svmax_f64_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_x0_f64_z_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_z_tied1, svfloat64_t, double,
+		 z0 = svmax_n_f64_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_x0_f64_z_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_z_untied, svfloat64_t, double,
+		 z0 = svmax_n_f64_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_d0_f64_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z1\.d
+**	fmax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_z_tied1, svfloat64_t, double,
+		 z1 = svmax_n_f64_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_d0_f64_z_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z2\.d
+**	fmax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_z_untied, svfloat64_t, double,
+		 z1 = svmax_n_f64_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_f64_z_tied1:
+**	fmov	(z[0-9]+\.d), #1\.0(e\+0)?
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_z_tied1, svfloat64_t,
+		z0 = svmax_n_f64_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_f64_z_untied:
+**	fmov	(z[0-9]+\.d), #1\.0(e\+0)?
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_z_untied, svfloat64_t,
+		z0 = svmax_n_f64_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_f64_x_tied1:
+**	fmax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_x_tied1, svfloat64_t,
+		z0 = svmax_f64_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_f64_x_tied2:
+**	fmax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_x_tied2, svfloat64_t,
+		z1 = svmax_f64_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_f64_x_untied:
+**	movprfx	z2, z0
+**	fmax	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_f64_x_untied, svfloat64_t,
+		z2 = svmax_f64_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_x0_f64_x_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_x_tied1, svfloat64_t, double,
+		 z0 = svmax_n_f64_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_x0_f64_x_untied:
+**	mov	z1\.d, x0
+**	fmax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_f64_x_untied, svfloat64_t, double,
+		 z1 = svmax_n_f64_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_d0_f64_x_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	fmax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_x_tied1, svfloat64_t, double,
+		 z1 = svmax_n_f64_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_d0_f64_x_untied:
+**	mov	z2\.d, d0
+**	fmax	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_f64_x_untied, svfloat64_t, double,
+		 z2 = svmax_n_f64_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_f64_x_tied1:
+**	fmov	(z[0-9]+\.d), #1\.0(e\+0)?
+**	fmax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_x_tied1, svfloat64_t,
+		z0 = svmax_n_f64_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_f64_x_untied:
+**	fmov	z0\.d, #1\.0(e\+0)?
+**	fmax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_f64_x_untied, svfloat64_t,
+		z0 = svmax_n_f64_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
+/*
+** ptrue_max_f64_x_tied1:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fmax	z0\.d, \1/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f64_x_tied1, svfloat64_t,
+		z0 = svmax_f64_x (svptrue_b64 (), z0, z1),
+		z0 = svmax_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_max_f64_x_tied2:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fmax	z1\.d, \1/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f64_x_tied2, svfloat64_t,
+		z1 = svmax_f64_x (svptrue_b64 (), z0, z1),
+		z1 = svmax_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_max_f64_x_untied:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	movprfx	z2, z0
+**	fmax	z2\.d, \1/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_max_f64_x_untied, svfloat64_t,
+		z2 = svmax_f64_x (svptrue_b64 (), z0, z1),
+		z2 = svmax_x (svptrue_b64 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s16.c
new file mode 100644
index 0000000..d0b8a21
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s16.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_s16_m_tied1:
+**	smax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_s16_m_tied1, svint16_t,
+		z0 = svmax_s16_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_s16_m_tied2, svint16_t,
+		z1 = svmax_s16_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_s16_m_untied:
+**	movprfx	z0, z1
+**	smax	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_s16_m_untied, svint16_t,
+		z0 = svmax_s16_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_w0_s16_m_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	smax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s16_m_tied1, svint16_t, int16_t,
+		 z0 = svmax_n_s16_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_w0_s16_m_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0, z1
+**	smax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s16_m_untied, svint16_t, int16_t,
+		 z0 = svmax_n_s16_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_h0_s16_m_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	smax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_s16_m_tied1, svint16_t, int16_t,
+		 z1 = svmax_n_s16_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_h0_s16_m_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1, z2
+**	smax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_s16_m_untied, svint16_t, int16_t,
+		 z1 = svmax_n_s16_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_s16_m_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	smax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s16_m_tied1, svint16_t,
+		z0 = svmax_n_s16_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_s16_m_untied:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0, z1
+**	smax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s16_m_untied, svint16_t,
+		z0 = svmax_n_s16_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_m1_s16_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	smax	z0\.h, p0/m, z0\.h, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_m1_s16_m, svint16_t,
+		z0 = svmax_n_s16_m (p0, z0, -1),
+		z0 = svmax_m (p0, z0, -1))
+
+/*
+** max_s16_z_tied1:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	smax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_s16_z_tied1, svint16_t,
+		z0 = svmax_s16_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_s16_z_tied2:
+**	movprfx	z1\.h, p0/z, z1\.h
+**	smax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_s16_z_tied2, svint16_t,
+		z1 = svmax_s16_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_s16_z_untied:
+**	movprfx	z0\.h, p0/z, z1\.h
+**	smax	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_s16_z_untied, svint16_t,
+		z0 = svmax_s16_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_w0_s16_z_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z0\.h
+**	smax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s16_z_tied1, svint16_t, int16_t,
+		 z0 = svmax_n_s16_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_w0_s16_z_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z1\.h
+**	smax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s16_z_untied, svint16_t, int16_t,
+		 z0 = svmax_n_s16_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_h0_s16_z_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z1\.h
+**	smax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_s16_z_tied1, svint16_t, int16_t,
+		 z1 = svmax_n_s16_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_h0_s16_z_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z2\.h
+**	smax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_s16_z_untied, svint16_t, int16_t,
+		 z1 = svmax_n_s16_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_s16_z_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0\.h, p0/z, z0\.h
+**	smax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s16_z_tied1, svint16_t,
+		z0 = svmax_n_s16_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_s16_z_untied:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0\.h, p0/z, z1\.h
+**	smax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s16_z_untied, svint16_t,
+		z0 = svmax_n_s16_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_s16_x_tied1:
+**	smax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_s16_x_tied1, svint16_t,
+		z0 = svmax_s16_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_s16_x_tied2:
+**	smax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_s16_x_tied2, svint16_t,
+		z1 = svmax_s16_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_s16_x_untied:
+**	movprfx	z2, z0
+**	smax	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_s16_x_untied, svint16_t,
+		z2 = svmax_s16_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_w0_s16_x_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	smax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s16_x_tied1, svint16_t, int16_t,
+		 z0 = svmax_n_s16_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_w0_s16_x_untied:
+**	mov	z1\.h, w0
+**	smax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s16_x_untied, svint16_t, int16_t,
+		 z1 = svmax_n_s16_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_h0_s16_x_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	smax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_s16_x_tied1, svint16_t, int16_t,
+		 z1 = svmax_n_s16_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_h0_s16_x_untied:
+**	mov	z2\.h, h0
+**	smax	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_s16_x_untied, svint16_t, int16_t,
+		 z2 = svmax_n_s16_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_s16_x_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	smax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s16_x_tied1, svint16_t,
+		z0 = svmax_n_s16_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_s16_x_untied:
+**	mov	z0\.h, #1
+**	smax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s16_x_untied, svint16_t,
+		z0 = svmax_n_s16_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s32.c
new file mode 100644
index 0000000..b23c02b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s32.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_s32_m_tied1:
+**	smax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_s32_m_tied1, svint32_t,
+		z0 = svmax_s32_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_s32_m_tied2, svint32_t,
+		z1 = svmax_s32_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_s32_m_untied:
+**	movprfx	z0, z1
+**	smax	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_s32_m_untied, svint32_t,
+		z0 = svmax_s32_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_w0_s32_m_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	smax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s32_m_tied1, svint32_t, int32_t,
+		 z0 = svmax_n_s32_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_w0_s32_m_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0, z1
+**	smax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s32_m_untied, svint32_t, int32_t,
+		 z0 = svmax_n_s32_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_s0_s32_m_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	smax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_s32_m_tied1, svint32_t, int32_t,
+		 z1 = svmax_n_s32_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_s0_s32_m_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1, z2
+**	smax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_s32_m_untied, svint32_t, int32_t,
+		 z1 = svmax_n_s32_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_s32_m_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	smax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s32_m_tied1, svint32_t,
+		z0 = svmax_n_s32_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_s32_m_untied:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0, z1
+**	smax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s32_m_untied, svint32_t,
+		z0 = svmax_n_s32_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_m1_s32_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	smax	z0\.s, p0/m, z0\.s, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_m1_s32_m, svint32_t,
+		z0 = svmax_n_s32_m (p0, z0, -1),
+		z0 = svmax_m (p0, z0, -1))
+
+/*
+** max_s32_z_tied1:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	smax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_s32_z_tied1, svint32_t,
+		z0 = svmax_s32_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_s32_z_tied2:
+**	movprfx	z1\.s, p0/z, z1\.s
+**	smax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_s32_z_tied2, svint32_t,
+		z1 = svmax_s32_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_s32_z_untied:
+**	movprfx	z0\.s, p0/z, z1\.s
+**	smax	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_s32_z_untied, svint32_t,
+		z0 = svmax_s32_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_w0_s32_z_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z0\.s
+**	smax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s32_z_tied1, svint32_t, int32_t,
+		 z0 = svmax_n_s32_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_w0_s32_z_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z1\.s
+**	smax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s32_z_untied, svint32_t, int32_t,
+		 z0 = svmax_n_s32_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_s0_s32_z_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z1\.s
+**	smax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_s32_z_tied1, svint32_t, int32_t,
+		 z1 = svmax_n_s32_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_s0_s32_z_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z2\.s
+**	smax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_s32_z_untied, svint32_t, int32_t,
+		 z1 = svmax_n_s32_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_s32_z_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0\.s, p0/z, z0\.s
+**	smax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s32_z_tied1, svint32_t,
+		z0 = svmax_n_s32_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_s32_z_untied:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0\.s, p0/z, z1\.s
+**	smax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s32_z_untied, svint32_t,
+		z0 = svmax_n_s32_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_s32_x_tied1:
+**	smax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_s32_x_tied1, svint32_t,
+		z0 = svmax_s32_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_s32_x_tied2:
+**	smax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_s32_x_tied2, svint32_t,
+		z1 = svmax_s32_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_s32_x_untied:
+**	movprfx	z2, z0
+**	smax	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_s32_x_untied, svint32_t,
+		z2 = svmax_s32_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_w0_s32_x_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	smax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s32_x_tied1, svint32_t, int32_t,
+		 z0 = svmax_n_s32_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_w0_s32_x_untied:
+**	mov	z1\.s, w0
+**	smax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s32_x_untied, svint32_t, int32_t,
+		 z1 = svmax_n_s32_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_s0_s32_x_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	smax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_s32_x_tied1, svint32_t, int32_t,
+		 z1 = svmax_n_s32_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_s0_s32_x_untied:
+**	mov	z2\.s, s0
+**	smax	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_s32_x_untied, svint32_t, int32_t,
+		 z2 = svmax_n_s32_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_s32_x_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	smax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s32_x_tied1, svint32_t,
+		z0 = svmax_n_s32_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_s32_x_untied:
+**	mov	z0\.s, #1
+**	smax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s32_x_untied, svint32_t,
+		z0 = svmax_n_s32_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s64.c
new file mode 100644
index 0000000..9554d7f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s64.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_s64_m_tied1:
+**	smax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_s64_m_tied1, svint64_t,
+		z0 = svmax_s64_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_s64_m_tied2, svint64_t,
+		z1 = svmax_s64_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_s64_m_untied:
+**	movprfx	z0, z1
+**	smax	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_s64_m_untied, svint64_t,
+		z0 = svmax_s64_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_x0_s64_m_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	smax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_s64_m_tied1, svint64_t, int64_t,
+		 z0 = svmax_n_s64_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_x0_s64_m_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0, z1
+**	smax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_s64_m_untied, svint64_t, int64_t,
+		 z0 = svmax_n_s64_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_d0_s64_m_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	smax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_s64_m_tied1, svint64_t, int64_t,
+		 z1 = svmax_n_s64_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_d0_s64_m_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1, z2
+**	smax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_s64_m_untied, svint64_t, int64_t,
+		 z1 = svmax_n_s64_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_s64_m_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	smax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s64_m_tied1, svint64_t,
+		z0 = svmax_n_s64_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_s64_m_untied:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0, z1
+**	smax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s64_m_untied, svint64_t,
+		z0 = svmax_n_s64_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_m1_s64_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	smax	z0\.d, p0/m, z0\.d, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_m1_s64_m, svint64_t,
+		z0 = svmax_n_s64_m (p0, z0, -1),
+		z0 = svmax_m (p0, z0, -1))
+
+/*
+** max_s64_z_tied1:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	smax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_s64_z_tied1, svint64_t,
+		z0 = svmax_s64_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_s64_z_tied2:
+**	movprfx	z1\.d, p0/z, z1\.d
+**	smax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_s64_z_tied2, svint64_t,
+		z1 = svmax_s64_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_s64_z_untied:
+**	movprfx	z0\.d, p0/z, z1\.d
+**	smax	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_s64_z_untied, svint64_t,
+		z0 = svmax_s64_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_x0_s64_z_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z0\.d
+**	smax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_s64_z_tied1, svint64_t, int64_t,
+		 z0 = svmax_n_s64_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_x0_s64_z_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z1\.d
+**	smax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_s64_z_untied, svint64_t, int64_t,
+		 z0 = svmax_n_s64_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_d0_s64_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z1\.d
+**	smax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_s64_z_tied1, svint64_t, int64_t,
+		 z1 = svmax_n_s64_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_d0_s64_z_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z2\.d
+**	smax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_s64_z_untied, svint64_t, int64_t,
+		 z1 = svmax_n_s64_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_s64_z_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0\.d, p0/z, z0\.d
+**	smax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s64_z_tied1, svint64_t,
+		z0 = svmax_n_s64_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_s64_z_untied:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0\.d, p0/z, z1\.d
+**	smax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s64_z_untied, svint64_t,
+		z0 = svmax_n_s64_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_s64_x_tied1:
+**	smax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_s64_x_tied1, svint64_t,
+		z0 = svmax_s64_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_s64_x_tied2:
+**	smax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_s64_x_tied2, svint64_t,
+		z1 = svmax_s64_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_s64_x_untied:
+**	movprfx	z2, z0
+**	smax	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_s64_x_untied, svint64_t,
+		z2 = svmax_s64_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_x0_s64_x_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	smax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_s64_x_tied1, svint64_t, int64_t,
+		 z0 = svmax_n_s64_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_x0_s64_x_untied:
+**	mov	z1\.d, x0
+**	smax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_s64_x_untied, svint64_t, int64_t,
+		 z1 = svmax_n_s64_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_d0_s64_x_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	smax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_s64_x_tied1, svint64_t, int64_t,
+		 z1 = svmax_n_s64_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_d0_s64_x_untied:
+**	mov	z2\.d, d0
+**	smax	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_s64_x_untied, svint64_t, int64_t,
+		 z2 = svmax_n_s64_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_s64_x_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	smax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s64_x_tied1, svint64_t,
+		z0 = svmax_n_s64_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_s64_x_untied:
+**	mov	z0\.d, #1
+**	smax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s64_x_untied, svint64_t,
+		z0 = svmax_n_s64_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s8.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s8.c
new file mode 100644
index 0000000..98437c1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_s8.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_s8_m_tied1:
+**	smax	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_s8_m_tied1, svint8_t,
+		z0 = svmax_s8_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_s8_m_tied2, svint8_t,
+		z1 = svmax_s8_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_s8_m_untied:
+**	movprfx	z0, z1
+**	smax	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_s8_m_untied, svint8_t,
+		z0 = svmax_s8_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_w0_s8_m_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s8_m_tied1, svint8_t, int8_t,
+		 z0 = svmax_n_s8_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_w0_s8_m_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0, z1
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s8_m_untied, svint8_t, int8_t,
+		 z0 = svmax_n_s8_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_b0_s8_m_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	smax	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_s8_m_tied1, svint8_t, int8_t,
+		 z1 = svmax_n_s8_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_b0_s8_m_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1, z2
+**	smax	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_s8_m_untied, svint8_t, int8_t,
+		 z1 = svmax_n_s8_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_s8_m_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s8_m_tied1, svint8_t,
+		z0 = svmax_n_s8_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_s8_m_untied:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0, z1
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s8_m_untied, svint8_t,
+		z0 = svmax_n_s8_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_m1_s8_m:
+**	mov	(z[0-9]+\.b), #(-1|255)
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_m1_s8_m, svint8_t,
+		z0 = svmax_n_s8_m (p0, z0, -1),
+		z0 = svmax_m (p0, z0, -1))
+
+/*
+** max_s8_z_tied1:
+**	movprfx	z0\.b, p0/z, z0\.b
+**	smax	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_s8_z_tied1, svint8_t,
+		z0 = svmax_s8_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_s8_z_tied2:
+**	movprfx	z1\.b, p0/z, z1\.b
+**	smax	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_s8_z_tied2, svint8_t,
+		z1 = svmax_s8_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_s8_z_untied:
+**	movprfx	z0\.b, p0/z, z1\.b
+**	smax	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_s8_z_untied, svint8_t,
+		z0 = svmax_s8_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_w0_s8_z_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, z0\.b
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s8_z_tied1, svint8_t, int8_t,
+		 z0 = svmax_n_s8_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_w0_s8_z_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, z1\.b
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s8_z_untied, svint8_t, int8_t,
+		 z0 = svmax_n_s8_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_b0_s8_z_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, z1\.b
+**	smax	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_s8_z_tied1, svint8_t, int8_t,
+		 z1 = svmax_n_s8_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_b0_s8_z_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, z2\.b
+**	smax	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_s8_z_untied, svint8_t, int8_t,
+		 z1 = svmax_n_s8_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_s8_z_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0\.b, p0/z, z0\.b
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s8_z_tied1, svint8_t,
+		z0 = svmax_n_s8_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_s8_z_untied:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0\.b, p0/z, z1\.b
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s8_z_untied, svint8_t,
+		z0 = svmax_n_s8_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_s8_x_tied1:
+**	smax	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_s8_x_tied1, svint8_t,
+		z0 = svmax_s8_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_s8_x_tied2:
+**	smax	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_s8_x_tied2, svint8_t,
+		z1 = svmax_s8_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_s8_x_untied:
+**	movprfx	z2, z0
+**	smax	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_s8_x_untied, svint8_t,
+		z2 = svmax_s8_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_w0_s8_x_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s8_x_tied1, svint8_t, int8_t,
+		 z0 = svmax_n_s8_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_w0_s8_x_untied:
+**	mov	z1\.b, w0
+**	smax	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_s8_x_untied, svint8_t, int8_t,
+		 z1 = svmax_n_s8_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_b0_s8_x_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	smax	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_s8_x_tied1, svint8_t, int8_t,
+		 z1 = svmax_n_s8_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_b0_s8_x_untied:
+**	mov	z2\.b, b0
+**	smax	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_s8_x_untied, svint8_t, int8_t,
+		 z2 = svmax_n_s8_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_s8_x_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	smax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s8_x_tied1, svint8_t,
+		z0 = svmax_n_s8_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_s8_x_untied:
+**	mov	z0\.b, #1
+**	smax	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_s8_x_untied, svint8_t,
+		z0 = svmax_n_s8_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u16.c
new file mode 100644
index 0000000..696f1a0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u16.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_u16_m_tied1:
+**	umax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_u16_m_tied1, svuint16_t,
+		z0 = svmax_u16_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_u16_m_tied2, svuint16_t,
+		z1 = svmax_u16_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_u16_m_untied:
+**	movprfx	z0, z1
+**	umax	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_u16_m_untied, svuint16_t,
+		z0 = svmax_u16_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_w0_u16_m_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	umax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u16_m_tied1, svuint16_t, uint16_t,
+		 z0 = svmax_n_u16_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_w0_u16_m_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0, z1
+**	umax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u16_m_untied, svuint16_t, uint16_t,
+		 z0 = svmax_n_u16_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_h0_u16_m_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	umax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_u16_m_tied1, svuint16_t, uint16_t,
+		 z1 = svmax_n_u16_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_h0_u16_m_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1, z2
+**	umax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_u16_m_untied, svuint16_t, uint16_t,
+		 z1 = svmax_n_u16_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_u16_m_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	umax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u16_m_tied1, svuint16_t,
+		z0 = svmax_n_u16_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_u16_m_untied:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0, z1
+**	umax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u16_m_untied, svuint16_t,
+		z0 = svmax_n_u16_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_m1_u16_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	umax	z0\.h, p0/m, z0\.h, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_m1_u16_m, svuint16_t,
+		z0 = svmax_n_u16_m (p0, z0, -1),
+		z0 = svmax_m (p0, z0, -1))
+
+/*
+** max_u16_z_tied1:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	umax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_u16_z_tied1, svuint16_t,
+		z0 = svmax_u16_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_u16_z_tied2:
+**	movprfx	z1\.h, p0/z, z1\.h
+**	umax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_u16_z_tied2, svuint16_t,
+		z1 = svmax_u16_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_u16_z_untied:
+**	movprfx	z0\.h, p0/z, z1\.h
+**	umax	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_u16_z_untied, svuint16_t,
+		z0 = svmax_u16_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_w0_u16_z_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z0\.h
+**	umax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u16_z_tied1, svuint16_t, uint16_t,
+		 z0 = svmax_n_u16_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_w0_u16_z_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z1\.h
+**	umax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u16_z_untied, svuint16_t, uint16_t,
+		 z0 = svmax_n_u16_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_h0_u16_z_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z1\.h
+**	umax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_u16_z_tied1, svuint16_t, uint16_t,
+		 z1 = svmax_n_u16_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_h0_u16_z_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z2\.h
+**	umax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_u16_z_untied, svuint16_t, uint16_t,
+		 z1 = svmax_n_u16_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_u16_z_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0\.h, p0/z, z0\.h
+**	umax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u16_z_tied1, svuint16_t,
+		z0 = svmax_n_u16_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_u16_z_untied:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0\.h, p0/z, z1\.h
+**	umax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u16_z_untied, svuint16_t,
+		z0 = svmax_n_u16_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_u16_x_tied1:
+**	umax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_u16_x_tied1, svuint16_t,
+		z0 = svmax_u16_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_u16_x_tied2:
+**	umax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_u16_x_tied2, svuint16_t,
+		z1 = svmax_u16_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_u16_x_untied:
+**	movprfx	z2, z0
+**	umax	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_u16_x_untied, svuint16_t,
+		z2 = svmax_u16_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_w0_u16_x_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	umax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u16_x_tied1, svuint16_t, uint16_t,
+		 z0 = svmax_n_u16_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_w0_u16_x_untied:
+**	mov	z1\.h, w0
+**	umax	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u16_x_untied, svuint16_t, uint16_t,
+		 z1 = svmax_n_u16_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_h0_u16_x_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	umax	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_u16_x_tied1, svuint16_t, uint16_t,
+		 z1 = svmax_n_u16_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_h0_u16_x_untied:
+**	mov	z2\.h, h0
+**	umax	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (max_h0_u16_x_untied, svuint16_t, uint16_t,
+		 z2 = svmax_n_u16_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_u16_x_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	umax	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u16_x_tied1, svuint16_t,
+		z0 = svmax_n_u16_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_u16_x_untied:
+**	mov	z0\.h, #1
+**	umax	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u16_x_untied, svuint16_t,
+		z0 = svmax_n_u16_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u32.c
new file mode 100644
index 0000000..37298d8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u32.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_u32_m_tied1:
+**	umax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_u32_m_tied1, svuint32_t,
+		z0 = svmax_u32_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_u32_m_tied2, svuint32_t,
+		z1 = svmax_u32_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_u32_m_untied:
+**	movprfx	z0, z1
+**	umax	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_u32_m_untied, svuint32_t,
+		z0 = svmax_u32_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_w0_u32_m_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	umax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u32_m_tied1, svuint32_t, uint32_t,
+		 z0 = svmax_n_u32_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_w0_u32_m_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0, z1
+**	umax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u32_m_untied, svuint32_t, uint32_t,
+		 z0 = svmax_n_u32_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_s0_u32_m_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	umax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_u32_m_tied1, svuint32_t, uint32_t,
+		 z1 = svmax_n_u32_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_s0_u32_m_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1, z2
+**	umax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_u32_m_untied, svuint32_t, uint32_t,
+		 z1 = svmax_n_u32_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_u32_m_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	umax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u32_m_tied1, svuint32_t,
+		z0 = svmax_n_u32_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_u32_m_untied:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0, z1
+**	umax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u32_m_untied, svuint32_t,
+		z0 = svmax_n_u32_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_m1_u32_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	umax	z0\.s, p0/m, z0\.s, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_m1_u32_m, svuint32_t,
+		z0 = svmax_n_u32_m (p0, z0, -1),
+		z0 = svmax_m (p0, z0, -1))
+
+/*
+** max_u32_z_tied1:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	umax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_u32_z_tied1, svuint32_t,
+		z0 = svmax_u32_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_u32_z_tied2:
+**	movprfx	z1\.s, p0/z, z1\.s
+**	umax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_u32_z_tied2, svuint32_t,
+		z1 = svmax_u32_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_u32_z_untied:
+**	movprfx	z0\.s, p0/z, z1\.s
+**	umax	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_u32_z_untied, svuint32_t,
+		z0 = svmax_u32_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_w0_u32_z_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z0\.s
+**	umax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u32_z_tied1, svuint32_t, uint32_t,
+		 z0 = svmax_n_u32_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_w0_u32_z_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z1\.s
+**	umax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u32_z_untied, svuint32_t, uint32_t,
+		 z0 = svmax_n_u32_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_s0_u32_z_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z1\.s
+**	umax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_u32_z_tied1, svuint32_t, uint32_t,
+		 z1 = svmax_n_u32_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_s0_u32_z_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z2\.s
+**	umax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_u32_z_untied, svuint32_t, uint32_t,
+		 z1 = svmax_n_u32_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_u32_z_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0\.s, p0/z, z0\.s
+**	umax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u32_z_tied1, svuint32_t,
+		z0 = svmax_n_u32_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_u32_z_untied:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0\.s, p0/z, z1\.s
+**	umax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u32_z_untied, svuint32_t,
+		z0 = svmax_n_u32_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_u32_x_tied1:
+**	umax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_u32_x_tied1, svuint32_t,
+		z0 = svmax_u32_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_u32_x_tied2:
+**	umax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_u32_x_tied2, svuint32_t,
+		z1 = svmax_u32_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_u32_x_untied:
+**	movprfx	z2, z0
+**	umax	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_u32_x_untied, svuint32_t,
+		z2 = svmax_u32_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_w0_u32_x_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	umax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u32_x_tied1, svuint32_t, uint32_t,
+		 z0 = svmax_n_u32_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_w0_u32_x_untied:
+**	mov	z1\.s, w0
+**	umax	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u32_x_untied, svuint32_t, uint32_t,
+		 z1 = svmax_n_u32_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_s0_u32_x_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	umax	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_u32_x_tied1, svuint32_t, uint32_t,
+		 z1 = svmax_n_u32_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_s0_u32_x_untied:
+**	mov	z2\.s, s0
+**	umax	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (max_s0_u32_x_untied, svuint32_t, uint32_t,
+		 z2 = svmax_n_u32_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_u32_x_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	umax	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u32_x_tied1, svuint32_t,
+		z0 = svmax_n_u32_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_u32_x_untied:
+**	mov	z0\.s, #1
+**	umax	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u32_x_untied, svuint32_t,
+		z0 = svmax_n_u32_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u64.c
new file mode 100644
index 0000000..f8931aa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u64.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_u64_m_tied1:
+**	umax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_u64_m_tied1, svuint64_t,
+		z0 = svmax_u64_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_u64_m_tied2, svuint64_t,
+		z1 = svmax_u64_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_u64_m_untied:
+**	movprfx	z0, z1
+**	umax	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_u64_m_untied, svuint64_t,
+		z0 = svmax_u64_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_x0_u64_m_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	umax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_u64_m_tied1, svuint64_t, uint64_t,
+		 z0 = svmax_n_u64_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_x0_u64_m_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0, z1
+**	umax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_u64_m_untied, svuint64_t, uint64_t,
+		 z0 = svmax_n_u64_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_d0_u64_m_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	umax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_u64_m_tied1, svuint64_t, uint64_t,
+		 z1 = svmax_n_u64_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_d0_u64_m_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1, z2
+**	umax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_u64_m_untied, svuint64_t, uint64_t,
+		 z1 = svmax_n_u64_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_u64_m_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	umax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u64_m_tied1, svuint64_t,
+		z0 = svmax_n_u64_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_u64_m_untied:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0, z1
+**	umax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u64_m_untied, svuint64_t,
+		z0 = svmax_n_u64_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_m1_u64_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	umax	z0\.d, p0/m, z0\.d, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_m1_u64_m, svuint64_t,
+		z0 = svmax_n_u64_m (p0, z0, -1),
+		z0 = svmax_m (p0, z0, -1))
+
+/*
+** max_u64_z_tied1:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	umax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_u64_z_tied1, svuint64_t,
+		z0 = svmax_u64_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_u64_z_tied2:
+**	movprfx	z1\.d, p0/z, z1\.d
+**	umax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_u64_z_tied2, svuint64_t,
+		z1 = svmax_u64_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_u64_z_untied:
+**	movprfx	z0\.d, p0/z, z1\.d
+**	umax	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_u64_z_untied, svuint64_t,
+		z0 = svmax_u64_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_x0_u64_z_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z0\.d
+**	umax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_u64_z_tied1, svuint64_t, uint64_t,
+		 z0 = svmax_n_u64_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_x0_u64_z_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z1\.d
+**	umax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_u64_z_untied, svuint64_t, uint64_t,
+		 z0 = svmax_n_u64_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_d0_u64_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z1\.d
+**	umax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_u64_z_tied1, svuint64_t, uint64_t,
+		 z1 = svmax_n_u64_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_d0_u64_z_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z2\.d
+**	umax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_u64_z_untied, svuint64_t, uint64_t,
+		 z1 = svmax_n_u64_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_u64_z_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0\.d, p0/z, z0\.d
+**	umax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u64_z_tied1, svuint64_t,
+		z0 = svmax_n_u64_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_u64_z_untied:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0\.d, p0/z, z1\.d
+**	umax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u64_z_untied, svuint64_t,
+		z0 = svmax_n_u64_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_u64_x_tied1:
+**	umax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_u64_x_tied1, svuint64_t,
+		z0 = svmax_u64_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_u64_x_tied2:
+**	umax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_u64_x_tied2, svuint64_t,
+		z1 = svmax_u64_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_u64_x_untied:
+**	movprfx	z2, z0
+**	umax	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_u64_x_untied, svuint64_t,
+		z2 = svmax_u64_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_x0_u64_x_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	umax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_u64_x_tied1, svuint64_t, uint64_t,
+		 z0 = svmax_n_u64_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_x0_u64_x_untied:
+**	mov	z1\.d, x0
+**	umax	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (max_x0_u64_x_untied, svuint64_t, uint64_t,
+		 z1 = svmax_n_u64_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_d0_u64_x_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	umax	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_u64_x_tied1, svuint64_t, uint64_t,
+		 z1 = svmax_n_u64_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_d0_u64_x_untied:
+**	mov	z2\.d, d0
+**	umax	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (max_d0_u64_x_untied, svuint64_t, uint64_t,
+		 z2 = svmax_n_u64_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_u64_x_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	umax	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u64_x_tied1, svuint64_t,
+		z0 = svmax_n_u64_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_u64_x_untied:
+**	mov	z0\.d, #1
+**	umax	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u64_x_untied, svuint64_t,
+		z0 = svmax_n_u64_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u8.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u8.c
new file mode 100644
index 0000000..4e5f3d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/max_u8.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** max_u8_m_tied1:
+**	umax	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_u8_m_tied1, svuint8_t,
+		z0 = svmax_u8_m (p0, z0, z1),
+		z0 = svmax_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (max_u8_m_tied2, svuint8_t,
+		z1 = svmax_u8_m (p0, z0, z1),
+		z1 = svmax_m (p0, z0, z1))
+
+/*
+** max_u8_m_untied:
+**	movprfx	z0, z1
+**	umax	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_u8_m_untied, svuint8_t,
+		z0 = svmax_u8_m (p0, z1, z2),
+		z0 = svmax_m (p0, z1, z2))
+
+/*
+** max_w0_u8_m_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u8_m_tied1, svuint8_t, uint8_t,
+		 z0 = svmax_n_u8_m (p0, z0, x0),
+		 z0 = svmax_m (p0, z0, x0))
+
+/*
+** max_w0_u8_m_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0, z1
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u8_m_untied, svuint8_t, uint8_t,
+		 z0 = svmax_n_u8_m (p0, z1, x0),
+		 z0 = svmax_m (p0, z1, x0))
+
+/*
+** max_b0_u8_m_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	umax	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_u8_m_tied1, svuint8_t, uint8_t,
+		 z1 = svmax_n_u8_m (p0, z1, d0),
+		 z1 = svmax_m (p0, z1, d0))
+
+/*
+** max_b0_u8_m_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1, z2
+**	umax	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_u8_m_untied, svuint8_t, uint8_t,
+		 z1 = svmax_n_u8_m (p0, z2, d0),
+		 z1 = svmax_m (p0, z2, d0))
+
+/*
+** max_1_u8_m_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u8_m_tied1, svuint8_t,
+		z0 = svmax_n_u8_m (p0, z0, 1),
+		z0 = svmax_m (p0, z0, 1))
+
+/*
+** max_1_u8_m_untied:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0, z1
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u8_m_untied, svuint8_t,
+		z0 = svmax_n_u8_m (p0, z1, 1),
+		z0 = svmax_m (p0, z1, 1))
+
+/*
+** max_m1_u8_m:
+**	mov	(z[0-9]+\.b), #(-1|255)
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_m1_u8_m, svuint8_t,
+		z0 = svmax_n_u8_m (p0, z0, -1),
+		z0 = svmax_m (p0, z0, -1))
+
+/*
+** max_u8_z_tied1:
+**	movprfx	z0\.b, p0/z, z0\.b
+**	umax	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_u8_z_tied1, svuint8_t,
+		z0 = svmax_u8_z (p0, z0, z1),
+		z0 = svmax_z (p0, z0, z1))
+
+/*
+** max_u8_z_tied2:
+**	movprfx	z1\.b, p0/z, z1\.b
+**	umax	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_u8_z_tied2, svuint8_t,
+		z1 = svmax_u8_z (p0, z0, z1),
+		z1 = svmax_z (p0, z0, z1))
+
+/*
+** max_u8_z_untied:
+**	movprfx	z0\.b, p0/z, z1\.b
+**	umax	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_u8_z_untied, svuint8_t,
+		z0 = svmax_u8_z (p0, z1, z2),
+		z0 = svmax_z (p0, z1, z2))
+
+/*
+** max_w0_u8_z_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, z0\.b
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u8_z_tied1, svuint8_t, uint8_t,
+		 z0 = svmax_n_u8_z (p0, z0, x0),
+		 z0 = svmax_z (p0, z0, x0))
+
+/*
+** max_w0_u8_z_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, z1\.b
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u8_z_untied, svuint8_t, uint8_t,
+		 z0 = svmax_n_u8_z (p0, z1, x0),
+		 z0 = svmax_z (p0, z1, x0))
+
+/*
+** max_b0_u8_z_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, z1\.b
+**	umax	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_u8_z_tied1, svuint8_t, uint8_t,
+		 z1 = svmax_n_u8_z (p0, z1, d0),
+		 z1 = svmax_z (p0, z1, d0))
+
+/*
+** max_b0_u8_z_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, z2\.b
+**	umax	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_u8_z_untied, svuint8_t, uint8_t,
+		 z1 = svmax_n_u8_z (p0, z2, d0),
+		 z1 = svmax_z (p0, z2, d0))
+
+/*
+** max_1_u8_z_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0\.b, p0/z, z0\.b
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u8_z_tied1, svuint8_t,
+		z0 = svmax_n_u8_z (p0, z0, 1),
+		z0 = svmax_z (p0, z0, 1))
+
+/*
+** max_1_u8_z_untied:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0\.b, p0/z, z1\.b
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u8_z_untied, svuint8_t,
+		z0 = svmax_n_u8_z (p0, z1, 1),
+		z0 = svmax_z (p0, z1, 1))
+
+/*
+** max_u8_x_tied1:
+**	umax	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_u8_x_tied1, svuint8_t,
+		z0 = svmax_u8_x (p0, z0, z1),
+		z0 = svmax_x (p0, z0, z1))
+
+/*
+** max_u8_x_tied2:
+**	umax	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_u8_x_tied2, svuint8_t,
+		z1 = svmax_u8_x (p0, z0, z1),
+		z1 = svmax_x (p0, z0, z1))
+
+/*
+** max_u8_x_untied:
+**	movprfx	z2, z0
+**	umax	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_u8_x_untied, svuint8_t,
+		z2 = svmax_u8_x (p0, z0, z1),
+		z2 = svmax_x (p0, z0, z1))
+
+/*
+** max_w0_u8_x_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u8_x_tied1, svuint8_t, uint8_t,
+		 z0 = svmax_n_u8_x (p0, z0, x0),
+		 z0 = svmax_x (p0, z0, x0))
+
+/*
+** max_w0_u8_x_untied:
+**	mov	z1\.b, w0
+**	umax	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (max_w0_u8_x_untied, svuint8_t, uint8_t,
+		 z1 = svmax_n_u8_x (p0, z0, x0),
+		 z1 = svmax_x (p0, z0, x0))
+
+/*
+** max_b0_u8_x_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	umax	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_u8_x_tied1, svuint8_t, uint8_t,
+		 z1 = svmax_n_u8_x (p0, z1, d0),
+		 z1 = svmax_x (p0, z1, d0))
+
+/*
+** max_b0_u8_x_untied:
+**	mov	z2\.b, b0
+**	umax	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (max_b0_u8_x_untied, svuint8_t, uint8_t,
+		 z2 = svmax_n_u8_x (p0, z1, d0),
+		 z2 = svmax_x (p0, z1, d0))
+
+/*
+** max_1_u8_x_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	umax	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u8_x_tied1, svuint8_t,
+		z0 = svmax_n_u8_x (p0, z0, 1),
+		z0 = svmax_x (p0, z0, 1))
+
+/*
+** max_1_u8_x_untied:
+**	mov	z0\.b, #1
+**	umax	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (max_1_u8_x_untied, svuint8_t,
+		z0 = svmax_n_u8_x (p0, z1, 1),
+		z0 = svmax_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f16.c
new file mode 100644
index 0000000..3510763
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f16.c
@@ -0,0 +1,306 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_f16_m_tied1:
+**	fmin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_m_tied1, svfloat16_t,
+		z0 = svmin_f16_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_f16_m_tied2, svfloat16_t,
+		z1 = svmin_f16_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_f16_m_untied:
+**	movprfx	z0, z1
+**	fmin	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_m_untied, svfloat16_t,
+		z0 = svmin_f16_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_w0_f16_m_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_m_tied1, svfloat16_t, __fp16,
+		 z0 = svmin_n_f16_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_w0_f16_m_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0, z1
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_m_untied, svfloat16_t, __fp16,
+		 z0 = svmin_n_f16_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_h0_f16_m_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	fmin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_m_tied1, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_h0_f16_m_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1, z2
+**	fmin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_m_untied, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_f16_m_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_m_tied1, svfloat16_t,
+		z0 = svmin_n_f16_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_f16_m_untied:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_m_untied, svfloat16_t,
+		z0 = svmin_n_f16_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_f16_z_tied1:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_z_tied1, svfloat16_t,
+		z0 = svmin_f16_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_f16_z_tied2:
+**	movprfx	z1\.h, p0/z, z1\.h
+**	fmin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_z_tied2, svfloat16_t,
+		z1 = svmin_f16_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_f16_z_untied:
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmin	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_z_untied, svfloat16_t,
+		z0 = svmin_f16_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_w0_f16_z_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_z_tied1, svfloat16_t, __fp16,
+		 z0 = svmin_n_f16_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_w0_f16_z_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_z_untied, svfloat16_t, __fp16,
+		 z0 = svmin_n_f16_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_h0_f16_z_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z1\.h
+**	fmin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_z_tied1, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_h0_f16_z_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z2\.h
+**	fmin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_z_untied, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_f16_z_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_z_tied1, svfloat16_t,
+		z0 = svmin_n_f16_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_f16_z_untied:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_z_untied, svfloat16_t,
+		z0 = svmin_n_f16_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_1_f16_x_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_x_tied1, svfloat16_t,
+		z0 = svmin_n_f16_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_f16_x_untied:
+**	fmov	z0\.h, #1.0(e\+0)?
+**	fmin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_x_untied, svfloat16_t,
+		z0 = svmin_n_f16_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
+/*
+** min_f16_x_tied1:
+**	fmin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_x_tied1, svfloat16_t,
+		z0 = svmin_f16_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_f16_x_tied2:
+**	fmin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_x_tied2, svfloat16_t,
+		z1 = svmin_f16_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_f16_x_untied:
+**	movprfx	z2, z0
+**	fmin	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_x_untied, svfloat16_t,
+		z2 = svmin_f16_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_w0_f16_x_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_x_tied1, svfloat16_t, __fp16,
+		 z0 = svmin_n_f16_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_w0_f16_x_untied:
+**	mov	z1\.h, w0
+**	fmin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_x_untied, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_h0_f16_x_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	fmin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_x_tied1, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_h0_f16_x_untied:
+**	mov	z2\.h, h0
+**	fmin	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_x_untied, svfloat16_t, __fp16,
+		 z2 = svmin_n_f16_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** ptrue_min_f16_x_tied1:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fmin	z0\.h, \1/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f16_x_tied1, svfloat16_t,
+		z0 = svmin_f16_x (svptrue_b16 (), z0, z1),
+		z0 = svmin_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_min_f16_x_tied2:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fmin	z1\.h, \1/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f16_x_tied2, svfloat16_t,
+		z1 = svmin_f16_x (svptrue_b16 (), z0, z1),
+		z1 = svmin_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_min_f16_x_untied:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	movprfx	z2, z0
+**	fmin	z2\.h, \1/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f16_x_untied, svfloat16_t,
+		z2 = svmin_f16_x (svptrue_b16 (), z0, z1),
+		z2 = svmin_x (svptrue_b16 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f16_notrap.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f16_notrap.c
new file mode 100644
index 0000000..f5f84f9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f16_notrap.c
@@ -0,0 +1,302 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+/* { dg-additional-options "-fno-trapping-math" } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_f16_m_tied1:
+**	fmin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_m_tied1, svfloat16_t,
+		z0 = svmin_f16_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_f16_m_tied2, svfloat16_t,
+		z1 = svmin_f16_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_f16_m_untied:
+**	movprfx	z0, z1
+**	fmin	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_m_untied, svfloat16_t,
+		z0 = svmin_f16_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_w0_f16_m_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_m_tied1, svfloat16_t, __fp16,
+		 z0 = svmin_n_f16_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_w0_f16_m_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0, z1
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_m_untied, svfloat16_t, __fp16,
+		 z0 = svmin_n_f16_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_h0_f16_m_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	fmin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_m_tied1, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_h0_f16_m_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1, z2
+**	fmin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_m_untied, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_f16_m_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_m_tied1, svfloat16_t,
+		z0 = svmin_n_f16_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_f16_m_untied:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_m_untied, svfloat16_t,
+		z0 = svmin_n_f16_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_f16_z_tied1:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_z_tied1, svfloat16_t,
+		z0 = svmin_f16_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_f16_z_tied2, svfloat16_t,
+		z1 = svmin_f16_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_f16_z_untied:
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmin	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_z_untied, svfloat16_t,
+		z0 = svmin_f16_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_w0_f16_z_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_z_tied1, svfloat16_t, __fp16,
+		 z0 = svmin_n_f16_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_w0_f16_z_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_z_untied, svfloat16_t, __fp16,
+		 z0 = svmin_n_f16_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_h0_f16_z_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z1\.h
+**	fmin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_z_tied1, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_h0_f16_z_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z2\.h
+**	fmin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_z_untied, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_f16_z_untied:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0\.h, p0/z, z1\.h
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_z_untied, svfloat16_t,
+		z0 = svmin_n_f16_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_1_f16_z_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_z_tied1, svfloat16_t,
+		z0 = svmin_n_f16_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_f16_x_tied1:
+**	fmov	(z[0-9]+\.h), #1.0(e\+0)?
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_x_tied1, svfloat16_t,
+		z0 = svmin_n_f16_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_f16_x_untied:
+**	fmov	z0\.h, #1.0(e\+0)?
+**	fmin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f16_x_untied, svfloat16_t,
+		z0 = svmin_n_f16_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
+/*
+** min_f16_x_tied1:
+**	fmin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_x_tied1, svfloat16_t,
+		z0 = svmin_f16_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_f16_x_tied2:
+**	fmin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_x_tied2, svfloat16_t,
+		z1 = svmin_f16_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_f16_x_untied:
+**	movprfx	z2, z0
+**	fmin	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_f16_x_untied, svfloat16_t,
+		z2 = svmin_f16_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_w0_f16_x_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	fmin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_x_tied1, svfloat16_t, __fp16,
+		 z0 = svmin_n_f16_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_w0_f16_x_untied:
+**	mov	z1\.h, w0
+**	fmin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f16_x_untied, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_h0_f16_x_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	fmin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_x_tied1, svfloat16_t, __fp16,
+		 z1 = svmin_n_f16_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_h0_f16_x_untied:
+**	mov	z2\.h, h0
+**	fmin	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_f16_x_untied, svfloat16_t, __fp16,
+		 z2 = svmin_n_f16_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** ptrue_min_f16_x_tied1:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fmin	z0\.h, \1/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f16_x_tied1, svfloat16_t,
+		z0 = svmin_f16_x (svptrue_b16 (), z0, z1),
+		z0 = svmin_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_min_f16_x_tied2:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fmin	z1\.h, \1/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f16_x_tied2, svfloat16_t,
+		z1 = svmin_f16_x (svptrue_b16 (), z0, z1),
+		z1 = svmin_x (svptrue_b16 (), z0, z1))
+
+/*
+** ptrue_min_f16_x_untied:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	movprfx	z2, z0
+**	fmin	z2\.h, \1/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f16_x_untied, svfloat16_t,
+		z2 = svmin_f16_x (svptrue_b16 (), z0, z1),
+		z2 = svmin_x (svptrue_b16 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f32.c
new file mode 100644
index 0000000..9ce1e02
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f32.c
@@ -0,0 +1,306 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_f32_m_tied1:
+**	fmin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_m_tied1, svfloat32_t,
+		z0 = svmin_f32_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_f32_m_tied2, svfloat32_t,
+		z1 = svmin_f32_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_f32_m_untied:
+**	movprfx	z0, z1
+**	fmin	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_m_untied, svfloat32_t,
+		z0 = svmin_f32_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_w0_f32_m_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_m_tied1, svfloat32_t, float,
+		 z0 = svmin_n_f32_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_w0_f32_m_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0, z1
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_m_untied, svfloat32_t, float,
+		 z0 = svmin_n_f32_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_s0_f32_m_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	fmin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_m_tied1, svfloat32_t, float,
+		 z1 = svmin_n_f32_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_s0_f32_m_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1, z2
+**	fmin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_m_untied, svfloat32_t, float,
+		 z1 = svmin_n_f32_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_f32_m_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_m_tied1, svfloat32_t,
+		z0 = svmin_n_f32_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_f32_m_untied:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_m_untied, svfloat32_t,
+		z0 = svmin_n_f32_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_f32_z_tied1:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_z_tied1, svfloat32_t,
+		z0 = svmin_f32_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_f32_z_tied2:
+**	movprfx	z1\.s, p0/z, z1\.s
+**	fmin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_z_tied2, svfloat32_t,
+		z1 = svmin_f32_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_f32_z_untied:
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmin	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_z_untied, svfloat32_t,
+		z0 = svmin_f32_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_w0_f32_z_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_z_tied1, svfloat32_t, float,
+		 z0 = svmin_n_f32_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_w0_f32_z_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_z_untied, svfloat32_t, float,
+		 z0 = svmin_n_f32_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_s0_f32_z_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z1\.s
+**	fmin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_z_tied1, svfloat32_t, float,
+		 z1 = svmin_n_f32_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_s0_f32_z_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z2\.s
+**	fmin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_z_untied, svfloat32_t, float,
+		 z1 = svmin_n_f32_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_f32_z_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_z_tied1, svfloat32_t,
+		z0 = svmin_n_f32_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_f32_z_untied:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_z_untied, svfloat32_t,
+		z0 = svmin_n_f32_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_1_f32_x_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_x_tied1, svfloat32_t,
+		z0 = svmin_n_f32_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_f32_x_untied:
+**	fmov	z0\.s, #1.0(e\+0)?
+**	fmin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_x_untied, svfloat32_t,
+		z0 = svmin_n_f32_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
+/*
+** min_f32_x_tied1:
+**	fmin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_x_tied1, svfloat32_t,
+		z0 = svmin_f32_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_f32_x_tied2:
+**	fmin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_x_tied2, svfloat32_t,
+		z1 = svmin_f32_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_f32_x_untied:
+**	movprfx	z2, z0
+**	fmin	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_x_untied, svfloat32_t,
+		z2 = svmin_f32_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_w0_f32_x_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_x_tied1, svfloat32_t, float,
+		 z0 = svmin_n_f32_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_w0_f32_x_untied:
+**	mov	z1\.s, w0
+**	fmin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_x_untied, svfloat32_t, float,
+		 z1 = svmin_n_f32_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_s0_f32_x_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	fmin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_x_tied1, svfloat32_t, float,
+		 z1 = svmin_n_f32_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_s0_f32_x_untied:
+**	mov	z2\.s, s0
+**	fmin	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_x_untied, svfloat32_t, float,
+		 z2 = svmin_n_f32_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** ptrue_min_f32_x_tied1:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fmin	z0\.s, \1/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f32_x_tied1, svfloat32_t,
+		z0 = svmin_f32_x (svptrue_b32 (), z0, z1),
+		z0 = svmin_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_min_f32_x_tied2:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fmin	z1\.s, \1/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f32_x_tied2, svfloat32_t,
+		z1 = svmin_f32_x (svptrue_b32 (), z0, z1),
+		z1 = svmin_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_min_f32_x_untied:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	movprfx	z2, z0
+**	fmin	z2\.s, \1/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f32_x_untied, svfloat32_t,
+		z2 = svmin_f32_x (svptrue_b32 (), z0, z1),
+		z2 = svmin_x (svptrue_b32 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f32_notrap.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f32_notrap.c
new file mode 100644
index 0000000..f6e88b6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f32_notrap.c
@@ -0,0 +1,302 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+/* { dg-additional-options "-fno-trapping-math" } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_f32_m_tied1:
+**	fmin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_m_tied1, svfloat32_t,
+		z0 = svmin_f32_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_f32_m_tied2, svfloat32_t,
+		z1 = svmin_f32_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_f32_m_untied:
+**	movprfx	z0, z1
+**	fmin	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_m_untied, svfloat32_t,
+		z0 = svmin_f32_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_w0_f32_m_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_m_tied1, svfloat32_t, float,
+		 z0 = svmin_n_f32_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_w0_f32_m_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0, z1
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_m_untied, svfloat32_t, float,
+		 z0 = svmin_n_f32_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_s0_f32_m_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	fmin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_m_tied1, svfloat32_t, float,
+		 z1 = svmin_n_f32_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_s0_f32_m_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1, z2
+**	fmin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_m_untied, svfloat32_t, float,
+		 z1 = svmin_n_f32_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_f32_m_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_m_tied1, svfloat32_t,
+		z0 = svmin_n_f32_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_f32_m_untied:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_m_untied, svfloat32_t,
+		z0 = svmin_n_f32_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_f32_z_tied1:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_z_tied1, svfloat32_t,
+		z0 = svmin_f32_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_f32_z_tied2, svfloat32_t,
+		z1 = svmin_f32_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_f32_z_untied:
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmin	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_z_untied, svfloat32_t,
+		z0 = svmin_f32_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_w0_f32_z_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_z_tied1, svfloat32_t, float,
+		 z0 = svmin_n_f32_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_w0_f32_z_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_z_untied, svfloat32_t, float,
+		 z0 = svmin_n_f32_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_s0_f32_z_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z1\.s
+**	fmin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_z_tied1, svfloat32_t, float,
+		 z1 = svmin_n_f32_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_s0_f32_z_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z2\.s
+**	fmin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_z_untied, svfloat32_t, float,
+		 z1 = svmin_n_f32_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_f32_z_untied:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0\.s, p0/z, z1\.s
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_z_untied, svfloat32_t,
+		z0 = svmin_n_f32_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_1_f32_z_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_z_tied1, svfloat32_t,
+		z0 = svmin_n_f32_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_f32_x_tied1:
+**	fmov	(z[0-9]+\.s), #1.0(e\+0)?
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_x_tied1, svfloat32_t,
+		z0 = svmin_n_f32_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_f32_x_untied:
+**	fmov	z0\.s, #1.0(e\+0)?
+**	fmin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f32_x_untied, svfloat32_t,
+		z0 = svmin_n_f32_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
+/*
+** min_f32_x_tied1:
+**	fmin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_x_tied1, svfloat32_t,
+		z0 = svmin_f32_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_f32_x_tied2:
+**	fmin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_x_tied2, svfloat32_t,
+		z1 = svmin_f32_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_f32_x_untied:
+**	movprfx	z2, z0
+**	fmin	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_f32_x_untied, svfloat32_t,
+		z2 = svmin_f32_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_w0_f32_x_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	fmin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_x_tied1, svfloat32_t, float,
+		 z0 = svmin_n_f32_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_w0_f32_x_untied:
+**	mov	z1\.s, w0
+**	fmin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_f32_x_untied, svfloat32_t, float,
+		 z1 = svmin_n_f32_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_s0_f32_x_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	fmin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_x_tied1, svfloat32_t, float,
+		 z1 = svmin_n_f32_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_s0_f32_x_untied:
+**	mov	z2\.s, s0
+**	fmin	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_f32_x_untied, svfloat32_t, float,
+		 z2 = svmin_n_f32_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** ptrue_min_f32_x_tied1:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fmin	z0\.s, \1/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f32_x_tied1, svfloat32_t,
+		z0 = svmin_f32_x (svptrue_b32 (), z0, z1),
+		z0 = svmin_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_min_f32_x_tied2:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fmin	z1\.s, \1/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f32_x_tied2, svfloat32_t,
+		z1 = svmin_f32_x (svptrue_b32 (), z0, z1),
+		z1 = svmin_x (svptrue_b32 (), z0, z1))
+
+/*
+** ptrue_min_f32_x_untied:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	movprfx	z2, z0
+**	fmin	z2\.s, \1/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f32_x_untied, svfloat32_t,
+		z2 = svmin_f32_x (svptrue_b32 (), z0, z1),
+		z2 = svmin_x (svptrue_b32 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f64.c
new file mode 100644
index 0000000..c1f2272
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f64.c
@@ -0,0 +1,306 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_f64_m_tied1:
+**	fmin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_m_tied1, svfloat64_t,
+		z0 = svmin_f64_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_f64_m_tied2, svfloat64_t,
+		z1 = svmin_f64_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_f64_m_untied:
+**	movprfx	z0, z1
+**	fmin	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_m_untied, svfloat64_t,
+		z0 = svmin_f64_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_x0_f64_m_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_m_tied1, svfloat64_t, double,
+		 z0 = svmin_n_f64_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_x0_f64_m_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0, z1
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_m_untied, svfloat64_t, double,
+		 z0 = svmin_n_f64_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_d0_f64_m_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	fmin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_m_tied1, svfloat64_t, double,
+		 z1 = svmin_n_f64_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_d0_f64_m_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1, z2
+**	fmin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_m_untied, svfloat64_t, double,
+		 z1 = svmin_n_f64_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_f64_m_tied1:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_m_tied1, svfloat64_t,
+		z0 = svmin_n_f64_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_f64_m_untied:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_m_untied, svfloat64_t,
+		z0 = svmin_n_f64_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_f64_z_tied1:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_z_tied1, svfloat64_t,
+		z0 = svmin_f64_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_f64_z_tied2:
+**	movprfx	z1\.d, p0/z, z1\.d
+**	fmin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_z_tied2, svfloat64_t,
+		z1 = svmin_f64_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_f64_z_untied:
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmin	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_z_untied, svfloat64_t,
+		z0 = svmin_f64_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_x0_f64_z_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_z_tied1, svfloat64_t, double,
+		 z0 = svmin_n_f64_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_x0_f64_z_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_z_untied, svfloat64_t, double,
+		 z0 = svmin_n_f64_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_d0_f64_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z1\.d
+**	fmin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_z_tied1, svfloat64_t, double,
+		 z1 = svmin_n_f64_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_d0_f64_z_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z2\.d
+**	fmin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_z_untied, svfloat64_t, double,
+		 z1 = svmin_n_f64_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_f64_z_tied1:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_z_tied1, svfloat64_t,
+		z0 = svmin_n_f64_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_f64_z_untied:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_z_untied, svfloat64_t,
+		z0 = svmin_n_f64_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_1_f64_x_tied1:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_x_tied1, svfloat64_t,
+		z0 = svmin_n_f64_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_f64_x_untied:
+**	fmov	z0\.d, #1.0(e\+0)?
+**	fmin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_x_untied, svfloat64_t,
+		z0 = svmin_n_f64_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
+/*
+** min_f64_x_tied1:
+**	fmin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_x_tied1, svfloat64_t,
+		z0 = svmin_f64_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_f64_x_tied2:
+**	fmin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_x_tied2, svfloat64_t,
+		z1 = svmin_f64_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_f64_x_untied:
+**	movprfx	z2, z0
+**	fmin	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_x_untied, svfloat64_t,
+		z2 = svmin_f64_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_x0_f64_x_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_x_tied1, svfloat64_t, double,
+		 z0 = svmin_n_f64_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_x0_f64_x_untied:
+**	mov	z1\.d, x0
+**	fmin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_x_untied, svfloat64_t, double,
+		 z1 = svmin_n_f64_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_d0_f64_x_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	fmin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_x_tied1, svfloat64_t, double,
+		 z1 = svmin_n_f64_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_d0_f64_x_untied:
+**	mov	z2\.d, d0
+**	fmin	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_x_untied, svfloat64_t, double,
+		 z2 = svmin_n_f64_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** ptrue_min_f64_x_tied1:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fmin	z0\.d, \1/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f64_x_tied1, svfloat64_t,
+		z0 = svmin_f64_x (svptrue_b64 (), z0, z1),
+		z0 = svmin_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_min_f64_x_tied2:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fmin	z1\.d, \1/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f64_x_tied2, svfloat64_t,
+		z1 = svmin_f64_x (svptrue_b64 (), z0, z1),
+		z1 = svmin_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_min_f64_x_untied:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	movprfx	z2, z0
+**	fmin	z2\.d, \1/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f64_x_untied, svfloat64_t,
+		z2 = svmin_f64_x (svptrue_b64 (), z0, z1),
+		z2 = svmin_x (svptrue_b64 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f64_notrap.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f64_notrap.c
new file mode 100644
index 0000000..877f7ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_f64_notrap.c
@@ -0,0 +1,302 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+/* { dg-additional-options "-fno-trapping-math" } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_f64_m_tied1:
+**	fmin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_m_tied1, svfloat64_t,
+		z0 = svmin_f64_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_f64_m_tied2, svfloat64_t,
+		z1 = svmin_f64_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_f64_m_untied:
+**	movprfx	z0, z1
+**	fmin	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_m_untied, svfloat64_t,
+		z0 = svmin_f64_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_x0_f64_m_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_m_tied1, svfloat64_t, double,
+		 z0 = svmin_n_f64_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_x0_f64_m_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0, z1
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_m_untied, svfloat64_t, double,
+		 z0 = svmin_n_f64_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_d0_f64_m_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	fmin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_m_tied1, svfloat64_t, double,
+		 z1 = svmin_n_f64_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_d0_f64_m_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1, z2
+**	fmin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_m_untied, svfloat64_t, double,
+		 z1 = svmin_n_f64_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_f64_m_tied1:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_m_tied1, svfloat64_t,
+		z0 = svmin_n_f64_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_f64_m_untied:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	movprfx	z0, z1
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_m_untied, svfloat64_t,
+		z0 = svmin_n_f64_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_f64_z_tied1:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_z_tied1, svfloat64_t,
+		z0 = svmin_f64_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_f64_z_tied2, svfloat64_t,
+		z1 = svmin_f64_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_f64_z_untied:
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmin	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_z_untied, svfloat64_t,
+		z0 = svmin_f64_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_x0_f64_z_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_z_tied1, svfloat64_t, double,
+		 z0 = svmin_n_f64_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_x0_f64_z_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_z_untied, svfloat64_t, double,
+		 z0 = svmin_n_f64_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_d0_f64_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z1\.d
+**	fmin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_z_tied1, svfloat64_t, double,
+		 z1 = svmin_n_f64_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_d0_f64_z_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z2\.d
+**	fmin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_z_untied, svfloat64_t, double,
+		 z1 = svmin_n_f64_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_f64_z_untied:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	movprfx	z0\.d, p0/z, z1\.d
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_z_untied, svfloat64_t,
+		z0 = svmin_n_f64_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_1_f64_z_tied1:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_z_tied1, svfloat64_t,
+		z0 = svmin_n_f64_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_f64_x_tied1:
+**	fmov	(z[0-9]+\.d), #1.0(e\+0)?
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_x_tied1, svfloat64_t,
+		z0 = svmin_n_f64_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_f64_x_untied:
+**	fmov	z0\.d, #1.0(e\+0)?
+**	fmin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_f64_x_untied, svfloat64_t,
+		z0 = svmin_n_f64_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
+/*
+** min_f64_x_tied1:
+**	fmin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_x_tied1, svfloat64_t,
+		z0 = svmin_f64_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_f64_x_tied2:
+**	fmin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_x_tied2, svfloat64_t,
+		z1 = svmin_f64_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_f64_x_untied:
+**	movprfx	z2, z0
+**	fmin	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_f64_x_untied, svfloat64_t,
+		z2 = svmin_f64_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_x0_f64_x_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	fmin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_x_tied1, svfloat64_t, double,
+		 z0 = svmin_n_f64_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_x0_f64_x_untied:
+**	mov	z1\.d, x0
+**	fmin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_f64_x_untied, svfloat64_t, double,
+		 z1 = svmin_n_f64_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_d0_f64_x_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	fmin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_x_tied1, svfloat64_t, double,
+		 z1 = svmin_n_f64_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_d0_f64_x_untied:
+**	mov	z2\.d, d0
+**	fmin	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_f64_x_untied, svfloat64_t, double,
+		 z2 = svmin_n_f64_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** ptrue_min_f64_x_tied1:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fmin	z0\.d, \1/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f64_x_tied1, svfloat64_t,
+		z0 = svmin_f64_x (svptrue_b64 (), z0, z1),
+		z0 = svmin_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_min_f64_x_tied2:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fmin	z1\.d, \1/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f64_x_tied2, svfloat64_t,
+		z1 = svmin_f64_x (svptrue_b64 (), z0, z1),
+		z1 = svmin_x (svptrue_b64 (), z0, z1))
+
+/*
+** ptrue_min_f64_x_untied:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	movprfx	z2, z0
+**	fmin	z2\.d, \1/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_min_f64_x_untied, svfloat64_t,
+		z2 = svmin_f64_x (svptrue_b64 (), z0, z1),
+		z2 = svmin_x (svptrue_b64 (), z0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s16.c
new file mode 100644
index 0000000..37fca96
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s16.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_s16_m_tied1:
+**	smin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_s16_m_tied1, svint16_t,
+		z0 = svmin_s16_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_s16_m_tied2, svint16_t,
+		z1 = svmin_s16_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_s16_m_untied:
+**	movprfx	z0, z1
+**	smin	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_s16_m_untied, svint16_t,
+		z0 = svmin_s16_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_w0_s16_m_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	smin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s16_m_tied1, svint16_t, int16_t,
+		 z0 = svmin_n_s16_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_w0_s16_m_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0, z1
+**	smin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s16_m_untied, svint16_t, int16_t,
+		 z0 = svmin_n_s16_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_h0_s16_m_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	smin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_s16_m_tied1, svint16_t, int16_t,
+		 z1 = svmin_n_s16_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_h0_s16_m_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1, z2
+**	smin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_s16_m_untied, svint16_t, int16_t,
+		 z1 = svmin_n_s16_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_s16_m_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	smin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s16_m_tied1, svint16_t,
+		z0 = svmin_n_s16_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_s16_m_untied:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0, z1
+**	smin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s16_m_untied, svint16_t,
+		z0 = svmin_n_s16_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_m1_s16_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	smin	z0\.h, p0/m, z0\.h, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_m1_s16_m, svint16_t,
+		z0 = svmin_n_s16_m (p0, z0, -1),
+		z0 = svmin_m (p0, z0, -1))
+
+/*
+** min_s16_z_tied1:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	smin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_s16_z_tied1, svint16_t,
+		z0 = svmin_s16_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_s16_z_tied2:
+**	movprfx	z1\.h, p0/z, z1\.h
+**	smin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_s16_z_tied2, svint16_t,
+		z1 = svmin_s16_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_s16_z_untied:
+**	movprfx	z0\.h, p0/z, z1\.h
+**	smin	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_s16_z_untied, svint16_t,
+		z0 = svmin_s16_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_w0_s16_z_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z0\.h
+**	smin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s16_z_tied1, svint16_t, int16_t,
+		 z0 = svmin_n_s16_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_w0_s16_z_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z1\.h
+**	smin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s16_z_untied, svint16_t, int16_t,
+		 z0 = svmin_n_s16_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_h0_s16_z_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z1\.h
+**	smin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_s16_z_tied1, svint16_t, int16_t,
+		 z1 = svmin_n_s16_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_h0_s16_z_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z2\.h
+**	smin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_s16_z_untied, svint16_t, int16_t,
+		 z1 = svmin_n_s16_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_s16_z_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0\.h, p0/z, z0\.h
+**	smin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s16_z_tied1, svint16_t,
+		z0 = svmin_n_s16_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_s16_z_untied:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0\.h, p0/z, z1\.h
+**	smin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s16_z_untied, svint16_t,
+		z0 = svmin_n_s16_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_s16_x_tied1:
+**	smin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_s16_x_tied1, svint16_t,
+		z0 = svmin_s16_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_s16_x_tied2:
+**	smin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_s16_x_tied2, svint16_t,
+		z1 = svmin_s16_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_s16_x_untied:
+**	movprfx	z2, z0
+**	smin	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_s16_x_untied, svint16_t,
+		z2 = svmin_s16_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_w0_s16_x_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	smin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s16_x_tied1, svint16_t, int16_t,
+		 z0 = svmin_n_s16_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_w0_s16_x_untied:
+**	mov	z1\.h, w0
+**	smin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s16_x_untied, svint16_t, int16_t,
+		 z1 = svmin_n_s16_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_h0_s16_x_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	smin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_s16_x_tied1, svint16_t, int16_t,
+		 z1 = svmin_n_s16_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_h0_s16_x_untied:
+**	mov	z2\.h, h0
+**	smin	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_s16_x_untied, svint16_t, int16_t,
+		 z2 = svmin_n_s16_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** min_1_s16_x_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	smin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s16_x_tied1, svint16_t,
+		z0 = svmin_n_s16_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_s16_x_untied:
+**	mov	z0\.h, #1
+**	smin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s16_x_untied, svint16_t,
+		z0 = svmin_n_s16_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s32.c
new file mode 100644
index 0000000..63b95bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s32.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_s32_m_tied1:
+**	smin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_s32_m_tied1, svint32_t,
+		z0 = svmin_s32_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_s32_m_tied2, svint32_t,
+		z1 = svmin_s32_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_s32_m_untied:
+**	movprfx	z0, z1
+**	smin	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_s32_m_untied, svint32_t,
+		z0 = svmin_s32_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_w0_s32_m_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	smin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s32_m_tied1, svint32_t, int32_t,
+		 z0 = svmin_n_s32_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_w0_s32_m_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0, z1
+**	smin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s32_m_untied, svint32_t, int32_t,
+		 z0 = svmin_n_s32_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_s0_s32_m_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	smin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_s32_m_tied1, svint32_t, int32_t,
+		 z1 = svmin_n_s32_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_s0_s32_m_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1, z2
+**	smin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_s32_m_untied, svint32_t, int32_t,
+		 z1 = svmin_n_s32_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_s32_m_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	smin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s32_m_tied1, svint32_t,
+		z0 = svmin_n_s32_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_s32_m_untied:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0, z1
+**	smin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s32_m_untied, svint32_t,
+		z0 = svmin_n_s32_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_m1_s32_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	smin	z0\.s, p0/m, z0\.s, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_m1_s32_m, svint32_t,
+		z0 = svmin_n_s32_m (p0, z0, -1),
+		z0 = svmin_m (p0, z0, -1))
+
+/*
+** min_s32_z_tied1:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	smin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_s32_z_tied1, svint32_t,
+		z0 = svmin_s32_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_s32_z_tied2:
+**	movprfx	z1\.s, p0/z, z1\.s
+**	smin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_s32_z_tied2, svint32_t,
+		z1 = svmin_s32_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_s32_z_untied:
+**	movprfx	z0\.s, p0/z, z1\.s
+**	smin	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_s32_z_untied, svint32_t,
+		z0 = svmin_s32_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_w0_s32_z_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z0\.s
+**	smin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s32_z_tied1, svint32_t, int32_t,
+		 z0 = svmin_n_s32_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_w0_s32_z_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z1\.s
+**	smin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s32_z_untied, svint32_t, int32_t,
+		 z0 = svmin_n_s32_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_s0_s32_z_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z1\.s
+**	smin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_s32_z_tied1, svint32_t, int32_t,
+		 z1 = svmin_n_s32_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_s0_s32_z_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z2\.s
+**	smin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_s32_z_untied, svint32_t, int32_t,
+		 z1 = svmin_n_s32_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_s32_z_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0\.s, p0/z, z0\.s
+**	smin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s32_z_tied1, svint32_t,
+		z0 = svmin_n_s32_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_s32_z_untied:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0\.s, p0/z, z1\.s
+**	smin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s32_z_untied, svint32_t,
+		z0 = svmin_n_s32_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_s32_x_tied1:
+**	smin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_s32_x_tied1, svint32_t,
+		z0 = svmin_s32_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_s32_x_tied2:
+**	smin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_s32_x_tied2, svint32_t,
+		z1 = svmin_s32_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_s32_x_untied:
+**	movprfx	z2, z0
+**	smin	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_s32_x_untied, svint32_t,
+		z2 = svmin_s32_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_w0_s32_x_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	smin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s32_x_tied1, svint32_t, int32_t,
+		 z0 = svmin_n_s32_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_w0_s32_x_untied:
+**	mov	z1\.s, w0
+**	smin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s32_x_untied, svint32_t, int32_t,
+		 z1 = svmin_n_s32_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_s0_s32_x_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	smin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_s32_x_tied1, svint32_t, int32_t,
+		 z1 = svmin_n_s32_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_s0_s32_x_untied:
+**	mov	z2\.s, s0
+**	smin	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_s32_x_untied, svint32_t, int32_t,
+		 z2 = svmin_n_s32_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** min_1_s32_x_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	smin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s32_x_tied1, svint32_t,
+		z0 = svmin_n_s32_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_s32_x_untied:
+**	mov	z0\.s, #1
+**	smin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s32_x_untied, svint32_t,
+		z0 = svmin_n_s32_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s64.c
new file mode 100644
index 0000000..bea796a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s64.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_s64_m_tied1:
+**	smin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_s64_m_tied1, svint64_t,
+		z0 = svmin_s64_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_s64_m_tied2, svint64_t,
+		z1 = svmin_s64_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_s64_m_untied:
+**	movprfx	z0, z1
+**	smin	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_s64_m_untied, svint64_t,
+		z0 = svmin_s64_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_x0_s64_m_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	smin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_s64_m_tied1, svint64_t, int64_t,
+		 z0 = svmin_n_s64_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_x0_s64_m_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0, z1
+**	smin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_s64_m_untied, svint64_t, int64_t,
+		 z0 = svmin_n_s64_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_d0_s64_m_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	smin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_s64_m_tied1, svint64_t, int64_t,
+		 z1 = svmin_n_s64_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_d0_s64_m_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1, z2
+**	smin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_s64_m_untied, svint64_t, int64_t,
+		 z1 = svmin_n_s64_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_s64_m_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	smin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s64_m_tied1, svint64_t,
+		z0 = svmin_n_s64_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_s64_m_untied:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0, z1
+**	smin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s64_m_untied, svint64_t,
+		z0 = svmin_n_s64_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_m1_s64_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	smin	z0\.d, p0/m, z0\.d, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_m1_s64_m, svint64_t,
+		z0 = svmin_n_s64_m (p0, z0, -1),
+		z0 = svmin_m (p0, z0, -1))
+
+/*
+** min_s64_z_tied1:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	smin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_s64_z_tied1, svint64_t,
+		z0 = svmin_s64_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_s64_z_tied2:
+**	movprfx	z1\.d, p0/z, z1\.d
+**	smin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_s64_z_tied2, svint64_t,
+		z1 = svmin_s64_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_s64_z_untied:
+**	movprfx	z0\.d, p0/z, z1\.d
+**	smin	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_s64_z_untied, svint64_t,
+		z0 = svmin_s64_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_x0_s64_z_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z0\.d
+**	smin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_s64_z_tied1, svint64_t, int64_t,
+		 z0 = svmin_n_s64_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_x0_s64_z_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z1\.d
+**	smin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_s64_z_untied, svint64_t, int64_t,
+		 z0 = svmin_n_s64_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_d0_s64_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z1\.d
+**	smin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_s64_z_tied1, svint64_t, int64_t,
+		 z1 = svmin_n_s64_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_d0_s64_z_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z2\.d
+**	smin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_s64_z_untied, svint64_t, int64_t,
+		 z1 = svmin_n_s64_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_s64_z_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0\.d, p0/z, z0\.d
+**	smin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s64_z_tied1, svint64_t,
+		z0 = svmin_n_s64_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_s64_z_untied:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0\.d, p0/z, z1\.d
+**	smin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s64_z_untied, svint64_t,
+		z0 = svmin_n_s64_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_s64_x_tied1:
+**	smin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_s64_x_tied1, svint64_t,
+		z0 = svmin_s64_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_s64_x_tied2:
+**	smin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_s64_x_tied2, svint64_t,
+		z1 = svmin_s64_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_s64_x_untied:
+**	movprfx	z2, z0
+**	smin	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_s64_x_untied, svint64_t,
+		z2 = svmin_s64_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_x0_s64_x_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	smin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_s64_x_tied1, svint64_t, int64_t,
+		 z0 = svmin_n_s64_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_x0_s64_x_untied:
+**	mov	z1\.d, x0
+**	smin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_s64_x_untied, svint64_t, int64_t,
+		 z1 = svmin_n_s64_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_d0_s64_x_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	smin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_s64_x_tied1, svint64_t, int64_t,
+		 z1 = svmin_n_s64_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_d0_s64_x_untied:
+**	mov	z2\.d, d0
+**	smin	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_s64_x_untied, svint64_t, int64_t,
+		 z2 = svmin_n_s64_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** min_1_s64_x_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	smin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s64_x_tied1, svint64_t,
+		z0 = svmin_n_s64_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_s64_x_untied:
+**	mov	z0\.d, #1
+**	smin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s64_x_untied, svint64_t,
+		z0 = svmin_n_s64_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s8.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s8.c
new file mode 100644
index 0000000..35401db
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_s8.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_s8_m_tied1:
+**	smin	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_s8_m_tied1, svint8_t,
+		z0 = svmin_s8_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_s8_m_tied2, svint8_t,
+		z1 = svmin_s8_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_s8_m_untied:
+**	movprfx	z0, z1
+**	smin	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_s8_m_untied, svint8_t,
+		z0 = svmin_s8_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_w0_s8_m_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s8_m_tied1, svint8_t, int8_t,
+		 z0 = svmin_n_s8_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_w0_s8_m_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0, z1
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s8_m_untied, svint8_t, int8_t,
+		 z0 = svmin_n_s8_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_b0_s8_m_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	smin	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_s8_m_tied1, svint8_t, int8_t,
+		 z1 = svmin_n_s8_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_b0_s8_m_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1, z2
+**	smin	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_s8_m_untied, svint8_t, int8_t,
+		 z1 = svmin_n_s8_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_s8_m_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s8_m_tied1, svint8_t,
+		z0 = svmin_n_s8_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_s8_m_untied:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0, z1
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s8_m_untied, svint8_t,
+		z0 = svmin_n_s8_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_m1_s8_m:
+**	mov	(z[0-9]+\.b), #(-1|255)
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_m1_s8_m, svint8_t,
+		z0 = svmin_n_s8_m (p0, z0, -1),
+		z0 = svmin_m (p0, z0, -1))
+
+/*
+** min_s8_z_tied1:
+**	movprfx	z0\.b, p0/z, z0\.b
+**	smin	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_s8_z_tied1, svint8_t,
+		z0 = svmin_s8_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_s8_z_tied2:
+**	movprfx	z1\.b, p0/z, z1\.b
+**	smin	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_s8_z_tied2, svint8_t,
+		z1 = svmin_s8_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_s8_z_untied:
+**	movprfx	z0\.b, p0/z, z1\.b
+**	smin	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_s8_z_untied, svint8_t,
+		z0 = svmin_s8_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_w0_s8_z_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, z0\.b
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s8_z_tied1, svint8_t, int8_t,
+		 z0 = svmin_n_s8_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_w0_s8_z_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, z1\.b
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s8_z_untied, svint8_t, int8_t,
+		 z0 = svmin_n_s8_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_b0_s8_z_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, z1\.b
+**	smin	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_s8_z_tied1, svint8_t, int8_t,
+		 z1 = svmin_n_s8_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_b0_s8_z_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, z2\.b
+**	smin	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_s8_z_untied, svint8_t, int8_t,
+		 z1 = svmin_n_s8_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_s8_z_untied:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0\.b, p0/z, z1\.b
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s8_z_untied, svint8_t,
+		z0 = svmin_n_s8_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_1_s8_z_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0\.b, p0/z, z0\.b
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s8_z_tied1, svint8_t,
+		z0 = svmin_n_s8_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_s8_x_tied1:
+**	smin	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_s8_x_tied1, svint8_t,
+		z0 = svmin_s8_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_s8_x_tied2:
+**	smin	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_s8_x_tied2, svint8_t,
+		z1 = svmin_s8_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_s8_x_untied:
+**	movprfx	z2, z0
+**	smin	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_s8_x_untied, svint8_t,
+		z2 = svmin_s8_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_w0_s8_x_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s8_x_tied1, svint8_t, int8_t,
+		 z0 = svmin_n_s8_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_w0_s8_x_untied:
+**	mov	z1\.b, w0
+**	smin	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_s8_x_untied, svint8_t, int8_t,
+		 z1 = svmin_n_s8_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_b0_s8_x_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	smin	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_s8_x_tied1, svint8_t, int8_t,
+		 z1 = svmin_n_s8_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_b0_s8_x_untied:
+**	mov	z2\.b, b0
+**	smin	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_s8_x_untied, svint8_t, int8_t,
+		 z2 = svmin_n_s8_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** min_1_s8_x_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	smin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s8_x_tied1, svint8_t,
+		z0 = svmin_n_s8_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_s8_x_untied:
+**	mov	z0\.b, #1
+**	smin	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_s8_x_untied, svint8_t,
+		z0 = svmin_n_s8_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u16.c
new file mode 100644
index 0000000..00b5351
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u16.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_u16_m_tied1:
+**	umin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_u16_m_tied1, svuint16_t,
+		z0 = svmin_u16_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_u16_m_tied2, svuint16_t,
+		z1 = svmin_u16_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_u16_m_untied:
+**	movprfx	z0, z1
+**	umin	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_u16_m_untied, svuint16_t,
+		z0 = svmin_u16_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_w0_u16_m_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	umin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u16_m_tied1, svuint16_t, uint16_t,
+		 z0 = svmin_n_u16_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_w0_u16_m_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0, z1
+**	umin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u16_m_untied, svuint16_t, uint16_t,
+		 z0 = svmin_n_u16_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_h0_u16_m_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	umin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_u16_m_tied1, svuint16_t, uint16_t,
+		 z1 = svmin_n_u16_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_h0_u16_m_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1, z2
+**	umin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_u16_m_untied, svuint16_t, uint16_t,
+		 z1 = svmin_n_u16_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_u16_m_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	umin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u16_m_tied1, svuint16_t,
+		z0 = svmin_n_u16_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_u16_m_untied:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0, z1
+**	umin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u16_m_untied, svuint16_t,
+		z0 = svmin_n_u16_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_m1_u16_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	umin	z0\.h, p0/m, z0\.h, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_m1_u16_m, svuint16_t,
+		z0 = svmin_n_u16_m (p0, z0, -1),
+		z0 = svmin_m (p0, z0, -1))
+
+/*
+** min_u16_z_tied1:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	umin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_u16_z_tied1, svuint16_t,
+		z0 = svmin_u16_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_u16_z_tied2:
+**	movprfx	z1\.h, p0/z, z1\.h
+**	umin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_u16_z_tied2, svuint16_t,
+		z1 = svmin_u16_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_u16_z_untied:
+**	movprfx	z0\.h, p0/z, z1\.h
+**	umin	z0\.h, p0/m, z0\.h, z2\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_u16_z_untied, svuint16_t,
+		z0 = svmin_u16_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_w0_u16_z_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z0\.h
+**	umin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u16_z_tied1, svuint16_t, uint16_t,
+		 z0 = svmin_n_u16_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_w0_u16_z_untied:
+**	mov	(z[0-9]+\.h), w0
+**	movprfx	z0\.h, p0/z, z1\.h
+**	umin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u16_z_untied, svuint16_t, uint16_t,
+		 z0 = svmin_n_u16_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_h0_u16_z_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z1\.h
+**	umin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_u16_z_tied1, svuint16_t, uint16_t,
+		 z1 = svmin_n_u16_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_h0_u16_z_untied:
+**	mov	(z[0-9]+\.h), h0
+**	movprfx	z1\.h, p0/z, z2\.h
+**	umin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_u16_z_untied, svuint16_t, uint16_t,
+		 z1 = svmin_n_u16_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_u16_z_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0\.h, p0/z, z0\.h
+**	umin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u16_z_tied1, svuint16_t,
+		z0 = svmin_n_u16_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_u16_z_untied:
+**	mov	(z[0-9]+\.h), #1
+**	movprfx	z0\.h, p0/z, z1\.h
+**	umin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u16_z_untied, svuint16_t,
+		z0 = svmin_n_u16_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_u16_x_tied1:
+**	umin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_u16_x_tied1, svuint16_t,
+		z0 = svmin_u16_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_u16_x_tied2:
+**	umin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_u16_x_tied2, svuint16_t,
+		z1 = svmin_u16_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_u16_x_untied:
+**	movprfx	z2, z0
+**	umin	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_u16_x_untied, svuint16_t,
+		z2 = svmin_u16_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_w0_u16_x_tied1:
+**	mov	(z[0-9]+\.h), w0
+**	umin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u16_x_tied1, svuint16_t, uint16_t,
+		 z0 = svmin_n_u16_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_w0_u16_x_untied:
+**	mov	z1\.h, w0
+**	umin	z1\.h, p0/m, z1\.h, z0\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u16_x_untied, svuint16_t, uint16_t,
+		 z1 = svmin_n_u16_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_h0_u16_x_tied1:
+**	mov	(z[0-9]+\.h), h0
+**	umin	z1\.h, p0/m, z1\.h, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_u16_x_tied1, svuint16_t, uint16_t,
+		 z1 = svmin_n_u16_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_h0_u16_x_untied:
+**	mov	z2\.h, h0
+**	umin	z2\.h, p0/m, z2\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_ZS (min_h0_u16_x_untied, svuint16_t, uint16_t,
+		 z2 = svmin_n_u16_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** min_1_u16_x_tied1:
+**	mov	(z[0-9]+\.h), #1
+**	umin	z0\.h, p0/m, z0\.h, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u16_x_tied1, svuint16_t,
+		z0 = svmin_n_u16_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_u16_x_untied:
+**	mov	z0\.h, #1
+**	umin	z0\.h, p0/m, z0\.h, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u16_x_untied, svuint16_t,
+		z0 = svmin_n_u16_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u32.c
new file mode 100644
index 0000000..91c7025
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u32.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_u32_m_tied1:
+**	umin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_u32_m_tied1, svuint32_t,
+		z0 = svmin_u32_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_u32_m_tied2, svuint32_t,
+		z1 = svmin_u32_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_u32_m_untied:
+**	movprfx	z0, z1
+**	umin	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_u32_m_untied, svuint32_t,
+		z0 = svmin_u32_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_w0_u32_m_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	umin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u32_m_tied1, svuint32_t, uint32_t,
+		 z0 = svmin_n_u32_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_w0_u32_m_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0, z1
+**	umin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u32_m_untied, svuint32_t, uint32_t,
+		 z0 = svmin_n_u32_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_s0_u32_m_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	umin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_u32_m_tied1, svuint32_t, uint32_t,
+		 z1 = svmin_n_u32_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_s0_u32_m_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1, z2
+**	umin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_u32_m_untied, svuint32_t, uint32_t,
+		 z1 = svmin_n_u32_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_u32_m_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	umin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u32_m_tied1, svuint32_t,
+		z0 = svmin_n_u32_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_u32_m_untied:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0, z1
+**	umin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u32_m_untied, svuint32_t,
+		z0 = svmin_n_u32_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_m1_u32_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	umin	z0\.s, p0/m, z0\.s, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_m1_u32_m, svuint32_t,
+		z0 = svmin_n_u32_m (p0, z0, -1),
+		z0 = svmin_m (p0, z0, -1))
+
+/*
+** min_u32_z_tied1:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	umin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_u32_z_tied1, svuint32_t,
+		z0 = svmin_u32_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_u32_z_tied2:
+**	movprfx	z1\.s, p0/z, z1\.s
+**	umin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_u32_z_tied2, svuint32_t,
+		z1 = svmin_u32_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_u32_z_untied:
+**	movprfx	z0\.s, p0/z, z1\.s
+**	umin	z0\.s, p0/m, z0\.s, z2\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_u32_z_untied, svuint32_t,
+		z0 = svmin_u32_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_w0_u32_z_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z0\.s
+**	umin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u32_z_tied1, svuint32_t, uint32_t,
+		 z0 = svmin_n_u32_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_w0_u32_z_untied:
+**	mov	(z[0-9]+\.s), w0
+**	movprfx	z0\.s, p0/z, z1\.s
+**	umin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u32_z_untied, svuint32_t, uint32_t,
+		 z0 = svmin_n_u32_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_s0_u32_z_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z1\.s
+**	umin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_u32_z_tied1, svuint32_t, uint32_t,
+		 z1 = svmin_n_u32_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_s0_u32_z_untied:
+**	mov	(z[0-9]+\.s), s0
+**	movprfx	z1\.s, p0/z, z2\.s
+**	umin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_u32_z_untied, svuint32_t, uint32_t,
+		 z1 = svmin_n_u32_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_u32_z_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0\.s, p0/z, z0\.s
+**	umin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u32_z_tied1, svuint32_t,
+		z0 = svmin_n_u32_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_u32_z_untied:
+**	mov	(z[0-9]+\.s), #1
+**	movprfx	z0\.s, p0/z, z1\.s
+**	umin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u32_z_untied, svuint32_t,
+		z0 = svmin_n_u32_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_u32_x_tied1:
+**	umin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_u32_x_tied1, svuint32_t,
+		z0 = svmin_u32_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_u32_x_tied2:
+**	umin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_u32_x_tied2, svuint32_t,
+		z1 = svmin_u32_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_u32_x_untied:
+**	movprfx	z2, z0
+**	umin	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_u32_x_untied, svuint32_t,
+		z2 = svmin_u32_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_w0_u32_x_tied1:
+**	mov	(z[0-9]+\.s), w0
+**	umin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u32_x_tied1, svuint32_t, uint32_t,
+		 z0 = svmin_n_u32_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_w0_u32_x_untied:
+**	mov	z1\.s, w0
+**	umin	z1\.s, p0/m, z1\.s, z0\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u32_x_untied, svuint32_t, uint32_t,
+		 z1 = svmin_n_u32_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_s0_u32_x_tied1:
+**	mov	(z[0-9]+\.s), s0
+**	umin	z1\.s, p0/m, z1\.s, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_u32_x_tied1, svuint32_t, uint32_t,
+		 z1 = svmin_n_u32_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_s0_u32_x_untied:
+**	mov	z2\.s, s0
+**	umin	z2\.s, p0/m, z2\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_ZS (min_s0_u32_x_untied, svuint32_t, uint32_t,
+		 z2 = svmin_n_u32_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** min_1_u32_x_tied1:
+**	mov	(z[0-9]+\.s), #1
+**	umin	z0\.s, p0/m, z0\.s, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u32_x_tied1, svuint32_t,
+		z0 = svmin_n_u32_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_u32_x_untied:
+**	mov	z0\.s, #1
+**	umin	z0\.s, p0/m, z0\.s, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u32_x_untied, svuint32_t,
+		z0 = svmin_n_u32_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u64.c
new file mode 100644
index 0000000..4d14b68
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u64.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_u64_m_tied1:
+**	umin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_u64_m_tied1, svuint64_t,
+		z0 = svmin_u64_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_u64_m_tied2, svuint64_t,
+		z1 = svmin_u64_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_u64_m_untied:
+**	movprfx	z0, z1
+**	umin	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_u64_m_untied, svuint64_t,
+		z0 = svmin_u64_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_x0_u64_m_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	umin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_u64_m_tied1, svuint64_t, uint64_t,
+		 z0 = svmin_n_u64_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_x0_u64_m_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0, z1
+**	umin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_u64_m_untied, svuint64_t, uint64_t,
+		 z0 = svmin_n_u64_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_d0_u64_m_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	umin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_u64_m_tied1, svuint64_t, uint64_t,
+		 z1 = svmin_n_u64_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_d0_u64_m_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1, z2
+**	umin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_u64_m_untied, svuint64_t, uint64_t,
+		 z1 = svmin_n_u64_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_u64_m_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	umin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u64_m_tied1, svuint64_t,
+		z0 = svmin_n_u64_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_u64_m_untied:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0, z1
+**	umin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u64_m_untied, svuint64_t,
+		z0 = svmin_n_u64_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_m1_u64_m:
+**	mov	(z[0-9]+)\.b, #(-1|255)
+**	umin	z0\.d, p0/m, z0\.d, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_m1_u64_m, svuint64_t,
+		z0 = svmin_n_u64_m (p0, z0, -1),
+		z0 = svmin_m (p0, z0, -1))
+
+/*
+** min_u64_z_tied1:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	umin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_u64_z_tied1, svuint64_t,
+		z0 = svmin_u64_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_u64_z_tied2:
+**	movprfx	z1\.d, p0/z, z1\.d
+**	umin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_u64_z_tied2, svuint64_t,
+		z1 = svmin_u64_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_u64_z_untied:
+**	movprfx	z0\.d, p0/z, z1\.d
+**	umin	z0\.d, p0/m, z0\.d, z2\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_u64_z_untied, svuint64_t,
+		z0 = svmin_u64_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_x0_u64_z_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z0\.d
+**	umin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_u64_z_tied1, svuint64_t, uint64_t,
+		 z0 = svmin_n_u64_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_x0_u64_z_untied:
+**	mov	(z[0-9]+\.d), x0
+**	movprfx	z0\.d, p0/z, z1\.d
+**	umin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_u64_z_untied, svuint64_t, uint64_t,
+		 z0 = svmin_n_u64_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_d0_u64_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z1\.d
+**	umin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_u64_z_tied1, svuint64_t, uint64_t,
+		 z1 = svmin_n_u64_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_d0_u64_z_untied:
+**	mov	(z[0-9]+\.d), d0
+**	movprfx	z1\.d, p0/z, z2\.d
+**	umin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_u64_z_untied, svuint64_t, uint64_t,
+		 z1 = svmin_n_u64_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_u64_z_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0\.d, p0/z, z0\.d
+**	umin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u64_z_tied1, svuint64_t,
+		z0 = svmin_n_u64_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_1_u64_z_untied:
+**	mov	(z[0-9]+\.d), #1
+**	movprfx	z0\.d, p0/z, z1\.d
+**	umin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u64_z_untied, svuint64_t,
+		z0 = svmin_n_u64_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_u64_x_tied1:
+**	umin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_u64_x_tied1, svuint64_t,
+		z0 = svmin_u64_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_u64_x_tied2:
+**	umin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_u64_x_tied2, svuint64_t,
+		z1 = svmin_u64_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_u64_x_untied:
+**	movprfx	z2, z0
+**	umin	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_u64_x_untied, svuint64_t,
+		z2 = svmin_u64_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_x0_u64_x_tied1:
+**	mov	(z[0-9]+\.d), x0
+**	umin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_u64_x_tied1, svuint64_t, uint64_t,
+		 z0 = svmin_n_u64_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_x0_u64_x_untied:
+**	mov	z1\.d, x0
+**	umin	z1\.d, p0/m, z1\.d, z0\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (min_x0_u64_x_untied, svuint64_t, uint64_t,
+		 z1 = svmin_n_u64_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_d0_u64_x_tied1:
+**	mov	(z[0-9]+\.d), d0
+**	umin	z1\.d, p0/m, z1\.d, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_u64_x_tied1, svuint64_t, uint64_t,
+		 z1 = svmin_n_u64_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_d0_u64_x_untied:
+**	mov	z2\.d, d0
+**	umin	z2\.d, p0/m, z2\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_ZS (min_d0_u64_x_untied, svuint64_t, uint64_t,
+		 z2 = svmin_n_u64_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** min_1_u64_x_tied1:
+**	mov	(z[0-9]+\.d), #1
+**	umin	z0\.d, p0/m, z0\.d, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u64_x_tied1, svuint64_t,
+		z0 = svmin_n_u64_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_u64_x_untied:
+**	mov	z0\.d, #1
+**	umin	z0\.d, p0/m, z0\.d, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u64_x_untied, svuint64_t,
+		z0 = svmin_n_u64_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u8.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u8.c
new file mode 100644
index 0000000..017702e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/min_u8.c
@@ -0,0 +1,286 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** min_u8_m_tied1:
+**	umin	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_u8_m_tied1, svuint8_t,
+		z0 = svmin_u8_m (p0, z0, z1),
+		z0 = svmin_m (p0, z0, z1))
+
+/* Bad RA choice: no preferred output sequence.  */
+TEST_UNIFORM_Z (min_u8_m_tied2, svuint8_t,
+		z1 = svmin_u8_m (p0, z0, z1),
+		z1 = svmin_m (p0, z0, z1))
+
+/*
+** min_u8_m_untied:
+**	movprfx	z0, z1
+**	umin	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_u8_m_untied, svuint8_t,
+		z0 = svmin_u8_m (p0, z1, z2),
+		z0 = svmin_m (p0, z1, z2))
+
+/*
+** min_w0_u8_m_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u8_m_tied1, svuint8_t, uint8_t,
+		 z0 = svmin_n_u8_m (p0, z0, x0),
+		 z0 = svmin_m (p0, z0, x0))
+
+/*
+** min_w0_u8_m_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0, z1
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u8_m_untied, svuint8_t, uint8_t,
+		 z0 = svmin_n_u8_m (p0, z1, x0),
+		 z0 = svmin_m (p0, z1, x0))
+
+/*
+** min_b0_u8_m_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	umin	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_u8_m_tied1, svuint8_t, uint8_t,
+		 z1 = svmin_n_u8_m (p0, z1, d0),
+		 z1 = svmin_m (p0, z1, d0))
+
+/*
+** min_b0_u8_m_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1, z2
+**	umin	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_u8_m_untied, svuint8_t, uint8_t,
+		 z1 = svmin_n_u8_m (p0, z2, d0),
+		 z1 = svmin_m (p0, z2, d0))
+
+/*
+** min_1_u8_m_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u8_m_tied1, svuint8_t,
+		z0 = svmin_n_u8_m (p0, z0, 1),
+		z0 = svmin_m (p0, z0, 1))
+
+/*
+** min_1_u8_m_untied:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0, z1
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u8_m_untied, svuint8_t,
+		z0 = svmin_n_u8_m (p0, z1, 1),
+		z0 = svmin_m (p0, z1, 1))
+
+/*
+** min_m1_u8_m:
+**	mov	(z[0-9]+\.b), #(-1|255)
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_m1_u8_m, svuint8_t,
+		z0 = svmin_n_u8_m (p0, z0, -1),
+		z0 = svmin_m (p0, z0, -1))
+
+/*
+** min_u8_z_tied1:
+**	movprfx	z0\.b, p0/z, z0\.b
+**	umin	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_u8_z_tied1, svuint8_t,
+		z0 = svmin_u8_z (p0, z0, z1),
+		z0 = svmin_z (p0, z0, z1))
+
+/*
+** min_u8_z_tied2:
+**	movprfx	z1\.b, p0/z, z1\.b
+**	umin	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_u8_z_tied2, svuint8_t,
+		z1 = svmin_u8_z (p0, z0, z1),
+		z1 = svmin_z (p0, z0, z1))
+
+/*
+** min_u8_z_untied:
+**	movprfx	z0\.b, p0/z, z1\.b
+**	umin	z0\.b, p0/m, z0\.b, z2\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_u8_z_untied, svuint8_t,
+		z0 = svmin_u8_z (p0, z1, z2),
+		z0 = svmin_z (p0, z1, z2))
+
+/*
+** min_w0_u8_z_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, z0\.b
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u8_z_tied1, svuint8_t, uint8_t,
+		 z0 = svmin_n_u8_z (p0, z0, x0),
+		 z0 = svmin_z (p0, z0, x0))
+
+/*
+** min_w0_u8_z_untied:
+**	mov	(z[0-9]+\.b), w0
+**	movprfx	z0\.b, p0/z, z1\.b
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u8_z_untied, svuint8_t, uint8_t,
+		 z0 = svmin_n_u8_z (p0, z1, x0),
+		 z0 = svmin_z (p0, z1, x0))
+
+/*
+** min_b0_u8_z_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, z1\.b
+**	umin	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_u8_z_tied1, svuint8_t, uint8_t,
+		 z1 = svmin_n_u8_z (p0, z1, d0),
+		 z1 = svmin_z (p0, z1, d0))
+
+/*
+** min_b0_u8_z_untied:
+**	mov	(z[0-9]+\.b), b0
+**	movprfx	z1\.b, p0/z, z2\.b
+**	umin	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_u8_z_untied, svuint8_t, uint8_t,
+		 z1 = svmin_n_u8_z (p0, z2, d0),
+		 z1 = svmin_z (p0, z2, d0))
+
+/*
+** min_1_u8_z_untied:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0\.b, p0/z, z1\.b
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u8_z_untied, svuint8_t,
+		z0 = svmin_n_u8_z (p0, z1, 1),
+		z0 = svmin_z (p0, z1, 1))
+
+/*
+** min_1_u8_z_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	movprfx	z0\.b, p0/z, z0\.b
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u8_z_tied1, svuint8_t,
+		z0 = svmin_n_u8_z (p0, z0, 1),
+		z0 = svmin_z (p0, z0, 1))
+
+/*
+** min_u8_x_tied1:
+**	umin	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_u8_x_tied1, svuint8_t,
+		z0 = svmin_u8_x (p0, z0, z1),
+		z0 = svmin_x (p0, z0, z1))
+
+/*
+** min_u8_x_tied2:
+**	umin	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_u8_x_tied2, svuint8_t,
+		z1 = svmin_u8_x (p0, z0, z1),
+		z1 = svmin_x (p0, z0, z1))
+
+/*
+** min_u8_x_untied:
+**	movprfx	z2, z0
+**	umin	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_u8_x_untied, svuint8_t,
+		z2 = svmin_u8_x (p0, z0, z1),
+		z2 = svmin_x (p0, z0, z1))
+
+/*
+** min_w0_u8_x_tied1:
+**	mov	(z[0-9]+\.b), w0
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u8_x_tied1, svuint8_t, uint8_t,
+		 z0 = svmin_n_u8_x (p0, z0, x0),
+		 z0 = svmin_x (p0, z0, x0))
+
+/*
+** min_w0_u8_x_untied:
+**	mov	z1\.b, w0
+**	umin	z1\.b, p0/m, z1\.b, z0\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (min_w0_u8_x_untied, svuint8_t, uint8_t,
+		 z1 = svmin_n_u8_x (p0, z0, x0),
+		 z1 = svmin_x (p0, z0, x0))
+
+/*
+** min_b0_u8_x_tied1:
+**	mov	(z[0-9]+\.b), b0
+**	umin	z1\.b, p0/m, z1\.b, \1
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_u8_x_tied1, svuint8_t, uint8_t,
+		 z1 = svmin_n_u8_x (p0, z1, d0),
+		 z1 = svmin_x (p0, z1, d0))
+
+/*
+** min_b0_u8_x_untied:
+**	mov	z2\.b, b0
+**	umin	z2\.b, p0/m, z2\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_ZS (min_b0_u8_x_untied, svuint8_t, uint8_t,
+		 z2 = svmin_n_u8_x (p0, z1, d0),
+		 z2 = svmin_x (p0, z1, d0))
+
+/*
+** min_1_u8_x_tied1:
+**	mov	(z[0-9]+\.b), #1
+**	umin	z0\.b, p0/m, z0\.b, \1
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u8_x_tied1, svuint8_t,
+		z0 = svmin_n_u8_x (p0, z0, 1),
+		z0 = svmin_x (p0, z0, 1))
+
+/*
+** min_1_u8_x_untied:
+**	mov	z0\.b, #1
+**	umin	z0\.b, p0/m, z0\.b, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (min_1_u8_x_untied, svuint8_t,
+		z0 = svmin_n_u8_x (p0, z1, 1),
+		z0 = svmin_x (p0, z1, 1))
+
-- 
2.7.4
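
(For readers skimming the tests above: a minimal usage sketch of the three
predication forms they exercise -- _m, _z and _x -- assuming an SVE-enabled
compiler and the standard <arm_sve.h> header.  Illustrative only; not part
of the patch.)

#include <arm_sve.h>
#include <stdint.h>

/* Merging form: inactive lanes keep the value of the first operand.  */
svint32_t min_merge (svbool_t pg, svint32_t a, svint32_t b)
{
  return svmin_m (pg, a, b);
}

/* Zeroing form: inactive lanes are set to zero.  */
svint32_t min_zero (svbool_t pg, svint32_t a, svint32_t b)
{
  return svmin_z (pg, a, b);
}

/* "Don't care" form: inactive lanes take unspecified values, which is what
   gives the compiler the extra register-allocation freedom checked by the
   _x tests.  Passing a scalar selects the _n_ variant via overloading.  */
svuint8_t min_scalar (svbool_t pg, svuint8_t a, uint8_t n)
{
  return svmin_x (pg, a, n);
}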