@@ -263,3 +263,21 @@ DEF_HELPER_FLAGS_3(sme2_uzp4_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sme2_uzp4_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sme2_uzp4_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sme2_uzp4_q, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sqrshr_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) /* 2x .s -> 1x .h */
+DEF_HELPER_FLAGS_3(sme2_uqrshr_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshru_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sqrshr_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) /* 4x .s -> 1x .b */
+DEF_HELPER_FLAGS_3(sme2_uqrshr_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshru_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshr_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) /* 4x .d -> 1x .h */
+DEF_HELPER_FLAGS_3(sme2_uqrshr_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshru_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sqrshrn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32) /* 4x .s -> .b, interleaved */
+DEF_HELPER_FLAGS_3(sme2_uqrshrn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshrun_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshrn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32) /* 4x .d -> .h, interleaved */
+DEF_HELPER_FLAGS_3(sme2_uqrshrn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshrun_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
@@ -1590,6 +1590,66 @@ SQCVT4(sme2_sqcvtu_dh, int64_t, uint16_t, H8, H2, do_usat_h)
#undef SQCVT4
+#define SQRSHR2(NAME, TW, TN, HW, HN, RSHR, SAT) /* round-shift-right, saturate, narrow 2 TW regs into 1 */ \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+    ARMVectorReg scratch __attribute__((uninitialized)); /* opt out of -ftrivial-auto-var-init */ \
+    size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+    int shift = simd_data(desc); /* immediate shift count, passed via desc */ \
+    TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); /* two consecutive source regs */ \
+    TN *d = vd; \
+    if ((vd - vs) < 2 * sizeof(ARMVectorReg)) { /* ptrdiff -> size_t: vd below vs compares false (no overlap there) */ \
+        d = (TN *)&scratch; /* vd overlaps a source; build result in scratch */ \
+    } \
+    for (size_t i = 0; i < n; ++i) { \
+        d[HN(i)] = SAT(RSHR(s0[HW(i)], shift)); /* s0 -> low half of dest */ \
+        d[HN(i) + n] = SAT(RSHR(s1[HW(i)], shift)); /* s1 -> high half */ \
+    } \
+    if (d != vd) { \
+        memcpy(vd, d, oprsz); /* copy the scratch result back */ \
+    } \
+}
+
+/* .h results: must saturate to the 16-bit range (cf. the _dh forms below), not to bytes. */
+SQRSHR2(sme2_sqrshr_sh, int32_t, int16_t, H4, H2, do_srshr, do_ssat_h)
+SQRSHR2(sme2_uqrshr_sh, uint32_t, uint16_t, H4, H2, do_urshr, do_usat_h)
+SQRSHR2(sme2_sqrshru_sh, int32_t, uint16_t, H4, H2, do_srshr, do_usat_h)
+
+#undef SQRSHR2
+
+#define SQRSHR4(NAME, TW, TN, HW, HN, RSHR, SAT) /* round-shift-right, saturate, narrow 4 TW regs into 1 */ \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+    ARMVectorReg scratch __attribute__((uninitialized)); /* opt out of -ftrivial-auto-var-init */ \
+    size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+    int shift = simd_data(desc); /* immediate shift count, passed via desc */ \
+    TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); /* four consecutive source regs */ \
+    TW *s2 = vs + 2 * sizeof(ARMVectorReg); \
+    TW *s3 = vs + 3 * sizeof(ARMVectorReg); \
+    TN *d = vd; \
+    if ((vd - vs) < 4 * sizeof(ARMVectorReg)) { /* ptrdiff -> size_t: vd below vs compares false (no overlap there) */ \
+        d = (TN *)&scratch; /* vd overlaps a source; build result in scratch */ \
+    } \
+    for (size_t i = 0; i < n; ++i) { \
+        d[HN(i)] = SAT(RSHR(s0[HW(i)], shift)); /* dest quarters, in source order */ \
+        d[HN(i) + n] = SAT(RSHR(s1[HW(i)], shift)); \
+        d[HN(i) + 2 * n] = SAT(RSHR(s2[HW(i)], shift)); \
+        d[HN(i) + 3 * n] = SAT(RSHR(s3[HW(i)], shift)); \
+    } \
+    if (d != vd) { \
+        memcpy(vd, d, oprsz); /* copy the scratch result back */ \
+    } \
+}
+
+/* Byte-sized results index the dest with H1, not H2 (cf. the SQRSHRN4 _sb forms). */
+SQRSHR4(sme2_sqrshr_sb, int32_t, int8_t, H4, H1, do_srshr, do_ssat_b)
+SQRSHR4(sme2_uqrshr_sb, uint32_t, uint8_t, H4, H1, do_urshr, do_usat_b)
+SQRSHR4(sme2_sqrshru_sb, int32_t, uint8_t, H4, H1, do_srshr, do_usat_b)
+
+SQRSHR4(sme2_sqrshr_dh, int64_t, int16_t, H8, H2, do_srshr, do_ssat_h) /* halfword results: H2 */
+SQRSHR4(sme2_uqrshr_dh, uint64_t, uint16_t, H8, H2, do_urshr, do_usat_h)
+SQRSHR4(sme2_sqrshru_dh, int64_t, uint16_t, H8, H2, do_srshr, do_usat_h)
+
+#undef SQRSHR4
+
/* Convert and interleave */
void HELPER(sme2_bfcvtn)(void *vd, void *vs, float_status *fpst, uint32_t desc)
{
@@ -1654,6 +1714,40 @@ SQCVTN4(sme2_sqcvtun_dh, int64_t, uint16_t, H8, H2, do_usat_h)
#undef SQCVTN4
+#define SQRSHRN4(NAME, TW, TN, HW, HN, RSHR, SAT) /* as SQRSHR4, but results are interleaved */ \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+    ARMVectorReg scratch __attribute__((uninitialized)); /* opt out of -ftrivial-auto-var-init */ \
+    size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+    int shift = simd_data(desc); /* immediate shift count, passed via desc */ \
+    TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); /* four consecutive source regs */ \
+    TW *s2 = vs + 2 * sizeof(ARMVectorReg); \
+    TW *s3 = vs + 3 * sizeof(ARMVectorReg); \
+    TN *d = vd; \
+    if ((vd - vs) < 4 * sizeof(ARMVectorReg)) { /* ptrdiff -> size_t: vd below vs compares false (no overlap there) */ \
+        d = (TN *)&scratch; /* vd overlaps a source; build result in scratch */ \
+    } \
+    for (size_t i = 0; i < n; ++i) { \
+        d[HN(4 * i + 0)] = SAT(RSHR(s0[HW(i)], shift)); /* element i of each source, */ \
+        d[HN(4 * i + 1)] = SAT(RSHR(s1[HW(i)], shift)); /* packed adjacently in dest */ \
+        d[HN(4 * i + 2)] = SAT(RSHR(s2[HW(i)], shift)); \
+        d[HN(4 * i + 3)] = SAT(RSHR(s3[HW(i)], shift)); \
+    } \
+    if (d != vd) { \
+        memcpy(vd, d, oprsz); /* copy the scratch result back */ \
+    } \
+}
+
+SQRSHRN4(sme2_sqrshrn_sb, int32_t, int8_t, H4, H1, do_srshr, do_ssat_b) /* .s -> .b, signed sat */
+SQRSHRN4(sme2_uqrshrn_sb, uint32_t, uint8_t, H4, H1, do_urshr, do_usat_b) /* unsigned sat */
+SQRSHRN4(sme2_sqrshrun_sb, int32_t, uint8_t, H4, H1, do_srshr, do_usat_b) /* signed -> unsigned sat */
+
+SQRSHRN4(sme2_sqrshrn_dh, int64_t, int16_t, H8, H2, do_srshr, do_ssat_h) /* .d -> .h variants */
+SQRSHRN4(sme2_uqrshrn_dh, uint64_t, uint16_t, H8, H2, do_urshr, do_usat_h)
+SQRSHRN4(sme2_sqrshrun_dh, int64_t, uint16_t, H8, H2, do_srshr, do_usat_h)
+
+#undef SQRSHRN4
+
/* Expand and convert */
void HELPER(sme2_fcvt_w)(void *vd, void *vs, float_status *fpst, uint32_t desc)
{
@@ -1398,3 +1398,32 @@ static gen_helper_gvec_2 * const uzp4_fns[] = {
gen_helper_sme2_uzp4_q,
};
TRANS_FEAT(UZP_4, aa64_sme2, do_zipuzp_4, a, uzp4_fns)
+
+static bool do_zz_rshr(DisasContext *s, arg_rshr *a, gen_helper_gvec_2 *fn) /* common expander for SQRSHR* */
+{
+    if (sme_sm_enabled_check(s)) { /* assumed to raise on failure -- hence unconditional 'true' below */
+        int svl = streaming_vec_reg_size(s);
+        tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->zd),
+                           vec_full_reg_offset(s, a->zn),
+                           svl, svl, a->shift, fn); /* shift reaches the helper as simd_data(desc) */
+    }
+    return true;
+}
+
+TRANS_FEAT(SQRSHR_sh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshr_sh) /* 2-reg .s -> .h forms */
+TRANS_FEAT(UQRSHR_sh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshr_sh)
+TRANS_FEAT(SQRSHRU_sh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshru_sh)
+
+TRANS_FEAT(SQRSHR_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshr_sb) /* 4-reg forms */
+TRANS_FEAT(SQRSHR_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshr_dh)
+TRANS_FEAT(UQRSHR_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshr_sb)
+TRANS_FEAT(UQRSHR_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshr_dh)
+TRANS_FEAT(SQRSHRU_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshru_sb)
+TRANS_FEAT(SQRSHRU_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshru_dh)
+
+TRANS_FEAT(SQRSHRN_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrn_sb) /* interleaving forms */
+TRANS_FEAT(SQRSHRN_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrn_dh)
+TRANS_FEAT(UQRSHRN_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshrn_sb)
+TRANS_FEAT(UQRSHRN_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshrn_dh)
+TRANS_FEAT(SQRSHRUN_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrun_sb)
+TRANS_FEAT(SQRSHRUN_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrun_dh)
@@ -810,3 +810,36 @@ UZP_4 11000001 esz:2 1 10110 111000 ...00 ... 10 \
&zz_e zd=%zd_ax4 zn=%zn_ax4
UZP_4 11000001 001 10111 111000 ...00 ... 10 \
&zz_e esz=4 zd=%zd_ax4 zn=%zn_ax4
+
+### SME2 Multi-vector SVE Constructive Binary
+
+&rshr zd zn shift                       # dest Zd, first source Zn, shift immediate
+
+%rshr_sh_shift 16:4 !function=rsub_16   # rsub_16: shift = 16 - field
+%rshr_sb_shift 16:5 !function=rsub_32   # rsub_32: shift = 32 - field
+%rshr_dh_shift 22:1 16:5 !function=rsub_64  # rsub_64: shift = 64 - field (split: bit 22 is msb)
+
+@rshr_sh ........ .... .... ...... ..... zd:5 \
+ &rshr zn=%zn_ax2 shift=%rshr_sh_shift
+@rshr_sb ........ ... ..... ...... ..... zd:5 \
+ &rshr zn=%zn_ax4 shift=%rshr_sb_shift
+@rshr_dh ........ ... ..... ...... ..... zd:5 \
+ &rshr zn=%zn_ax4 shift=%rshr_dh_shift
+
+SQRSHR_sh 11000001 1110 .... 110101 ....0 ..... @rshr_sh   # 2x .s -> .h
+UQRSHR_sh 11000001 1110 .... 110101 ....1 ..... @rshr_sh
+SQRSHRU_sh 11000001 1111 .... 110101 ....0 ..... @rshr_sh
+
+SQRSHR_sb 11000001 011 ..... 110110 ...00 ..... @rshr_sb   # 4x .s -> .b
+SQRSHR_dh 11000001 1.1 ..... 110110 ...00 ..... @rshr_dh   # 4x .d -> .h
+UQRSHR_sb 11000001 011 ..... 110110 ...01 ..... @rshr_sb
+UQRSHR_dh 11000001 1.1 ..... 110110 ...01 ..... @rshr_dh
+SQRSHRU_sb 11000001 011 ..... 110110 ...10 ..... @rshr_sb
+SQRSHRU_dh 11000001 1.1 ..... 110110 ...10 ..... @rshr_dh
+
+SQRSHRN_sb 11000001 011 ..... 110111 ...00 ..... @rshr_sb  # interleaving variants
+SQRSHRN_dh 11000001 1.1 ..... 110111 ...00 ..... @rshr_dh
+UQRSHRN_sb 11000001 011 ..... 110111 ...01 ..... @rshr_sb
+UQRSHRN_dh 11000001 1.1 ..... 110111 ...01 ..... @rshr_dh
+SQRSHRUN_sb 11000001 011 ..... 110111 ...10 ..... @rshr_sb
+SQRSHRUN_dh 11000001 1.1 ..... 110111 ...10 ..... @rshr_dh
Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- target/arm/tcg/helper-sme.h | 18 +++++++ target/arm/tcg/sme_helper.c | 94 ++++++++++++++++++++++++++++++++++ target/arm/tcg/translate-sme.c | 29 +++++++++++ target/arm/tcg/sme.decode | 33 ++++++++++++ 4 files changed, 174 insertions(+)