--- a/target/arm/tcg/helper-sme.h
+++ b/target/arm/tcg/helper-sme.h
@@ -220,3 +220,21 @@ DEF_HELPER_FLAGS_4(sme2_fcvt_n, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sme2_fcvtn, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sme2_fcvt_w, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_4(sme2_fcvtl, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sqcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtu_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sqcvt_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvt_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtu_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvt_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvt_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtu_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sqcvtn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvtn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtun_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvtn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtun_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
--- a/target/arm/tcg/sme_helper.c
+++ b/target/arm/tcg/sme_helper.c
@@ -1532,6 +1532,64 @@ void HELPER(sme2_fcvt_n)(void *vd, void *vs, float_status *fpst, uint32_t desc)
}
}
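+/* Saturating narrowing convert, concatenating the narrowed results */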
+#define SQCVT2(NAME, TW, TN, HW, HN, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch __attribute__((uninitialized)); \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TN *d = vd; \
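+    /* If the destination overlaps the wide source vectors, narrow into scratch. */ \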
+ if ((vd - vs) < 2 * sizeof(ARMVectorReg)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(i)] = SAT(s0[HW(i)]); \
+ d[HN(i) + n] = SAT(s1[HW(i)]); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQCVT2(sme2_sqcvt_sh, int32_t, int16_t, H4, H2, do_ssat_h)
+SQCVT2(sme2_uqcvt_sh, uint32_t, uint16_t, H4, H2, do_usat_h)
+SQCVT2(sme2_sqcvtu_sh, int32_t, uint16_t, H4, H2, do_usat_h)
+
+#undef SQCVT2
+
+#define SQCVT4(NAME, TW, TN, HW, HN, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch __attribute__((uninitialized)); \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TW *s2 = vs + 2 * sizeof(ARMVectorReg); \
+ TW *s3 = vs + 3 * sizeof(ARMVectorReg); \
+ TN *d = vd; \
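+    /* If the destination overlaps the wide source vectors, narrow into scratch. */ \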
+ if ((vd - vs) < 4 * sizeof(ARMVectorReg)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(i)] = SAT(s0[HW(i)]); \
+ d[HN(i) + n] = SAT(s1[HW(i)]); \
+ d[HN(i) + 2 * n] = SAT(s2[HW(i)]); \
+ d[HN(i) + 3 * n] = SAT(s3[HW(i)]); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQCVT4(sme2_sqcvt_sb, int32_t, int8_t, H4, H1, do_ssat_b)
+SQCVT4(sme2_uqcvt_sb, uint32_t, uint8_t, H4, H1, do_usat_b)
+SQCVT4(sme2_sqcvtu_sb, int32_t, uint8_t, H4, H1, do_usat_b)
+
+SQCVT4(sme2_sqcvt_dh, int64_t, int16_t, H8, H2, do_ssat_h)
+SQCVT4(sme2_uqcvt_dh, uint64_t, uint16_t, H8, H2, do_usat_h)
+SQCVT4(sme2_sqcvtu_dh, int64_t, uint16_t, H8, H2, do_usat_h)
+
+#undef SQCVT4
+
/* Convert and interleave */
void HELPER(sme2_bfcvtn)(void *vd, void *vs, float_status *fpst, uint32_t desc)
{
@@ -1563,6 +1621,39 @@ void HELPER(sme2_fcvtn)(void *vd, void *vs, float_status *fpst, uint32_t desc)
}
}
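+/* Saturating narrowing convert, interleaving the narrowed results */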
+#define SQCVTN4(NAME, TW, TN, HW, HN, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch __attribute__((uninitialized)); \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TW *s2 = vs + 2 * sizeof(ARMVectorReg); \
+ TW *s3 = vs + 3 * sizeof(ARMVectorReg); \
+ TN *d = vd; \
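+    /* If the destination overlaps the wide source vectors, narrow into scratch. */ \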
+ if ((vd - vs) < 4 * sizeof(ARMVectorReg)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(4 * i + 0)] = SAT(s0[HW(i)]); \
+ d[HN(4 * i + 1)] = SAT(s1[HW(i)]); \
+ d[HN(4 * i + 2)] = SAT(s2[HW(i)]); \
+ d[HN(4 * i + 3)] = SAT(s3[HW(i)]); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQCVTN4(sme2_sqcvtn_sb, int32_t, int8_t, H4, H1, do_ssat_b)
+SQCVTN4(sme2_uqcvtn_sb, uint32_t, uint8_t, H4, H1, do_usat_b)
+SQCVTN4(sme2_sqcvtun_sb, int32_t, uint8_t, H4, H1, do_usat_b)
+
+SQCVTN4(sme2_sqcvtn_dh, int64_t, int16_t, H8, H2, do_ssat_h)
+SQCVTN4(sme2_uqcvtn_dh, uint64_t, uint16_t, H8, H2, do_usat_h)
+SQCVTN4(sme2_sqcvtun_dh, int64_t, uint16_t, H8, H2, do_usat_h)
+
+#undef SQCVTN4
+
/* Expand and convert */
void HELPER(sme2_fcvt_w)(void *vd, void *vs, float_status *fpst, uint32_t desc)
{
--- a/target/arm/tcg/translate-sme.c
+++ b/target/arm/tcg/translate-sme.c
@@ -1312,3 +1312,38 @@ TRANS_FEAT(FRINTM, aa64_sme2, do_zz_fpst, a, float_round_down,
FPST_A64, gen_helper_gvec_vrint_rm_s)
TRANS_FEAT(FRINTA, aa64_sme2, do_zz_fpst, a, float_round_ties_away,
FPST_A64, gen_helper_gvec_vrint_rm_s)
+
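+/* Emit one out-of-line gvec helper call per (zd, zn) pair in the register group. */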
+static bool do_zz(DisasContext *s, arg_zz_n *a, int data,
+ gen_helper_gvec_2 *fn)
+{
+ if (sme_sm_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+
+ for (int i = 0, n = a->n; i < n; ++i) {
+ tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->zd + i),
+ vec_full_reg_offset(s, a->zn + i),
+ svl, svl, data, fn);
+ }
+ }
+ return true;
+}
+
+TRANS_FEAT(SQCVT_sh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvt_sh)
+TRANS_FEAT(UQCVT_sh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvt_sh)
+TRANS_FEAT(SQCVTU_sh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtu_sh)
+
+TRANS_FEAT(SQCVT_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvt_sb)
+TRANS_FEAT(UQCVT_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvt_sb)
+TRANS_FEAT(SQCVTU_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtu_sb)
+
+TRANS_FEAT(SQCVT_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvt_dh)
+TRANS_FEAT(UQCVT_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvt_dh)
+TRANS_FEAT(SQCVTU_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtu_dh)
+
+TRANS_FEAT(SQCVTN_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtn_sb)
+TRANS_FEAT(UQCVTN_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvtn_sb)
+TRANS_FEAT(SQCVTUN_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtun_sb)
+
+TRANS_FEAT(SQCVTN_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtn_dh)
+TRANS_FEAT(UQCVTN_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvtn_dh)
+TRANS_FEAT(SQCVTUN_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtun_dh)
--- a/target/arm/tcg/sme.decode
+++ b/target/arm/tcg/sme.decode
@@ -725,6 +725,8 @@ FMLS_nx_d 11000001 1101 .... 1 .. 00. ...00 10 ... @azx_4x1_i1_o3
&zz_n zd zn n
@zz_1x2 ........ ... ..... ...... ..... zd:5 \
&zz_n n=1 zn=%zn_ax2
+@zz_1x4 ........ ... ..... ...... ..... zd:5 \
+ &zz_n n=1 zn=%zn_ax4
@zz_2x1 ........ ... ..... ...... zn:5 ..... \
&zz_n n=1 zd=%zd_ax2
@zz_2x2 ........ ... ..... ...... .... . ..... \
@@ -759,3 +761,23 @@ FRINTM 11000001 101 01010 111000 ....0 ....0 @zz_2x2
FRINTM 11000001 101 11010 111000 ...00 ...00 @zz_4x4
FRINTA 11000001 101 01100 111000 ....0 ....0 @zz_2x2
FRINTA 11000001 101 11100 111000 ...00 ...00 @zz_4x4
+
+SQCVT_sh 11000001 001 00011 111000 ....0 ..... @zz_1x2
+UQCVT_sh 11000001 001 00011 111000 ....1 ..... @zz_1x2
+SQCVTU_sh 11000001 011 00011 111000 ....0 ..... @zz_1x2
+
+SQCVT_sb 11000001 001 10011 111000 ...00 ..... @zz_1x4
+UQCVT_sb 11000001 001 10011 111000 ...01 ..... @zz_1x4
+SQCVTU_sb 11000001 011 10011 111000 ...00 ..... @zz_1x4
+
+SQCVT_dh 11000001 101 10011 111000 ...00 ..... @zz_1x4
+UQCVT_dh 11000001 101 10011 111000 ...01 ..... @zz_1x4
+SQCVTU_dh 11000001 111 10011 111000 ...00 ..... @zz_1x4
+
+SQCVTN_sb 11000001 001 10011 111000 ...10 ..... @zz_1x4
+UQCVTN_sb 11000001 001 10011 111000 ...11 ..... @zz_1x4
+SQCVTUN_sb 11000001 011 10011 111000 ...10 ..... @zz_1x4
+
+SQCVTN_dh 11000001 101 10011 111000 ...10 ..... @zz_1x4
+UQCVTN_dh 11000001 101 10011 111000 ...11 ..... @zz_1x4
+SQCVTUN_dh 11000001 111 10011 111000 ...10 ..... @zz_1x4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/tcg/helper-sme.h    | 18 +++++++
 target/arm/tcg/sme_helper.c    | 91 ++++++++++++++++++++++++++++++++++
 target/arm/tcg/translate-sme.c | 35 +++++++++++++
 target/arm/tcg/sme.decode      | 22 ++++++++
 4 files changed, 166 insertions(+)