@@ -339,6 +339,27 @@ DEF_HELPER_FLAGS_4(sve_neg_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_neg_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_neg_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
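+/* Five pointer operands (zd, za, zn, zm, and the governing
+ * predicate), followed by the simd descriptor.
+ */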
+DEF_HELPER_FLAGS_6(sve_mla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mla_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mla_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mla_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve_mls_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mls_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mls_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sve_mls_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_5(sve_and_pred, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve_bic_pred, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve_eor_pred, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
@@ -855,6 +855,73 @@ DO_ZPZI_D(sve_asrd_d, int64_t, DO_ASRD)
#undef DO_SHL
#undef DO_ASRD
+/* Fully general four-operand expander, controlled by a predicate.
+ */
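+/* The governing predicate has one bit per byte of the vector.  For
+ * elements wider than a byte, only the low bit of each element-sized
+ * group of bits is significant.  The vector is processed in 16-byte
+ * segments, loading two bytes of predicate per segment.
+ */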
+#define DO_ZPZZZ(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *va, void *vn, void *vm, \
+ void *vg, uint32_t desc) \
+{ \
+    intptr_t iv, ib, opr_sz = simd_oprsz(desc); \
+ for (iv = ib = 0; iv < opr_sz; iv += 16, ib += 2) { \
+        uint16_t pg = *(uint16_t *)(vg + H1_2(ib)); \
+ intptr_t i = 0; \
+ do { \
+ if (pg & 1) { \
+ TYPE nn = *(TYPE *)(vn + iv + H(i)); \
+ TYPE mm = *(TYPE *)(vm + iv + H(i)); \
+ TYPE aa = *(TYPE *)(va + iv + H(i)); \
+ *(TYPE *)(vd + iv + H(i)) = OP(aa, nn, mm); \
+ } \
+ i += sizeof(TYPE), pg >>= sizeof(TYPE); \
+ } while (pg); \
+ } \
+}
+
+/* Similarly, specialized for 64-bit operands. */
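+/* Each 64-bit element is governed by the low bit of one predicate
+ * byte, so the predicate is indexed byte-by-byte with H1.
+ */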
+#define DO_ZPZZZ_D(NAME, TYPE, OP) \
+void HELPER(NAME)(void *vd, void *va, void *vn, void *vm, \
+ void *vg, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc) / 8; \
+ TYPE *d = vd, *a = va, *n = vn, *m = vm; \
+ uint8_t *pg = vg; \
+ for (i = 0; i < opr_sz; i += 1) { \
+ if (pg[H1(i)] & 1) { \
+ TYPE aa = a[i], nn = n[i], mm = m[i]; \
+ d[i] = OP(aa, nn, mm); \
+ } \
+ } \
+}
+
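+/* A is the addend; N and M are the two multiplicands.  */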
+#define DO_MLA(A, N, M) (A + N * M)
+#define DO_MLS(A, N, M) (A - N * M)
+
+DO_ZPZZZ(sve_mla_b, uint8_t, H1, DO_MLA)
+DO_ZPZZZ(sve_mls_b, uint8_t, H1, DO_MLS)
+
+DO_ZPZZZ(sve_mla_h, uint16_t, H1_2, DO_MLA)
+DO_ZPZZZ(sve_mls_h, uint16_t, H1_2, DO_MLS)
+
+DO_ZPZZZ(sve_mla_s, uint32_t, H1_4, DO_MLA)
+DO_ZPZZZ(sve_mls_s, uint32_t, H1_4, DO_MLS)
+
+DO_ZPZZZ_D(sve_mla_d, uint64_t, DO_MLA)
+DO_ZPZZZ_D(sve_mls_d, uint64_t, DO_MLS)
+
+#undef DO_MLA
+#undef DO_MLS
+#undef DO_ZPZZZ
+#undef DO_ZPZZZ_D
+
void HELPER(sve_ldr)(CPUARMState *env, void *d, target_ulong addr, uint32_t len)
{
intptr_t i, len_align = QEMU_ALIGN_DOWN(len, 8);
@@ -520,6 +520,38 @@ void trans_ASRD(DisasContext *s, arg_rpri_esz *a, uint32_t insn)
}
}
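+/* Expand a predicated four-operand vector operation out of line,
+ * passing zd, za, zn, zm and the governing predicate to the helper.
+ * The vector length is encoded in the simd descriptor.
+ */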
+static void do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a,
+ gen_helper_gvec_5 *fn)
+{
+    unsigned vsz = vec_full_reg_size(s);
+ tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->ra),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ pred_full_reg_offset(s, a->pg),
+ vsz, vsz, 0, fn);
+}
+
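+/* One out-of-line helper per element size, selected by a->esz.  */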
+#define DO_ZPZZZ(NAME, name) \
+void trans_##NAME(DisasContext *s, arg_rprrr_esz *a, uint32_t insn) \
+{ \
+ static gen_helper_gvec_5 * const fns[4] = { \
+ gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \
+ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
+ }; \
+ do_zpzzz_ool(s, a, fns[a->esz]); \
+}
+
+DO_ZPZZZ(MLA, mla)
+DO_ZPZZZ(MLS, mls)
+
+#undef DO_ZPZZZ
+
static uint64_t pred_esz_mask[4] = {
0xffffffffffffffffull, 0x5555555555555555ull,
0x1111111111111111ull, 0x0101010101010101ull
@@ -45,6 +45,7 @@
&rrr_esz rd rn rm esz
&rpr_esz rd pg rn esz
&rprr_esz rd pg rn rm esz
+&rprrr_esz rd pg rn rm ra esz
&rpri_esz rd pg rn imm esz
&pred_set rd pat esz i s
@@ -62,6 +63,10 @@
@rdn_pg_rm_esz ........ esz:2 ... ... ... pg:3 rm:5 rd:5 &rprr_esz rn=%reg_movprfx
@rdm_pg_rn_esz ........ esz:2 ... ... ... pg:3 rn:5 rd:5 &rprr_esz rm=%reg_movprfx
+# Three register operand, with governing predicate, vector element size
+@rda_pg_rn_rm_esz ........ esz:2 . rm:5 ... pg:3 rn:5 rd:5 &rprrr_esz ra=%reg_movprfx
+@rdn_pg_ra_rm_esz ........ esz:2 . rm:5 ... pg:3 ra:5 rd:5 &rprrr_esz rn=%reg_movprfx
+
# One register operand, with governing predicate, vector element size
@rd_pg_rn_esz ........ esz:2 ... ... ... pg:3 rn:5 rd:5 &rpr_esz
@@ -168,6 +173,18 @@ UXTH 00000100 .. 010 011 101 ... ..... ..... @rd_pg_rn_esz # Note size > 1
SXTW 00000100 .. 010 100 101 ... ..... ..... @rd_pg_rn_esz # Note size == 3
UXTW 00000100 .. 010 101 101 ... ..... ..... @rd_pg_rn_esz # Note size == 3
+### SVE Integer Multiply-Add Group
+
+# SVE integer multiply-add writing addend (predicated)
+MLA 00000100 .. 0 ..... 010 ... ..... ..... @rda_pg_rn_rm_esz
+MLS 00000100 .. 0 ..... 011 ... ..... ..... @rda_pg_rn_rm_esz
+
+# SVE integer multiply-add writing multiplicand (predicated)
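+# MAD and MSB reuse trans_MLA and trans_MLS; the @rdn format binds
+# rn to the destination via %reg_movprfx, swapping addend and multiplicand.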
+MLA 00000100 .. 0 ..... 110 ... ..... ..... @rdn_pg_ra_rm_esz # MAD
+MLS 00000100 .. 0 ..... 111 ... ..... ..... @rdn_pg_ra_rm_esz # MSB
+
### SVE Logical - Unpredicated Group
# SVE bitwise logical operations (unpredicated)
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper-sve.h    | 21 ++++++++++++++++
 target/arm/sve_helper.c    | 67 ++++++++++++++++++++++++++++++++++++++++++++++
 target/arm/translate-sve.c | 32 +++++++++++++++++++++++
 target/arm/sve.def         | 17 +++++++++++++
 4 files changed, 137 insertions(+)

-- 
2.14.3