@@ -719,6 +719,8 @@ DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(sme_revd_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
@@ -652,6 +652,7 @@ REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn
REVH            00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn
REVW            00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn
RBIT            00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn
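+# Reverse doublewords within each 128-bit quadword (predicated; requires SME)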
+REVD            00000101 00 1011 10 100 ... ..... ..... @rd_pg_rn_e0

# SVE vector splice (predicated, destructive)
SPLICE          00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm
@@ -931,6 +931,22 @@ DO_ZPZ_D(sve_revh_d, uint64_t, hswap64)
DO_ZPZ_D(sve_revw_d, uint64_t, wswap64)

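+/*
+ * Reverse the two 64-bit doublewords within each 128-bit quadword.
+ * A quadword is active when the predicate bit for its first byte is
+ * set; inactive quadwords leave the destination unchanged (merging
+ * predication).
+ */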
+void HELPER(sme_revd_q)(void *vd, void *vn, void *vg, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc) / 8;
+    uint64_t *d = vd, *n = vn;
+    uint8_t *pg = vg;
+
+    for (i = 0; i < opr_sz; i += 2) {
+        if (pg[H1(i)] & 1) {
+            uint64_t n0 = n[i + 0];
+            uint64_t n1 = n[i + 1];
+            d[i + 0] = n1;
+            d[i + 1] = n0;
+        }
+    }
+}
+
DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8)
DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16)
DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32)
@@ -2901,6 +2901,8 @@ TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0)
TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz,
           a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0)

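+/* REVD is present with SME and always operates on 128-bit elements. */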
+TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0)
+
TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz,
           gen_helper_sve_splice, a, a->esz)