[v6,33/35] target/arm: Implement SVE dot product (indexed)

Message ID: 20180627043328.11531-34-richard.henderson@linaro.org
State: New
Series: target/arm SVE patches

Commit Message

Richard Henderson June 27, 2018, 4:33 a.m. UTC
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>


---
v6: Rearrange the loops.  The compiler does well with this form
    and hopefully they are also easier to read.
---
 target/arm/helper.h        |   5 ++
 target/arm/translate-sve.c |  18 ++++++
 target/arm/vec_helper.c    | 124 +++++++++++++++++++++++++++++++++++++
 target/arm/sve.decode      |   8 ++-
 4 files changed, 154 insertions(+), 1 deletion(-)
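
For reference, the byte-form semantics implemented below reduce to the
following standalone model (a sketch only, not the QEMU code: the name,
signature, and bounds handling are illustrative; as in the helpers,
vl_bytes is 8 for AdvSIMD or a multiple of 16 for SVE, and the m operand
is always a full 16-byte register segment):

    #include <stddef.h>
    #include <stdint.h>

    /* SDOT (indexed), byte form: each 128-bit segment selects one 32-bit
     * group of four bytes from m via 'index'; every 32-bit lane of d then
     * accumulates a four-way dot product of n against that shared group.
     */
    void sdot_idx_b_model(uint32_t *d, const int8_t *n, const int8_t *m,
                          size_t vl_bytes, unsigned index)
    {
        for (size_t seg = 0; seg * 16 < vl_bytes; seg++) {
            const int8_t *msel = m + seg * 16 + index * 4;
            size_t rem = vl_bytes - seg * 16;
            size_t lanes = (rem < 16 ? rem : 16) / 4; /* 2 for the 8-byte case */
            for (size_t e = 0; e < lanes; e++) {
                size_t i = seg * 4 + e;               /* 32-bit lane number */
                int32_t sum = 0;
                for (size_t j = 0; j < 4; j++) {
                    sum += n[i * 4 + j] * msel[j];
                }
                d[i] += sum;
            }
        }
    }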

-- 
2.17.1

Comments

Peter Maydell June 28, 2018, 1:07 p.m. UTC | #1
On 27 June 2018 at 05:33, Richard Henderson
<richard.henderson@linaro.org> wrote:
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>
> ---
> v6: Rearrange the loops.  The compiler does well with this form
>     and hopefully they are also easier to read.
> ---

Definitely easier to read, thanks.

> diff --git a/target/arm/sve.decode b/target/arm/sve.decode
> index 35415bfb6c..e10b689454 100644
> --- a/target/arm/sve.decode
> +++ b/target/arm/sve.decode
> @@ -726,7 +726,13 @@ UMIN_zzi        00100101 .. 101 011 110 ........ .....          @rdn_i8u
>  MUL_zzi         00100101 .. 110 000 110 ........ .....          @rdn_i8s
>
>  # SVE integer dot product (unpredicated)
> -DOT_zzz         01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5
> +DOT_zzz         01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5      ra=%reg_movprfx

Shouldn't this have been in a different patch ?
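
(For context: %reg_movprfx is defined earlier in sve.decode, roughly as
"%reg_movprfx 0:5", re-reading the rd bits as an explicit accumulator
argument ra for instructions that may be prefixed by MOVPRFX; the point
of the question is that this DOT_zzz hunk is a fixup belonging with the
patch that added DOT_zzz, not with the indexed variant added below.)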

> +
> +# SVE integer dot product (indexed)
> +DOT_zzx         01000100 101 index:2 rm:3 00000 u:1 rn:5 rd:5 \
> +                sz=0 ra=%reg_movprfx
> +DOT_zzx         01000100 111 index:1 rm:4 00000 u:1 rn:5 rd:5 \
> +                sz=1 ra=%reg_movprfx
>
>  # SVE floating-point complex add (predicated)
>  FCADD           01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \
> --
> 2.17.1
>

Otherwise
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>


thanks
-- PMM
Alex Bennée June 28, 2018, 2:04 p.m. UTC | #2
Richard Henderson <richard.henderson@linaro.org> writes:

> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>
> ---
> v6: Rearrange the loops.  The compiler does well with this form
>     and hopefully they are also easier to read.
> ---
>  target/arm/helper.h        |   5 ++
>  target/arm/translate-sve.c |  18 ++++++
>  target/arm/vec_helper.c    | 124 +++++++++++++++++++++++++++++++++++++
>  target/arm/sve.decode      |   8 ++-
>  4 files changed, 154 insertions(+), 1 deletion(-)
>
> diff --git a/target/arm/helper.h b/target/arm/helper.h
> index e23ce7ff19..59e8c3bd1b 100644
> --- a/target/arm/helper.h
> +++ b/target/arm/helper.h
> @@ -588,6 +588,11 @@ DEF_HELPER_FLAGS_4(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
>  DEF_HELPER_FLAGS_4(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
>  DEF_HELPER_FLAGS_4(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
>
> +DEF_HELPER_FLAGS_4(gvec_sdot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_udot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_sdot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +DEF_HELPER_FLAGS_4(gvec_udot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
> +
>  DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
>                     void, ptr, ptr, ptr, ptr, i32)
>  DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
> diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
> index 8a2bd1f8c5..3cff71cae8 100644
> --- a/target/arm/translate-sve.c
> +++ b/target/arm/translate-sve.c
> @@ -3440,6 +3440,24 @@ static bool trans_DOT_zzz(DisasContext *s, arg_DOT_zzz *a, uint32_t insn)
>      return true;
>  }
>
> +static bool trans_DOT_zzx(DisasContext *s, arg_DOT_zzx *a, uint32_t insn)
> +{
> +    static gen_helper_gvec_3 * const fns[2][2] = {
> +        { gen_helper_gvec_sdot_idx_b, gen_helper_gvec_sdot_idx_h },
> +        { gen_helper_gvec_udot_idx_b, gen_helper_gvec_udot_idx_h }
> +    };
> +
> +    if (sve_access_check(s)) {
> +        unsigned vsz = vec_full_reg_size(s);
> +        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
> +                           vec_full_reg_offset(s, a->rn),
> +                           vec_full_reg_offset(s, a->rm),
> +                           vsz, vsz, a->index, fns[a->u][a->sz]);
> +    }
> +    return true;
> +}
> +
> +
>  /*
>   *** SVE Floating Point Multiply-Add Indexed Group
>   */
> diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
> index c16a30c3b5..37f338732e 100644
> --- a/target/arm/vec_helper.c
> +++ b/target/arm/vec_helper.c
> @@ -261,6 +261,130 @@ void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
>      clear_tail(d, opr_sz, simd_maxsz(desc));
>  }
>
> +void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
> +{
> +    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
> +    intptr_t index = simd_data(desc);
> +    uint32_t *d = vd;
> +    int8_t *n = vn;
> +    int8_t *m_indexed = (int8_t *)vm + index * 4;
> +
> +    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
> +     * Otherwise opr_sz is a multiple of 16.
> +     */
> +    segend = MIN(4, opr_sz_4);
> +    i = 0;
> +    do {
> +        int8_t m0 = m_indexed[i * 4 + 0];
> +        int8_t m1 = m_indexed[i * 4 + 1];
> +        int8_t m2 = m_indexed[i * 4 + 2];
> +        int8_t m3 = m_indexed[i * 4 + 3];
> +
> +        do {
> +            d[i] += n[i * 4 + 0] * m0
> +                  + n[i * 4 + 1] * m1
> +                  + n[i * 4 + 2] * m2
> +                  + n[i * 4 + 3] * m3;
> +        } while (++i < segend);
> +        segend = i + 4;
> +    } while (i < opr_sz_4);
> +
> +    clear_tail(d, opr_sz, simd_maxsz(desc));
> +}
> +
> +void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
> +{
> +    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
> +    intptr_t index = simd_data(desc);
> +    uint32_t *d = vd;
> +    uint8_t *n = vn;
> +    uint8_t *m_indexed = (uint8_t *)vm + index * 4;
> +
> +    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
> +     * Otherwise opr_sz is a multiple of 16.
> +     */
> +    segend = MIN(4, opr_sz_4);
> +    i = 0;
> +    do {
> +        uint8_t m0 = m_indexed[i * 4 + 0];
> +        uint8_t m1 = m_indexed[i * 4 + 1];
> +        uint8_t m2 = m_indexed[i * 4 + 2];
> +        uint8_t m3 = m_indexed[i * 4 + 3];
> +
> +        do {
> +            d[i] += n[i * 4 + 0] * m0
> +                  + n[i * 4 + 1] * m1
> +                  + n[i * 4 + 2] * m2
> +                  + n[i * 4 + 3] * m3;
> +        } while (++i < segend);
> +        segend = i + 4;
> +    } while (i < opr_sz_4);
> +
> +    clear_tail(d, opr_sz, simd_maxsz(desc));
> +}
> +
> +void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
> +{
> +    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
> +    intptr_t index = simd_data(desc);
> +    uint64_t *d = vd;
> +    int16_t *n = vn;
> +    int16_t *m_indexed = (int16_t *)vm + index * 4;
> +
> +    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
> +     * Process the entire segment all at once, writing back the results
> +     * only after we've consumed all of the inputs.
> +     */
> +    for (i = 0; i < opr_sz_8 ; i += 2) {
> +        uint64_t d0, d1;
> +
> +        d0  = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
> +        d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
> +        d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
> +        d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
> +        d1  = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
> +        d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
> +        d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
> +        d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];
> +
> +        d[i + 0] += d0;
> +        d[i + 1] += d1;
> +    }

Looking at the disassembler output I guess the metrics don't make it worth
the compiler vectorising any of this, which is a shame.

Anyway:

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>



> +
> +    clear_tail(d, opr_sz, simd_maxsz(desc));
> +}
> +
> +void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
> +{
> +    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
> +    intptr_t index = simd_data(desc);
> +    uint64_t *d = vd;
> +    uint16_t *n = vn;
> +    uint16_t *m_indexed = (uint16_t *)vm + index * 4;
> +
> +    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
> +     * Process the entire segment all at once, writing back the results
> +     * only after we've consumed all of the inputs.
> +     */
> +    for (i = 0; i < opr_sz_8 ; i += 2) {
> +        uint64_t d0, d1;
> +
> +        d0  = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
> +        d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
> +        d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
> +        d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
> +        d1  = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
> +        d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
> +        d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
> +        d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];
> +
> +        d[i + 0] += d0;
> +        d[i + 1] += d1;
> +    }
> +
> +    clear_tail(d, opr_sz, simd_maxsz(desc));
> +}
> +
>  void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
>                           void *vfpst, uint32_t desc)
>  {
> diff --git a/target/arm/sve.decode b/target/arm/sve.decode
> index 35415bfb6c..e10b689454 100644
> --- a/target/arm/sve.decode
> +++ b/target/arm/sve.decode
> @@ -726,7 +726,13 @@ UMIN_zzi        00100101 .. 101 011 110 ........ .....          @rdn_i8u
>  MUL_zzi         00100101 .. 110 000 110 ........ .....          @rdn_i8s
>
>  # SVE integer dot product (unpredicated)
> -DOT_zzz         01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5
> +DOT_zzz         01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5      ra=%reg_movprfx
> +
> +# SVE integer dot product (indexed)
> +DOT_zzx         01000100 101 index:2 rm:3 00000 u:1 rn:5 rd:5 \
> +                sz=0 ra=%reg_movprfx
> +DOT_zzx         01000100 111 index:1 rm:4 00000 u:1 rn:5 rd:5 \
> +                sz=1 ra=%reg_movprfx
>
>  # SVE floating-point complex add (predicated)
>  FCADD           01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \

--
Alex Bennée
Richard Henderson June 28, 2018, 3:47 p.m. UTC | #3
On 06/28/2018 07:04 AM, Alex Bennée wrote:
> Looking at the disassembler output I guess the metrics don't make it worth
> the compiler vectorising any of this, which is a shame.

I think dot product pattern recognition is still being worked on.  At least
that's what I recall from a conversation with Richard Sandiford a few weeks ago.
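
The idiom in question is the classic widening multiply-accumulate
reduction, i.e. something like this standalone sketch:

    #include <stddef.h>
    #include <stdint.h>

    int32_t dot8(const int8_t *a, const int8_t *b, size_t len)
    {
        int32_t sum = 0;
        for (size_t i = 0; i < len; i++) {
            /* each product widens to int before the accumulate */
            sum += a[i] * b[i];
        }
        return sum;
    }

which the vectoriser has to recognise as a dot-product reduction before
it can lower it to SDOT/UDOT on targets that have them.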


r~
Richard Henderson June 28, 2018, 3:57 p.m. UTC | #4
On 06/28/2018 06:07 AM, Peter Maydell wrote:
>>  # SVE integer dot product (unpredicated)
>> -DOT_zzz         01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5
>> +DOT_zzz         01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 ra=%reg_movprfx
>
> Shouldn't this have been in a different patch ?

Dammit, you pointed this out last time and I missed it
when going back through all of the nits.

Do you want me to re-spin all or part of the patch set?


r~
Peter Maydell June 28, 2018, 4:07 p.m. UTC | #5
On 28 June 2018 at 16:57, Richard Henderson
<richard.henderson@linaro.org> wrote:
> On 06/28/2018 06:07 AM, Peter Maydell wrote:
>>>  # SVE integer dot product (unpredicated)
>>> -DOT_zzz         01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5
>>> +DOT_zzz         01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 ra=%reg_movprfx
>>
>> Shouldn't this have been in a different patch ?
>
> Dammit, you pointed this out last time and I missed it
> when going back through all of the nits.
>
> Do you want me to re-spin all or part of the patch set?

No, I've just made the fix locally in target-arm.next.

-- PMM

Patch

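For reference, the encodings decoded below correspond to assembler forms
like these (operands illustrative; per the decode patterns the byte form
takes Zm from z0-z7 with index 0-3, the halfword form from z0-z15 with
index 0-1):

    sdot  z0.s, z1.b, z7.b[3]     // sz=0, u=0
    udot  z0.s, z1.b, z7.b[0]     // sz=0, u=1
    sdot  z2.d, z3.h, z15.h[1]    // sz=1, u=0
    udot  z2.d, z3.h, z15.h[0]    // sz=1, u=1
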
diff --git a/target/arm/helper.h b/target/arm/helper.h
index e23ce7ff19..59e8c3bd1b 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -588,6 +588,11 @@ DEF_HELPER_FLAGS_4(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_4(gvec_sdot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_udot_idx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sdot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_udot_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 8a2bd1f8c5..3cff71cae8 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -3440,6 +3440,24 @@ static bool trans_DOT_zzz(DisasContext *s, arg_DOT_zzz *a, uint32_t insn)
     return true;
 }
 
+static bool trans_DOT_zzx(DisasContext *s, arg_DOT_zzx *a, uint32_t insn)
+{
+    static gen_helper_gvec_3 * const fns[2][2] = {
+        { gen_helper_gvec_sdot_idx_b, gen_helper_gvec_sdot_idx_h },
+        { gen_helper_gvec_udot_idx_b, gen_helper_gvec_udot_idx_h }
+    };
+
+    if (sve_access_check(s)) {
+        unsigned vsz = vec_full_reg_size(s);
+        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
+                           vec_full_reg_offset(s, a->rn),
+                           vec_full_reg_offset(s, a->rm),
+                           vsz, vsz, a->index, fns[a->u][a->sz]);
+    }
+    return true;
+}
+
+
 /*
  *** SVE Floating Point Multiply-Add Indexed Group
  */
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index c16a30c3b5..37f338732e 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -261,6 +261,130 @@ void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
+void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
+    intptr_t index = simd_data(desc);
+    uint32_t *d = vd;
+    int8_t *n = vn;
+    int8_t *m_indexed = (int8_t *)vm + index * 4;
+
+    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
+     * Otherwise opr_sz is a multiple of 16.
+     */
+    segend = MIN(4, opr_sz_4);
+    i = 0;
+    do {
+        int8_t m0 = m_indexed[i * 4 + 0];
+        int8_t m1 = m_indexed[i * 4 + 1];
+        int8_t m2 = m_indexed[i * 4 + 2];
+        int8_t m3 = m_indexed[i * 4 + 3];
+
+        do {
+            d[i] += n[i * 4 + 0] * m0
+                  + n[i * 4 + 1] * m1
+                  + n[i * 4 + 2] * m2
+                  + n[i * 4 + 3] * m3;
+        } while (++i < segend);
+        segend = i + 4;
+    } while (i < opr_sz_4);
+
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
+    intptr_t index = simd_data(desc);
+    uint32_t *d = vd;
+    uint8_t *n = vn;
+    uint8_t *m_indexed = (uint8_t *)vm + index * 4;
+
+    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
+     * Otherwise opr_sz is a multiple of 16.
+     */
+    segend = MIN(4, opr_sz_4);
+    i = 0;
+    do {
+        uint8_t m0 = m_indexed[i * 4 + 0];
+        uint8_t m1 = m_indexed[i * 4 + 1];
+        uint8_t m2 = m_indexed[i * 4 + 2];
+        uint8_t m3 = m_indexed[i * 4 + 3];
+
+        do {
+            d[i] += n[i * 4 + 0] * m0
+                  + n[i * 4 + 1] * m1
+                  + n[i * 4 + 2] * m2
+                  + n[i * 4 + 3] * m3;
+        } while (++i < segend);
+        segend = i + 4;
+    } while (i < opr_sz_4);
+
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
+    intptr_t index = simd_data(desc);
+    uint64_t *d = vd;
+    int16_t *n = vn;
+    int16_t *m_indexed = (int16_t *)vm + index * 4;
+
+    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
+     * Process the entire segment all at once, writing back the results
+     * only after we've consumed all of the inputs.
+     */
+    for (i = 0; i < opr_sz_8 ; i += 2) {
+        uint64_t d0, d1;
+
+        d0  = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
+        d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
+        d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
+        d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
+        d1  = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
+        d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
+        d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
+        d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];
+
+        d[i + 0] += d0;
+        d[i + 1] += d1;
+    }
+
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
+    intptr_t index = simd_data(desc);
+    uint64_t *d = vd;
+    uint16_t *n = vn;
+    uint16_t *m_indexed = (uint16_t *)vm + index * 4;
+
+    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
+     * Process the entire segment all at once, writing back the results
+     * only after we've consumed all of the inputs.
+     */
+    for (i = 0; i < opr_sz_8 ; i += 2) {
+        uint64_t d0, d1;
+
+        d0  = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
+        d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
+        d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
+        d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
+        d1  = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
+        d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
+        d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
+        d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];
+
+        d[i + 0] += d0;
+        d[i + 1] += d1;
+    }
+
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
 void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
                          void *vfpst, uint32_t desc)
 {
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 35415bfb6c..e10b689454 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -726,7 +726,13 @@ UMIN_zzi        00100101 .. 101 011 110 ........ .....          @rdn_i8u
 MUL_zzi         00100101 .. 110 000 110 ........ .....          @rdn_i8s
 
 # SVE integer dot product (unpredicated)
-DOT_zzz         01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5
+DOT_zzz         01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5      ra=%reg_movprfx
+
+# SVE integer dot product (indexed)
+DOT_zzx         01000100 101 index:2 rm:3 00000 u:1 rn:5 rd:5 \
+                sz=0 ra=%reg_movprfx
+DOT_zzx         01000100 111 index:1 rm:4 00000 u:1 rn:5 rd:5 \
+                sz=1 ra=%reg_movprfx
 
 # SVE floating-point complex add (predicated)
 FCADD           01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \