
[v2,05/67] target/arm: Implement SVE load vector/predicate

Message ID 20180217182323.25885-6-richard.henderson@linaro.org
State New
Series target/arm: Scalable Vector Extension

Commit Message

Richard Henderson Feb. 17, 2018, 6:22 p.m. UTC
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 target/arm/translate-sve.c | 132 +++++++++++++++++++++++++++++++++++++++++++++
 target/arm/sve.decode      |  22 +++++++-
 2 files changed, 153 insertions(+), 1 deletion(-)

-- 
2.14.3

Comments

Peter Maydell Feb. 22, 2018, 6:20 p.m. UTC | #1
On 17 February 2018 at 18:22, Richard Henderson
<richard.henderson@linaro.org> wrote:
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/arm/translate-sve.c | 132 +++++++++++++++++++++++++++++++++++++++++++++
>  target/arm/sve.decode      |  22 +++++++-
>  2 files changed, 153 insertions(+), 1 deletion(-)
>
> diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
> index 50cf2a1fdd..c0cccfda6f 100644
> --- a/target/arm/translate-sve.c
> +++ b/target/arm/translate-sve.c
> @@ -46,6 +46,19 @@ typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
>   * Implement all of the translator functions referenced by the decoder.
>   */
>
> +/* Return the offset into CPUARMState of the predicate vector register Pn.
> + * Note for this purpose, FFR is P16.  */

Missing newline before */.
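
i.e. QEMU's usual style wants the closing */ on its own line:

/* Return the offset into CPUARMState of the predicate vector register Pn.
 * Note for this purpose, FFR is P16.
 */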

> +/*
> + *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
> + */
> +
> +/* Subroutine loading a vector register at VOFS of LEN bytes.
> + * The load should begin at the address Rn + IMM.
> + */
> +
> +#if UINTPTR_MAX == UINT32_MAX
> +# define ptr i32
> +#else
> +# define ptr i64
> +#endif

I think that rather than doing this we should improve the
provision that tcg/tcg-op.h has for *_ptr() wrappers, so
from the target's point of view it has a proper tcg_const_local_ptr()
and tcg_gen_brcondi_ptr(), same as for _i32, _i64 and _tl.
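
Something like the following in tcg-op.h ought to be enough, following
the pattern of the existing _ptr wrappers (untested sketch; names as
suggested above):

#if UINTPTR_MAX == UINT32_MAX
# define tcg_const_local_ptr(V) \
    TCGV_NAT_TO_PTR(tcg_const_local_i32((intptr_t)(V)))
# define tcg_gen_brcondi_ptr(C, A, B, L) \
    tcg_gen_brcondi_i32((C), TCGV_PTR_TO_NAT(A), (B), (L))
#else
# define tcg_const_local_ptr(V) \
    TCGV_NAT_TO_PTR(tcg_const_local_i64((intptr_t)(V)))
# define tcg_gen_brcondi_ptr(C, A, B, L) \
    tcg_gen_brcondi_i64((C), TCGV_PTR_TO_NAT(A), (B), (L))
#endif

and then the target never needs its own "ptr" pasting macro at all.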

> +
> +static void do_ldr(DisasContext *s, uint32_t vofs, uint32_t len,
> +                   int rn, int imm)
> +{
> +    uint32_t len_align = QEMU_ALIGN_DOWN(len, 8);
> +    uint32_t len_remain = len % 8;
> +    uint32_t nparts = len / 8 + ctpop8(len_remain);
> +    int midx = get_mem_index(s);
> +    TCGv_i64 addr, t0, t1;
> +
> +    addr = tcg_temp_new_i64();
> +    t0 = tcg_temp_new_i64();
> +
> +    /* Note that unpredicated load/store of vector/predicate registers
> +     * are defined as a stream of bytes, which equates to little-endian
> +     * operations on larger quantities.  There is no nice way to force
> +     * a little-endian load for aarch64_be-linux-user out of line.
> +     *
> +     * Attempt to keep code expansion to a minimum by limiting the
> +     * amount of unrolling done.
> +     */
> +    if (nparts <= 4) {
> +        int i;
> +
> +        for (i = 0; i < len_align; i += 8) {
> +            tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + i);
> +            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
> +            tcg_gen_st_i64(t0, cpu_env, vofs + i);
> +        }
> +    } else {
> +        TCGLabel *loop = gen_new_label();
> +        TCGv_ptr i = TCGV_NAT_TO_PTR(glue(tcg_const_local_, ptr)(0));
> +        TCGv_ptr dest;
> +
> +        gen_set_label(loop);
> +
> +        /* Minimize the number of local temps that must be re-read from
> +         * the stack each iteration.  Instead, re-compute values other
> +         * than the loop counter.
> +         */
> +        dest = tcg_temp_new_ptr();
> +        tcg_gen_addi_ptr(dest, i, imm);
> +#if UINTPTR_MAX == UINT32_MAX
> +        tcg_gen_extu_i32_i64(addr, TCGV_PTR_TO_NAT(dest));
> +        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn));
> +#else
> +        tcg_gen_add_i64(addr, TCGV_PTR_TO_NAT(dest), cpu_reg_sp(s, rn));
> +#endif

Can we avoid the ifdef by creating a tcg_gen_ext_ptr_i64() (similar
to but opposite in effect to the existing tcg_gen_ext_i32_ptr()) ?
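
Something like this, say (untested sketch):

static inline void tcg_gen_ext_ptr_i64(TCGv_i64 r, TCGv_ptr a)
{
#if UINTPTR_MAX == UINT32_MAX
    /* Pointers are 32-bit: zero-extend the native i32 into the i64.  */
    tcg_gen_extu_i32_i64(r, TCGV_PTR_TO_NAT(a));
#else
    /* Pointers are already 64-bit: a plain move suffices.  */
    tcg_gen_mov_i64(r, TCGV_PTR_TO_NAT(a));
#endif
}

Then the loop above could use it unconditionally before the add.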

> +
> +        tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
> +
> +        tcg_gen_add_ptr(dest, cpu_env, i);
> +        tcg_gen_addi_ptr(i, i, 8);
> +        tcg_gen_st_i64(t0, dest, vofs);
> +        tcg_temp_free_ptr(dest);
> +
> +        glue(tcg_gen_brcondi_, ptr)(TCG_COND_LTU, TCGV_PTR_TO_NAT(i),
> +                                    len_align, loop);
> +        tcg_temp_free_ptr(i);
> +    }
> +
> +    /* Predicate register loads can be any multiple of 2.
> +     * Note that we still store the entire 64-bit unit into cpu_env.
> +     */
> +    if (len_remain) {
> +        tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + len_align);
> +
> +        switch (len_remain) {
> +        case 2:
> +        case 4:
> +        case 8:
> +            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LE | ctz32(len_remain));
> +            break;
> +
> +        case 6:
> +            t1 = tcg_temp_new_i64();
> +            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEUL);
> +            tcg_gen_addi_i64(addr, addr, 4);
> +            tcg_gen_qemu_ld_i64(t1, addr, midx, MO_LEUW);
> +            tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
> +            tcg_temp_free_i64(t1);
> +            break;
> +
> +        default:
> +            g_assert_not_reached();
> +        }
> +        tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
> +    }
> +    tcg_temp_free_i64(addr);
> +    tcg_temp_free_i64(t0);
> +}
> +
> +#undef ptr

>
> +&rri           rd rn imm
>  &rrr_esz       rd rn rm esz
>
>  ###########################################################################
> @@ -31,7 +37,13 @@
>  # reduce the amount of duplication between instruction patterns.
>
>  # Three operand with unused vector element size
> -@rd_rn_rm_e0   ........ ... rm:5  ... ...  rn:5 rd:5           &rrr_esz esz=0
> +@rd_rn_rm_e0   ........ ... rm:5 ... ... rn:5 rd:5             &rrr_esz esz=0

This change looks like it should be squashed into a previous patch?

> +
> +# Basic Load/Store with 9-bit immediate offset
> +@pd_rn_i9      ........ ........ ...... rn:5 . rd:4    \
> +               &rri imm=%imm9_16_10
> +@rd_rn_i9      ........ ........ ...... rn:5 rd:5      \
> +               &rri imm=%imm9_16_10
>

thanks
-- PMM
Richard Henderson Feb. 22, 2018, 7:31 p.m. UTC | #2
On 02/22/2018 10:20 AM, Peter Maydell wrote:
> On 17 February 2018 at 18:22, Richard Henderson
> <richard.henderson@linaro.org> wrote:
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>  target/arm/translate-sve.c | 132 +++++++++++++++++++++++++++++++++++++++++++++
>>  target/arm/sve.decode      |  22 +++++++-
>>  2 files changed, 153 insertions(+), 1 deletion(-)
>>
>> diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
>> index 50cf2a1fdd..c0cccfda6f 100644
>> --- a/target/arm/translate-sve.c
>> +++ b/target/arm/translate-sve.c
>> @@ -46,6 +46,19 @@ typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
>>   * Implement all of the translator functions referenced by the decoder.
>>   */
>>
>> +/* Return the offset into CPUARMState of the predicate vector register Pn.
>> + * Note for this purpose, FFR is P16.  */
>
> Missing newline before */.
>
>> +/*
>> + *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
>> + */
>> +
>> +/* Subroutine loading a vector register at VOFS of LEN bytes.
>> + * The load should begin at the address Rn + IMM.
>> + */
>> +
>> +#if UINTPTR_MAX == UINT32_MAX
>> +# define ptr i32
>> +#else
>> +# define ptr i64
>> +#endif
>
> I think that rather than doing this we should improve the
> provision that tcg/tcg-op.h has for *_ptr() wrappers, so
> from the target's point of view it has a proper tcg_const_local_ptr()
> and tcg_gen_brcondi_ptr(), same as for _i32, _i64 and _tl.
>
>> +
>> +static void do_ldr(DisasContext *s, uint32_t vofs, uint32_t len,
>> +                   int rn, int imm)
>> +{
>> +    uint32_t len_align = QEMU_ALIGN_DOWN(len, 8);
>> +    uint32_t len_remain = len % 8;
>> +    uint32_t nparts = len / 8 + ctpop8(len_remain);
>> +    int midx = get_mem_index(s);
>> +    TCGv_i64 addr, t0, t1;
>> +
>> +    addr = tcg_temp_new_i64();
>> +    t0 = tcg_temp_new_i64();
>> +
>> +    /* Note that unpredicated load/store of vector/predicate registers
>> +     * are defined as a stream of bytes, which equates to little-endian
>> +     * operations on larger quantities.  There is no nice way to force
>> +     * a little-endian load for aarch64_be-linux-user out of line.
>> +     *
>> +     * Attempt to keep code expansion to a minimum by limiting the
>> +     * amount of unrolling done.
>> +     */
>> +    if (nparts <= 4) {
>> +        int i;
>> +
>> +        for (i = 0; i < len_align; i += 8) {
>> +            tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + i);
>> +            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
>> +            tcg_gen_st_i64(t0, cpu_env, vofs + i);
>> +        }
>> +    } else {
>> +        TCGLabel *loop = gen_new_label();
>> +        TCGv_ptr i = TCGV_NAT_TO_PTR(glue(tcg_const_local_, ptr)(0));
>> +        TCGv_ptr dest;
>> +
>> +        gen_set_label(loop);
>> +
>> +        /* Minimize the number of local temps that must be re-read from
>> +         * the stack each iteration.  Instead, re-compute values other
>> +         * than the loop counter.
>> +         */
>> +        dest = tcg_temp_new_ptr();
>> +        tcg_gen_addi_ptr(dest, i, imm);
>> +#if UINTPTR_MAX == UINT32_MAX
>> +        tcg_gen_extu_i32_i64(addr, TCGV_PTR_TO_NAT(dest));
>> +        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn));
>> +#else
>> +        tcg_gen_add_i64(addr, TCGV_PTR_TO_NAT(dest), cpu_reg_sp(s, rn));
>> +#endif
>
> Can we avoid the ifdef by creating a tcg_gen_ext_ptr_i64() (similar
> to but opposite in effect to the existing tcg_gen_ext_i32_ptr()) ?

Ok, I will improve tcg.h as necessary for better support of TCGv_ptr.

>> -@rd_rn_rm_e0   ........ ... rm:5  ... ...  rn:5 rd:5           &rrr_esz esz=0
>> +@rd_rn_rm_e0   ........ ... rm:5 ... ... rn:5 rd:5             &rrr_esz esz=0
>
> This change looks like it should be squashed into a previous patch?

Ho hum, I thought I got all of these.  I'll take another look through.


r~
Alex Bennée April 3, 2018, 9:26 a.m. UTC | #3
Richard Henderson <richard.henderson@linaro.org> writes:

> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/arm/translate-sve.c | 132 +++++++++++++++++++++++++++++++++++++++++++++
>  target/arm/sve.decode      |  22 +++++++-
>  2 files changed, 153 insertions(+), 1 deletion(-)
>
> diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
> index 50cf2a1fdd..c0cccfda6f 100644
> --- a/target/arm/translate-sve.c
> +++ b/target/arm/translate-sve.c
> @@ -46,6 +46,19 @@ typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
>   * Implement all of the translator functions referenced by the decoder.
>   */
>
> +/* Return the offset into CPUARMState of the predicate vector register Pn.
> + * Note for this purpose, FFR is P16.  */
> +static inline int pred_full_reg_offset(DisasContext *s, int regno)
> +{
> +    return offsetof(CPUARMState, vfp.pregs[regno]);
> +}

You don't use it yet but probably worth a:

static inline int ffr_full_reg_offset(DisasContext *s)
{
    return pred_full_reg_offset(s, 16);
}

here when you get to it to avoid the magic 16 appearing in the main code.

> +
> +/* Return the byte size of the whole predicate register, VL / 64.  */
> +static inline int pred_full_reg_size(DisasContext *s)
> +{
> +    return s->sve_len >> 3;
> +}
> +
>  /* Invoke a vector expander on two Zregs.  */
>  static void do_vector2_z(DisasContext *s, GVecGen2Fn *gvec_fn,
>                           int esz, int rd, int rn)
> @@ -97,3 +110,122 @@ static void trans_BIC_zzz(DisasContext *s, arg_BIC_zzz *a, uint32_t insn)
>  {
>      do_vector3_z(s, tcg_gen_gvec_andc, 0, a->rd, a->rn, a->rm);
>  }
> +
> +/*
> + *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
> + */
> +
> +/* Subroutine loading a vector register at VOFS of LEN bytes.
> + * The load should begin at the address Rn + IMM.
> + */
> +
> +#if UINTPTR_MAX == UINT32_MAX
> +# define ptr i32
> +#else
> +# define ptr i64
> +#endif

This seems superfluous; don't we already have TCGv_ptr for this reason?

> +
> +static void do_ldr(DisasContext *s, uint32_t vofs, uint32_t len,
> +                   int rn, int imm)
> +{
> +    uint32_t len_align = QEMU_ALIGN_DOWN(len, 8);
> +    uint32_t len_remain = len % 8;
> +    uint32_t nparts = len / 8 + ctpop8(len_remain);
> +    int midx = get_mem_index(s);
> +    TCGv_i64 addr, t0, t1;
> +
> +    addr = tcg_temp_new_i64();
> +    t0 = tcg_temp_new_i64();
> +
> +    /* Note that unpredicated load/store of vector/predicate registers
> +     * are defined as a stream of bytes, which equates to little-endian
> +     * operations on larger quantities.  There is no nice way to force
> +     * a little-endian load for aarch64_be-linux-user out of line.
> +     *
> +     * Attempt to keep code expansion to a minimum by limiting the
> +     * amount of unrolling done.
> +     */
> +    if (nparts <= 4) {
> +        int i;
> +
> +        for (i = 0; i < len_align; i += 8) {
> +            tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + i);
> +            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
> +            tcg_gen_st_i64(t0, cpu_env, vofs + i);
> +        }
> +    } else {
> +        TCGLabel *loop = gen_new_label();
> +        TCGv_ptr i = TCGV_NAT_TO_PTR(glue(tcg_const_local_, ptr)(0));
> +        TCGv_ptr dest;
> +
> +        gen_set_label(loop);
> +
> +        /* Minimize the number of local temps that must be re-read from
> +         * the stack each iteration.  Instead, re-compute values other
> +         * than the loop counter.
> +         */
> +        dest = tcg_temp_new_ptr();
> +        tcg_gen_addi_ptr(dest, i, imm);
> +#if UINTPTR_MAX == UINT32_MAX
> +        tcg_gen_extu_i32_i64(addr, TCGV_PTR_TO_NAT(dest));
> +        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn));
> +#else
> +        tcg_gen_add_i64(addr, TCGV_PTR_TO_NAT(dest), cpu_reg_sp(s, rn));
> +#endif
> +
> +        tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
> +
> +        tcg_gen_add_ptr(dest, cpu_env, i);
> +        tcg_gen_addi_ptr(i, i, 8);
> +        tcg_gen_st_i64(t0, dest, vofs);
> +        tcg_temp_free_ptr(dest);
> +
> +        glue(tcg_gen_brcondi_, ptr)(TCG_COND_LTU, TCGV_PTR_TO_NAT(i),
> +                                    len_align, loop);

If this is the only use for ptr I wonder if it would just make more
sense to #if/else here.
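
i.e. just open-coding what the glue() expands to at this site, roughly:

#if UINTPTR_MAX == UINT32_MAX
        tcg_gen_brcondi_i32(TCG_COND_LTU, TCGV_PTR_TO_NAT(i),
                            len_align, loop);
#else
        tcg_gen_brcondi_i64(TCG_COND_LTU, TCGV_PTR_TO_NAT(i),
                            len_align, loop);
#endif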

> +        tcg_temp_free_ptr(i);
> +    }
> +
> +    /* Predicate register loads can be any multiple of 2.
> +     * Note that we still store the entire 64-bit unit into cpu_env.
> +     */
> +    if (len_remain) {
> +        tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + len_align);
> +
> +        switch (len_remain) {
> +        case 2:
> +        case 4:
> +        case 8:
> +            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LE | ctz32(len_remain));
> +            break;
> +
> +        case 6:
> +            t1 = tcg_temp_new_i64();
> +            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEUL);
> +            tcg_gen_addi_i64(addr, addr, 4);
> +            tcg_gen_qemu_ld_i64(t1, addr, midx, MO_LEUW);
> +            tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
> +            tcg_temp_free_i64(t1);
> +            break;
> +
> +        default:
> +            g_assert_not_reached();
> +        }
> +        tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
> +    }
> +    tcg_temp_free_i64(addr);
> +    tcg_temp_free_i64(t0);
> +}
> +
> +#undef ptr
> +
> +static void trans_LDR_zri(DisasContext *s, arg_rri *a, uint32_t insn)
> +{
> +    int size = vec_full_reg_size(s);
> +    do_ldr(s, vec_full_reg_offset(s, a->rd), size, a->rn, a->imm * size);
> +}
> +
> +static void trans_LDR_pri(DisasContext *s, arg_rri *a, uint32_t insn)
> +{
> +    int size = pred_full_reg_size(s);
> +    do_ldr(s, pred_full_reg_offset(s, a->rd), size, a->rn, a->imm * size);
> +}
> diff --git a/target/arm/sve.decode b/target/arm/sve.decode
> index 2c13a6024a..0c6a7ba34d 100644
> --- a/target/arm/sve.decode
> +++ b/target/arm/sve.decode
> @@ -19,11 +19,17 @@
>  # This file is processed by scripts/decodetree.py
>  #
>
> +###########################################################################
> +# Named fields.  These are primarily for disjoint fields.
> +
> +%imm9_16_10	16:s6 10:3
> +
>  ###########################################################################
>  # Named attribute sets.  These are used to make nice(er) names
>  # when creating helpers common to those for the individual
>  # instruction patterns.
>
> +&rri		rd rn imm
>  &rrr_esz	rd rn rm esz
>
>  ###########################################################################
> @@ -31,7 +37,13 @@
>  # reduce the amount of duplication between instruction patterns.
>
>  # Three operand with unused vector element size
> -@rd_rn_rm_e0	........ ... rm:5  ... ...  rn:5 rd:5		&rrr_esz esz=0
> +@rd_rn_rm_e0	........ ... rm:5 ... ... rn:5 rd:5		&rrr_esz esz=0
> +
> +# Basic Load/Store with 9-bit immediate offset
> +@pd_rn_i9	........ ........ ...... rn:5 . rd:4	\
> +		&rri imm=%imm9_16_10
> +@rd_rn_i9	........ ........ ...... rn:5 rd:5	\
> +		&rri imm=%imm9_16_10
>
>  ###########################################################################
>  # Instruction patterns.  Grouped according to the SVE encodingindex.xhtml.
> @@ -43,3 +55,11 @@ AND_zzz		00000100 00 1 ..... 001 100 ..... .....		@rd_rn_rm_e0
>  ORR_zzz		00000100 01 1 ..... 001 100 ..... .....		@rd_rn_rm_e0
>  EOR_zzz		00000100 10 1 ..... 001 100 ..... .....		@rd_rn_rm_e0
>  BIC_zzz		00000100 11 1 ..... 001 100 ..... .....		@rd_rn_rm_e0
> +
> +### SVE Memory - 32-bit Gather and Unsized Contiguous Group
> +
> +# SVE load predicate register
> +LDR_pri		10000101 10 ...... 000 ... ..... 0 ....		@pd_rn_i9
> +
> +# SVE load vector register
> +LDR_zri		10000101 10 ...... 010 ... ..... .....		@rd_rn_i9

--
Alex Bennée
Richard Henderson April 6, 2018, 1:23 a.m. UTC | #4
On 04/03/2018 07:26 PM, Alex Bennée wrote:
> You don't use it yet but probably worth a:
>
> static inline int ffr_full_reg_offset(DisasContext *s)
> {
>     return pred_full_reg_offset(s, 16);
> }
>
> here when you get to it to avoid the magic 16 appearing in the main code.

Hum.  Most of the places that ffr is touched are in sve.decode.
I could certainly enhance the grammar there to allow a symbol
instead of a number.

But I don't think treating ffr differently from a regular predicate
register, as above, is a good idea.  At best I would use

  pred_full_reg_offset(s, FFR_PRED_NUM)

or something.
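
(Where FFR_PRED_NUM would just be something like

#define FFR_PRED_NUM  16   /* FFR is stored as if it were P16 */

with the name purely illustrative.)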


r~
Alex Bennée April 6, 2018, 1:03 p.m. UTC | #5
Richard Henderson <richard.henderson@linaro.org> writes:

> On 04/03/2018 07:26 PM, Alex Bennée wrote:
>> You don't use it yet but probably worth a:
>>
>> static inline int ffr_full_reg_offset(DisasContext *s)
>> {
>>     return pred_full_reg_offset(s, 16);
>> }
>>
>> here when you get to it to avoid the magic 16 appearing in the main code.
>
> Hum.  Most of the places that ffr is touched are in sve.decode.
> I could certainly enhance the grammar there to allow a symbol
> instead of a number.
>
> But I don't think treating ffr differently from a regular predicate
> register, as above, is a good idea.  At best I would use
>
>   pred_full_reg_offset(s, FFR_PRED_NUM)

That would be a fine alternative ;-)

--
Alex Bennée

Patch

diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 50cf2a1fdd..c0cccfda6f 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -46,6 +46,19 @@  typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
  * Implement all of the translator functions referenced by the decoder.
  */
 
+/* Return the offset into CPUARMState of the predicate vector register Pn.
+ * Note for this purpose, FFR is P16.  */
+static inline int pred_full_reg_offset(DisasContext *s, int regno)
+{
+    return offsetof(CPUARMState, vfp.pregs[regno]);
+}
+
+/* Return the byte size of the whole predicate register, VL / 64.  */
+static inline int pred_full_reg_size(DisasContext *s)
+{
+    return s->sve_len >> 3;
+}
+
 /* Invoke a vector expander on two Zregs.  */
 static void do_vector2_z(DisasContext *s, GVecGen2Fn *gvec_fn,
                          int esz, int rd, int rn)
@@ -97,3 +110,122 @@  static void trans_BIC_zzz(DisasContext *s, arg_BIC_zzz *a, uint32_t insn)
 {
     do_vector3_z(s, tcg_gen_gvec_andc, 0, a->rd, a->rn, a->rm);
 }
+
+/*
+ *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
+ */
+
+/* Subroutine loading a vector register at VOFS of LEN bytes.
+ * The load should begin at the address Rn + IMM.
+ */
+
+#if UINTPTR_MAX == UINT32_MAX
+# define ptr i32
+#else
+# define ptr i64
+#endif
+
+static void do_ldr(DisasContext *s, uint32_t vofs, uint32_t len,
+                   int rn, int imm)
+{
+    uint32_t len_align = QEMU_ALIGN_DOWN(len, 8);
+    uint32_t len_remain = len % 8;
+    uint32_t nparts = len / 8 + ctpop8(len_remain);
+    int midx = get_mem_index(s);
+    TCGv_i64 addr, t0, t1;
+
+    addr = tcg_temp_new_i64();
+    t0 = tcg_temp_new_i64();
+
+    /* Note that unpredicated load/store of vector/predicate registers
+     * are defined as a stream of bytes, which equates to little-endian
+     * operations on larger quantities.  There is no nice way to force
+     * a little-endian load for aarch64_be-linux-user out of line.
+     *
+     * Attempt to keep code expansion to a minimum by limiting the
+     * amount of unrolling done.
+     */
+    if (nparts <= 4) {
+        int i;
+
+        for (i = 0; i < len_align; i += 8) {
+            tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + i);
+            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
+            tcg_gen_st_i64(t0, cpu_env, vofs + i);
+        }
+    } else {
+        TCGLabel *loop = gen_new_label();
+        TCGv_ptr i = TCGV_NAT_TO_PTR(glue(tcg_const_local_, ptr)(0));
+        TCGv_ptr dest;
+
+        gen_set_label(loop);
+
+        /* Minimize the number of local temps that must be re-read from
+         * the stack each iteration.  Instead, re-compute values other
+         * than the loop counter.
+         */
+        dest = tcg_temp_new_ptr();
+        tcg_gen_addi_ptr(dest, i, imm);
+#if UINTPTR_MAX == UINT32_MAX
+        tcg_gen_extu_i32_i64(addr, TCGV_PTR_TO_NAT(dest));
+        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, rn));
+#else
+        tcg_gen_add_i64(addr, TCGV_PTR_TO_NAT(dest), cpu_reg_sp(s, rn));
+#endif
+
+        tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEQ);
+
+        tcg_gen_add_ptr(dest, cpu_env, i);
+        tcg_gen_addi_ptr(i, i, 8);
+        tcg_gen_st_i64(t0, dest, vofs);
+        tcg_temp_free_ptr(dest);
+
+        glue(tcg_gen_brcondi_, ptr)(TCG_COND_LTU, TCGV_PTR_TO_NAT(i),
+                                    len_align, loop);
+        tcg_temp_free_ptr(i);
+    }
+
+    /* Predicate register loads can be any multiple of 2.
+     * Note that we still store the entire 64-bit unit into cpu_env.
+     */
+    if (len_remain) {
+        tcg_gen_addi_i64(addr, cpu_reg_sp(s, rn), imm + len_align);
+
+        switch (len_remain) {
+        case 2:
+        case 4:
+        case 8:
+            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LE | ctz32(len_remain));
+            break;
+
+        case 6:
+            t1 = tcg_temp_new_i64();
+            tcg_gen_qemu_ld_i64(t0, addr, midx, MO_LEUL);
+            tcg_gen_addi_i64(addr, addr, 4);
+            tcg_gen_qemu_ld_i64(t1, addr, midx, MO_LEUW);
+            tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
+            tcg_temp_free_i64(t1);
+            break;
+
+        default:
+            g_assert_not_reached();
+        }
+        tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
+    }
+    tcg_temp_free_i64(addr);
+    tcg_temp_free_i64(t0);
+}
+
+#undef ptr
+
+static void trans_LDR_zri(DisasContext *s, arg_rri *a, uint32_t insn)
+{
+    int size = vec_full_reg_size(s);
+    do_ldr(s, vec_full_reg_offset(s, a->rd), size, a->rn, a->imm * size);
+}
+
+static void trans_LDR_pri(DisasContext *s, arg_rri *a, uint32_t insn)
+{
+    int size = pred_full_reg_size(s);
+    do_ldr(s, pred_full_reg_offset(s, a->rd), size, a->rn, a->imm * size);
+}
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
index 2c13a6024a..0c6a7ba34d 100644
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -19,11 +19,17 @@ 
 # This file is processed by scripts/decodetree.py
 #
 
+###########################################################################
+# Named fields.  These are primarily for disjoint fields.
+
+%imm9_16_10	16:s6 10:3
+
 ###########################################################################
 # Named attribute sets.  These are used to make nice(er) names
 # when creating helpers common to those for the individual
 # instruction patterns.
 
+&rri		rd rn imm
 &rrr_esz	rd rn rm esz
 
 ###########################################################################
@@ -31,7 +37,13 @@ 
 # reduce the amount of duplication between instruction patterns.
 
 # Three operand with unused vector element size
-@rd_rn_rm_e0	........ ... rm:5  ... ...  rn:5 rd:5		&rrr_esz esz=0
+@rd_rn_rm_e0	........ ... rm:5 ... ... rn:5 rd:5		&rrr_esz esz=0
+
+# Basic Load/Store with 9-bit immediate offset
+@pd_rn_i9	........ ........ ...... rn:5 . rd:4	\
+		&rri imm=%imm9_16_10
+@rd_rn_i9	........ ........ ...... rn:5 rd:5	\
+		&rri imm=%imm9_16_10
 
 ###########################################################################
 # Instruction patterns.  Grouped according to the SVE encodingindex.xhtml.
@@ -43,3 +55,11 @@  AND_zzz		00000100 00 1 ..... 001 100 ..... .....		@rd_rn_rm_e0
 ORR_zzz		00000100 01 1 ..... 001 100 ..... .....		@rd_rn_rm_e0
 EOR_zzz		00000100 10 1 ..... 001 100 ..... .....		@rd_rn_rm_e0
 BIC_zzz		00000100 11 1 ..... 001 100 ..... .....		@rd_rn_rm_e0
+
+### SVE Memory - 32-bit Gather and Unsized Contiguous Group
+
+# SVE load predicate register
+LDR_pri		10000101 10 ...... 000 ... ..... 0 ....		@pd_rn_i9
+
+# SVE load vector register
+LDR_zri		10000101 10 ...... 010 ... ..... .....		@rd_rn_i9