@@ -128,6 +128,12 @@ static inline int vec_full_reg_size(DisasContext *s)
     return s->vl;
 }
 
+/* Return the byte size of the vector register, SVL / 8. */
+static inline int streaming_vec_reg_size(DisasContext *s)
+{
+    return s->svl;
+}
+
 /*
  * Return the offset into CPUARMState of the predicate vector register Pn.
  * Note for this purpose, FFR is P16.
@@ -143,6 +149,12 @@ static inline int pred_full_reg_size(DisasContext *s)
     return s->vl >> 3;
 }
 
+/* Return the byte size of the predicate register, SVL / 64. */
+static inline int streaming_pred_reg_size(DisasContext *s)
+{
+    return s->svl >> 3;
+}
+
 /*
  * Round up the size of a register to a size allowed by
  * the tcg vector infrastructure. Any operation which uses this
@@ -449,14 +449,17 @@ INDEX_ri 00000100 esz:2 1 imm:s5 010001 rn:5 rd:5
 # SVE index generation (register start, register increment)
 INDEX_rr 00000100 .. 1 ..... 010011 ..... ..... @rd_rn_rm
 
-### SVE Stack Allocation Group
+### SVE / Streaming SVE Stack Allocation Group
 
 # SVE stack frame adjustment
 ADDVL 00000100 001 ..... 01010 ...... ..... @rd_rn_i6
+ADDSVL 00000100 001 ..... 01011 ...... ..... @rd_rn_i6
 ADDPL 00000100 011 ..... 01010 ...... ..... @rd_rn_i6
+ADDSPL 00000100 011 ..... 01011 ...... ..... @rd_rn_i6
 
 # SVE stack frame size
 RDVL 00000100 101 11111 01010 imm:s6 rd:5
+RDSVL 00000100 101 11111 01011 imm:s6 rd:5
 
 ### SVE Bitwise Shift - Unpredicated Group
@@ -1286,6 +1286,19 @@ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
     return true;
 }
 
+static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a)
+{
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
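+    /*
+     * If the check fails, sme_enabled_check has already raised the
+     * exception, so we still return true: the insn itself was decoded.
+     */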
+    if (sme_enabled_check(s)) {
+        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
+        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
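+        /* Rd = Rn + imm * (SVL / 8); reg 31 is SP, hence cpu_reg_sp. */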
+        tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s));
+    }
+    return true;
+}
+
 static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
 {
     if (!dc_isar_feature(aa64_sve, s)) {
@@ -1299,6 +1312,19 @@ static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
     return true;
 }
 
+static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a)
+{
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (sme_enabled_check(s)) {
+        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
+        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
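+        /* Rd = Rn + imm * (SVL / 64), the predicate size in bytes. */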
+        tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s));
+    }
+    return true;
+}
+
 static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
 {
     if (!dc_isar_feature(aa64_sve, s)) {
@@ -1311,6 +1337,18 @@ static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
     return true;
 }
 
+static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a)
+{
+    if (!dc_isar_feature(aa64_sme, s)) {
+        return false;
+    }
+    if (sme_enabled_check(s)) {
+        TCGv_i64 reg = cpu_reg(s, a->rd);
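+        /* Rd = imm * (SVL / 8), the streaming vector length in bytes. */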
+        tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s));
+    }
+    return true;
+}
+
 /*
  *** SVE Compute Vector Address Group
  */