[51/55] target/arm: Implement MVE VADC, VSBC

Message ID 20210607165821.9892-52-peter.maydell@linaro.org
State Superseded
Series target/arm: First slice of MVE implementation

Commit Message

Peter Maydell June 7, 2021, 4:58 p.m. UTC
Implement the MVE VADC and VSBC insns.  These perform an
add-with-carry or subtract-with-carry of the 32-bit elements in each
lane of the input vectors, where the carry-out of each add is the
carry-in of the next.  The initial carry input is either a fixed value
(0 for VADCI, 1 for VSBCI) or is taken from FPSCR.C; the carry out at
the end is written back to FPSCR.C.
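
As a minimal standalone sketch of these lane semantics (ignoring
predication and beat-wise execution; the names here are illustrative
only, not the patch's helpers):

#include <stdint.h>
#include <stdio.h>

/* Four 32-bit lanes; the carry-out of each lane feeds the next. */
static int model_vadc(uint32_t d[4], const uint32_t n[4],
                      const uint32_t m[4], int carry)
{
    for (int e = 0; e < 4; e++) {
        uint64_t r = (uint64_t)n[e] + m[e] + carry;
        d[e] = (uint32_t)r;
        carry = r >> 32;
    }
    return carry; /* the real insn writes this back to FPSCR.C */
}

int main(void)
{
    uint32_t n[4] = { 0xffffffff, 0, 0, 0 };
    uint32_t m[4] = { 1, 0, 0, 0 };
    uint32_t d[4];
    int c = model_vadc(d, n, m, 0);
    /* lane 0 overflows, so lane 1 receives a carry-in of 1 */
    printf("d = {%u %u %u %u} carry-out = %d\n",
           d[0], d[1], d[2], d[3], c);
    return 0;
}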

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

---
 target/arm/helper-mve.h    |  3 ++
 target/arm/mve.decode      |  6 ++++
 target/arm/mve_helper.c    | 30 +++++++++++++++++
 target/arm/translate-mve.c | 69 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 108 insertions(+)

-- 
2.20.1

Comments

Richard Henderson June 9, 2021, 9:06 p.m. UTC | #1
On 6/7/21 9:58 AM, Peter Maydell wrote:
> +#define DO_VADC(OP, INV)                                                \
> +    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,         \
> +                                    void *vn, void *vm, uint32_t nzcv)  \
> +    {                                                                   \
> +        uint32_t *d = vd, *n = vn, *m = vm;                             \
> +        uint16_t mask = mve_element_mask(env);                          \
> +        unsigned e;                                                     \
> +        int carry = (nzcv & FPCR_C) ? 1 : 0;                            \
> +        /* If we do no additions at all the flags are preserved */      \
> +        bool updates_flags = (mask & 0x1111) != 0;                      \
> +        for (e = 0; e < 16 / 4; e++, mask >>= 4) {                      \
> +            uint64_t r = (uint64_t)n[H4(e)] + INV(m[H4(e)]) + carry;    \
> +            if (mask & 1) {                                             \
> +                carry = r >> 32;                                        \
> +            }                                                           \
> +            uint64_t bytemask = mask_to_bytemask4(mask);                \
> +            d[H4(e)] &= ~bytemask;                                      \
> +            d[H4(e)] |= (r & bytemask);                                 \
> +        }                                                               \
> +        mve_advance_vpt(env);                                           \
> +        if (updates_flags) {                                            \
> +            nzcv = carry ? FPCR_C : 0;                                  \
> +        }                                                               \
> +        return nzcv;                                                    \
> +    }

...

> +    /*
> +     * This insn is subject to beat-wise execution.  Partial execution
> +     * of an I=1 (initial carry input fixed) insn which does not
> +     * execute the first beat must start with the current FPSCR.NZCV
> +     * value, not the fixed constant input.
> +     */
> +    if (a->i && !mve_skip_first_beat(s)) {
> +        /* Carry input is 0 (VADCI) or 1 (VSBCI), NZV zeroed */
> +        nzcv = tcg_const_i32(fixed_carry);
> +    } else {
> +        /* Carry input from existing NZCV flag values */
> +        nzcv = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
> +        tcg_gen_andi_i32(nzcv, nzcv, FPCR_NZCV_MASK);
> +    }
> +    qd = mve_qreg_ptr(a->qd);
> +    qn = mve_qreg_ptr(a->qn);
> +    qm = mve_qreg_ptr(a->qm);
> +    fn(nzcv, cpu_env, qd, qn, qm, nzcv);
> +    fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
> +    tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
> +    tcg_gen_or_i32(fpscr, fpscr, nzcv);
> +    store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);


Hmm.  It seems like you're having to work extra hard in tcg to extract and 
store nzcv.

How about four helper functions instead of two?  E.g.

static void do_vadc(CPUARMState *env, uint32_t *d,
                     uint32_t *n, uint32_t *m,
                     uint32_t inv, uint32_t carry_in,
                     bool update_flags)
{
     uint16_t mask = mve_element_mask(env);
     unsigned e;

     /* If any additions trigger, we will update flags. */
     if (mask & 0x1111) {
         update_flags = true;
     }

     for (e = 0; e < 16 / 4; e++, mask >>= 4) {
         uint32_t bmask = mask_to_bytemask4(mask);
         uint64_t r = carry_in;
         r += n[H4(e)];
         r += m[H4(e)] ^ inv;
         if (mask & 1) {
             carry_in = r >> 32;
         }
         d[H4(e)] = (d[H4(e)] & ~bmask) | ((uint32_t)r & bmask);
     }

     if (update_flags) {
         /* Store C, clear NZV. */
         env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
         env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
     }
     mve_advance_vpt(env);
}

void HELPER(mve_vadc)(CPUARMState *env, void *vd,
                       void *vn, void *vm)
{
     bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
     do_vadc(env, vd, vn, vm, 0, carry_in, false);
}

void HELPER(mve_vsbc)(CPUARMState *env, void *vd,
                       void *vn, void *vm)
{
     bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
     do_vadc(env, vd, vn, vm, -1, carry_in, false);
}

void HELPER(mve_vadci)(CPUARMState *env, void *vd,
                        void *vn, void *vm)
{
     do_vadc(env, vd, vn, vm, 0, 0, true);
}

void HELPER(mve_vsbci)(CPUARMState *env, void *vd,
                       void *vn, void *vm)
{
     do_vadc(env, vd, vn, vm, -1, 1, true);
}
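
With void-returning helpers like these, the nzcv handling in
translate-mve.c disappears entirely; a sketch of the reduced do_vadc
(fn/fn_i are placeholder parameters; everything else reuses functions
already in the patch, including the existing MVEGenTwoOpFn typedef):

static bool do_vadc(DisasContext *s, arg_vadc *a,
                    MVEGenTwoOpFn fn, MVEGenTwoOpFn fn_i)
{
    TCGv_ptr qd, qn, qm;

    /* ... same feature/register/ECI/VFP-access checks as before ... */

    /*
     * Only use the fixed-carry helper when the first beat really
     * executes; a partially-executed I=1 insn takes FPSCR.C instead.
     */
    if (a->i && !mve_skip_first_beat(s)) {
        fn = fn_i;
    }
    qd = mve_qreg_ptr(a->qd);
    qn = mve_qreg_ptr(a->qn);
    qm = mve_qreg_ptr(a->qm);
    fn(cpu_env, qd, qn, qm);
    tcg_temp_free_ptr(qd);
    tcg_temp_free_ptr(qn);
    tcg_temp_free_ptr(qm);
    mve_update_eci(s);
    return true;
}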


r~

Patch

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index cd2cc6252f8..686e5d9a39b 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -248,6 +248,9 @@  DEF_HELPER_FLAGS_4(mve_vrhaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vrhadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vrhadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_5(mve_vadc, TCG_CALL_NO_WG, i32, env, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(mve_vsbc, TCG_CALL_NO_WG, i32, env, ptr, ptr, ptr, i32)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 6b969902df0..6a4aae7a1fc 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -30,6 +30,7 @@ 
 &1op qd qm size
 &2op qd qm qn size
 &2scalar qd qn rm size
+&vadc qd qm qn i
 
 @vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
 # Note that both Rn and Qd are 3 bits only (no D bit)
@@ -42,6 +43,8 @@ 
 @2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
      size=%size_28
 
+@vadc .... .... .... .... ... i:1 .... .... .... &vadc qd=%qd qm=%qm qn=%qn
+
 # The _rev suffix indicates that Vn and Vm are reversed. This is
 # the case for shifts. In the Arm ARM these insns are documented
 # with the Vm and Vn fields in their usual places, but in the
@@ -160,6 +163,9 @@  VQDMULLT         111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28
 VRHADD_S         111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
 VRHADD_U         111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
 
+VADC             1110 1110 0 . 11 ... 0 ... . 1111 . 0 . 0 ... 0 @vadc
+VSBC             1111 1110 0 . 11 ... 0 ... . 1111 . 0 . 0 ... 0 @vadc
+
 # Vector miscellaneous
 
 VCLS             1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index c9434479604..e07f12c8389 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -580,6 +580,36 @@  DO_2OP_U(vrshlu, DO_VRSHLU)
 DO_2OP_S(vrhadds, DO_RHADD_S)
 DO_2OP_U(vrhaddu, DO_RHADD_U)
 
+#define DO_VADC(OP, INV)                                                \
+    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,         \
+                                    void *vn, void *vm, uint32_t nzcv)  \
+    {                                                                   \
+        uint32_t *d = vd, *n = vn, *m = vm;                             \
+        uint16_t mask = mve_element_mask(env);                          \
+        unsigned e;                                                     \
+        int carry = (nzcv & FPCR_C) ? 1 : 0;                            \
+        /* If we do no additions at all the flags are preserved */      \
+        bool updates_flags = (mask & 0x1111) != 0;                      \
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) {                      \
+            uint64_t r = (uint64_t)n[H4(e)] + INV(m[H4(e)]) + carry;    \
+            if (mask & 1) {                                             \
+                carry = r >> 32;                                        \
+            }                                                           \
+            uint64_t bytemask = mask_to_bytemask4(mask);                \
+            d[H4(e)] &= ~bytemask;                                      \
+            d[H4(e)] |= (r & bytemask);                                 \
+        }                                                               \
+        mve_advance_vpt(env);                                           \
+        if (updates_flags) {                                            \
+            nzcv = carry ? FPCR_C : 0;                                  \
+        }                                                               \
+        return nzcv;                                                    \
+    }
+
+/* VSBC differs only in inverting op2 before the addition */
+DO_VADC(vadc, )
+DO_VADC(vsbc, DO_NOT)
+
 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
 {
     if (val > max) {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 9a88583385f..2ed499a6de2 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -33,6 +33,7 @@  typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
 typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64);
+typedef void MVEGenADCFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
 
 /* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
 static inline long mve_qreg_offset(unsigned reg)
@@ -737,3 +738,71 @@  static bool trans_VPST(DisasContext *s, arg_VPST *a)
     }
     return true;
 }
+
+static bool do_vadc(DisasContext *s, arg_vadc *a, MVEGenADCFn fn,
+                    uint32_t fixed_carry)
+{
+    /*
+     * VADC and VSBC: these perform an add-with-carry or subtract-with-carry
+     * of the 32-bit elements in each lane of the input vectors, where the
+     * carry-out of each add is the carry-in of the next.  The initial carry
+     * input is either fixed (for the I variant: 0 for VADCI, 1 for VSBCI,
+     * passed in as fixed_carry) or is from FPSCR.C; the carry out at the
+     * end is written back to FPSCR.C.
+     */
+
+    TCGv_ptr qd, qn, qm;
+    TCGv_i32 nzcv, fpscr;
+
+    if (!dc_isar_feature(aa32_mve, s)) {
+        return false;
+    }
+    if (a->qd > 7 || a->qn > 7 || a->qm > 7 || !fn) {
+        return false;
+    }
+    if (!mve_eci_check(s)) {
+        return true;
+    }
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    /*
+     * This insn is subject to beat-wise execution.  Partial execution
+     * of an I=1 (initial carry input fixed) insn which does not
+     * execute the first beat must start with the current FPSCR.NZCV
+     * value, not the fixed constant input.
+     */
+    if (a->i && !mve_skip_first_beat(s)) {
+        /* Carry input is 0 (VADCI) or 1 (VSBCI), NZV zeroed */
+        nzcv = tcg_const_i32(fixed_carry);
+    } else {
+        /* Carry input from existing NZCV flag values */
+        nzcv = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+        tcg_gen_andi_i32(nzcv, nzcv, FPCR_NZCV_MASK);
+    }
+    qd = mve_qreg_ptr(a->qd);
+    qn = mve_qreg_ptr(a->qn);
+    qm = mve_qreg_ptr(a->qm);
+    fn(nzcv, cpu_env, qd, qn, qm, nzcv);
+    fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+    tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
+    tcg_gen_or_i32(fpscr, fpscr, nzcv);
+    store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
+    tcg_temp_free_i32(nzcv);
+    tcg_temp_free_ptr(qd);
+    tcg_temp_free_ptr(qn);
+    tcg_temp_free_ptr(qm);
+    mve_update_eci(s);
+    return true;
+}
+
+static bool trans_VADC(DisasContext *s, arg_vadc *a)
+{
+    return do_vadc(s, a, gen_helper_mve_vadc, 0);
+}
+
+static bool trans_VSBC(DisasContext *s, arg_vadc *a)
+{
+    return do_vadc(s, a, gen_helper_mve_vsbc, FPCR_C);
+}