
[v7,25/42] target/arm: Implement helper_mte_check1

Message ID 20200603011317.473934-26-richard.henderson@linaro.org
State Superseded
Series [v7,01/42] target/arm: Add isar tests for mte

Commit Message

Richard Henderson June 3, 2020, 1:13 a.m. UTC
Fill out the stub that was added earlier.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 target/arm/internals.h  |  47 +++++++++++++++
 target/arm/mte_helper.c | 126 +++++++++++++++++++++++++++++++++++++++-
 2 files changed, 172 insertions(+), 1 deletion(-)

-- 
2.25.1

Comments

Peter Maydell June 18, 2020, 4:37 p.m. UTC | #1
On Wed, 3 Jun 2020 at 02:13, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Fill out the stub that was added earlier.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/arm/internals.h  |  47 +++++++++++++++
>  target/arm/mte_helper.c | 126 +++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 172 insertions(+), 1 deletion(-)
>
> diff --git a/target/arm/internals.h b/target/arm/internals.h
> index fb92ef6b84..8ae80f3945 100644
> --- a/target/arm/internals.h
> +++ b/target/arm/internals.h
> @@ -1318,6 +1318,9 @@ FIELD(MTEDESC, WRITE, 8, 1)
>  FIELD(MTEDESC, ESIZE, 9, 5)
>  FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */
>
> +bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
> +uint64_t mte_check1(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
> +
>  static inline int allocation_tag_from_addr(uint64_t ptr)
>  {
>      return extract64(ptr, 56, 4);
> @@ -1328,4 +1331,48 @@ static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
>      return deposit64(ptr, 56, 4, rtag);
>  }
>
> +/* Return true if tbi bits mean that the access is checked.  */
> +static inline bool tbi_check(uint32_t desc, int bit55)
> +{
> +    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
> +}
> +
> +/* Return true if tcma bits mean that the access is unchecked.  */
> +static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)

It's a bit confusing that one of these foo_check()s returns
"true for checked access" and the other one returns "true
for unchecked access"...

thanks
-- PMM
Richard Henderson June 18, 2020, 5:32 p.m. UTC | #2
On 6/18/20 9:37 AM, Peter Maydell wrote:
>> +/* Return true if tbi bits mean that the access is checked.  */
>> +static inline bool tbi_check(uint32_t desc, int bit55)
>> +{
>> +    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
>> +}
>> +
>> +/* Return true if tcma bits mean that the access is unchecked.  */
>> +static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
>
> It's a bit confusing that one of these foo_check()s returns
> "true for checked access" and the other one returns "true
> for unchecked access"...

Yes, but that's true of the hardware bits too.  I tried to reverse them but
then got confused.


r~
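
The two predicates are only ever used back to back, as in mte_check1()
and mte_probe1_int() below; here is a condensed restatement of that
flow, with the wrapper name invented purely for illustration (it is not
part of the patch):

/*
 * Illustration only: how the patch composes the two predicates.
 * tbi_check() == true means "tag bits are present, keep checking";
 * tcma_check() == true means "TCMA applies and the tag is 0b0000 or
 * 0b1111, so the access is unchecked".  The fold in tcma_check()
 * works because (0x0 + 0) & 0xf and (0xf + 1) & 0xf are the only
 * sums that hit zero, i.e. ptr<59:55> == 00000 or 11111.
 */
static bool mte_access_is_checked(uint32_t desc, uint64_t ptr)
{
    int bit55 = extract64(ptr, 55, 1);
    int ptr_tag = allocation_tag_from_addr(ptr);    /* ptr<59:56> */

    if (!tbi_check(desc, bit55)) {
        return false;   /* TBI disabled: top byte is address bits */
    }
    if (tcma_check(desc, bit55, ptr_tag)) {
        return false;   /* TCMA: this tag value is never checked */
    }
    return true;        /* go on to compare against the allocation tag */
}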
Peter Maydell June 19, 2020, 1:44 p.m. UTC | #3
On Wed, 3 Jun 2020 at 02:13, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Fill out the stub that was added earlier.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/arm/internals.h  |  47 +++++++++++++++
>  target/arm/mte_helper.c | 126 +++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 172 insertions(+), 1 deletion(-)

> +/*
> + * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
> + * for the tag to be present in the FAR_ELx register.  But for user-only
> + * mode, we do not have a TLB with which to implement this, so we must
> + * remote the top byte.

"remove"

> + */
> +static inline uint64_t useronly_clean_ptr(uint64_t ptr)
> +{
> +    /* TBI is known to be enabled. */
> +#ifdef CONFIG_USER_ONLY
> +    ptr = sextract64(ptr, 0, 56);
> +#endif
> +    return ptr;
> +}
> +

> +/* No-fault version of mte_check1, to be used by SVE for MemSingleNF. */
> +bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)

If this is a no-fault version, why do we need to pass in the ra?

> +{
> +    int bit55 = extract64(ptr, 55, 1);
> +
> +    /* If TBI is disabled, the access is unchecked. */
> +    if (unlikely(!tbi_check(desc, bit55))) {
> +        return true;
> +    }
> +
> +    return mte_probe1_int(env, desc, ptr, ra, bit55);
> +}

thanks
-- PMM
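
An aside on the sextract64() above: sign-extracting the low 56 bits,
rather than zero-extending, replicates bit 55 into the top byte, so
pointers in both halves of the address space come out canonical.  A
stand-alone sketch with made-up pointer values (the sextract64() body
is a local stand-in so the snippet compiles outside QEMU):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Local stand-in for QEMU's sextract64() from qemu/bitops.h. */
static int64_t sextract64(uint64_t value, int start, int length)
{
    return ((int64_t)(value << (64 - length - start))) >> (64 - length);
}

int main(void)
{
    uint64_t lo = 0x0700123456789abcULL;    /* tag 0x7, bit 55 clear */
    uint64_t hi = 0x07ff8765432100ffULL;    /* tag 0x7, bit 55 set   */

    /* Low half: top byte, tag included, cleared to 0x00. */
    printf("%016" PRIx64 "\n", (uint64_t)sextract64(lo, 0, 56));
    /* High half: top byte sign-filled to 0xff. */
    printf("%016" PRIx64 "\n", (uint64_t)sextract64(hi, 0, 56));
    return 0;
}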
Richard Henderson June 19, 2020, 5:07 p.m. UTC | #4
On 6/19/20 6:44 AM, Peter Maydell wrote:
>> +/* No-fault version of mte_check1, to be used by SVE for MemSingleNF. */
>> +bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
>
> If this is a no-fault version, why do we need to pass in the ra?

Excellent question.

At first blush this doesn't actually implement no-fault at all, since it
looks as if probe_access_flags would in fact fault.  Except that within
SVE we've already probed the page, and this is just to handle the MTE
check.

I think I'll remove the argument, pass 0 down to allocation_tag_mem,
assert that the page is valid, and document the process.

r~
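
A sketch of the revision described above (ra dropped, 0 passed down as
the unwind host pc); this is a guess at the follow-up based on the plan
stated here, not the committed v8:

/*
 * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
 * The caller is expected to have already probed the page, so the
 * tag-memory lookup cannot fault; pass 0 as the unwind host pc.
 */
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    int bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return true;
    }

    return mte_probe1_int(env, desc, ptr, 0, bit55);
}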

Patch

diff --git a/target/arm/internals.h b/target/arm/internals.h
index fb92ef6b84..8ae80f3945 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1318,6 +1318,9 @@  FIELD(MTEDESC, WRITE, 8, 1)
 FIELD(MTEDESC, ESIZE, 9, 5)
 FIELD(MTEDESC, TSIZE, 14, 10)  /* mte_checkN only */
 
+bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
+uint64_t mte_check1(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
+
 static inline int allocation_tag_from_addr(uint64_t ptr)
 {
     return extract64(ptr, 56, 4);
@@ -1328,4 +1331,48 @@  static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
     return deposit64(ptr, 56, 4, rtag);
 }
 
+/* Return true if tbi bits mean that the access is checked.  */
+static inline bool tbi_check(uint32_t desc, int bit55)
+{
+    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
+}
+
+/* Return true if tcma bits mean that the access is unchecked.  */
+static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
+{
+    /*
+     * We had extracted bit55 and ptr_tag for other reasons, so fold
+     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
+     */
+    bool match = ((ptr_tag + bit55) & 0xf) == 0;
+    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
+    return tcma && match;
+}
+
+/*
+ * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
+ * for the tag to be present in the FAR_ELx register.  But for user-only
+ * mode, we do not have a TLB with which to implement this, so we must
+ * remote the top byte.
+ */
+static inline uint64_t useronly_clean_ptr(uint64_t ptr)
+{
+    /* TBI is known to be enabled. */
+#ifdef CONFIG_USER_ONLY
+    ptr = sextract64(ptr, 0, 56);
+#endif
+    return ptr;
+}
+
+static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
+{
+#ifdef CONFIG_USER_ONLY
+    int64_t clean_ptr = sextract64(ptr, 0, 56);
+    if (tbi_check(desc, clean_ptr < 0)) {
+        ptr = clean_ptr;
+    }
+#endif
+    return ptr;
+}
+
 #endif
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index 907a12b366..72ff5543cf 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -359,12 +359,136 @@  void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
     }
 }
 
+/* Record a tag check failure.  */
+static void mte_check_fail(CPUARMState *env, int mmu_idx,
+                           uint64_t dirty_ptr, uintptr_t ra)
+{
+    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
+    int el, reg_el, tcf, select;
+    uint64_t sctlr;
+
+    reg_el = regime_el(env, arm_mmu_idx);
+    sctlr = env->cp15.sctlr_el[reg_el];
+
+    switch (arm_mmu_idx) {
+    case ARMMMUIdx_E10_0:
+    case ARMMMUIdx_E20_0:
+        el = 0;
+        tcf = extract64(sctlr, 38, 2);
+        break;
+    default:
+        el = reg_el;
+        tcf = extract64(sctlr, 40, 2);
+    }
+
+    switch (tcf) {
+    case 1:
+        /*
+         * Tag check fail causes a synchronous exception.
+         *
+         * In restore_state_to_opc, we set the exception syndrome
+         * for the load or store operation.  Unwind first so we
+         * may overwrite that with the syndrome for the tag check.
+         */
+        cpu_restore_state(env_cpu(env), ra, true);
+        env->exception.vaddress = dirty_ptr;
+        raise_exception(env, EXCP_DATA_ABORT,
+                        syn_data_abort_no_iss(el != 0, 0, 0, 0, 0, 0, 0x11),
+                        exception_target_el(env));
+        /* noreturn, but fall through to the assert anyway */
+
+    case 0:
+        /*
+         * Tag check fail does not affect the PE.
+         * We eliminate this case by not setting MTE_ACTIVE
+         * in tb_flags, so that we never make this runtime call.
+         */
+        g_assert_not_reached();
+
+    case 2:
+        /* Tag check fail causes asynchronous flag set.  */
+        mmu_idx = arm_mmu_idx_el(env, el);
+        if (regime_has_2_ranges(mmu_idx)) {
+            select = extract64(dirty_ptr, 55, 1);
+        } else {
+            select = 0;
+        }
+        env->cp15.tfsr_el[el] |= 1 << select;
+        break;
+
+    default:
+        /* Case 3: Reserved. */
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "Tag check failure with SCTLR_EL%d.TCF%s "
+                      "set to reserved value %d\n",
+                      reg_el, el ? "" : "0", tcf);
+        break;
+    }
+}
+
 /*
  * Perform an MTE checked access for a single logical or atomic access.
  */
+static bool mte_probe1_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
+                           uintptr_t ra, int bit55)
+{
+    int mem_tag, mmu_idx, ptr_tag, size;
+    MMUAccessType type;
+    uint8_t *mem;
+
+    ptr_tag = allocation_tag_from_addr(ptr);
+
+    if (tcma_check(desc, bit55, ptr_tag)) {
+        return true;
+    }
+
+    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
+    size = FIELD_EX32(desc, MTEDESC, ESIZE);
+
+    mem = allocation_tag_mem(env, mmu_idx, ptr, type, size,
+                             MMU_DATA_LOAD, 1, ra);
+    if (!mem) {
+        return true;
+    }
+
+    mem_tag = load_tag1(ptr, mem);
+    return ptr_tag == mem_tag;
+}
+
+/* No-fault version of mte_check1, to be used by SVE for MemSingleNF. */
+bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
+{
+    int bit55 = extract64(ptr, 55, 1);
+
+    /* If TBI is disabled, the access is unchecked. */
+    if (unlikely(!tbi_check(desc, bit55))) {
+        return true;
+    }
+
+    return mte_probe1_int(env, desc, ptr, ra, bit55);
+}
+
+uint64_t mte_check1(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
+{
+    int bit55 = extract64(ptr, 55, 1);
+
+    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
+    if (unlikely(!tbi_check(desc, bit55))) {
+        return ptr;
+    }
+
+    if (unlikely(!mte_probe1_int(env, desc, ptr, ra, bit55))) {
+        int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+        mte_check_fail(env, mmu_idx, ptr, ra);
+    }
+
+    return useronly_clean_ptr(ptr);
+}
+
 uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
 {
-    return ptr;
+    return mte_check1(env, desc, ptr, GETPC());
 }
 
 /*