[PULL,05/24] target/arm: Use probe_access_full for BTI

Message ID 20221020122146.3177980-6-peter.maydell@linaro.org
State Accepted
Commit 937f2245596de9026ca8ae017ef47889523c4326
Series [PULL,01/24] hw/char/pl011: fix baud rate calculation

Commit Message

Peter Maydell Oct. 20, 2022, 12:21 p.m. UTC
From: Richard Henderson <richard.henderson@linaro.org>

Add a field to TARGET_PAGE_ENTRY_EXTRA to hold the guarded bit.
In is_guarded_page, use probe_access_full instead of just guessing
that the tlb entry is still present.  Also handles the FIXME about
executing from device memory.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221011031911.2408754-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu-param.h     |  9 +++++----
 target/arm/cpu.h           | 13 -------------
 target/arm/internals.h     |  1 +
 target/arm/ptw.c           |  7 ++++---
 target/arm/translate-a64.c | 21 ++++++++++-----------
 5 files changed, 20 insertions(+), 31 deletions(-)
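
For orientation, a minimal sketch of how the guarded bit now
round-trips through the TLB, condensed from the hunks below (not a
drop-in; see the full patch for context):

    /* ptw.c: stash the stage 1 GP bit into the TLB entry */
    if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
        result->f.guarded = guarded;
    }

    /* translate-a64.c: recover it via probe_access_full() */
    CPUTLBEntryFull *full;
    void *host;
    int flags = probe_access_full(env, addr, MMU_INST_FETCH, mmu_idx,
                                  false, &host, &full, 0);
    assert(!(flags & TLB_INVALID_MASK));
    return full->guarded;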

Comments

Peter Maydell April 6, 2023, 12:25 p.m. UTC | #1
On Thu, 20 Oct 2022 at 13:21, Peter Maydell <peter.maydell@linaro.org> wrote:
>
> From: Richard Henderson <richard.henderson@linaro.org>
>
> Add a field to TARGET_PAGE_ENTRY_EXTRA to hold the guarded bit.
> In is_guarded_page, use probe_access_full instead of just guessing
> that the tlb entry is still present.  Also handles the FIXME about
> executing from device memory.

Hi, Richard -- Coverity spotted a problem (CID 1507929) with
this addition of 'guarded' to the ARMCacheAttrs struct, and
then looking at the code I noticed another one...

> diff --git a/target/arm/internals.h b/target/arm/internals.h
> index 9566364dcae..c3c3920ded2 100644
> --- a/target/arm/internals.h
> +++ b/target/arm/internals.h
> @@ -1095,6 +1095,7 @@ typedef struct ARMCacheAttrs {
>      unsigned int attrs:8;
>      unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
>      bool is_s2_format:1;
> +    bool guarded:1;              /* guarded bit of the v8-64 PTE */
>  } ARMCacheAttrs;

The one Coverity spots is that combine_cacheattrs() was never
updated to do anything with 'guarded', so the struct value it
returns now has an uninitialized value for that field.
Since the GP bit only exists at stage 1, presumably this should be
   ret.guarded = s1.guarded;
? (If I'm not misreading the code, this is an actual bug,
because we'll use the field in the case where both s1 and s2
are enabled.)
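
Concretely, the suggested one-liner would sit alongside the other
ret.* assignments (a sketch only, untested; the exact placement in
combine_cacheattrs() may vary):

    /* In combine_cacheattrs(), before returning: the GP bit exists
     * only at stage 1, so propagate the stage 1 value unchanged.
     */
    ret.guarded = s1.guarded;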

The issue I noticed is that in ptw.c, when we set the
'guarded' field, we do it like this:

    if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
        result->f.guarded = extract64(attrs, 50, 1); /* GP */
    }

The GP bit only exists in stage 1 descriptors, but we don't check
that here, so we will set the 'guarded' bit in the result if the
guest (incorrectly) sets the RES0 bit 50 in a stage 2 descriptor.
I think we should move this check into the else clause of the
immediately following "if (regime_is_stage2(mmu_idx)) ...", as
sketched below.

(It looks like this one pre-dates this patch's switch to
using f.guarded.)
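
Roughly like this (a sketch against the current regime_is_stage2()
split in get_phys_addr_lpae(); the surrounding code is elided):

    if (regime_is_stage2(mmu_idx)) {
        /* ... existing stage 2 attribute handling; bit 50 is RES0
         * here, so never consult it ...
         */
    } else {
        /* GP is only defined in stage 1 descriptors. */
        if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
            result->f.guarded = extract64(attrs, 50, 1); /* GP */
        }
        /* ... existing stage 1 attribute handling ... */
    }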

thanks
-- PMM

Patch

diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index 38347b0d208..f4338fd10e4 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -36,12 +36,13 @@ 
  *
  * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
  * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
- * For shareability, as in the SH field of the VMSAv8-64 PTEs.
+ * For shareability and guarded, as in the SH and GP fields respectively
+ * of the VMSAv8-64 PTEs.
  */
 # define TARGET_PAGE_ENTRY_EXTRA  \
-     uint8_t pte_attrs;           \
-     uint8_t shareability;
-
+    uint8_t pte_attrs;            \
+    uint8_t shareability;         \
+    bool guarded;
 #endif
 
 #define NB_MMU_MODES 8
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 9a358c410be..9df7adbe81f 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3388,19 +3388,6 @@  static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
 /* Shared between translate-sve.c and sve_helper.c.  */
 extern const uint64_t pred_esz_masks[5];
 
-/* Helper for the macros below, validating the argument type. */
-static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
-{
-    return x;
-}
-
-/*
- * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
- * Using these should be a bit more self-documenting than using the
- * generic target bits directly.
- */
-#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
-
 /*
  * AArch64 usage of the PAGE_TARGET_* bits for linux-user.
  * Note that with the Linux kernel, PROT_MTE may not be cleared by mprotect
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 9566364dcae..c3c3920ded2 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1095,6 +1095,7 @@  typedef struct ARMCacheAttrs {
     unsigned int attrs:8;
     unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
     bool is_s2_format:1;
+    bool guarded:1;              /* guarded bit of the v8-64 PTE */
 } ARMCacheAttrs;
 
 /* Fields that are valid upon success. */
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 23f16f4ff7f..2d182d62e5a 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -1313,9 +1313,10 @@  static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
          */
         result->f.attrs.secure = false;
     }
-    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
-    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
-        arm_tlb_bti_gp(&result->f.attrs) = true;
+
+    /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB.  */
+    if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+        result->f.guarded = guarded;
     }
 
     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 5b67375f4ec..60ff753d817 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14601,22 +14601,21 @@  static bool is_guarded_page(CPUARMState *env, DisasContext *s)
 #ifdef CONFIG_USER_ONLY
     return page_get_flags(addr) & PAGE_BTI;
 #else
+    CPUTLBEntryFull *full;
+    void *host;
     int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
-    unsigned int index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    int flags;
 
     /*
      * We test this immediately after reading an insn, which means
-     * that any normal page must be in the TLB.  The only exception
-     * would be for executing from flash or device memory, which
-     * does not retain the TLB entry.
-     *
-     * FIXME: Assume false for those, for now.  We could use
-     * arm_cpu_get_phys_page_attrs_debug to re-read the page
-     * table entry even for that case.
+     * that the TLB entry must be present and valid, and thus this
+     * access will never raise an exception.
      */
-    return (tlb_hit(entry->addr_code, addr) &&
-            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].fulltlb[index].attrs));
+    flags = probe_access_full(env, addr, MMU_INST_FETCH, mmu_idx,
+                              false, &host, &full, 0);
+    assert(!(flags & TLB_INVALID_MASK));
+
+    return full->guarded;
 #endif
 }