[03/13] accel/tcg: Store some tlb flags in CPUTLBEntryFull

Message ID 20230223204342.1093632-4-richard.henderson@linaro.org
State Superseded
Series {tcg,aarch64}: Add TLB_CHECK_ALIGNED

Commit Message

Richard Henderson Feb. 23, 2023, 8:43 p.m. UTC
We have run out of bits we can use within the CPUTLBEntry comparators,
as TLB_FLAGS_MASK cannot overlap alignment.

Store slow_flags[] in CPUTLBEntryFull, and merge with the flags from
the comparator.  A new TLB_FORCE_SLOW bit is set within the comparator
as an indication that the slow path must be used.

Move TLB_BSWAP to TLB_SLOW_FLAGS_MASK.  Since we are out of bits,
we cannot create a new bit without moving an old one.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h  | 21 ++++++++--
 include/exec/cpu-defs.h |  6 +++
 accel/tcg/cputlb.c      | 93 ++++++++++++++++++++++++-----------------
 3 files changed, 77 insertions(+), 43 deletions(-)
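
As a rough illustration of the new scheme (simplified from the mmu_lookup1()
hunk in the patch below, not the verbatim code), a lookup now recovers the
full flag set by merging the comparator bits with the per-access slow flags:

/* Frequently-checked flags stay in the CPUTLBEntry comparator; rarer
 * flags live in CPUTLBEntryFull.slow_flags[] and are advertised by
 * TLB_FORCE_SLOW in the comparator.  Sketch only, not the literal code. */
target_ulong tlb_addr = tlb_read_idx(entry, access_type);
int flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);  /* fast flags */
flags |= full->slow_flags[access_type];                     /* e.g. TLB_BSWAP */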

Comments

Peter Maydell March 3, 2023, 4:45 p.m. UTC | #1
On Thu, 23 Feb 2023 at 20:46, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> We have run out of bits we can use within the CPUTLBEntry comparators,
> as TLB_FLAGS_MASK cannot overlap alignment.
>
> Store slow_flags[] in CPUTLBEntryFull, and merge with the flags from
> the comparator.  A new TLB_FORCE_SLOW bit is set within the comparator
> as an indication that the slow path must be used.
>
> Move TLB_BSWAP to TLB_SLOW_FLAGS_MASK.  Since we are out of bits,
> we cannot create a new bit without moving an old one.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---


> @@ -1249,36 +1265,27 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
>       * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
>       */
>      desc->fulltlb[index] = *full;
> -    desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
> -    desc->fulltlb[index].phys_addr = paddr_page;
> +    full = &desc->fulltlb[index];
> +    full->xlat_section = iotlb - vaddr_page;
> +    full->phys_addr = paddr_page;
>
>      /* Now calculate the new entry */
>      tn.addend = addend - vaddr_page;
> -    if (prot & PAGE_READ) {
> -        tn.addr_read = address;
> -        if (wp_flags & BP_MEM_READ) {
> -            tn.addr_read |= TLB_WATCHPOINT;
> -        }
> -    } else {
> -        tn.addr_read = -1;
> -    }
>
> -    if (prot & PAGE_EXEC) {
> -        tn.addr_code = address;
> -    } else {
> -        tn.addr_code = -1;
> -    }
> +    tlb_set_compare(full, &tn, vaddr_page, read_flags,
> +                    MMU_INST_FETCH, prot & PAGE_EXEC);
>
> -    tn.addr_write = -1;
> -    if (prot & PAGE_WRITE) {
> -        tn.addr_write = write_address;
> -        if (prot & PAGE_WRITE_INV) {
> -            tn.addr_write |= TLB_INVALID_MASK;
> -        }
> -        if (wp_flags & BP_MEM_WRITE) {
> -            tn.addr_write |= TLB_WATCHPOINT;
> -        }
> +    if (wp_flags & BP_MEM_READ) {
> +        read_flags |= TLB_WATCHPOINT;
>      }
> +    tlb_set_compare(full, &tn, vaddr_page, read_flags,
> +                    MMU_DATA_LOAD, prot & PAGE_READ);
> +
> +    if (wp_flags & BP_MEM_WRITE) {
> +        write_flags |= TLB_WATCHPOINT;
> +    }
> +    tlb_set_compare(full, &tn, vaddr_page, write_flags, MMU_DATA_STORE,
> +                    (prot & PAGE_WRITE) && !(prot & PAGE_WRITE_INV));

So in the old code, if PAGE_WRITE_INV is set then we set up the
addr_write field as normal, it just also has the TLB_INVALID_MASK bit
set. In the new code we won't do that; we'll set addr_write to -1.
I'm not fully familiar with the cputlb.c code, but doesn't this
break the code in probe_access_internal(), which assumes that
it can call tlb_fill() (which will come through here) and then
fish out the TLB entry, clear the TLB_INVALID_MASK bit and
use the TLB entry as a one-off?

thanks
-- PMM
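
For reference, the probe_access_internal() pattern Peter refers to looks
roughly like the following (a paraphrase of the cputlb.c probe path, not a
verbatim quote): the entry installed by tlb_fill() is re-read and used for
that one access, with TLB_INVALID_MASK ignored.

/* Paraphrase of the probe path being discussed, not verbatim: after
 * tlb_fill() installs the entry, it is fetched again and used once,
 * even though PAGE_WRITE_INV left TLB_INVALID_MASK in the comparator. */
if (!tlb_hit_page(tlb_addr, page_addr)) {
    if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
        tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr);
        /* tlb_fill() may have resized the table; re-fetch the entry.  */
        index = tlb_index(env, mmu_idx, addr);
        entry = tlb_entry(env, mmu_idx, addr);
    }
    tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
}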
Richard Henderson March 5, 2023, 6:20 p.m. UTC | #2
On 3/3/23 08:45, Peter Maydell wrote:
>> +
>> +    if (wp_flags & BP_MEM_WRITE) {
>> +        write_flags |= TLB_WATCHPOINT;
>> +    }
>> +    tlb_set_compare(full, &tn, vaddr_page, write_flags, MMU_DATA_STORE,
>> +                    (prot & PAGE_WRITE) && !(prot & PAGE_WRITE_INV));
> 
> So in the old code, if PAGE_WRITE_INV then we set up the
> addr_write field as normal, it just also has the TLB_INVALID_MASK bit
> set. In the new code we won't do that, we'll set addr_write to -1.

Gah.  I must have had some sort of rebase fumble, because I know I fixed this,
and the WRITE_INV test should be above, not in the predicate.


r~
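
Presumably the intended fix keeps the write comparator enabled for
PAGE_WRITE_INV pages and folds TLB_INVALID_MASK into write_flags instead,
along these lines (an assumption about the fix Richard describes, not the
committed code):

/* Assumed shape of the fix: mark the entry invalid via write_flags
 * rather than disabling it through the 'enable' predicate.  Sketch of
 * what "the WRITE_INV test should be above" suggests, not the
 * committed patch. */
if (prot & PAGE_WRITE_INV) {
    write_flags |= TLB_INVALID_MASK;
}
if (wp_flags & BP_MEM_WRITE) {
    write_flags |= TLB_WATCHPOINT;
}
tlb_set_compare(full, &tn, vaddr_page, write_flags,
                MMU_DATA_STORE, prot & PAGE_WRITE);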

Patch

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 2eb1176538..080cb3112e 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -380,17 +380,30 @@  CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
 /* Set if TLB entry contains a watchpoint.  */
 #define TLB_WATCHPOINT      (1 << (TARGET_PAGE_BITS_MIN - 4))
-/* Set if TLB entry requires byte swap.  */
-#define TLB_BSWAP           (1 << (TARGET_PAGE_BITS_MIN - 5))
+/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
+#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))
 /* Set if TLB entry writes ignored.  */
 #define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 6))
 
-/* Use this mask to check interception with an alignment mask
+/*
+ * Use this mask to check interception with an alignment mask
  * in a TCG backend.
  */
 #define TLB_FLAGS_MASK \
     (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
-    | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
+    | TLB_WATCHPOINT | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+
+/*
+ * Flags stored in CPUTLBEntryFull.slow_flags[x].
+ * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
+ */
+/* Set if TLB entry requires byte swap.  */
+#define TLB_BSWAP            (1 << 0)
+
+#define TLB_SLOW_FLAGS_MASK  TLB_BSWAP
+
+/* The two sets of flags must not overlap. */
+QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
 
 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 7ce3bcb06b..ef10c625d4 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -170,6 +170,12 @@  typedef struct CPUTLBEntryFull {
     /* @lg_page_size contains the log2 of the page size. */
     uint8_t lg_page_size;
 
+    /*
+     * Additional tlb flags for use by the slow path. If non-zero,
+     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+     */
+    uint8_t slow_flags[3];
+
     /*
      * Allow target-specific additions to this structure.
      * This may be used to cache items from the guest cpu
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 169adc0262..e9848b3ab6 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1106,6 +1106,24 @@  static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
     env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
 }
 
+static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
+                                   target_ulong address, int flags,
+                                   MMUAccessType access_type, bool enable)
+{
+    if (enable) {
+        address |= flags & TLB_FLAGS_MASK;
+        flags &= TLB_SLOW_FLAGS_MASK;
+        if (flags) {
+            address |= TLB_FORCE_SLOW;
+        }
+    } else {
+        address = -1;
+        flags = 0;
+    }
+    ent->addr_idx[access_type] = address;
+    full->slow_flags[access_type] = flags;
+}
+
 /*
  * Add a new TLB entry. At most one entry for a given virtual address
  * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
@@ -1121,9 +1139,7 @@  void tlb_set_page_full(CPUState *cpu, int mmu_idx,
     CPUTLB *tlb = env_tlb(env);
     CPUTLBDesc *desc = &tlb->d[mmu_idx];
     MemoryRegionSection *section;
-    unsigned int index;
-    target_ulong address;
-    target_ulong write_address;
+    unsigned int index, read_flags, write_flags;
     uintptr_t addend;
     CPUTLBEntry *te, tn;
     hwaddr iotlb, xlat, sz, paddr_page;
@@ -1152,13 +1168,13 @@  void tlb_set_page_full(CPUState *cpu, int mmu_idx,
               " prot=%x idx=%d\n",
               vaddr, full->phys_addr, prot, mmu_idx);
 
-    address = vaddr_page;
+    read_flags = 0;
     if (full->lg_page_size < TARGET_PAGE_BITS) {
         /* Repeat the MMU check and TLB fill on every access.  */
-        address |= TLB_INVALID_MASK;
+        read_flags |= TLB_INVALID_MASK;
     }
     if (full->attrs.byte_swap) {
-        address |= TLB_BSWAP;
+        read_flags |= TLB_BSWAP;
     }
 
     is_ram = memory_region_is_ram(section->mr);
@@ -1172,7 +1188,7 @@  void tlb_set_page_full(CPUState *cpu, int mmu_idx,
         addend = 0;
     }
 
-    write_address = address;
+    write_flags = read_flags;
     if (is_ram) {
         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
         /*
@@ -1181,9 +1197,9 @@  void tlb_set_page_full(CPUState *cpu, int mmu_idx,
          */
         if (prot & PAGE_WRITE) {
             if (section->readonly) {
-                write_address |= TLB_DISCARD_WRITE;
+                write_flags |= TLB_DISCARD_WRITE;
             } else if (cpu_physical_memory_is_clean(iotlb)) {
-                write_address |= TLB_NOTDIRTY;
+                write_flags |= TLB_NOTDIRTY;
             }
         }
     } else {
@@ -1194,9 +1210,9 @@  void tlb_set_page_full(CPUState *cpu, int mmu_idx,
          * Reads to romd devices go through the ram_ptr found above,
          * but of course reads to I/O must go through MMIO.
          */
-        write_address |= TLB_MMIO;
+        write_flags |= TLB_MMIO;
         if (!is_romd) {
-            address = write_address;
+            read_flags = write_flags;
         }
     }
 
@@ -1249,36 +1265,27 @@  void tlb_set_page_full(CPUState *cpu, int mmu_idx,
      * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
      */
     desc->fulltlb[index] = *full;
-    desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
-    desc->fulltlb[index].phys_addr = paddr_page;
+    full = &desc->fulltlb[index];
+    full->xlat_section = iotlb - vaddr_page;
+    full->phys_addr = paddr_page;
 
     /* Now calculate the new entry */
     tn.addend = addend - vaddr_page;
-    if (prot & PAGE_READ) {
-        tn.addr_read = address;
-        if (wp_flags & BP_MEM_READ) {
-            tn.addr_read |= TLB_WATCHPOINT;
-        }
-    } else {
-        tn.addr_read = -1;
-    }
 
-    if (prot & PAGE_EXEC) {
-        tn.addr_code = address;
-    } else {
-        tn.addr_code = -1;
-    }
+    tlb_set_compare(full, &tn, vaddr_page, read_flags,
+                    MMU_INST_FETCH, prot & PAGE_EXEC);
 
-    tn.addr_write = -1;
-    if (prot & PAGE_WRITE) {
-        tn.addr_write = write_address;
-        if (prot & PAGE_WRITE_INV) {
-            tn.addr_write |= TLB_INVALID_MASK;
-        }
-        if (wp_flags & BP_MEM_WRITE) {
-            tn.addr_write |= TLB_WATCHPOINT;
-        }
+    if (wp_flags & BP_MEM_READ) {
+        read_flags |= TLB_WATCHPOINT;
     }
+    tlb_set_compare(full, &tn, vaddr_page, read_flags,
+                    MMU_DATA_LOAD, prot & PAGE_READ);
+
+    if (wp_flags & BP_MEM_WRITE) {
+        write_flags |= TLB_WATCHPOINT;
+    }
+    tlb_set_compare(full, &tn, vaddr_page, write_flags, MMU_DATA_STORE,
+                    (prot & PAGE_WRITE) && !(prot & PAGE_WRITE_INV));
 
     copy_tlb_helper_locked(te, &tn);
     tlb_n_used_entries_inc(env, mmu_idx);
@@ -1508,7 +1515,8 @@  static int probe_access_internal(CPUArchState *env, target_ulong addr,
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
     target_ulong tlb_addr = tlb_read_idx(entry, access_type);
     target_ulong page_addr = addr & TARGET_PAGE_MASK;
-    int flags = TLB_FLAGS_MASK;
+    int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
+    CPUTLBEntryFull *full;
 
     if (!tlb_hit_page(tlb_addr, page_addr)) {
         if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
@@ -1537,7 +1545,8 @@  static int probe_access_internal(CPUArchState *env, target_ulong addr,
     }
     flags &= tlb_addr;
 
-    *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+    *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+    flags |= full->slow_flags[access_type];
 
     /* Fold all "mmio-like" bits into TLB_MMIO.  This is not RAM.  */
     if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1744,6 +1753,8 @@  static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
     CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
     target_ulong tlb_addr = tlb_read_idx(entry, access_type);
     bool maybe_resized = false;
+    CPUTLBEntryFull *full;
+    int flags;
 
     /* If the TLB entry is for a different page, reload and try again.  */
     if (!tlb_hit(tlb_addr, addr)) {
@@ -1757,8 +1768,12 @@  static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
         tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
     }
 
-    data->flags = tlb_addr & TLB_FLAGS_MASK;
-    data->full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+    full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+    flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
+    flags |= full->slow_flags[access_type];
+
+    data->full = full;
+    data->flags = flags;
     /* Compute haddr speculatively; depending on flags it might be invalid. */
     data->haddr = (void *)((uintptr_t)addr + entry->addend);