[16/23] include/exec/tlb-common: Move CPUTLBEntryFull from hw/core/cpu.h

Message ID 20241009150855.804605-17-richard.henderson@linaro.org
State New
Series accel/tcg: Convert victim tlb to IntervalTree

Commit Message

Richard Henderson Oct. 9, 2024, 3:08 p.m. UTC
CPUTLBEntryFull structures are no longer directly included within
the CPUState structure.  Move the structure definition out of cpu.h
to reduce visibility.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/tlb-common.h | 63 +++++++++++++++++++++++++++++++++++++++
 include/hw/core/cpu.h     | 63 ---------------------------------------
 2 files changed, 63 insertions(+), 63 deletions(-)
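
This is the standard C visibility pattern: once no CPUTLBEntryFull is
embedded in CPUState, cpu.h needs only the type's name, not its layout.
A minimal sketch of the resulting split (the placement of the forward
typedef is an assumption, not part of this patch):

    /* Forward declaration, e.g. in qemu/typedefs.h (assumed): the name
     * alone suffices for pointer members and prototypes. */
    typedef struct CPUTLBEntryFull CPUTLBEntryFull;

    /* Code that only passes pointers around compiles against the
     * forward declaration alone... */
    void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                           CPUTLBEntryFull *full);

    /* ...while code that dereferences the struct must include the
     * narrower header that now owns the definition. */
    #include "exec/tlb-common.h"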

Comments

Pierrick Bouvier Oct. 10, 2024, 12:17 a.m. UTC | #1
On 10/9/24 08:08, Richard Henderson wrote:
> CPUTLBEntryFull structures are no longer directly included within
> the CPUState structure.  Move the structure definition out of cpu.h
> to reduce visibility.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> [...]

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>

Patch

diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
index dc5a5faa0b..300f9fae67 100644
--- a/include/exec/tlb-common.h
+++ b/include/exec/tlb-common.h
@@ -53,4 +53,67 @@ typedef struct CPUTLBDescFast {
     CPUTLBEntry *table;
 } CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
 
+/*
+ * The full TLB entry, which is not accessed by generated TCG code,
+ * so the layout is not as critical as that of CPUTLBEntry. This is
+ * also why we don't want to combine the two structs.
+ */
+struct CPUTLBEntryFull {
+    /*
+     * @xlat_section contains:
+     *  - in the lower TARGET_PAGE_BITS, a physical section number
+     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
+     *    must be added to the virtual address to obtain:
+     *     + the ram_addr_t of the target RAM (if the physical section
+     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
+     *     + the offset within the target MemoryRegion (otherwise)
+     */
+    hwaddr xlat_section;
+
+    /*
+     * @phys_addr contains the physical address in the address space
+     * given by cpu_asidx_from_attrs(cpu, @attrs).
+     */
+    hwaddr phys_addr;
+
+    /* @attrs contains the memory transaction attributes for the page. */
+    MemTxAttrs attrs;
+
+    /* @prot contains the complete protections for the page. */
+    uint8_t prot;
+
+    /* @lg_page_size contains the log2 of the page size. */
+    uint8_t lg_page_size;
+
+    /* Additional tlb flags requested by tlb_fill. */
+    uint8_t tlb_fill_flags;
+
+    /*
+     * Additional tlb flags for use by the slow path. If non-zero,
+     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+     */
+    uint8_t slow_flags[MMU_ACCESS_COUNT];
+
+    /*
+     * Allow target-specific additions to this structure.
+     * This may be used to cache items from the guest cpu
+     * page tables for later use by the implementation.
+     */
+    union {
+        /*
+         * Cache the attrs and shareability fields from the page table entry.
+         *
+         * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
+         * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
+         * For shareability and guarded, as in the SH and GP fields respectively
+         * of the VMSAv8-64 PTEs.
+         */
+        struct {
+            uint8_t pte_attrs;
+            uint8_t shareability;
+            bool guarded;
+        } arm;
+    } extra;
+};
+
 #endif /* EXEC_TLB_COMMON_H */
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 87b864f5c4..6b1c2bfadd 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -198,69 +198,6 @@ struct CPUClass {
  */
 #define NB_MMU_MODES 16
 
-/*
- * The full TLB entry, which is not accessed by generated TCG code,
- * so the layout is not as critical as that of CPUTLBEntry. This is
- * also why we don't want to combine the two structs.
- */
-struct CPUTLBEntryFull {
-    /*
-     * @xlat_section contains:
-     *  - in the lower TARGET_PAGE_BITS, a physical section number
-     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
-     *    must be added to the virtual address to obtain:
-     *     + the ram_addr_t of the target RAM (if the physical section
-     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
-     *     + the offset within the target MemoryRegion (otherwise)
-     */
-    hwaddr xlat_section;
-
-    /*
-     * @phys_addr contains the physical address in the address space
-     * given by cpu_asidx_from_attrs(cpu, @attrs).
-     */
-    hwaddr phys_addr;
-
-    /* @attrs contains the memory transaction attributes for the page. */
-    MemTxAttrs attrs;
-
-    /* @prot contains the complete protections for the page. */
-    uint8_t prot;
-
-    /* @lg_page_size contains the log2 of the page size. */
-    uint8_t lg_page_size;
-
-    /* Additional tlb flags requested by tlb_fill. */
-    uint8_t tlb_fill_flags;
-
-    /*
-     * Additional tlb flags for use by the slow path. If non-zero,
-     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
-     */
-    uint8_t slow_flags[MMU_ACCESS_COUNT];
-
-    /*
-     * Allow target-specific additions to this structure.
-     * This may be used to cache items from the guest cpu
-     * page tables for later use by the implementation.
-     */
-    union {
-        /*
-         * Cache the attrs and shareability fields from the page table entry.
-         *
-         * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
-         * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
-         * For shareability and guarded, as in the SH and GP fields respectively
-         * of the VMSAv8-64 PTEs.
-         */
-        struct {
-            uint8_t pte_attrs;
-            uint8_t shareability;
-            bool guarded;
-        } arm;
-    } extra;
-};
-
 /*
  * Data elements that are per MMU mode, minus the bits accessed by
  * the TCG fast path.
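
As a reading aid for the @xlat_section encoding described in the patch,
here is a hedged sketch of the two decodings the field comment spells
out (the helper names are hypothetical; TARGET_PAGE_MASK and the
PHYS_SECTION_* constants are QEMU internals):

    /* Hypothetical helpers, illustrating the field comment only. */
    static inline unsigned xlat_section_number(const CPUTLBEntryFull *full)
    {
        /* The physical section number lives in the low TARGET_PAGE_BITS. */
        return full->xlat_section & ~TARGET_PAGE_MASK;
    }

    static inline hwaddr xlat_section_addend(const CPUTLBEntryFull *full,
                                             vaddr addr)
    {
        /*
         * With the low bits masked off, the remainder is an offset that,
         * added to the virtual address, yields a ram_addr_t when the
         * section is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM, and an
         * offset within the target MemoryRegion otherwise.
         */
        return (full->xlat_section & TARGET_PAGE_MASK) + addr;
    }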
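
The slow_flags comment states an invariant rather than an algorithm:
whenever a non-zero slow flag is installed for an access type, the
fast-path comparator must carry TLB_FORCE_SLOW, or generated code would
never reach the slow path that consumes the flag. A hedged assertion
sketch (the entry/full pairing and the helper name are assumptions):

    /* Hypothetical consistency check for the documented invariant. */
    static inline void check_slow_flags(const CPUTLBEntry *entry,
                                        const CPUTLBEntryFull *full,
                                        MMUAccessType access_type)
    {
        if (full->slow_flags[access_type]) {
            /* tlb_read_idx() fetches the comparator for this access kind. */
            assert(tlb_read_idx(entry, access_type) & TLB_FORCE_SLOW);
        }
    }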
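
Finally, the extra union gives each target a place to cache bits
derived from the guest page tables. For the ARM member documented in
the patch, a hedged sketch of how a page-table walker might populate it
(the function shape and mair_attrs_for_pte() are hypothetical; the
field contents follow the comment):

    /* Cache VMSAv8-64 PTE bits: MAIR-format attrs (or S2 descriptor
     * bits [5:2] for ARMMMUIdx_Stage2*), plus the SH and GP fields. */
    static void fill_arm_extra(CPUTLBEntryFull *full, uint64_t pte,
                               bool is_stage2)
    {
        full->extra.arm.pte_attrs = is_stage2
            ? extract64(pte, 2, 4)         /* S2 descriptor bits [5:2] */
            : mair_attrs_for_pte(pte);     /* hypothetical MAIR lookup */
        full->extra.arm.shareability = extract64(pte, 8, 2);  /* SH [9:8] */
        full->extra.arm.guarded = extract64(pte, 50, 1);      /* GP, bit 50 */
    }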