[v2,09/13] accel/tcg: Move @tb_jmp_cache from CPUState to TCG AccelCPUState

Message ID 20240429213050.55177-10-philmd@linaro.org
State Superseded
Series exec: Rework around CPUState user fields (part 2)

Commit Message

Philippe Mathieu-Daudé April 29, 2024, 9:30 p.m. UTC
@tb_jmp_cache is specific to the TCG accelerator, so move it to
its AccelCPUState.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20240428221450.26460-21-philmd@linaro.org>
---
 accel/tcg/tb-jmp-cache.h  | 4 ++--
 accel/tcg/vcpu-state.h    | 2 ++
 include/hw/core/cpu.h     | 2 --
 include/qemu/typedefs.h   | 1 -
 accel/tcg/cpu-exec.c      | 7 +++----
 accel/tcg/cputlb.c        | 2 +-
 accel/tcg/tb-maint.c      | 2 +-
 accel/tcg/translate-all.c | 5 +++--
 8 files changed, 12 insertions(+), 13 deletions(-)
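
Not part of the submitted patch, just a minimal standalone sketch of the access-pattern change the move implies: the jump cache stops being a heap-allocated pointer hanging off CPUState and becomes a field embedded in the TCG-only AccelCPUState, so callers switch from cpu->tb_jmp_cache to &cpu->accel->tb_jmp_cache. The types below are simplified stand-ins for the real definitions in accel/tcg/tb-jmp-cache.h and accel/tcg/vcpu-state.h, and get_jmp_cache() is a hypothetical helper used only for illustration.

/* Simplified stand-ins, not the real QEMU structures. */
#include <stdio.h>

#define TB_JMP_CACHE_SIZE 16            /* illustrative; the real size differs */

typedef struct TranslationBlock TranslationBlock;

typedef struct CPUJumpCache {
    struct {
        TranslationBlock *tb;
        unsigned long pc;
    } array[TB_JMP_CACHE_SIZE];
} CPUJumpCache;

/* After the patch, the cache is embedded in the TCG-only AccelCPUState. */
typedef struct AccelCPUState {
    CPUJumpCache tb_jmp_cache;
} AccelCPUState;

typedef struct CPUState {
    AccelCPUState *accel;               /* only set while the TCG accel is in use */
} CPUState;

/* Hypothetical helper: callers now reach the cache through the accel state. */
static CPUJumpCache *get_jmp_cache(CPUState *cpu)
{
    return cpu->accel ? &cpu->accel->tb_jmp_cache : NULL;
}

int main(void)
{
    AccelCPUState accel = { 0 };
    CPUState cpu = { .accel = &accel };
    CPUJumpCache *jc = get_jmp_cache(&cpu);

    printf("cache entries: %zu\n",
           sizeof(jc->array) / sizeof(jc->array[0]));
    return 0;
}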

Patch

diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
index 184bb3e3e2..c3a505e394 100644
--- a/accel/tcg/tb-jmp-cache.h
+++ b/accel/tcg/tb-jmp-cache.h
@@ -22,12 +22,12 @@ 
  * non-NULL value of 'tb'.  Strictly speaking pc is only needed for
  * CF_PCREL, but it's used always for simplicity.
  */
-struct CPUJumpCache {
+typedef struct CPUJumpCache {
     struct rcu_head rcu;
     struct {
         TranslationBlock *tb;
         vaddr pc;
     } array[TB_JMP_CACHE_SIZE];
-};
+} CPUJumpCache;
 
 #endif /* ACCEL_TCG_TB_JMP_CACHE_H */
diff --git a/accel/tcg/vcpu-state.h b/accel/tcg/vcpu-state.h
index 51e54ca535..0cb58ba734 100644
--- a/accel/tcg/vcpu-state.h
+++ b/accel/tcg/vcpu-state.h
@@ -7,6 +7,7 @@ 
 #define ACCEL_TCG_VCPU_STATE_H
 
 #include "hw/core/cpu.h"
+#include "tb-jmp-cache.h"
 
 /**
  * AccelCPUState: vCPU fields specific to TCG accelerator
@@ -16,6 +17,7 @@  struct AccelCPUState {
     uint32_t cflags_next_tb;
 
     sigjmp_buf jmp_env;
+    CPUJumpCache tb_jmp_cache;
 
 #ifdef CONFIG_USER_ONLY
     TaskState *ts;
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index f3cbb944eb..6e6e946b66 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -483,8 +483,6 @@  struct CPUState {
     AddressSpace *as;
     MemoryRegion *memory;
 
-    CPUJumpCache *tb_jmp_cache;
-
     GArray *gdb_regs;
     int gdb_num_regs;
     int gdb_num_g_regs;
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 36f2825725..daf9009332 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -44,7 +44,6 @@  typedef struct CPUAddressSpace CPUAddressSpace;
 typedef struct CPUArchState CPUArchState;
 typedef struct CPUPluginState CPUPluginState;
 typedef struct CpuInfoFast CpuInfoFast;
-typedef struct CPUJumpCache CPUJumpCache;
 typedef struct CPUState CPUState;
 typedef struct CPUTLBEntryFull CPUTLBEntryFull;
 typedef struct DeviceListener DeviceListener;
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 55235d3e5e..8f8e1fa948 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -261,7 +261,7 @@  static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
     tcg_debug_assert(!(cflags & CF_INVALID));
 
     hash = tb_jmp_cache_hash_func(pc);
-    jc = cpu->tb_jmp_cache;
+    jc = &cpu->accel->tb_jmp_cache;
 
     tb = qatomic_read(&jc->array[hash].tb);
     if (likely(tb &&
@@ -1004,7 +1004,7 @@  cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
                  * for the fast lookup
                  */
                 h = tb_jmp_cache_hash_func(pc);
-                jc = cpu->tb_jmp_cache;
+                jc = &cpu->accel->tb_jmp_cache;
                 jc->array[h].pc = pc;
                 qatomic_set(&jc->array[h].tb, tb);
             }
@@ -1083,7 +1083,6 @@  bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
         tcg_target_initialized = true;
     }
 
-    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
     tlb_init(cpu);
 #ifndef CONFIG_USER_ONLY
     tcg_iommu_init_notifier_list(cpu);
@@ -1101,5 +1100,5 @@  void tcg_exec_unrealizefn(CPUState *cpu)
 #endif /* !CONFIG_USER_ONLY */
 
     tlb_destroy(cpu);
-    g_free_rcu(cpu->tb_jmp_cache, rcu);
+    g_free_rcu(&cpu->accel->tb_jmp_cache, rcu);
 }
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index cdb3e12dfb..eaa60d1da2 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -156,7 +156,7 @@  static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
 
 static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
 {
-    CPUJumpCache *jc = cpu->tb_jmp_cache;
+    CPUJumpCache *jc = &cpu->accel->tb_jmp_cache;
     int i, i0;
 
     if (unlikely(!jc)) {
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 2d5faca9fd..83758648f2 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -888,7 +888,7 @@  static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
         uint32_t h = tb_jmp_cache_hash_func(tb->pc);
 
         CPU_FOREACH(cpu) {
-            CPUJumpCache *jc = cpu->tb_jmp_cache;
+            CPUJumpCache *jc = &cpu->accel->tb_jmp_cache;
 
             if (qatomic_read(&jc->array[h].tb) == tb) {
                 qatomic_set(&jc->array[h].tb, NULL);
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 3a8199a761..ca1e193633 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -652,13 +652,14 @@  void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
  */
 void tcg_flush_jmp_cache(CPUState *cpu)
 {
-    CPUJumpCache *jc = cpu->tb_jmp_cache;
+    CPUJumpCache *jc;
 
     /* During early initialization, the cache may not yet be allocated. */
-    if (unlikely(jc == NULL)) {
+    if (unlikely(cpu->accel == NULL)) {
         return;
     }
 
+    jc = &cpu->accel->tb_jmp_cache;
     for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
         qatomic_set(&jc->array[i].tb, NULL);
     }