[PULL,08/62] accel/tcg: Replace `TARGET_TB_PCREL` with `CF_PCREL`

Message ID 20230301025643.1227244-9-richard.henderson@linaro.org
Series [PULL,01/62] exec/helper-head: Include missing "fpu/softfloat-types.h" header

Commit Message

Richard Henderson March 1, 2023, 2:55 a.m. UTC
From: Anton Johansson <anjo@rev.ng>

Signed-off-by: Anton Johansson <anjo@rev.ng>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-Id: <20230227135202.9710-5-anjo@rev.ng>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/internal.h      | 10 ++++----
 accel/tcg/tb-jmp-cache.h  | 48 +++++++++++++++++++--------------------
 accel/tcg/cpu-exec.c      |  8 +++----
 accel/tcg/perf.c          |  2 +-
 accel/tcg/tb-maint.c      |  8 +++----
 accel/tcg/translate-all.c | 14 ++++++------
 6 files changed, 44 insertions(+), 46 deletions(-)
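
The change is mechanical: each compile-time "#if TARGET_TB_PCREL" guard becomes a
runtime test of the CF_PCREL bit in the TB's cflags, so "PC-relative" turns from a
per-build property into a per-translation-block property. A minimal standalone
sketch of that pattern follows; the type, helper name and bit value are
illustrative placeholders, not the QEMU definitions.

    /*
     * Illustrative sketch only: names and the bit value are placeholders,
     * not the QEMU definitions.  The old code asked "was this binary built
     * PC-relative?" at preprocessing time; the new code asks "was this TB
     * translated PC-relative?" at run time.
     */
    #include <stdint.h>

    #define CF_PCREL_EXAMPLE  (1u << 20)   /* placeholder for the real CF_PCREL bit */

    typedef struct ExampleTB {
        uint32_t cflags;    /* per-TB compile flags; CF_PCREL_EXAMPLE may be set */
        uint64_t pc;        /* only valid when CF_PCREL_EXAMPLE is clear */
    } ExampleTB;

    static inline uint64_t example_log_pc(uint64_t cpu_pc, const ExampleTB *tb)
    {
        if (tb->cflags & CF_PCREL_EXAMPLE) {
            /* PC-relative TB: no absolute pc is stored, use the CPU's pc. */
            return cpu_pc;
        } else {
            /* Absolute TB: the pc recorded at translation time is valid. */
            return tb->pc;
        }
    }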

Patch

diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index 130d7fd564..5daa4cf0d7 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -57,11 +57,11 @@  void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 /* Return the current PC from CPU, which may be cached in TB. */
 static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
 {
-#if TARGET_TB_PCREL
-    return cpu->cc->get_pc(cpu);
-#else
-    return tb_pc(tb);
-#endif
+    if (tb_cflags(tb) & CF_PCREL) {
+        return cpu->cc->get_pc(cpu);
+    } else {
+        return tb_pc(tb);
+    }
 }
 
 extern int64_t max_delay;
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
index b3f6e78835..083939b302 100644
--- a/accel/tcg/tb-jmp-cache.h
+++ b/accel/tcg/tb-jmp-cache.h
@@ -14,53 +14,51 @@ 
 
 /*
  * Accessed in parallel; all accesses to 'tb' must be atomic.
- * For TARGET_TB_PCREL, accesses to 'pc' must be protected by
- * a load_acquire/store_release to 'tb'.
+ * For CF_PCREL, accesses to 'pc' must be protected by a
+ * load_acquire/store_release to 'tb'.
  */
 struct CPUJumpCache {
     struct rcu_head rcu;
     struct {
         TranslationBlock *tb;
-#if TARGET_TB_PCREL
         target_ulong pc;
-#endif
     } array[TB_JMP_CACHE_SIZE];
 };
 
 static inline TranslationBlock *
-tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
+tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t cflags, uint32_t hash)
 {
-#if TARGET_TB_PCREL
-    /* Use acquire to ensure current load of pc from jc. */
-    return qatomic_load_acquire(&jc->array[hash].tb);
-#else
-    /* Use rcu_read to ensure current load of pc from *tb. */
-    return qatomic_rcu_read(&jc->array[hash].tb);
-#endif
+    if (cflags & CF_PCREL) {
+        /* Use acquire to ensure current load of pc from jc. */
+        return qatomic_load_acquire(&jc->array[hash].tb);
+    } else {
+        /* Use rcu_read to ensure current load of pc from *tb. */
+        return qatomic_rcu_read(&jc->array[hash].tb);
+    }
 }
 
 static inline target_ulong
 tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
 {
-#if TARGET_TB_PCREL
-    return jc->array[hash].pc;
-#else
-    return tb_pc(tb);
-#endif
+    if (tb_cflags(tb) & CF_PCREL) {
+        return jc->array[hash].pc;
+    } else {
+        return tb_pc(tb);
+    }
 }
 
 static inline void
 tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
                  TranslationBlock *tb, target_ulong pc)
 {
-#if TARGET_TB_PCREL
-    jc->array[hash].pc = pc;
-    /* Use store_release on tb to ensure pc is written first. */
-    qatomic_store_release(&jc->array[hash].tb, tb);
-#else
-    /* Use the pc value already stored in tb->pc. */
-    qatomic_set(&jc->array[hash].tb, tb);
-#endif
+    if (tb_cflags(tb) & CF_PCREL) {
+        jc->array[hash].pc = pc;
+        /* Use store_release on tb to ensure pc is written first. */
+        qatomic_store_release(&jc->array[hash].tb, tb);
+    } else {
+        /* Use the pc value already stored in tb->pc. */
+        qatomic_set(&jc->array[hash].tb, tb);
+    }
 }
 
 #endif /* ACCEL_TCG_TB_JMP_CACHE_H */
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index ef557e57aa..013856f103 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -183,7 +183,7 @@  static bool tb_lookup_cmp(const void *p, const void *d)
     const TranslationBlock *tb = p;
     const struct tb_desc *desc = d;
 
-    if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
+    if ((tb_cflags(tb) & CF_PCREL || tb_pc(tb) == desc->pc) &&
         tb_page_addr0(tb) == desc->page_addr0 &&
         tb->cs_base == desc->cs_base &&
         tb->flags == desc->flags &&
@@ -235,7 +235,7 @@  static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
         return NULL;
     }
     desc.page_addr0 = phys_pc;
-    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
+    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                      flags, cflags, *cpu->trace_dstate);
     return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
 }
@@ -254,7 +254,7 @@  static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
 
     hash = tb_jmp_cache_hash_func(pc);
     jc = cpu->tb_jmp_cache;
-    tb = tb_jmp_cache_get_tb(jc, hash);
+    tb = tb_jmp_cache_get_tb(jc, cflags, hash);
 
     if (likely(tb &&
                tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
@@ -457,7 +457,7 @@  cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
         if (cc->tcg_ops->synchronize_from_tb) {
             cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
         } else {
-            assert(!TARGET_TB_PCREL);
+            tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
             assert(cc->set_pc);
             cc->set_pc(cpu, tb_pc(last_tb));
         }
diff --git a/accel/tcg/perf.c b/accel/tcg/perf.c
index ae19f6e28f..65e35ea3b9 100644
--- a/accel/tcg/perf.c
+++ b/accel/tcg/perf.c
@@ -328,7 +328,7 @@  void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
     for (insn = 0; insn < tb->icount; insn++) {
         /* FIXME: This replicates the restore_state_to_opc() logic. */
         q[insn].address = tcg_ctx->gen_insn_data[insn][0];
-        if (TARGET_TB_PCREL) {
+        if (tb_cflags(tb) & CF_PCREL) {
             q[insn].address |= (guest_pc & TARGET_PAGE_MASK);
         } else {
 #if defined(TARGET_I386)
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index b3d6529ae2..2dbc2ce172 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -44,7 +44,7 @@  static bool tb_cmp(const void *ap, const void *bp)
     const TranslationBlock *a = ap;
     const TranslationBlock *b = bp;
 
-    return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
+    return ((tb_cflags(a) & CF_PCREL || tb_pc(a) == tb_pc(b)) &&
             a->cs_base == b->cs_base &&
             a->flags == b->flags &&
             (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
@@ -847,7 +847,7 @@  static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
 {
     CPUState *cpu;
 
-    if (TARGET_TB_PCREL) {
+    if (tb_cflags(tb) & CF_PCREL) {
         /* A TB may be at any virtual address */
         CPU_FOREACH(cpu) {
             tcg_flush_jmp_cache(cpu);
@@ -885,7 +885,7 @@  static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
 
     /* remove the TB from the hash list */
     phys_pc = tb_page_addr0(tb);
-    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
+    h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb_pc(tb)),
                      tb->flags, orig_cflags, tb->trace_vcpu_dstate);
     if (!qht_remove(&tb_ctx.htable, tb, h)) {
         return;
@@ -966,7 +966,7 @@  TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
     tb_record(tb, p, p2);
 
     /* add in the hash table */
-    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
+    h = tb_hash_func(phys_pc, (tb->cflags & CF_PCREL ? 0 : tb_pc(tb)),
                      tb->flags, tb->cflags, tb->trace_vcpu_dstate);
     qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
 
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 84f129337b..7969c1ee4c 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -134,7 +134,7 @@  static int encode_search(TranslationBlock *tb, uint8_t *block)
 
         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
             if (i == 0) {
-                prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0);
+                prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb_pc(tb) : 0);
             } else {
                 prev = tcg_ctx->gen_insn_data[i - 1][j];
             }
@@ -169,7 +169,7 @@  static int cpu_unwind_data_from_tb(TranslationBlock *tb, uintptr_t host_pc,
     }
 
     memset(data, 0, sizeof(uint64_t) * TARGET_INSN_START_WORDS);
-    if (!TARGET_TB_PCREL) {
+    if (!(tb_cflags(tb) & CF_PCREL)) {
         data[0] = tb_pc(tb);
     }
 
@@ -340,9 +340,9 @@  TranslationBlock *tb_gen_code(CPUState *cpu,
 
     gen_code_buf = tcg_ctx->code_gen_ptr;
     tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
-#if !TARGET_TB_PCREL
-    tb->pc = pc;
-#endif
+    if (!(cflags & CF_PCREL)) {
+        tb->pc = pc;
+    }
     tb->cs_base = cs_base;
     tb->flags = flags;
     tb->cflags = cflags;
@@ -407,8 +407,8 @@  TranslationBlock *tb_gen_code(CPUState *cpu,
     tb->tc.size = gen_code_size;
 
     /*
-     * For TARGET_TB_PCREL, attribute all executions of the generated
-     * code to its first mapping.
+     * For CF_PCREL, attribute all executions of the generated code
+     * to its first mapping.
      */
     perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
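
The tb-jmp-cache.h hunk keeps the ordering rule stated in its comment: when
CF_PCREL is set, the writer publishes pc before tb with a release store, and the
reader's acquire load of tb guarantees that the matching pc is visible. A
standalone C11 sketch of that pairing, using placeholder names rather than the
QEMU helpers:

    #include <stdatomic.h>
    #include <stdint.h>

    struct example_jc_entry {
        _Atomic(void *) tb;    /* published last, with release semantics */
        uint64_t pc;           /* must be visible once tb is observed */
    };

    /* Writer side: the same ordering as tb_jmp_cache_set() for CF_PCREL. */
    static void example_set(struct example_jc_entry *e, void *tb, uint64_t pc)
    {
        e->pc = pc;                                               /* plain store */
        atomic_store_explicit(&e->tb, tb, memory_order_release);  /* orders pc before tb */
    }

    /* Reader side: the same ordering as tb_jmp_cache_get_tb()/_get_pc() for CF_PCREL. */
    static void *example_get(struct example_jc_entry *e, uint64_t *pc)
    {
        void *tb = atomic_load_explicit(&e->tb, memory_order_acquire);
        if (tb != NULL) {
            *pc = e->pc;    /* acquire above pairs with the release in example_set() */
        }
        return tb;
    }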