@@ -137,6 +137,7 @@ typedef struct CPUIOTLBEntry {
     CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];             \
     CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                  \
     CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];               \
+    size_t tlb_flush_count;                                           \
     target_ulong tlb_flush_addr;                                      \
     target_ulong tlb_flush_mask;                                      \
     target_ulong vtlb_index;                                          \
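
This hunk moves the flush statistic into the per-vCPU CPU_COMMON_TLB block, so each vCPU owns its own counter instead of every vCPU bumping one shared global. Below is a standalone sketch of that per-CPU-counter pattern; the names (NR_CPUS, struct toy_cpu, note_flush, total_flushes) are illustrative, not QEMU's, and it uses a C11 fetch-add for simplicity where the patch itself gets away with something cheaper (see the note after the increment hunk further down).

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    #define NR_CPUS 4                  /* illustrative, fixed for the demo */

    struct toy_cpu {
        atomic_size_t tlb_flush_count; /* one counter per CPU */
    };

    static struct toy_cpu cpus[NR_CPUS];

    /* Writer side: each vCPU thread bumps only its own slot, so the
     * counters never bounce between caches the way a shared global would. */
    static void note_flush(struct toy_cpu *cpu)
    {
        atomic_fetch_add_explicit(&cpu->tlb_flush_count, 1,
                                  memory_order_relaxed);
    }

    /* Reader side: sum all slots; the total is a best-effort snapshot. */
    static size_t total_flushes(void)
    {
        size_t sum = 0;
        for (int i = 0; i < NR_CPUS; i++) {
            sum += atomic_load_explicit(&cpus[i].tlb_flush_count,
                                        memory_order_relaxed);
        }
        return sum;
    }

    int main(void)
    {
        note_flush(&cpus[0]);
        note_flush(&cpus[2]);
        printf("total TLB flushes: %zu\n", total_flushes());
        return 0;
    }
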
@@ -23,7 +23,6 @@
 /* cputlb.c */
 void tlb_protect_code(ram_addr_t ram_addr);
 void tlb_unprotect_code(ram_addr_t ram_addr);
-extern int tlb_flush_count;
-
+size_t tlb_flush_count(void);
 #endif
 #endif
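
The header change swaps the extern int for an accessor returning size_t, so readers always get an aggregated total at call time. A hypothetical caller might look like the sketch below; report_tlb_stats is an illustrative name, and inside QEMU you would include "exec/cputlb.h" rather than redeclaring the prototype by hand.

    #include <stdio.h>
    #include <stddef.h>

    size_t tlb_flush_count(void);        /* prototype from the hunk above */

    static void report_tlb_stats(void)   /* illustrative helper, not a QEMU API */
    {
        printf("TLB flush count %zu\n", tlb_flush_count());
    }
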
@@ -92,8 +92,18 @@ static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
     }
 }
 
-/* statistics */
-int tlb_flush_count;
+size_t tlb_flush_count(void)
+{
+    CPUState *cpu;
+    size_t count = 0;
+
+    CPU_FOREACH(cpu) {
+        CPUArchState *env = cpu->env_ptr;
+
+        count += atomic_read(&env->tlb_flush_count);
+    }
+    return count;
+}
 
 /* This is OK because CPU architectures generally permit an
  * implementation to drop entries from the TLB at any time, so
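
The new tlb_flush_count() walks every vCPU with CPU_FOREACH and sums the per-CPU counters. The result is a best-effort snapshot rather than a consistent one, since counters can keep advancing while the loop runs; that is acceptable for a statistics counter. Assuming QEMU's atomic_read() is a relaxed atomic load, each read in the loop corresponds roughly to this C11 form (read_counter is an illustrative name):

    #include <stdatomic.h>
    #include <stddef.h>

    /* Relaxed load: no ordering guarantees, but no torn reads either. */
    static size_t read_counter(_Atomic size_t *counter)
    {
        return atomic_load_explicit(counter, memory_order_relaxed);
    }
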
@@ -112,7 +122,8 @@ static void tlb_flush_nocheck(CPUState *cpu)
     }
 
     assert_cpu_is_self(cpu);
-    tlb_debug("(count: %d)\n", tlb_flush_count++);
+    atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
+    tlb_debug("(count: %zu)\n", tlb_flush_count());
 
     tb_lock();
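
Note why the increment uses atomic_set() rather than a plain env->tlb_flush_count++: the owning vCPU thread is the only writer, but other threads may read the counter concurrently through tlb_flush_count(), and an unannotated write racing with those reads would be a C11 data race. Because there is exactly one writer, no atomic read-modify-write is needed; a relaxed load plus relaxed store suffices. A minimal sketch of that single-writer bump, with flush_count standing in for env->tlb_flush_count:

    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic size_t flush_count;   /* stand-in for env->tlb_flush_count */

    /* Safe only because a single thread ever writes this counter;
     * concurrent readers use a relaxed atomic load. */
    static void single_writer_bump(void)
    {
        size_t cur = atomic_load_explicit(&flush_count, memory_order_relaxed);
        atomic_store_explicit(&flush_count, cur + 1, memory_order_relaxed);
    }
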
@@ -1936,7 +1936,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
                 atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
     cpu_fprintf(f, "TB invalidate count %d\n",
                 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
-    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
+    cpu_fprintf(f, "TLB flush count %zu\n", tlb_flush_count());
     tcg_dump_info(f, cpu_fprintf);
     tb_unlock();
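
The format-string change follows from the type change: tlb_flush_count() returns size_t, which must be printed with %zu. Keeping %d would pass a (typically 64-bit) size_t where a 32-bit int is expected, which is undefined behavior on LP64 hosts. A standalone illustration:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t count = 42;                       /* e.g. an aggregated total */
        printf("TLB flush count %zu\n", count);  /* %zu matches size_t */
        return 0;
    }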