diff mbox

[v6,11/19] cputlb: introduce tlb_flush_* async work.

Message ID 20161109145748.27282-12-alex.bennee@linaro.org
State New
Headers show

Commit Message

Alex Bennée Nov. 9, 2016, 2:57 p.m. UTC
From: KONRAD Frederic <fred.konrad@greensocs.com>


Some architectures allow flushing the TLB of other vCPUs. This is not a problem
when we have only one thread for all vCPUs, but it definitely needs to be done
as asynchronous work when we are truly multi-threaded.

We take the tb_lock() when doing this to avoid racing with other threads
which may be invalidating TBs at the same time. The alternative would
be to use proper atomic primitives to clear the TLB entries en masse.

This patch doesn't do anything to protect other cputlb functions from being
called in MTTCG mode and making cross-vCPU changes.
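
The essence of the change is that a flush aimed at another vCPU is queued to
run on that vCPU's own thread instead of touching its TLB directly. A
condensed sketch of the pattern, lifted almost verbatim from the cputlb.c
hunks below (the pending_tlb_flush de-duplication used by the full
tlb_flush() path is left out here):

  /* Worker added by this patch; runs on the target vCPU's own thread. */
  static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data);

  void tlb_flush_page(CPUState *cpu, target_ulong addr)
  {
      if (!qemu_cpu_is_self(cpu)) {
          /* Cross-vCPU request: defer the flush via the run-on-cpu
           * machinery rather than racing with the other thread. */
          async_run_on_cpu(cpu, tlb_flush_page_async_work,
                           RUN_ON_CPU_TARGET_PTR(addr));
      } else {
          /* Flushing our own TLB can still be done immediately. */
          tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
      }
  }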

Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>

[AJB: remove need for g_malloc on defer, make check fixes, tb_lock]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>


---
v6 (base patches)
  - don't use cmpxchg_bool (we drop it later anyway)
  - use RUN_ON_CPU macros instead of inlines
  - bug out of tlb_flush if !tcg_enabled() (MacOSX make check failure)
v5 (base patches)
  - take tb_lock() for memset
  - ensure tb_flush_page properly asyncs work for other vCPUs
  - use run_on_cpu_data
v4 (base patches)
  - brought forward from arm enabling series
  - restore pending_tlb_flush flag
v1
  - Remove tlb_flush_all; just do the check in tlb_flush.
  - remove the need for g_malloc
  - tlb_flush calls direct if !cpu->created

fixup! cputlb: introduce tlb_flush_* async work.
---
 cputlb.c                | 90 +++++++++++++++++++++++++++++++++++++++++--------
 include/exec/exec-all.h |  1 +
 include/qom/cpu.h       |  6 ++++
 3 files changed, 83 insertions(+), 14 deletions(-)

-- 
2.10.1

Comments

Richard Henderson Nov. 10, 2016, 4:48 p.m. UTC | #1
On 11/09/2016 03:57 PM, Alex Bennée wrote:
> +void tlb_flush_page_all(target_ulong addr)


It's a nit, but when I read this I think all pages, not all cpus.
Can we rename this tlb_flush_page_all_cpus?

Otherwise,

Reviewed-by: Richard Henderson <rth@twiddle.net>



r~
Alex Bennée Nov. 10, 2016, 5:34 p.m. UTC | #2
Richard Henderson <rth@twiddle.net> writes:

> On 11/09/2016 03:57 PM, Alex Bennée wrote:
>> +void tlb_flush_page_all(target_ulong addr)
>
> It's a nit, but when I read this I think all pages, not all cpus.
> Can we rename this tlb_flush_page_all_cpus?

So to properly support ARM TLB flush semantics I want to move some of
the looping in the helpers into cputlb.c, so I'm thinking we'll have:

tlb_flush_page_all_cpus
tlb_flush_by_mmuidx_all_cpus
tlb_flush_page_by_mmuidx_all_cpus

Which will have the initial parameters of at least

  CPUState *src, bool sync

Where src is the source vCPU of the flush request and sync will cause
the source vCPU to schedule its work as safe work and do a
cpu_loop_exit. This will allow the helpers to ensure TLB flushes are in
a known state after executing the helper.
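
Roughly, something of this shape (purely illustrative; the names, the use of
async_safe_run_on_cpu() for the sync case, and the exact ordering are all
assumptions here, not settled code):

  void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr, bool sync)
  {
      CPUState *cpu;

      /* Queue the flush on every other vCPU's own thread. */
      CPU_FOREACH(cpu) {
          if (cpu != src) {
              async_run_on_cpu(cpu, tlb_flush_page_async_work,
                               RUN_ON_CPU_TARGET_PTR(addr));
          }
      }

      if (sync) {
          /* Schedule the source's own flush as safe work and leave the
           * execution loop, so the TLB flushes are in a known state
           * before the source vCPU continues. */
          async_safe_run_on_cpu(src, tlb_flush_page_async_work,
                                RUN_ON_CPU_TARGET_PTR(addr));
          cpu_loop_exit(src);
      } else {
          tlb_flush_page_async_work(src, RUN_ON_CPU_TARGET_PTR(addr));
      }
  }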

In fact for ARM we'll be able to put off the reckoning until a DMB
instruction comes along and we can force synchronisation at that point,
but I'm assuming there must be other architectures with stricter
requirements.


> Otherwise,
>
> Reviewed-by: Richard Henderson <rth@twiddle.net>
>
> r~



--
Alex Bennée
Richard Henderson Nov. 10, 2016, 5:40 p.m. UTC | #3
On 11/10/2016 06:34 PM, Alex Bennée wrote:
> So to properly support ARM TLB flush semantics I want to move some of
> the looping in the helpers into cputlb.c, so I'm thinking we'll have:
>
> tlb_flush_page_all_cpus
> tlb_flush_by_mmuidx_all_cpus
> tlb_flush_page_by_mmuidx_all_cpus

Sounds good, thanks.

> In fact for ARM we'll be able to put off the reckoning until a DMB
> instruction comes along and we can force synchronisation at that point,
> but I'm assuming there must be other architectures with stricter
> requirements.


Yes, I can think of at least one arch for which the cross-cpu flush must finish 
before the source cpu continues.


r~
diff mbox

Patch

diff --git a/cputlb.c b/cputlb.c
index 30c7c37..d75bf8f 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -64,28 +64,29 @@ 
         }                                                         \
     } while (0)
 
+/* run_on_cpu_data.target_ptr should always be big enough for a
+ * target_ulong even on 32 bit builds */
+QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
+
 /* statistics */
 int tlb_flush_count;
 
-/* NOTE:
- * If flush_global is true (the usual case), flush all tlb entries.
- * If flush_global is false, flush (at least) all tlb entries not
- * marked global.
- *
- * Since QEMU doesn't currently implement a global/not-global flag
- * for tlb entries, at the moment tlb_flush() will also flush all
- * tlb entries in the flush_global == false case. This is OK because
- * CPU architectures generally permit an implementation to drop
- * entries from the TLB at any time, so flushing more entries than
- * required is only an efficiency issue, not a correctness issue.
- */
-void tlb_flush(CPUState *cpu, int flush_global)
+static void tlb_flush_nocheck(CPUState *cpu, int flush_global)
 {
     CPUArchState *env = cpu->env_ptr;
 
+    /* The QOM tests will trigger tlb_flushes without setting up TCG
+     * so we bug out here in that case.
+     */
+    if (!tcg_enabled()) {
+        return;
+    }
+
     assert_cpu_is_self(cpu);
     tlb_debug("(%d)\n", flush_global);
 
+    tb_lock();
+
     memset(env->tlb_table, -1, sizeof(env->tlb_table));
     memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
     memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
@@ -94,6 +95,39 @@  void tlb_flush(CPUState *cpu, int flush_global)
     env->tlb_flush_addr = -1;
     env->tlb_flush_mask = 0;
     tlb_flush_count++;
+
+    tb_unlock();
+
+    atomic_mb_set(&cpu->pending_tlb_flush, false);
+}
+
+static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
+{
+    tlb_flush_nocheck(cpu, data.host_int);
+}
+
+/* NOTE:
+ * If flush_global is true (the usual case), flush all tlb entries.
+ * If flush_global is false, flush (at least) all tlb entries not
+ * marked global.
+ *
+ * Since QEMU doesn't currently implement a global/not-global flag
+ * for tlb entries, at the moment tlb_flush() will also flush all
+ * tlb entries in the flush_global == false case. This is OK because
+ * CPU architectures generally permit an implementation to drop
+ * entries from the TLB at any time, so flushing more entries than
+ * required is only an efficiency issue, not a correctness issue.
+ */
+void tlb_flush(CPUState *cpu, int flush_global)
+{
+    if (cpu->created && !qemu_cpu_is_self(cpu)) {
+        if (atomic_cmpxchg(&cpu->pending_tlb_flush, false, true) == true) {
+            async_run_on_cpu(cpu, tlb_flush_global_async_work,
+                             RUN_ON_CPU_HOST_INT(flush_global));
+        }
+    } else {
+        tlb_flush_nocheck(cpu, flush_global);
+    }
 }
 
 static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
@@ -103,6 +137,8 @@  static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
     assert_cpu_is_self(cpu);
     tlb_debug("start\n");
 
+    tb_lock();
+
     for (;;) {
         int mmu_idx = va_arg(argp, int);
 
@@ -117,6 +153,8 @@  static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
     }
 
     memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+
+    tb_unlock();
 }
 
 void tlb_flush_by_mmuidx(CPUState *cpu, ...)
@@ -139,13 +177,15 @@  static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
     }
 }
 
-void tlb_flush_page(CPUState *cpu, target_ulong addr)
+static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
 {
     CPUArchState *env = cpu->env_ptr;
+    target_ulong addr = (target_ulong) data.target_ptr;
     int i;
     int mmu_idx;
 
     assert_cpu_is_self(cpu);
+
     tlb_debug("page :" TARGET_FMT_lx "\n", addr);
 
     /* Check if we need to flush due to large pages.  */
@@ -175,6 +215,18 @@  void tlb_flush_page(CPUState *cpu, target_ulong addr)
     tb_flush_jmp_cache(cpu, addr);
 }
 
+void tlb_flush_page(CPUState *cpu, target_ulong addr)
+{
+    tlb_debug("page :" TARGET_FMT_lx "\n", addr);
+
+    if (!qemu_cpu_is_self(cpu)) {
+        async_run_on_cpu(cpu, tlb_flush_page_async_work,
+                         RUN_ON_CPU_TARGET_PTR(addr));
+    } else {
+        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
+    }
+}
+
 void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
 {
     CPUArchState *env = cpu->env_ptr;
@@ -221,6 +273,16 @@  void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
     tb_flush_jmp_cache(cpu, addr);
 }
 
+void tlb_flush_page_all(target_ulong addr)
+{
+    CPUState *cpu;
+
+    CPU_FOREACH(cpu) {
+        async_run_on_cpu(cpu, tlb_flush_page_async_work,
+                         RUN_ON_CPU_TARGET_PTR(addr));
+    }
+}
+
 /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */
 void tlb_protect_code(ram_addr_t ram_addr)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 37781e0..e4f7839 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -160,6 +160,7 @@  void tlb_set_page(CPUState *cpu, target_ulong vaddr,
 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
 void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                  uintptr_t retaddr);
+void tlb_flush_page_all(target_ulong addr);
 #else
 static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
 {
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 1735374..880ba42 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -393,6 +393,12 @@  struct CPUState {
        (absolute value) offset as small as possible.  This reduces code
        size, especially for hosts without large memory offsets.  */
     uint32_t tcg_exit_req;
+
+    /* The pending_tlb_flush flag is set and cleared atomically to
+     * avoid potential races. The aim of the flag is to avoid
+     * unnecessary flushes.
+     */
+    bool pending_tlb_flush;
 };
 
 QTAILQ_HEAD(CPUTailQ, CPUState);