[RFC,v4,15/28] tcg: rename tcg_current_cpu to tcg_current_rr_cpu

Message ID 1470929064-4092-16-git-send-email-alex.bennee@linaro.org

Commit Message

Alex Bennée Aug. 11, 2016, 3:24 p.m. UTC
..and make the definition local to cpus.c. In preparation for MTTCG,
the concept of a global tcg_current_cpu will no longer make sense.
However, we still need to keep track of it in the single-threaded case
to be able to exit quickly when required.

qemu_cpu_kick_no_halt() moves into cpus.c and becomes
qemu_cpu_kick_rr_cpu() to emphasise its use case. qemu_cpu_kick() now
kicks the relevant cpu directly as well as calling
qemu_cpu_kick_rr_cpu(), which will become a no-op in MTTCG.

For the time being the setting of the global exit_request remains.
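
The retry loop in the new qemu_cpu_kick_rr_cpu() (see the cpus.c hunk
below) closes a race: between reading tcg_current_rr_cpu and delivering
cpu_exit(), the round-robin thread may already have switched to the
next vCPU, so the kicker re-reads the pointer and retries until the
value it kicked is still the current one (or NULL). As a rough
standalone sketch of that pattern, using C11 atomics rather than
QEMU's atomic_mb_read/atomic_mb_set helpers, and with hypothetical
CPUStateModel, cpu_exit_model() and kick_rr_cpu_model() names:

/* Standalone model of the RR kick pattern; build with e.g.
 * "cc -std=c11 rr_kick.c". All *_model names are illustrative,
 * not QEMU APIs. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct CPUStateModel { int index; } CPUStateModel;

static _Atomic(CPUStateModel *) current_rr_cpu; /* ~ tcg_current_rr_cpu */
static atomic_bool exit_req;                    /* ~ exit_request */

static void cpu_exit_model(CPUStateModel *cpu)
{
    printf("kicked vCPU %d\n", cpu->index);
}

/* Kicker side: retry until the vCPU we kicked is still the published
 * one, so a concurrent switch to the next vCPU cannot lose the kick. */
static void kick_rr_cpu_model(void)
{
    CPUStateModel *cpu;

    atomic_store(&exit_req, true);
    do {
        cpu = atomic_load(&current_rr_cpu);
        if (cpu) {
            cpu_exit_model(cpu);
        }
    } while (cpu != atomic_load(&current_rr_cpu));
}

int main(void)
{
    CPUStateModel vcpu0 = { .index = 0 };

    /* RR thread side: publish the vCPU before running it... */
    atomic_store(&current_rr_cpu, &vcpu0);
    kick_rr_cpu_model();
    /* ...and clear the pointer once the scheduling loop is done. */
    atomic_store(&current_rr_cpu, NULL);
    return 0;
}

If the final compare fails, the RR thread switched vCPUs mid-kick and
the new target is kicked on the next pass; setting exit_request first
ensures the scheduling loop itself also stops.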

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>


---
v4:
  - keep global exit_request setting for now
  - fix merge conflicts
---
 cpu-exec-common.c       |  3 ++-
 cpu-exec.c              |  3 ---
 cpus.c                  | 41 ++++++++++++++++++++++++-----------------
 include/exec/exec-all.h |  1 -
 4 files changed, 26 insertions(+), 22 deletions(-)

-- 
2.7.4

Patch

diff --git a/cpu-exec-common.c b/cpu-exec-common.c
index 0bec55a..e220ba7 100644
--- a/cpu-exec-common.c
+++ b/cpu-exec-common.c
@@ -24,7 +24,6 @@ 
 #include "exec/memory-internal.h"
 
 bool exit_request;
-CPUState *tcg_current_cpu;
 int tcg_pending_threads;
 
 /* exit the current TB, but without causing any exception to be raised */
@@ -140,6 +139,7 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
     struct qemu_work_item wi;
     bool done = false;
 
+    /* Always true when using tcg RR scheduling from a vCPU context */
     if (qemu_cpu_is_self(cpu)) {
         func(cpu, data);
         return;
@@ -163,6 +163,7 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
 {
     struct qemu_work_item wi;
 
+    /* Always true when using tcg RR scheduling from a vCPU context */
     if (qemu_cpu_is_self(cpu)) {
         func(cpu, data);
         return;
diff --git a/cpu-exec.c b/cpu-exec.c
index 93a0eb1..42fbf7d 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -578,7 +578,6 @@ int cpu_exec(CPUState *cpu)
         return EXCP_HALTED;
     }
 
-    atomic_mb_set(&tcg_current_cpu, cpu);
     rcu_read_lock();
 
     if (unlikely(atomic_mb_read(&exit_request))) {
@@ -637,7 +636,5 @@ int cpu_exec(CPUState *cpu)
     /* fail safe : never use current_cpu outside cpu_exec() */
     current_cpu = NULL;
 
-    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
-    atomic_set(&tcg_current_cpu, NULL);
     return ret;
 }
diff --git a/cpus.c b/cpus.c
index 8c49d6c..732b21f 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1193,7 +1192,6 @@ static void deal_with_unplugged_cpus(void)
  * This is done explicitly rather than relying on side-effects
  * elsewhere.
  */
-static void qemu_cpu_kick_no_halt(void);
 
 #define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
 
@@ -1202,11 +1201,27 @@ static inline int64_t qemu_tcg_next_kick(void)
     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
 }
 
+/* only used in single-thread tcg mode */
+static CPUState *tcg_current_rr_cpu;
+
+/* Kick the currently round-robin scheduled vCPU */
+static void qemu_cpu_kick_rr_cpu(void)
+{
+    CPUState *cpu;
+    atomic_mb_set(&exit_request, 1);
+    do {
+        cpu = atomic_mb_read(&tcg_current_rr_cpu);
+        if (cpu) {
+            cpu_exit(cpu);
+        }
+    } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
+}
+
 static void kick_tcg_thread(void *opaque)
 {
     QEMUTimer *self = *(QEMUTimer **) opaque;
     timer_mod(self, qemu_tcg_next_kick());
-    qemu_cpu_kick_no_halt();
+    qemu_cpu_kick_rr_cpu();
 }
 
 static void *qemu_tcg_cpu_thread_fn(void *arg)
@@ -1257,6 +1272,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
         }
 
         for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
+            atomic_mb_set(&tcg_current_rr_cpu, cpu);
 
             qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                               (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
@@ -1278,6 +1294,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
             }
 
         } /* for cpu.. */
+        /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
+        atomic_set(&tcg_current_rr_cpu, NULL);
 
         /* Pairs with smp_wmb in qemu_cpu_kick.  */
         atomic_mb_set(&exit_request, 0);
@@ -1315,24 +1333,13 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
 #endif
 }
 
-static void qemu_cpu_kick_no_halt(void)
-{
-    CPUState *cpu;
-    /* Ensure whatever caused the exit has reached the CPU threads before
-     * writing exit_request.
-     */
-    atomic_mb_set(&exit_request, 1);
-    cpu = atomic_mb_read(&tcg_current_cpu);
-    if (cpu) {
-        cpu_exit(cpu);
-    }
-}
-
 void qemu_cpu_kick(CPUState *cpu)
 {
     qemu_cond_broadcast(cpu->halt_cond);
     if (tcg_enabled()) {
-        qemu_cpu_kick_no_halt();
+        cpu_exit(cpu);
+        /* Also ensure current RR cpu is kicked */
+        qemu_cpu_kick_rr_cpu();
     } else {
         qemu_cpu_kick_thread(cpu);
     }
@@ -1373,7 +1380,7 @@ void qemu_mutex_lock_iothread(void)
         atomic_dec(&iothread_requesting_mutex);
     } else {
         if (qemu_mutex_trylock(&qemu_global_mutex)) {
-            qemu_cpu_kick_no_halt();
+            qemu_cpu_kick_rr_cpu();
             qemu_mutex_lock(&qemu_global_mutex);
         }
         atomic_dec(&iothread_requesting_mutex);
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index d73b0e6..3547dfa 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -408,7 +408,6 @@ bool memory_region_is_unassigned(MemoryRegion *mr);
 extern int singlestep;
 
 /* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
-extern CPUState *tcg_current_cpu;
 extern int tcg_pending_threads;
 extern bool exit_request;