[RFC,v2,07/11] tcg: cpus rm tcg_exec_all()

Message ID 1459870344-16773-8-git-send-email-alex.bennee@linaro.org
State New

Commit Message

Alex Bennée April 5, 2016, 3:32 p.m. UTC
In preparation for multi-threaded TCG, we remove tcg_exec_all() and move
all the CPU cycling into the main thread function. When MTTCG is enabled
we shall use a separate thread function which only handles one vCPU.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>


---
v2
  - update timer calls to new API on rebase
---
 cpus.c | 63 +++++++++++++++++++++++++++++----------------------------------
 1 file changed, 29 insertions(+), 34 deletions(-)

-- 
2.7.4

Comments

Alex Bennée May 26, 2016, 1:10 p.m. UTC | #1
Sergey Fedorov <serge.fdrv@gmail.com> writes:

> On 05/04/16 18:32, Alex Bennée wrote:
>> diff --git a/cpus.c b/cpus.c
>> index e118fdf..46732a5 100644
>> --- a/cpus.c
>> +++ b/cpus.c
> (snip)
>> @@ -1109,7 +1108,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
>>  #endif
>>  }
>>
>> -static void tcg_exec_all(void);
>> +static int tcg_cpu_exec(CPUState *cpu);
>
> Why don't we just move tcg_cpu_exec() here and avoid this forward
> declaration? Such forward declarations of static functions are a bit
> annoying :)

Sounds like a plan.
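
For reference, the suggested fix is just the usual define-before-use
ordering for static functions; a minimal standalone sketch of the idea
(toy names, not the real cpus.c code):

    #include <stdio.h>

    /* Defining the helper before its caller means the compiler has
     * already seen the full definition, so no forward declaration
     * is needed. */
    static int tcg_cpu_exec_toy(int x)
    {
        return x * 2;
    }

    static void thread_fn_toy(void)
    {
        printf("%d\n", tcg_cpu_exec_toy(21));
    }

    int main(void)
    {
        thread_fn_toy();
        return 0;
    }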

>
>>
>>  static void *qemu_tcg_cpu_thread_fn(void *arg)
>>  {
>> @@ -1140,8 +1139,35 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>>      /* process any pending work */
>>      atomic_mb_set(&exit_request, 1);
>>
>> +    cpu = first_cpu;
>> +
>>      while (1) {
>> -        tcg_exec_all();
>> +        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
>> +        qemu_account_warp_timer();
>> +
>> +        if (!cpu) {
>> +            cpu = first_cpu;
>> +        }
>> +
>> +        for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
>
> Maybe a "while" cycle would be a bit neater here, like:
>
>     while (cpu != NULL && !exit_request) {
>         /* ... */
>         cpu = CPU_NEXT(cpu);
>     }

Yeah, I prefer the while to non-standard for loops.
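
Concretely, that would make the inner loop look something like the
following. This is a standalone sketch with a toy list standing in for
CPUState/CPU_NEXT() and a plain flag for exit_request; the names are
illustrative, not the real QEMU code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins for CPUState and the CPU_NEXT() iterator. */
    typedef struct ToyCPU {
        int index;
        struct ToyCPU *next;
    } ToyCPU;

    static bool exit_request;

    static void run_cpus(ToyCPU *first_cpu)
    {
        static ToyCPU *cpu;   /* resumes where the last pass stopped */

        if (!cpu) {
            cpu = first_cpu;
        }

        /* The "while" form: the iteration step sits at the bottom of
         * the body rather than in a non-standard for(;;) header. */
        while (cpu != NULL && !exit_request) {
            printf("executing cpu %d\n", cpu->index);
            cpu = cpu->next;   /* CPU_NEXT(cpu) in the real code */
        }
    }

    int main(void)
    {
        ToyCPU c1 = { 1, NULL };
        ToyCPU c0 = { 0, &c1 };

        run_cpus(&c0);
        return 0;
    }

As in the original for loop, a break leaves cpu pointing at the current
vCPU, so the next round-robin pass resumes from there.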

>
>> +
>> +            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
>> +                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
>> +
>> +            if (cpu_can_run(cpu)) {
>> +                int r = tcg_cpu_exec(cpu);
>> +                if (r == EXCP_DEBUG) {
>> +                    cpu_handle_guest_debug(cpu);
>> +                    break;
>> +                }
>> +            } else if (cpu->stop || cpu->stopped) {
>> +                break;
>> +            }
>> +
>> +        } /* for cpu.. */
>> +
>> +        /* Pairs with smp_wmb in qemu_cpu_kick.  */
>
> While at it, we could also fix this comment like this:
>
>     /* Pairs with atomic_mb_read() in cpu_exec(). */

Will do.
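
For context, atomic_mb_set() and atomic_mb_read() are QEMU's
sequentially-consistent store/load helpers, so the pairing the comment
should document is essentially the pattern below. This is a standalone
C11 sketch of that pattern (compile with -pthread), not QEMU's actual
implementation:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for exit_request, shared between the kicking thread
     * and the execution loop. */
    static atomic_int exit_request;

    /* Roughly the qemu_cpu_kick() side: a seq-cst store publishes
     * the request (what atomic_mb_set() provides). */
    static void *kicker(void *arg)
    {
        (void)arg;
        atomic_store(&exit_request, 1);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, kicker, NULL);

        /* Roughly the cpu_exec() side: a seq-cst load pairs with the
         * seq-cst store (what atomic_mb_read() provides). */
        while (!atomic_load(&exit_request)) {
            /* execute guest code ... */
        }
        printf("exit requested\n");

        pthread_join(t, NULL);
        return 0;
    }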

>
>> +        atomic_mb_set(&exit_request, 0);
>>
>>          if (use_icount) {
>>              int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
>
> Kind regards,
> Sergey

Thanks,

--
Alex Bennée

Patch

diff --git a/cpus.c b/cpus.c
index e118fdf..46732a5 100644
--- a/cpus.c
+++ b/cpus.c
@@ -67,7 +67,6 @@ 
 
 #endif /* CONFIG_LINUX */
 
-static CPUState *next_cpu;
 int64_t max_delay;
 int64_t max_advance;
 
@@ -1109,7 +1108,7 @@  static void *qemu_dummy_cpu_thread_fn(void *arg)
 #endif
 }
 
-static void tcg_exec_all(void);
+static int tcg_cpu_exec(CPUState *cpu);
 
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
@@ -1140,8 +1139,35 @@  static void *qemu_tcg_cpu_thread_fn(void *arg)
     /* process any pending work */
     atomic_mb_set(&exit_request, 1);
 
+    cpu = first_cpu;
+
     while (1) {
-        tcg_exec_all();
+        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
+        qemu_account_warp_timer();
+
+        if (!cpu) {
+            cpu = first_cpu;
+        }
+
+        for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
+
+            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
+                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
+
+            if (cpu_can_run(cpu)) {
+                int r = tcg_cpu_exec(cpu);
+                if (r == EXCP_DEBUG) {
+                    cpu_handle_guest_debug(cpu);
+                    break;
+                }
+            } else if (cpu->stop || cpu->stopped) {
+                break;
+            }
+
+        } /* for cpu.. */
+
+        /* Pairs with smp_wmb in qemu_cpu_kick.  */
+        atomic_mb_set(&exit_request, 0);
 
         if (use_icount) {
             int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
@@ -1500,37 +1526,6 @@  static int tcg_cpu_exec(CPUState *cpu)
     return ret;
 }
 
-static void tcg_exec_all(void)
-{
-    int r;
-
-    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
-    qemu_account_warp_timer();
-
-    if (next_cpu == NULL) {
-        next_cpu = first_cpu;
-    }
-    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
-        CPUState *cpu = next_cpu;
-
-        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
-                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
-
-        if (cpu_can_run(cpu)) {
-            r = tcg_cpu_exec(cpu);
-            if (r == EXCP_DEBUG) {
-                cpu_handle_guest_debug(cpu);
-                break;
-            }
-        } else if (cpu->stop || cpu->stopped) {
-            break;
-        }
-    }
-
-    /* Pairs with smp_wmb in qemu_cpu_kick.  */
-    atomic_mb_set(&exit_request, 0);
-}
-
 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
 {
     /* XXX: implement xxx_cpu_list for targets that still miss it */