
[v5,14/33] tcg: add kick timer for single-threaded vCPU emulation

Message ID 20161027151030.20863-15-alex.bennee@linaro.org
State Superseded

Commit Message

Alex Bennée Oct. 27, 2016, 3:10 p.m. UTC
Currently we rely on the side effect of the main loop grabbing the
iothread_mutex to give any long-running basic block chains a kick to
ensure the next vCPU is scheduled. As this code is being refactored and
rationalised, we now do it explicitly here.
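
(For orientation, the mechanism is simply a self-re-arming QEMU_CLOCK_VIRTUAL
timer whose callback kicks whichever vCPU is currently executing. A minimal
sketch of that pattern follows; kick_current_vcpu() and install_kick_timer()
are placeholder names for illustration, not the helpers the patch below adds:

    #define KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

    static QEMUTimer *kick_timer;

    /* placeholder for the real per-vCPU kick helper */
    static void kick_current_vcpu(void);

    static void kick_cb(void *opaque)
    {
        /* re-arm first so another kick is always pending */
        timer_mod(kick_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + KICK_PERIOD);
        kick_current_vcpu();
    }

    static void install_kick_timer(void)
    {
        kick_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kick_cb, NULL);
        timer_mod(kick_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + KICK_PERIOD);
    }

The patch adds start/stop helpers around this so the timer is not left armed
while every vCPU is idle.)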

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>


---
v2
  - re-base fixes
  - get_ticks_per_sec() -> NANOSECONDS_PER_SEC
v3
  - add define for TCG_KICK_FREQ
  - fix checkpatch warning
v4
  - wrap next calc in inline qemu_tcg_next_kick() instead of macro
v5
  - move all kick code into own section
  - use global for timer
  - add helper functions to start/stop timer
  - stop timer when all cores paused
---
 cpus.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

-- 
2.10.1

Comments

Frederic Konrad Oct. 27, 2016, 3:30 p.m. UTC | #1
Hi Alex,

This is nice! Do we actually need to do qemu_cpu_kick_no_halt() in the
timer handler?

Thanks,
Fred

On 27/10/2016 at 17:10, Alex Bennée wrote:
> Currently we rely on the side effect of the main loop grabbing the
> iothread_mutex to give any long-running basic block chains a kick to
> ensure the next vCPU is scheduled. As this code is being refactored and
> rationalised, we now do it explicitly here.
>
> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>

Alex Bennée Oct. 27, 2016, 3:35 p.m. UTC | #2
KONRAD Frederic <fred.konrad@greensocs.com> writes:

> Hi Alex,
>
> This is nice! Do we actually need to do qemu_cpu_kick_no_halt() in the
> timer handler?

It becomes qemu_cpu_kick_rr_cpu() in later patches and joins the rest of
this code.
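
Roughly, that helper ends up looking like the sketch below. Here
tcg_current_rr_cpu is assumed to be a pointer the round-robin loop keeps
updated with the vCPU it is currently executing; the exact details may
differ in the final version of the series:

    static void qemu_cpu_kick_rr_cpu(void)
    {
        CPUState *cpu;
        do {
            cpu = atomic_mb_read(&tcg_current_rr_cpu);
            if (cpu) {
                cpu_exit(cpu);
            }
            /* retry if the scheduler moved on to another vCPU meanwhile */
        } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
    }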

> Thanks,
> Fred

--
Alex Bennée

Patch

diff --git a/cpus.c b/cpus.c
index aedec7c..ad4ab68 100644
--- a/cpus.c
+++ b/cpus.c
@@ -735,6 +735,52 @@  void configure_icount(QemuOpts *opts, Error **errp)
 }
 
 /***********************************************************/
+/* TCG vCPU kick timer
+ *
+ * The kick timer is responsible for moving single threaded vCPU
+ * emulation on to the next vCPU. If more than one vCPU is running, a
+ * timer event will force a cpu->exit so the next vCPU can get
+ * scheduled.
+ *
+ * The timer is removed if all vCPUs are idle and restarted again once
+ * idleness is complete.
+ */
+
+static QEMUTimer *tcg_kick_vcpu_timer;
+
+static void qemu_cpu_kick_no_halt(void);
+
+#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
+
+static inline int64_t qemu_tcg_next_kick(void)
+{
+    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
+}
+
+static void kick_tcg_thread(void *opaque)
+{
+    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
+    qemu_cpu_kick_no_halt();
+}
+
+static void start_tcg_kick_timer(void)
+{
+    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
+        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kick_tcg_thread, NULL);
+        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
+    }
+}
+
+static void stop_tcg_kick_timer(void)
+{
+    if (tcg_kick_vcpu_timer) {
+        timer_del(tcg_kick_vcpu_timer);
+        tcg_kick_vcpu_timer = NULL;
+    }
+}
+
+
+/***********************************************************/
 void hw_error(const char *fmt, ...)
 {
     va_list ap;
@@ -988,9 +1034,12 @@  static void qemu_wait_io_event_common(CPUState *cpu)
 static void qemu_tcg_wait_io_event(CPUState *cpu)
 {
     while (all_cpu_threads_idle()) {
+        stop_tcg_kick_timer();
         qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
     }
 
+    start_tcg_kick_timer();
+
     while (iothread_requesting_mutex) {
         qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
     }
@@ -1178,6 +1227,15 @@  static void deal_with_unplugged_cpus(void)
     }
 }
 
+/* Single-threaded TCG
+ *
+ * In the single-threaded case each vCPU is simulated in turn. If
+ * there is more than a single vCPU we create a simple timer to kick
+ * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
+ * This is done explicitly rather than relying on side-effects
+ * elsewhere.
+ */
+
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
@@ -1204,6 +1262,8 @@  static void *qemu_tcg_cpu_thread_fn(void *arg)
         }
     }
 
+    start_tcg_kick_timer();
+
     /* process any pending work */
     atomic_mb_set(&exit_request, 1);