[02/19] cpu: convert queued work to a QSIMPLEQ

Message ID 20200522160755.886-3-robert.foley@linaro.org
State Superseded
Series: Add Thread Sanitizer support to QEMU

Commit Message

Robert Foley May 22, 2020, 4:07 p.m. UTC
From: "Emilio G. Cota" <cota@braap.org>

Instead of open-coding it.

While at it, make sure that all accesses to the list are
performed while holding the list's lock.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Robert Foley <robert.foley@linaro.org>

---
 cpus-common.c         | 25 ++++++++-----------------
 cpus.c                | 14 ++++++++++++--
 hw/core/cpu.c         |  1 +
 include/hw/core/cpu.h |  6 +++---
 4 files changed, 24 insertions(+), 22 deletions(-)

-- 
2.17.1
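
The conversion replaces the open-coded queued_work_first/queued_work_last
pointer pair with QEMU's intrusive QSIMPLEQ list macros from
include/qemu/queue.h, and every access to the list now happens with
work_mutex held. As a rough sketch of that pattern (not code from the
patch; the WorkQueue, work_item and work_queue_* names are invented for
illustration, and it assumes a QEMU source tree for qemu/queue.h and
qemu/thread.h), a locked producer/consumer queue built on the same macros
could look like this:

#include "qemu/osdep.h"
#include "qemu/queue.h"
#include "qemu/thread.h"

struct work_item {
    QSIMPLEQ_ENTRY(work_item) node;   /* embedded link, replaces ->next */
    void (*func)(void *opaque);
    void *opaque;
};

typedef struct {
    QemuMutex lock;
    QSIMPLEQ_HEAD(, work_item) list;  /* anonymous head type, as in CPUState */
} WorkQueue;

static void work_queue_init(WorkQueue *q)
{
    qemu_mutex_init(&q->lock);
    QSIMPLEQ_INIT(&q->list);
}

static void work_queue_push(WorkQueue *q, struct work_item *wi)
{
    qemu_mutex_lock(&q->lock);
    QSIMPLEQ_INSERT_TAIL(&q->list, wi, node);
    qemu_mutex_unlock(&q->lock);
}

static void work_queue_drain(WorkQueue *q)
{
    qemu_mutex_lock(&q->lock);
    while (!QSIMPLEQ_EMPTY(&q->list)) {
        struct work_item *wi = QSIMPLEQ_FIRST(&q->list);
        QSIMPLEQ_REMOVE_HEAD(&q->list, node);
        /* run the item with the lock dropped, as process_queued_cpu_work() does */
        qemu_mutex_unlock(&q->lock);
        wi->func(wi->opaque);
        qemu_mutex_lock(&q->lock);
    }
    qemu_mutex_unlock(&q->lock);
}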

Comments

Philippe Mathieu-Daudé May 24, 2020, 10:20 a.m. UTC | #1
On 5/22/20 6:07 PM, Robert Foley wrote:
> From: "Emilio G. Cota" <cota@braap.org>
>
> Instead of open-coding it.

Please use a full sentence (repeating the patch subject):

"Convert queued work to a QSIMPLEQ instead of open-coding it."

(Not all email clients display the subject preceding the content).

Otherwise:
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>

> [...]

Robert Foley May 26, 2020, 3:01 p.m. UTC | #2
On Sun, 24 May 2020 at 06:21, Philippe Mathieu-Daudé <philmd@redhat.com> wrote:
>
> On 5/22/20 6:07 PM, Robert Foley wrote:
> > From: "Emilio G. Cota" <cota@braap.org>
> >
> > Instead of open-coding it.
>
> Please use a full sentence (repeating the patch subject):
>
> "Convert queued work to a QSIMPLEQ instead of open-coding it."
>
> (Not all email clients display the subject preceding the content).

OK, I will update this commit message.

Thanks & Regards,
-Rob

> Otherwise:
> Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>

> [...]

Patch

diff --git a/cpus-common.c b/cpus-common.c
index 55d5df8923..210fc7fc39 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -97,7 +97,7 @@  void cpu_list_remove(CPUState *cpu)
 }
 
 struct qemu_work_item {
-    struct qemu_work_item *next;
+    QSIMPLEQ_ENTRY(qemu_work_item) node;
     run_on_cpu_func func;
     run_on_cpu_data data;
     bool free, exclusive, done;
@@ -106,13 +106,7 @@  struct qemu_work_item {
 static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
 {
     qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_work_first == NULL) {
-        cpu->queued_work_first = wi;
-    } else {
-        cpu->queued_work_last->next = wi;
-    }
-    cpu->queued_work_last = wi;
-    wi->next = NULL;
+    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
     wi->done = false;
     qemu_mutex_unlock(&cpu->work_mutex);
 
@@ -306,17 +300,14 @@  void process_queued_cpu_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
 
-    if (cpu->queued_work_first == NULL) {
+    qemu_mutex_lock(&cpu->work_mutex);
+    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
+        qemu_mutex_unlock(&cpu->work_mutex);
         return;
     }
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    while (cpu->queued_work_first != NULL) {
-        wi = cpu->queued_work_first;
-        cpu->queued_work_first = wi->next;
-        if (!cpu->queued_work_first) {
-            cpu->queued_work_last = NULL;
-        }
+    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
+        wi = QSIMPLEQ_FIRST(&cpu->work_list);
+        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
         qemu_mutex_unlock(&cpu->work_mutex);
         if (wi->exclusive) {
             /* Running work items outside the BQL avoids the following deadlock:
diff --git a/cpus.c b/cpus.c
index 5670c96bcf..af44027549 100644
--- a/cpus.c
+++ b/cpus.c
@@ -97,9 +97,19 @@  bool cpu_is_stopped(CPUState *cpu)
     return cpu->stopped || !runstate_is_running();
 }
 
+static inline bool cpu_work_list_empty(CPUState *cpu)
+{
+    bool ret;
+
+    qemu_mutex_lock(&cpu->work_mutex);
+    ret = QSIMPLEQ_EMPTY(&cpu->work_list);
+    qemu_mutex_unlock(&cpu->work_mutex);
+    return ret;
+}
+
 static bool cpu_thread_is_idle(CPUState *cpu)
 {
-    if (cpu->stop || cpu->queued_work_first) {
+    if (cpu->stop || !cpu_work_list_empty(cpu)) {
         return false;
     }
     if (cpu_is_stopped(cpu)) {
@@ -1498,7 +1508,7 @@  static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
             cpu = first_cpu;
         }
 
-        while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
+        while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
 
             atomic_mb_set(&tcg_current_rr_cpu, cpu);
             current_cpu = cpu;
diff --git a/hw/core/cpu.c b/hw/core/cpu.c
index 5284d384fb..77703d62b7 100644
--- a/hw/core/cpu.c
+++ b/hw/core/cpu.c
@@ -368,6 +368,7 @@  static void cpu_common_initfn(Object *obj)
     cpu->nr_threads = 1;
 
     qemu_mutex_init(&cpu->work_mutex);
+    QSIMPLEQ_INIT(&cpu->work_list);
     QTAILQ_INIT(&cpu->breakpoints);
     QTAILQ_INIT(&cpu->watchpoints);
 
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 07f7698155..d78ff1d165 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -331,8 +331,8 @@  struct qemu_work_item;
  * @opaque: User data.
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
- * @work_mutex: Lock to prevent multiple access to queued_work_*.
- * @queued_work_first: First asynchronous work pending.
+ * @work_mutex: Lock to prevent multiple access to @work_list.
+ * @work_list: List of pending asynchronous work.
  * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
  *                        to @trace_dstate).
  * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
@@ -376,7 +376,7 @@  struct CPUState {
     sigjmp_buf jmp_env;
 
     QemuMutex work_mutex;
-    struct qemu_work_item *queued_work_first, *queued_work_last;
+    QSIMPLEQ_HEAD(, qemu_work_item) work_list;
 
     CPUAddressSpace *cpu_ases;
     int num_ases;
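
The wider series adds Thread Sanitizer support, which is why the patch
makes the emptiness checks (cpu_work_list_empty() and the check at the top
of process_queued_cpu_work()) run with work_mutex held instead of reading
the old queued_work_first pointer without synchronization. The following
standalone sketch is not QEMU code (the item type, the globals and the
single producer thread are invented for illustration); it shows the kind of
unsynchronized read that ThreadSanitizer would typically report when built
with "cc -fsanitize=thread -O1 -g race.c -lpthread":

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t work_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct item { struct item *next; } *queued_work_first;

/* Old pattern: reads the list head without holding work_mutex. */
static bool work_pending_racy(void)
{
    return queued_work_first != NULL;   /* unsynchronized read, reported by TSan */
}

/* New pattern: the check is performed while holding the list's lock. */
static bool work_pending_locked(void)
{
    bool ret;

    pthread_mutex_lock(&work_mutex);
    ret = queued_work_first != NULL;
    pthread_mutex_unlock(&work_mutex);
    return ret;
}

static void *producer(void *arg)
{
    static struct item wi;

    (void)arg;
    pthread_mutex_lock(&work_mutex);
    wi.next = queued_work_first;
    queued_work_first = &wi;
    pthread_mutex_unlock(&work_mutex);
    return NULL;
}

int main(void)
{
    pthread_t thread;

    pthread_create(&thread, NULL, producer, NULL);
    (void)work_pending_racy();     /* races with the producer's locked write */
    (void)work_pending_locked();   /* properly synchronized */
    pthread_join(thread, NULL);
    return 0;
}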