[v2,11/19] util/vfio-helpers: Let qemu_vfio_dma_map() propagate Error

Message ID 20201026105504.4023620-12-philmd@redhat.com
State New
Series util/vfio-helpers: Allow using multiple MSIX IRQs

Commit Message

Philippe Mathieu-Daudé Oct. 26, 2020, 10:54 a.m. UTC
Currently qemu_vfio_dma_map() displays errors on stderr.
When using a management interface, this information is simply
lost. Pass qemu_vfio_dma_map() an Error* argument so it can
propagate the error to callers.

Reviewed-by: Fam Zheng <fam@euphon.net>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 include/qemu/vfio-helpers.h |  2 +-
 block/nvme.c                | 14 +++++++-------
 util/vfio-helpers.c         | 12 +++++++-----
 3 files changed, 15 insertions(+), 13 deletions(-)
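
For reference, the pattern this converts block/nvme.c to is "callee sets *errp,
caller prepends context". A minimal caller-side sketch, using names from the
patch below (illustrative only, not itself part of the patch):

    static void nvme_init_queue_sketch(BDRVNVMeState *s, NVMeQueue *q,
                                       size_t bytes, Error **errp)
    {
        /* The callee now fills *errp with the precise failure reason... */
        if (qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp)) {
            /* ...so the caller only adds its own context on top. */
            error_prepend(errp, "Cannot map queue: ");
        }
    }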

Comments

Eric Auger Oct. 26, 2020, 7:58 p.m. UTC | #1
Hi Philippe,

On 10/26/20 11:54 AM, Philippe Mathieu-Daudé wrote:
> Currently qemu_vfio_dma_map() displays errors on stderr.
> When using a management interface, this information is simply
> lost. Pass qemu_vfio_dma_map() an Error* argument so it can

Error** or simply error handle
> propagate the error to callers.
> 
> Reviewed-by: Fam Zheng <fam@euphon.net>
> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
> ---
>  include/qemu/vfio-helpers.h |  2 +-
>  block/nvme.c                | 14 +++++++-------
>  util/vfio-helpers.c         | 12 +++++++-----
>  3 files changed, 15 insertions(+), 13 deletions(-)
> 
> diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h
> index 4491c8e1a6e..bde9495b254 100644
> --- a/include/qemu/vfio-helpers.h
> +++ b/include/qemu/vfio-helpers.h
> @@ -18,7 +18,7 @@ typedef struct QEMUVFIOState QEMUVFIOState;
>  QEMUVFIOState *qemu_vfio_open_pci(const char *device, Error **errp);
>  void qemu_vfio_close(QEMUVFIOState *s);
>  int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
> -                      bool temporary, uint64_t *iova_list);
> +                      bool temporary, uint64_t *iova_list, Error **errp);
>  int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
>  void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
>  void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
> diff --git a/block/nvme.c b/block/nvme.c
> index 3b6d3972ec2..6f1ebdf031f 100644
> --- a/block/nvme.c
> +++ b/block/nvme.c
> @@ -167,9 +167,9 @@ static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
>          return;
>      }
>      memset(q->queue, 0, bytes);
> -    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
> +    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
>      if (r) {
> -        error_setg(errp, "Cannot map queue");
> +        error_prepend(errp, "Cannot map queue: ");
>      }
>  }
>  
> @@ -223,7 +223,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
>      q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
>      r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
>                            s->page_size * NVME_NUM_REQS,
> -                          false, &prp_list_iova);
> +                          false, &prp_list_iova, errp);
>      if (r) {

You may add an associated error_prepend(errp, "") here too, to be consistent.
>          goto fail;
>      }
> @@ -514,9 +514,9 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
>          error_setg(errp, "Cannot allocate buffer for identify response");
>          goto out;
>      }
> -    r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
> +    r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova, errp);
>      if (r) {
> -        error_setg(errp, "Cannot map buffer for DMA");
> +        error_prepend(errp, "Cannot map buffer for DMA: ");
>          goto out;
>      }
>  
> @@ -1003,7 +1003,7 @@ try_map:
>          r = qemu_vfio_dma_map(s->vfio,
>                                qiov->iov[i].iov_base,
>                                qiov->iov[i].iov_len,
> -                              true, &iova);
> +                              true, &iova, NULL);
>          if (r == -ENOMEM && retry) {
>              retry = false;
>              trace_nvme_dma_flush_queue_wait(s);
> @@ -1450,7 +1450,7 @@ static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
>      int ret;
>      BDRVNVMeState *s = bs->opaque;
>  
> -    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
> +    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, NULL);
>      if (ret) {
>          /* FIXME: we may run out of IOVA addresses after repeated
>           * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
> diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
> index 73f7bfa7540..c03fe0b7156 100644
> --- a/util/vfio-helpers.c
> +++ b/util/vfio-helpers.c
> @@ -462,7 +462,7 @@ static void qemu_vfio_ram_block_added(RAMBlockNotifier *n,
>  {
>      QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
>      trace_qemu_vfio_ram_block_added(s, host, size);
> -    qemu_vfio_dma_map(s, host, size, false, NULL);
> +    qemu_vfio_dma_map(s, host, size, false, NULL, NULL);
>  }
>  
>  static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
> @@ -477,6 +477,7 @@ static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
>  
>  static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
>  {
> +    Error *local_err = NULL;
>      void *host_addr = qemu_ram_get_host_addr(rb);
>      ram_addr_t length = qemu_ram_get_used_length(rb);
>      int ret;
> @@ -485,10 +486,11 @@ static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
>      if (!host_addr) {
>          return 0;
>      }
> -    ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL);
> +    ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL, &local_err);
>      if (ret) {
> -        fprintf(stderr, "qemu_vfio_init_ramblock: failed %p %" PRId64 "\n",
> -                host_addr, (uint64_t)length);
> +        error_reportf_err(local_err,
> +                          "qemu_vfio_init_ramblock: failed %p %" PRId64 ":",
> +                          host_addr, (uint64_t)length);
>      }
>      return 0;
>  }
> @@ -724,7 +726,7 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
>   * mapping status within this area is not allowed).
>   */
>  int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
> -                      bool temporary, uint64_t *iova)
> +                      bool temporary, uint64_t *iova, Error **errp)
>  {

Where do you set errp? You just add prepend messages above. If I am not
wrong, errp must be non-NULL to call error_prepend().

Thanks

Eric
>      int ret = 0;
>      int index;
>
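
Eric's question above hinges on the Error API contract: error_prepend() only has
something to prepend to once the callee has actually set *errp, so
qemu_vfio_dma_map() itself must fill errp on its failure paths. A hypothetical
sketch of that callee-side contract (the hunks doing this inside the real
function are not shown in this excerpt):

    #include "qapi/error.h"    /* error_setg_errno(), error_prepend() */

    /* Hypothetical stand-in for the real VFIO mapping path. */
    static int do_map(void *host, size_t size);

    static int vfio_map_sketch(void *host, size_t size, Error **errp)
    {
        int ret = do_map(host, size);
        if (ret < 0) {
            /* Set *errp on failure so callers may error_prepend() on it. */
            error_setg_errno(errp, -ret, "Cannot map DMA region");
        }
        return ret;
    }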

Patch

diff --git a/include/qemu/vfio-helpers.h b/include/qemu/vfio-helpers.h
index 4491c8e1a6e..bde9495b254 100644
--- a/include/qemu/vfio-helpers.h
+++ b/include/qemu/vfio-helpers.h
@@ -18,7 +18,7 @@ typedef struct QEMUVFIOState QEMUVFIOState;
 QEMUVFIOState *qemu_vfio_open_pci(const char *device, Error **errp);
 void qemu_vfio_close(QEMUVFIOState *s);
 int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
-                      bool temporary, uint64_t *iova_list);
+                      bool temporary, uint64_t *iova_list, Error **errp);
 int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
 void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
 void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
diff --git a/block/nvme.c b/block/nvme.c
index 3b6d3972ec2..6f1ebdf031f 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -167,9 +167,9 @@ static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
         return;
     }
     memset(q->queue, 0, bytes);
-    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
+    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
     if (r) {
-        error_setg(errp, "Cannot map queue");
+        error_prepend(errp, "Cannot map queue: ");
     }
 }
 
@@ -223,7 +223,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
     q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
     r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
                           s->page_size * NVME_NUM_REQS,
-                          false, &prp_list_iova);
+                          false, &prp_list_iova, errp);
     if (r) {
         goto fail;
     }
@@ -514,9 +514,9 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
         error_setg(errp, "Cannot allocate buffer for identify response");
         goto out;
     }
-    r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
+    r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova, errp);
     if (r) {
-        error_setg(errp, "Cannot map buffer for DMA");
+        error_prepend(errp, "Cannot map buffer for DMA: ");
         goto out;
     }
 
@@ -1003,7 +1003,7 @@ try_map:
         r = qemu_vfio_dma_map(s->vfio,
                               qiov->iov[i].iov_base,
                               qiov->iov[i].iov_len,
-                              true, &iova);
+                              true, &iova, NULL);
         if (r == -ENOMEM && retry) {
             retry = false;
             trace_nvme_dma_flush_queue_wait(s);
@@ -1450,7 +1450,7 @@ static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
     int ret;
     BDRVNVMeState *s = bs->opaque;
 
-    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
+    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, NULL);
     if (ret) {
         /* FIXME: we may run out of IOVA addresses after repeated
          * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c
index 73f7bfa7540..c03fe0b7156 100644
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -462,7 +462,7 @@ static void qemu_vfio_ram_block_added(RAMBlockNotifier *n,
 {
     QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
     trace_qemu_vfio_ram_block_added(s, host, size);
-    qemu_vfio_dma_map(s, host, size, false, NULL);
+    qemu_vfio_dma_map(s, host, size, false, NULL, NULL);
 }
 
 static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
@@ -477,6 +477,7 @@ static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
 
 static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
 {
+    Error *local_err = NULL;
     void *host_addr = qemu_ram_get_host_addr(rb);
     ram_addr_t length = qemu_ram_get_used_length(rb);
     int ret;
@@ -485,10 +486,11 @@ static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
     if (!host_addr) {
         return 0;
     }
-    ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL);
+    ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL, &local_err);
     if (ret) {
-        fprintf(stderr, "qemu_vfio_init_ramblock: failed %p %" PRId64 "\n",
-                host_addr, (uint64_t)length);
+        error_reportf_err(local_err,
+                          "qemu_vfio_init_ramblock: failed %p %" PRId64 ":",
+                          host_addr, (uint64_t)length);
     }
     return 0;
 }
@@ -724,7 +726,7 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
  * mapping status within this area is not allowed).
  */
 int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
-                      bool temporary, uint64_t *iova)
+                      bool temporary, uint64_t *iova, Error **errp)
 {
     int ret = 0;
     int index;
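
As a usage illustration (hypothetical caller, not from the series): callers that
want the failure details pass a real errp and report or prepend them, while hot
paths such as nvme_register_buf() above deliberately pass NULL and discard them.

    /* Hypothetical caller sketch. */
    Error *local_err = NULL;
    uint64_t iova;

    if (qemu_vfio_dma_map(s, buf, len, false, &iova, &local_err)) {
        /* error_reportf_err() prints the prefix plus the error, then frees it. */
        error_reportf_err(local_err, "DMA mapping failed: ");
    }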