--- a/include/qemu/vfio-helpers.h
+++ b/include/qemu/vfio-helpers.h
@@ -19,7 +19,7 @@ QEMUVFIOState *qemu_vfio_open_pci(const char *device, size_t *min_page_size,
Error **errp);
void qemu_vfio_close(QEMUVFIOState *s);
int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
- bool temporary, uint64_t *iova_list);
+ bool temporary, uint64_t *iova_list, Error **errp);
int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
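With the extra Error ** parameter, a caller that cares about the failure reason can now retrieve it instead of inventing its own message. A minimal caller sketch, given a QEMUVFIOState *s (the surrounding variables buf and size are illustrative, not part of the patch):

    int r;
    uint64_t iova;
    Error *local_err = NULL;

    r = qemu_vfio_dma_map(s, buf, size, false, &iova, &local_err);
    if (r) {
        /* Report and free the error collected from the helper. */
        error_reportf_err(local_err, "Cannot map buffer: ");
        return r;
    }
    /* On success, buf is now mapped for DMA at iova. */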
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -167,9 +167,9 @@ static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
return;
}
memset(q->queue, 0, bytes);
- r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
+ r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
if (r) {
- error_setg(errp, "Cannot map queue");
+ error_prepend(errp, "Cannot map queue: ");
}
}
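Because qemu_vfio_dma_map() now sets *errp itself, the caller no longer builds its own message with error_setg(), which would discard the root cause; it adds context with error_prepend() instead, yielding e.g. "Cannot map queue: <reason reported by qemu_vfio_dma_map>". A sketch of the convention (the function and parameter names are illustrative):

    static void init_one_queue(QEMUVFIOState *vfio, void *queue,
                               size_t bytes, uint64_t *iova, Error **errp)
    {
        /* The callee records the root cause in *errp on failure... */
        if (qemu_vfio_dma_map(vfio, queue, bytes, false, iova, errp)) {
            /* ...and the caller only prepends its own context. */
            error_prepend(errp, "Cannot map queue: ");
        }
    }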
@@ -223,7 +223,7 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages,
s->page_size * NVME_NUM_REQS,
- false, &prp_list_iova);
+ false, &prp_list_iova, errp);
if (r) {
goto fail;
}
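Previously this path could fail without setting *errp at all; now qemu_vfio_dma_map() fills it, so a bare goto fail is sufficient and no error_setg() is needed before the cleanup label.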
@@ -514,9 +514,9 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
error_setg(errp, "Cannot allocate buffer for identify response");
goto out;
}
- r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova);
+ r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova, errp);
if (r) {
- error_setg(errp, "Cannot map buffer for DMA");
+ error_prepend(errp, "Cannot map buffer for DMA: ");
goto out;
}
@@ -990,7 +990,7 @@ try_map:
r = qemu_vfio_dma_map(s->vfio,
qiov->iov[i].iov_base,
qiov->iov[i].iov_len,
- true, &iova);
+ true, &iova, NULL);
if (r == -ENOMEM && retry) {
retry = false;
trace_nvme_dma_flush_queue_wait(s);
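Here errp is deliberately NULL: in this I/O path an -ENOMEM from the mapping is not fatal but a signal to flush temporary mappings and retry, so a detailed error would be constructed only to be thrown away. A simplified sketch of the retry loop around this call (condensed from the surrounding code; the wait for in-flight requests is omitted):

    try_map:
        r = qemu_vfio_dma_map(s->vfio, iov_base, iov_len, true, &iova, NULL);
        if (r == -ENOMEM && retry) {
            retry = false;
            /* Drop all temporary mappings to free IOVA space, then retry once. */
            r = qemu_vfio_dma_reset_temporary(s->vfio);
            if (r) {
                return r;
            }
            goto try_map;
        }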
@@ -1437,7 +1437,7 @@ static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
int ret;
BDRVNVMeState *s = bs->opaque;
- ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
+ ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, NULL);
if (ret) {
/* FIXME: we may run out of IOVA addresses after repeated
* bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
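Both the iova_list and errp arguments are deliberately NULL here: bdrv_register_buf() returns void, so there is no one to hand an Error to, and as the FIXME notes a buffer that fails to map only loses an optimization, since later I/O falls back to temporary mappings.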
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -486,7 +486,7 @@ static void qemu_vfio_ram_block_added(RAMBlockNotifier *n,
{
QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
trace_qemu_vfio_ram_block_added(s, host, size);
- qemu_vfio_dma_map(s, host, size, false, NULL);
+ qemu_vfio_dma_map(s, host, size, false, NULL, NULL);
}
static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
@@ -501,6 +501,7 @@ static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
{
+ Error *local_err = NULL;
void *host_addr = qemu_ram_get_host_addr(rb);
ram_addr_t length = qemu_ram_get_used_length(rb);
int ret;
@@ -509,10 +510,11 @@ static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
if (!host_addr) {
return 0;
}
- ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL);
+ ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL, &local_err);
if (ret) {
- fprintf(stderr, "qemu_vfio_init_ramblock: failed %p %" PRId64 "\n",
- host_addr, (uint64_t)length);
+        error_reportf_err(local_err,
+                          "qemu_vfio_init_ramblock: failed %p %" PRIu64 ": ",
+                          host_addr, (uint64_t)length);
}
return 0;
}
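The local_err idiom used above is the standard QEMU pattern for a caller that has no Error ** of its own to propagate into: collect the error locally, then hand it to error_reportf_err(), which prints the prepended context plus the original message and frees the Error object. A minimal sketch (do_map() is a hypothetical stand-in for qemu_vfio_dma_map()):

    Error *local_err = NULL;

    if (do_map(&local_err) < 0) {
        /* Prints "mapping failed: <reason>" and frees local_err. */
        error_reportf_err(local_err, "mapping failed: ");
    }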
@@ -754,7 +756,7 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
* mapping status within this area is not allowed).
*/
int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
- bool temporary, uint64_t *iova)
+ bool temporary, uint64_t *iova, Error **errp)
{
int ret = 0;
int index;
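The truncated function body is where the new errp parameter pays off: each failure path inside the mapping code can now attach a concrete reason instead of returning a bare errno. A hedged sketch of what such a path could look like (the message text, the iova0 name, and the exact placement are assumptions, not part of this hunk; s->container and the VFIO type1 ioctl are real):

    struct vfio_iommu_type1_dma_map dma_map = {
        .argsz = sizeof(dma_map),
        .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
        .vaddr = (uintptr_t)host,
        .iova = iova0,
        .size = size,
    };

    if (ioctl(s->container, VFIO_IOMMU_MAP_DMA, &dma_map)) {
        /* Hypothetical failure path: record the errno-based reason for the caller. */
        error_setg_errno(errp, errno, "VFIO_MAP_DMA failed");
        return -errno;
    }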