@@ -37,7 +37,6 @@ struct VHostUserBlk {
struct vhost_dev dev;
struct vhost_inflight *inflight;
VhostUserState vhost_user;
- struct vhost_virtqueue *vhost_vqs;
VirtQueue **virtqs;
/*
@@ -82,6 +82,10 @@ struct vhost_dev {
MemoryRegionSection *mem_sections;
int n_tmp_sections;
MemoryRegionSection *tmp_sections;
+ /**
+ * @vqs - internal to vhost_dev, allocated based on @nvqs
+ * @nvqs - number of @vqs to allocate.
+ */
struct vhost_virtqueue *vqs;
unsigned int nvqs;
/* the first virtqueue which would be used by this vhost dev */
@@ -156,6 +160,9 @@ struct vhost_net {
* negotiation of backend interface. Configuration of the VirtIO
* itself won't happen until the interface is started.
*
+ * If the initialisation fails it will call vhost_dev_cleanup() to
+ * tear down the interface and free memory.
+ *
* Return: 0 on success, non-zero on error while setting errp.
*/
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
@@ -165,6 +172,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
/**
* vhost_dev_cleanup() - tear down and cleanup vhost interface
* @hdev: the common vhost_dev structure
+ *
+ * This includes freeing internals such as @vqs
*/
void vhost_dev_cleanup(struct vhost_dev *hdev);
@@ -34,7 +34,6 @@ vhost_user_backend_dev_init(VhostUserBackend *b, VirtIODevice *vdev,
b->vdev = vdev;
b->dev.nvqs = nvqs;
- b->dev.vqs = g_new0(struct vhost_virtqueue, nvqs);
ret = vhost_dev_init(&b->dev, &b->vhost_user, VHOST_BACKEND_TYPE_USER, 0,
errp);
@@ -332,7 +332,6 @@ static int vhost_user_blk_connect(DeviceState *dev, Error **errp)
s->dev.num_queues = s->num_queues;
s->dev.nvqs = s->num_queues;
- s->dev.vqs = s->vhost_vqs;
s->dev.vq_index = 0;
s->dev.backend_features = 0;
@@ -480,7 +479,6 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
}
s->inflight = g_new0(struct vhost_inflight, 1);
- s->vhost_vqs = g_new0(struct vhost_virtqueue, s->num_queues);
retries = REALIZE_CONNECTION_RETRIES;
assert(!*errp);
@@ -504,8 +502,7 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
return;
virtio_err:
- g_free(s->vhost_vqs);
- s->vhost_vqs = NULL;
+ vhost_dev_cleanup(&s->dev);
g_free(s->inflight);
s->inflight = NULL;
for (i = 0; i < s->num_queues; i++) {
@@ -527,8 +524,6 @@ static void vhost_user_blk_device_unrealize(DeviceState *dev)
NULL, NULL, NULL, false);
vhost_dev_cleanup(&s->dev);
vhost_dev_free_inflight(s->inflight);
- g_free(s->vhost_vqs);
- s->vhost_vqs = NULL;
g_free(s->inflight);
s->inflight = NULL;
@@ -214,8 +214,6 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
}
vsc->dev.nvqs = VHOST_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
- vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs);
- vsc->dev.vqs = vqs;
vsc->dev.vq_index = 0;
vsc->dev.backend_features = 0;
@@ -94,7 +94,6 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(dev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
- struct vhost_virtqueue *vqs = NULL;
Error *err = NULL;
int ret;
@@ -116,10 +115,8 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
}
vsc->dev.nvqs = VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
- vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs);
vsc->dev.vq_index = 0;
vsc->dev.backend_features = 0;
- vqs = vsc->dev.vqs;
ret = vhost_dev_init(&vsc->dev, &s->vhost_user,
VHOST_BACKEND_TYPE_USER, 0, errp);
@@ -136,7 +133,6 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
free_vhost:
vhost_user_cleanup(&s->vhost_user);
- g_free(vqs);
free_virtio:
virtio_scsi_common_unrealize(dev);
}
@@ -146,13 +142,11 @@ static void vhost_user_scsi_unrealize(DeviceState *dev)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(dev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
- struct vhost_virtqueue *vqs = vsc->dev.vqs;
/* This will stop the vhost backend. */
vhost_user_scsi_set_status(vdev, 0);
vhost_dev_cleanup(&vsc->dev);
- g_free(vqs);
virtio_scsi_common_unrealize(dev);
vhost_user_cleanup(&s->vhost_user);
@@ -54,7 +54,6 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
VhostVdpaDevice *v = VHOST_VDPA_DEVICE(vdev);
struct vhost_vdpa_iova_range iova_range;
uint16_t max_queue_size;
- struct vhost_virtqueue *vqs;
int i, ret;
if (!v->vhostdev) {
@@ -101,8 +100,6 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
}
v->dev.nvqs = v->num_queues;
- vqs = g_new0(struct vhost_virtqueue, v->dev.nvqs);
- v->dev.vqs = vqs;
v->dev.vq_index = 0;
v->dev.vq_index_end = v->dev.nvqs;
v->dev.backend_features = 0;
@@ -112,7 +109,7 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
if (ret < 0) {
error_setg(errp, "vhost-vdpa-device: get iova range failed: %s",
strerror(-ret));
- goto free_vqs;
+ goto out;
}
v->vdpa.iova_range = iova_range;
@@ -120,7 +117,7 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
if (ret < 0) {
error_setg(errp, "vhost-vdpa-device: vhost initialization failed: %s",
strerror(-ret));
- goto free_vqs;
+ goto out;
}
v->config_size = vhost_vdpa_device_get_u32(v->vhostfd,
@@ -160,8 +157,6 @@ free_config:
g_free(v->config);
vhost_cleanup:
vhost_dev_cleanup(&v->dev);
-free_vqs:
- g_free(vqs);
out:
qemu_close(v->vhostfd);
v->vhostfd = -1;
@@ -288,7 +288,6 @@ static void vub_device_realize(DeviceState *dev, Error **errp)
}
vub->vhost_dev.nvqs = vub->num_vqs;
- vub->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vub->vhost_dev.nvqs);
/* connect to backend */
ret = vhost_dev_init(&vub->vhost_dev, &vub->vhost_user,
@@ -306,12 +305,10 @@ static void vub_device_unrealize(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserBase *vub = VHOST_USER_BASE(dev);
- struct vhost_virtqueue *vhost_vqs = vub->vhost_dev.vqs;
/* This will stop vhost backend if appropriate. */
vub_set_status(vdev, 0);
vhost_dev_cleanup(&vub->vhost_dev);
- g_free(vhost_vqs);
do_vhost_user_cleanup(vdev, vub);
}
@@ -248,7 +248,6 @@ static void vuf_device_realize(DeviceState *dev, Error **errp)
/* 1 high prio queue, plus the number configured */
fs->vhost_dev.nvqs = 1 + fs->conf.num_request_queues;
- fs->vhost_dev.vqs = g_new0(struct vhost_virtqueue, fs->vhost_dev.nvqs);
ret = vhost_dev_init(&fs->vhost_dev, &fs->vhost_user,
VHOST_BACKEND_TYPE_USER, 0, errp);
if (ret < 0) {
@@ -1392,6 +1392,8 @@ static bool vhost_init_virtqs(struct vhost_dev *hdev, uint32_t busyloop_timeout,
{
int i, r, n_initialized_vqs = 0;
+ hdev->vqs = g_new0(struct vhost_virtqueue, hdev->nvqs);
+
for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
if (r < 0) {
@@ -1530,9 +1532,13 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
trace_vhost_dev_cleanup(hdev);
- for (i = 0; i < hdev->nvqs; ++i) {
- vhost_virtqueue_cleanup(hdev->vqs + i);
+ if (hdev->vqs) {
+ for (i = 0; i < hdev->nvqs; ++i) {
+ vhost_virtqueue_cleanup(hdev->vqs + i);
+ }
+ g_free(hdev->vqs);
}
+
if (hdev->mem) {
/* those are only safe after successful init */
memory_listener_unregister(&hdev->memory_listener);
All the allocations are a function of the number of vqs we are allocating so let the vhost code deal with it directly. This allows us to eliminate some complexity of the clean-up code (because vhost_dev_init cleans up after itself if it fails). We can also remove the places where we store copies of @vqs in child objects. Signed-off-by: Alex Bennée <alex.bennee@linaro.org> --- include/hw/virtio/vhost-user-blk.h | 1 - include/hw/virtio/vhost.h | 9 +++++++++ backends/vhost-user.c | 1 - hw/block/vhost-user-blk.c | 7 +------ hw/scsi/vhost-scsi.c | 2 -- hw/scsi/vhost-user-scsi.c | 6 ------ hw/virtio/vdpa-dev.c | 9 ++------- hw/virtio/vhost-user-device.c | 3 --- hw/virtio/vhost-user-fs.c | 1 - hw/virtio/vhost.c | 10 ++++++++-- 10 files changed, 20 insertions(+), 29 deletions(-)