@@ -388,6 +388,17 @@ static void ublk_queue_deinit(struct ublk_queue *q)
 	int i;
 	int nr_ios = q->q_depth;
 
+	if (q->io_cmd_buf)
+		munmap(q->io_cmd_buf, ublk_queue_cmd_buf_sz(q));
+
+	for (i = 0; i < nr_ios; i++)
+		free(q->ios[i].buf_addr);
+}
+
+static void ublk_thread_deinit(struct ublk_queue *q)
+{
+	q->tid = 0;
+
 	io_uring_unregister_buffers(&q->ring);
 	io_uring_unregister_ring_fd(&q->ring);
 
@@ -397,28 +408,20 @@ static void ublk_queue_deinit(struct ublk_queue *q)
 		close(q->ring.ring_fd);
 		q->ring.ring_fd = -1;
 	}
-
-	if (q->io_cmd_buf)
-		munmap(q->io_cmd_buf, ublk_queue_cmd_buf_sz(q));
-
-	for (i = 0; i < nr_ios; i++)
-		free(q->ios[i].buf_addr);
 }
 
 static int ublk_queue_init(struct ublk_queue *q)
 {
 	struct ublk_dev *dev = q->dev;
 	int depth = dev->dev_info.queue_depth;
-	int i, ret = -1;
+	int i;
 	int cmd_buf_size, io_buf_size;
 	unsigned long off;
-	int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth;
 
 	q->tgt_ops = dev->tgt.ops;
 	q->state = 0;
 	q->q_depth = depth;
 	q->cmd_inflight = 0;
-	q->tid = gettid();
 
 	if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY) {
 		q->state |= UBLKSRV_NO_BUF;
@@ -452,6 +455,22 @@ static int ublk_queue_init(struct ublk_queue *q)
 		}
 	}
 
+	return 0;
+ fail:
+	ublk_queue_deinit(q);
+	ublk_err("ublk dev %d queue %d failed\n",
+			dev->dev_info.dev_id, q->q_id);
+	return -ENOMEM;
+}
+
+static int ublk_thread_init(struct ublk_queue *q)
+{
+	struct ublk_dev *dev = q->dev;
+	int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth;
+	int ret;
+
+	q->tid = gettid();
+
 	ret = ublk_setup_ring(&q->ring, ring_depth, cq_depth,
 			IORING_SETUP_COOP_TASKRUN |
 			IORING_SETUP_SINGLE_ISSUER |
@@ -481,9 +500,9 @@ static int ublk_queue_init(struct ublk_queue *q)
 	}
 
 	return 0;
- fail:
-	ublk_queue_deinit(q);
-	ublk_err("ublk dev %d queue %d failed\n",
+fail:
+	ublk_thread_deinit(q);
+	ublk_err("ublk dev %d queue %d thread init failed\n",
 			dev->dev_info.dev_id, q->q_id);
 	return -ENOMEM;
 }
@@ -740,9 +759,9 @@ static void *ublk_io_handler_fn(void *data)
 	int dev_id = q->dev->dev_info.dev_id;
 	int ret;
 
-	ret = ublk_queue_init(q);
+	ret = ublk_thread_init(q);
 	if (ret) {
-		ublk_err("ublk dev %d queue %d init queue failed\n",
+		ublk_err("ublk dev %d queue %d thread init failed\n",
 			dev_id, q->q_id);
 		return NULL;
 	}
@@ -761,7 +780,7 @@ static void *ublk_io_handler_fn(void *data)
 	} while (1);
 
 	ublk_dbg(UBLK_DBG_QUEUE, "ublk dev %d queue %d exited\n", dev_id, q->q_id);
-	ublk_queue_deinit(q);
+	ublk_thread_deinit(q);
 	return NULL;
 }
 
@@ -830,6 +849,13 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
 		dev->q[i].dev = dev;
 		dev->q[i].q_id = i;
 
+		ret = ublk_queue_init(&dev->q[i]);
+		if (ret) {
+			ublk_err("ublk dev %d queue %d init queue failed\n",
+				dinfo->dev_id, i);
+			goto fail;
+		}
+
 		qinfo[i].q = &dev->q[i];
 		qinfo[i].queue_sem = &queue_sem;
 		qinfo[i].affinity = &affinity_buf[i];
@@ -865,6 +891,8 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
 	for (i = 0; i < dinfo->nr_hw_queues; i++)
 		pthread_join(dev->q[i].thread, &thread_ret);
 fail:
+	for (i = 0; i < dinfo->nr_hw_queues; i++)
+		ublk_queue_deinit(&dev->q[i]);
 	ublk_dev_unprep(dev);
 	ublk_dbg(UBLK_DBG_DEV, "%s exit\n", __func__);
 

Currently, each ublk server I/O handler thread initializes its own queue.
However, as we move towards decoupled ublk_queues and ublk server threads,
this model no longer makes sense, as there will no longer be a concept of
a thread having "its own" queue. So lift queue initialization out of the
per-thread ublk_io_handler_fn and into a loop in ublk_start_daemon (which
runs once per device).

There is one part of ublk_queue_init (ring initialization) which does need
to happen on the thread that will use the ring; that is split out into a
new ublk_thread_init, which is still called by each I/O handler thread.

Signed-off-by: Uday Shankar <ushankar@purestorage.com>
---
 tools/testing/selftests/ublk/kublk.c | 58 ++++++++++++++++++++++++++----------
 1 file changed, 43 insertions(+), 15 deletions(-)
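
As an aside for readers new to this code: the ownership split the commit
message describes can be pictured with a small standalone sketch. This is
not kublk.c code — every name below (queue_init, io_handler, and so on) is
an illustrative stand-in — but it shows the same lifecycle the patch moves
to: per-queue resources are set up and torn down once by the main thread,
while each handler thread sets up and tears down only its thread-local
state (in kublk, the io_uring ring).

	/* decouple-sketch.c: illustration only, not part of the patch.
	 * Build with: gcc -pthread decouple-sketch.c
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NR_QUEUES 2

	struct queue {
		int q_id;
		void *bufs;	/* per-queue resource, owned by the device */
		pthread_t thread;
	};

	/* Analog of ublk_queue_init: runs on the main thread, once per queue */
	static int queue_init(struct queue *q)
	{
		q->bufs = calloc(1, 4096);
		return q->bufs ? 0 : -1;
	}

	/* Analog of ublk_queue_deinit: also runs once, on the main thread */
	static void queue_deinit(struct queue *q)
	{
		free(q->bufs);
		q->bufs = NULL;
	}

	/* Analog of ublk_io_handler_fn: only thread-local state (the ring in
	 * kublk, faked here with printfs) is set up/torn down on this thread
	 */
	static void *io_handler(void *data)
	{
		struct queue *q = data;

		printf("queue %d: thread-local init on handler thread\n", q->q_id);
		/* ... submit and complete I/O against q->bufs ... */
		printf("queue %d: thread-local deinit\n", q->q_id);
		return NULL;
	}

	int main(void)
	{
		struct queue queues[NR_QUEUES] = {0};
		int i;

		/* queue init lifted out of the handler threads */
		for (i = 0; i < NR_QUEUES; i++) {
			queues[i].q_id = i;
			if (queue_init(&queues[i]))
				goto out;
		}

		for (i = 0; i < NR_QUEUES; i++)
			pthread_create(&queues[i].thread, NULL, io_handler,
				       &queues[i]);
		for (i = 0; i < NR_QUEUES; i++)
			pthread_join(queues[i].thread, NULL);
	out:
		/* queue deinit likewise happens once, after all threads exit */
		for (i = 0; i < NR_QUEUES; i++)
			queue_deinit(&queues[i]);
		return 0;
	}

Note the design choice on the error path: the unconditional deinit loop is
safe in this sketch only because tearing down a never-initialized (zeroed)
queue is harmless — free(NULL) is a no-op. The fail path added to
ublk_start_daemon depends on the analogous property of ublk_queue_deinit.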