[RFC,v5,08/27] nvme-tcp-offload: Add Timeout and ASYNC Support

Message ID 20210519111340.20613-9-smalin@marvell.com
State New
Series NVMeTCP Offload ULP and QEDN Device Driver

Commit Message

Shai Malin May 19, 2021, 11:13 a.m. UTC
In this patch, we present the nvme-tcp-offload timeout support,
nvme_tcp_ofld_timeout(), and the ASYNC event support,
nvme_tcp_ofld_submit_async_event().

Acked-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
 drivers/nvme/host/tcp-offload.c | 85 ++++++++++++++++++++++++++++++++-
 drivers/nvme/host/tcp-offload.h |  2 +
 2 files changed, 86 insertions(+), 1 deletion(-)
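
For orientation, below is a minimal sketch (not part of this patch) of the
vendor-side flow these hooks rely on. The common layer builds the AER
command and hands it to ops->send_req(); on the matching completion the
vendor driver reports back through req->done, which this patch points at
nvme_tcp_ofld_async_req_done(). The qedn_* helpers are hypothetical.

/* Hypothetical vendor send_req hook: an async (AER) request carries no
 * struct request and no data, so it can bypass SGL mapping entirely.
 */
static int qedn_send_req_sketch(struct nvme_tcp_ofld_req *req)
{
	if (req->async)
		return qedn_post_cmd(req->queue, &req->nvme_cmd); /* hypothetical */

	/* ... regular I/O path: map data, post the command ... */
	return 0;
}

/* Hypothetical vendor completion path for the AER slot: hand the CQE
 * payload back through the done callback installed by the common layer.
 */
static void qedn_complete_aer_sketch(struct nvme_tcp_ofld_req *req,
				     struct nvme_completion *cqe)
{
	req->done(req, &cqe->result, cqe->status);
}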

Comments

Himanshu Madhani May 21, 2021, 6:36 p.m. UTC | #1
On 5/19/21 6:13 AM, Shai Malin wrote:
> In this patch, we present the nvme-tcp-offload timeout support,
> nvme_tcp_ofld_timeout(), and the ASYNC event support,
> nvme_tcp_ofld_submit_async_event().
> 
> [quoted patch trimmed; the full diff appears below]

Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>


-- 
Himanshu Madhani                                Oracle Linux Engineering
Sagi Grimberg May 21, 2021, 10:51 p.m. UTC | #2
This should be squashed into the I/O patch
Shai Malin May 24, 2021, 8:17 p.m. UTC | #3
On 5/22/21 1:51 AM, Sagi Grimberg wrote:
> This should be squashed into the I/O patch

Sure.
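
A note on the AER plumbing in this patch: the async command is submitted
with command_id set to NVME_AQ_BLK_MQ_DEPTH, a tag that lies outside the
blk-mq tag space, which is how a completion path can tell an AER apart
from a regular request (the same convention stock nvme-tcp follows via
nvme_is_aen_req()). A minimal sketch, with a hypothetical qedn_* helper:

/* Sketch: demultiplex a CQE on the admin queue. nvme_is_aen_req() is
 * the existing helper from drivers/nvme/host/nvme.h.
 */
static void qedn_handle_cqe_sketch(struct nvme_tcp_ofld_queue *queue,
				   struct nvme_completion *cqe)
{
	struct nvme_tcp_ofld_ctrl *ctrl = queue->ctrl;

	if (unlikely(nvme_is_aen_req(nvme_tcp_ofld_qid(queue),
				     cqe->command_id))) {
		/* No struct request backs an AER; use the async done path */
		nvme_tcp_ofld_async_req_done(&ctrl->async_req,
					     &cqe->result, cqe->status);
		return;
	}

	/* ... otherwise map command_id to a struct request and complete it ... */
}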

Patch

diff --git a/drivers/nvme/host/tcp-offload.c b/drivers/nvme/host/tcp-offload.c
index 276b8475ac85..01b4c43cdaa5 100644
--- a/drivers/nvme/host/tcp-offload.c
+++ b/drivers/nvme/host/tcp-offload.c
@@ -133,6 +133,26 @@  void nvme_tcp_ofld_req_done(struct nvme_tcp_ofld_req *req,
 		nvme_complete_rq(rq);
 }
 
+/**
+ * nvme_tcp_ofld_async_req_done() - NVMeTCP Offload request done callback
+ * function for async request. Pointed to by nvme_tcp_ofld_req->done.
+ * Handles both NVME_TCP_F_DATA_SUCCESS flag and NVMe CQ.
+ * @req:	NVMeTCP offload request to complete.
+ * @result:     The nvme_result.
+ * @status:     The completion status.
+ *
+ * API function that allows the vendor specific offload driver to report request
+ * completions to the common offload layer.
+ */
+void nvme_tcp_ofld_async_req_done(struct nvme_tcp_ofld_req *req,
+				  union nvme_result *result, __le16 status)
+{
+	struct nvme_tcp_ofld_queue *queue = req->queue;
+	struct nvme_tcp_ofld_ctrl *ctrl = queue->ctrl;
+
+	nvme_complete_async_event(&ctrl->nctrl, status, result);
+}
+
 struct nvme_tcp_ofld_dev *
 nvme_tcp_ofld_lookup_dev(struct nvme_tcp_ofld_ctrl *ctrl)
 {
@@ -733,7 +753,23 @@  void nvme_tcp_ofld_map_data(struct nvme_command *c, u32 data_len)
 
 static void nvme_tcp_ofld_submit_async_event(struct nvme_ctrl *arg)
 {
-	/* Placeholder - submit_async_event */
+	struct nvme_tcp_ofld_ctrl *ctrl = to_tcp_ofld_ctrl(arg);
+	struct nvme_tcp_ofld_queue *queue = &ctrl->queues[0];
+	struct nvme_tcp_ofld_dev *dev = queue->dev;
+	struct nvme_tcp_ofld_ops *ops = dev->ops;
+
+	ctrl->async_req.nvme_cmd.common.opcode = nvme_admin_async_event;
+	ctrl->async_req.nvme_cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
+	ctrl->async_req.nvme_cmd.common.flags |= NVME_CMD_SGL_METABUF;
+
+	nvme_tcp_ofld_set_sg_null(&ctrl->async_req.nvme_cmd);
+
+	ctrl->async_req.async = true;
+	ctrl->async_req.queue = queue;
+	ctrl->async_req.last = true;
+	ctrl->async_req.done = nvme_tcp_ofld_async_req_done;
+
+	ops->send_req(&ctrl->async_req);
 }
 
 static void
@@ -1039,6 +1075,51 @@  static int nvme_tcp_ofld_poll(struct blk_mq_hw_ctx *hctx)
 	return ops->poll_queue(queue);
 }
 
+static void nvme_tcp_ofld_complete_timed_out(struct request *rq)
+{
+	struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_ctrl *nctrl = &req->queue->ctrl->nctrl;
+
+	nvme_tcp_ofld_stop_queue(nctrl, nvme_tcp_ofld_qid(req->queue));
+	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
+		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+		blk_mq_complete_request(rq);
+	}
+}
+
+static enum blk_eh_timer_return nvme_tcp_ofld_timeout(struct request *rq, bool reserved)
+{
+	struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_tcp_ofld_ctrl *ctrl = req->queue->ctrl;
+
+	dev_warn(ctrl->nctrl.device,
+		 "queue %d: timeout request %#x type %d\n",
+		 nvme_tcp_ofld_qid(req->queue), rq->tag, req->nvme_cmd.common.opcode);
+
+	if (ctrl->nctrl.state != NVME_CTRL_LIVE) {
+		/*
+		 * If we are resetting, connecting or deleting we should
+		 * complete immediately because we may block controller
+		 * teardown or setup sequence
+		 * - ctrl disable/shutdown fabrics requests
+		 * - connect requests
+		 * - initialization admin requests
+		 * - I/O requests that entered after unquiescing and
+		 *   the controller stopped responding
+		 *
+		 * All other requests should be cancelled by the error
+		 * recovery work, so it's fine that we fail it here.
+		 */
+		nvme_tcp_ofld_complete_timed_out(rq);
+
+		return BLK_EH_DONE;
+	}
+
+	nvme_tcp_ofld_error_recovery(&ctrl->nctrl);
+
+	return BLK_EH_RESET_TIMER;
+}
+
 static struct blk_mq_ops nvme_tcp_ofld_mq_ops = {
 	.queue_rq	= nvme_tcp_ofld_queue_rq,
 	.commit_rqs     = nvme_tcp_ofld_commit_rqs,
@@ -1046,6 +1127,7 @@  static struct blk_mq_ops nvme_tcp_ofld_mq_ops = {
 	.init_request	= nvme_tcp_ofld_init_request,
 	.exit_request	= nvme_tcp_ofld_exit_request,
 	.init_hctx	= nvme_tcp_ofld_init_hctx,
+	.timeout	= nvme_tcp_ofld_timeout,
 	.map_queues	= nvme_tcp_ofld_map_queues,
 	.poll		= nvme_tcp_ofld_poll,
 };
@@ -1056,6 +1138,7 @@  static struct blk_mq_ops nvme_tcp_ofld_admin_mq_ops = {
 	.init_request	= nvme_tcp_ofld_init_request,
 	.exit_request	= nvme_tcp_ofld_exit_request,
 	.init_hctx	= nvme_tcp_ofld_init_admin_hctx,
+	.timeout	= nvme_tcp_ofld_timeout,
 };
 
 static const struct nvme_ctrl_ops nvme_tcp_ofld_ctrl_ops = {
diff --git a/drivers/nvme/host/tcp-offload.h b/drivers/nvme/host/tcp-offload.h
index 2233d855aa10..f897b811c399 100644
--- a/drivers/nvme/host/tcp-offload.h
+++ b/drivers/nvme/host/tcp-offload.h
@@ -117,6 +117,8 @@  struct nvme_tcp_ofld_ctrl {
 	/* Connectivity params */
 	struct nvme_tcp_ofld_ctrl_con_params conn_params;
 
+	struct nvme_tcp_ofld_req async_req;
+
 	/* Vendor specific driver context */
 	void *private_data;
 };
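
As a usage note on the new .timeout hook: for a LIVE controller the
handler does not complete the request itself; it kicks error recovery and
returns BLK_EH_RESET_TIMER so the recovery work can cancel the request
cleanly. A vendor driver that detects a transport-level failure can
trigger the same path directly. A minimal sketch, assuming only
nvme_tcp_ofld_error_recovery() from elsewhere in this series:

/* Sketch: vendor-side trigger for the common error recovery, mirroring
 * what nvme_tcp_ofld_timeout() does when the controller is LIVE.
 */
static void qedn_transport_error_sketch(struct nvme_tcp_ofld_ctrl *ctrl)
{
	dev_warn(ctrl->nctrl.device,
		 "transport failure detected, starting error recovery\n");
	nvme_tcp_ofld_error_recovery(&ctrl->nctrl);
}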