diff mbox series

[1/9] nvme: use blk_mq_alloc_disk

Message ID 20210816131910.615153-2-hch@lst.de
State New
Headers show
Series [1/9] nvme: use blk_mq_alloc_disk | expand

Commit Message

Christoph Hellwig Aug. 16, 2021, 1:19 p.m. UTC
Switch to use the blk_mq_alloc_disk helper for allocating the
request_queue and gendisk.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/core.c | 33 +++++++++++++--------------------
 1 file changed, 13 insertions(+), 20 deletions(-)

Comments

Keith Busch Aug. 19, 2021, 2:54 p.m. UTC | #1
On Mon, Aug 16, 2021 at 03:19:02PM +0200, Christoph Hellwig wrote:
> @@ -3729,9 +3729,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,

>  	if (!ns)

>  		goto out_free_id;

>  

> -	ns->queue = blk_mq_init_queue(ctrl->tagset);

> -	if (IS_ERR(ns->queue))

> +	disk = blk_mq_alloc_disk(ctrl->tagset, ns);

> +	if (IS_ERR(disk))

>  		goto out_free_ns;

> +	disk->fops = &nvme_bdev_ops;

> +	disk->private_data = ns;

> +

> +	ns->disk = disk;

> +	ns->queue = disk->queue;

>  

>  	if (ctrl->opts && ctrl->opts->data_digest)

>  		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);

> @@ -3740,20 +3745,12 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,

>  	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)

>  		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

>  

> -	ns->queue->queuedata = ns;

>  	ns->ctrl = ctrl;

>  	kref_init(&ns->kref);


With this removal, I don't find queuedata being set anywhere, but
the driver still uses it in various places expecting 'ns'. Am I missing
something? Should all nvme's queuedata references be changed to
q->disk->private_data?
Keith Busch Aug. 19, 2021, 2:58 p.m. UTC | #2
On Thu, Aug 19, 2021 at 07:54:55AM -0700, Keith Busch wrote:
> On Mon, Aug 16, 2021 at 03:19:02PM +0200, Christoph Hellwig wrote:

> > @@ -3729,9 +3729,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,

> >  	if (!ns)

> >  		goto out_free_id;

> >  

> > -	ns->queue = blk_mq_init_queue(ctrl->tagset);

> > -	if (IS_ERR(ns->queue))

> > +	disk = blk_mq_alloc_disk(ctrl->tagset, ns);

> > +	if (IS_ERR(disk))

> >  		goto out_free_ns;

> > +	disk->fops = &nvme_bdev_ops;

> > +	disk->private_data = ns;

> > +

> > +	ns->disk = disk;

> > +	ns->queue = disk->queue;

> >  

> >  	if (ctrl->opts && ctrl->opts->data_digest)

> >  		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);

> > @@ -3740,20 +3745,12 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,

> >  	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)

> >  		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);

> >  

> > -	ns->queue->queuedata = ns;

> >  	ns->ctrl = ctrl;

> >  	kref_init(&ns->kref);

> 

> With this removal, I don't find queuedata being set anywhere, but

> the driver still uses it in various places expecting 'ns'. Am I missing

> something? Should all nvme's queuedata references be changed to

> q->disk->private_data?


Oops, I see the queuedata is set via blk_mq_alloc_disk().

Looks good.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Luis Chamberlain Aug. 19, 2021, 10:57 p.m. UTC | #3
On Mon, Aug 16, 2021 at 03:19:02PM +0200, Christoph Hellwig wrote:
> Switch to use the blk_mq_alloc_disk helper for allocating the

> request_queue and gendisk.

> 

> Signed-off-by: Christoph Hellwig <hch@lst.de>

> ---

>  drivers/nvme/host/core.c | 33 +++++++++++++--------------------

>  1 file changed, 13 insertions(+), 20 deletions(-)

> 

> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c

> index 1478d825011d..a5878ba14c55 100644

> --- a/drivers/nvme/host/core.c

> +++ b/drivers/nvme/host/core.c

> @@ -3762,15 +3759,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,

>  	if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))

>  		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,

>  			ns->head->instance);

> -	ns->disk = disk;

>  

>  	if (nvme_update_ns_info(ns, id))

> -		goto out_put_disk;

> +		goto out_unlink_ns;

>  

>  	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {

>  		if (nvme_nvm_register(ns, disk->disk_name, node)) {

>  			dev_warn(ctrl->device, "LightNVM init failure\n");

> -			goto out_put_disk;

> +			goto out_unlink_ns;

>  		}

>  	}


This hunk will fail because of the now-removed NVME_QUIRK_LIGHTNVM. The
last part of the patch can then be removed to apply to linux-next.

  Luis
Christoph Hellwig Aug. 20, 2021, 4:06 a.m. UTC | #4
On Thu, Aug 19, 2021 at 03:57:23PM -0700, Luis Chamberlain wrote:
> >  		if (nvme_nvm_register(ns, disk->disk_name, node)) {

> >  			dev_warn(ctrl->device, "LightNVM init failure\n");

> > -			goto out_put_disk;

> > +			goto out_unlink_ns;

> >  		}

> >  	}

> 

> This hunk will fail because of the now removed NVME_QUIRK_LIGHTNVM. The

> last part of the patch  then can be removed to apply to linux-next.


So this is not in the for-5.15/block tree, just the drivers one.

Jens, do you want to start a -late branch on top of the block and drivers
branches so that we can pre-resolve these merges?
Sagi Grimberg Aug. 23, 2021, 5:22 p.m. UTC | #5
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
diff mbox series

Patch

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1478d825011d..a5878ba14c55 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3729,9 +3729,14 @@  static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 	if (!ns)
 		goto out_free_id;
 
-	ns->queue = blk_mq_init_queue(ctrl->tagset);
-	if (IS_ERR(ns->queue))
+	disk = blk_mq_alloc_disk(ctrl->tagset, ns);
+	if (IS_ERR(disk))
 		goto out_free_ns;
+	disk->fops = &nvme_bdev_ops;
+	disk->private_data = ns;
+
+	ns->disk = disk;
+	ns->queue = disk->queue;
 
 	if (ctrl->opts && ctrl->opts->data_digest)
 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
@@ -3740,20 +3745,12 @@  static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 	if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
 		blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
 
-	ns->queue->queuedata = ns;
 	ns->ctrl = ctrl;
 	kref_init(&ns->kref);
 
 	if (nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED))
-		goto out_free_queue;
+		goto out_cleanup_disk;
 
-	disk = alloc_disk_node(0, node);
-	if (!disk)
-		goto out_unlink_ns;
-
-	disk->fops = &nvme_bdev_ops;
-	disk->private_data = ns;
-	disk->queue = ns->queue;
 	/*
 	 * Without the multipath code enabled, multiple controller per
 	 * subsystems are visible as devices and thus we cannot use the
@@ -3762,15 +3759,14 @@  static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 	if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
 		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
 			ns->head->instance);
-	ns->disk = disk;
 
 	if (nvme_update_ns_info(ns, id))
-		goto out_put_disk;
+		goto out_unlink_ns;
 
 	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
 		if (nvme_nvm_register(ns, disk->disk_name, node)) {
 			dev_warn(ctrl->device, "LightNVM init failure\n");
-			goto out_put_disk;
+			goto out_unlink_ns;
 		}
 	}
 
@@ -3789,10 +3785,7 @@  static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 	kfree(id);
 
 	return;
- out_put_disk:
-	/* prevent double queue cleanup */
-	ns->disk->queue = NULL;
-	put_disk(ns->disk);
+
  out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
 	list_del_rcu(&ns->siblings);
@@ -3800,8 +3793,8 @@  static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 		list_del_init(&ns->head->entry);
 	mutex_unlock(&ctrl->subsys->lock);
 	nvme_put_ns_head(ns->head);
- out_free_queue:
-	blk_cleanup_queue(ns->queue);
+ out_cleanup_disk:
+	blk_cleanup_disk(disk);
  out_free_ns:
 	kfree(ns);
  out_free_id: