diff mbox series

[v3,10/17] ufs: core: mcq: Use shared tags for MCQ mode

Message ID 2fea9d4f0b8dfc2e2c82d176f0c928b0525d8110.1666288432.git.quic_asutoshd@quicinc.com
State New
Headers show
Series Add Multi Circular Queue Support | expand

Commit Message

Asutosh Das Oct. 20, 2022, 6:03 p.m. UTC
Enable shared tags for MCQ. For UFS, this should
not have a huge performance impact. It however
simplifies the MCQ implementation and reuses most of
the existing code in the issue and completion path.
Also add multiple queue mapping to map_queue().

Co-developed-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Can Guo <quic_cang@quicinc.com>
Signed-off-by: Asutosh Das <quic_asutoshd@quicinc.com>
---
 drivers/ufs/core/ufs-mcq.c |  2 ++
 drivers/ufs/core/ufshcd.c  | 31 +++++++++++++++++--------------
 2 files changed, 19 insertions(+), 14 deletions(-)

Comments

Bart Van Assche Oct. 28, 2022, 9:47 p.m. UTC | #1
On 10/20/22 11:03, Asutosh Das wrote:
> Enable shared taggs for MCQ. For UFS, this should

taggs -> tags

Otherwise this patch looks good to me. Hence:

Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Avri Altman Oct. 30, 2022, 1:06 p.m. UTC | #2
>  static int ufshcd_map_queues(struct Scsi_Host *shost)
This seems like an old version of ufshcd_map_queues - returns void now.
Needs rebase?

>  {
> -       int i, ret;
> +       int i, queue_offset = 0;
> +       struct ufs_hba *hba = shost_priv(shost);
> +
> +       if (!is_mcq_supported(hba)) {
> +               hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
> +               hba->nr_queues[HCTX_TYPE_READ] = 0;
> +               hba->nr_queues[HCTX_TYPE_POLL] = 1;
> +               hba->nr_hw_queues = 1;
> +       }
> 
>         for (i = 0; i < shost->nr_maps; i++) {
>                 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
> 
> -               switch (i) {
> -               case HCTX_TYPE_DEFAULT:
> -               case HCTX_TYPE_POLL:
> -                       map->nr_queues = 1;
> -                       break;
> -               case HCTX_TYPE_READ:
> -                       map->nr_queues = 0;
> +               map->nr_queues = hba->nr_queues[i];
> +               if (!map->nr_queues)
>                         continue;
> -               default:
> -                       WARN_ON_ONCE(true);
> -               }
> -               map->queue_offset = 0;
> -               ret = blk_mq_map_queues(map);
Ditto.

Thanks,
Avri

> -               WARN_ON_ONCE(ret);
> +               map->queue_offset = queue_offset;
> +               if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
> +                       map->queue_offset = 0;
> +
> +               blk_mq_map_queues(map);
> +               queue_offset += map->nr_queues;
>         }
> 
>         return 0;
> --
> 2.7.4
Bart Van Assche Nov. 1, 2022, 4:29 p.m. UTC | #3
On 10/30/22 06:06, Avri Altman wrote:
>>   static int ufshcd_map_queues(struct Scsi_Host *shost)
> This seems like an old version of ufshcd_map_queues - returns void now.
> Needs rebase?

Hi Asutosh,

Please use the for-next branch of 
https://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git/ when 
preparing SCSI patches for the next merge window.

Thanks,

Bart.
diff mbox series

Patch

diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 213e398..10a50eb 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -367,6 +367,7 @@  int ufshcd_mcq_init(struct ufs_hba *hba)
 {
 	int ret, i;
 	struct ufs_hw_queue *hwq;
+	struct Scsi_Host *host = hba->host;
 
 	ret = ufshcd_mcq_config_nr_queues(hba);
 	if (ret)
@@ -400,6 +401,7 @@  int ufshcd_mcq_init(struct ufs_hba *hba)
 	/* Give dev_cmd_queue the minimal number of entries */
 	hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
 
+	host->host_tagset = 1;
 	return 0;
 }
 
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index b254dc5..87201dd 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -2728,25 +2728,28 @@  static inline bool is_device_wlun(struct scsi_device *sdev)
  */
 static int ufshcd_map_queues(struct Scsi_Host *shost)
 {
-	int i, ret;
+	int i, queue_offset = 0;
+	struct ufs_hba *hba = shost_priv(shost);
+
+	if (!is_mcq_supported(hba)) {
+		hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
+		hba->nr_queues[HCTX_TYPE_READ] = 0;
+		hba->nr_queues[HCTX_TYPE_POLL] = 1;
+		hba->nr_hw_queues = 1;
+	}
 
 	for (i = 0; i < shost->nr_maps; i++) {
 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
 
-		switch (i) {
-		case HCTX_TYPE_DEFAULT:
-		case HCTX_TYPE_POLL:
-			map->nr_queues = 1;
-			break;
-		case HCTX_TYPE_READ:
-			map->nr_queues = 0;
+		map->nr_queues = hba->nr_queues[i];
+		if (!map->nr_queues)
 			continue;
-		default:
-			WARN_ON_ONCE(true);
-		}
-		map->queue_offset = 0;
-		ret = blk_mq_map_queues(map);
-		WARN_ON_ONCE(ret);
+		map->queue_offset = queue_offset;
+		if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
+			map->queue_offset = 0;
+
+		blk_mq_map_queues(map);
+		queue_offset += map->nr_queues;
 	}
 
 	return 0;