
[V2,6/6] blk-mq: don't deactivate hctx if managed irq isn't used

Message ID 20210702150555.2401722-7-ming.lei@redhat.com
State New
Series blk-mq: fix blk_mq_alloc_request_hctx

Commit Message

Ming Lei July 2, 2021, 3:05 p.m. UTC
There is no need to deactivate a hctx when managed irqs aren't in use,
because a non-managed irq can simply be migrated to one of the
remaining online CPUs.

So we only need to register the CPU hotplug handler for
CPUHP_AP_BLK_MQ_ONLINE when managed irqs are in use.

Since BLK_MQ_F_MANAGED_IRQ is more generic and straightforward, it
covers !BLK_MQ_F_STACKING perfectly: managed irqs are only used by
underlying controllers, never by stacking drivers, so remove
BLK_MQ_F_STACKING.
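
For illustration, here is a hedged sketch of the driver side (the
my_ctrl_* names are hypothetical; BLK_MQ_F_MANAGED_IRQ,
BLK_MQ_F_SHOULD_MERGE and blk_mq_alloc_tag_set() are the real
identifiers): an underlying controller driver whose queue interrupts
are managed opts in via its tag_set flags, while stacking drivers such
as loop and dm-rq simply leave the flag out instead of having to opt
out with BLK_MQ_F_STACKING:

	/* Hypothetical controller driver, mirroring the loop_add() hunk below */
	static int my_ctrl_init_tag_set(struct my_ctrl *ctrl)
	{
		struct blk_mq_tag_set *set = &ctrl->tag_set;

		set->ops = &my_ctrl_mq_ops;
		set->nr_hw_queues = ctrl->nr_queues;
		set->queue_depth = 128;
		set->numa_node = NUMA_NO_NODE;
		set->cmd_size = sizeof(struct my_ctrl_cmd);
		/* queue irqs are managed (PCI_IRQ_AFFINITY), so opt in */
		set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_MANAGED_IRQ;
		set->driver_data = ctrl;

		return blk_mq_alloc_tag_set(set);
	}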

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq-debugfs.c | 1 -
 block/blk-mq.c         | 4 ++--
 drivers/block/loop.c   | 2 +-
 drivers/md/dm-rq.c     | 2 +-
 include/linux/blk-mq.h | 5 -----
 5 files changed, 4 insertions(+), 10 deletions(-)

Patch

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 17f57af3a4d6..3641a16c2910 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -245,7 +245,6 @@ static const char *const hctx_flag_name[] = {
 	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
 	HCTX_FLAG_NAME(BLOCKING),
 	HCTX_FLAG_NAME(NO_SCHED),
-	HCTX_FLAG_NAME(STACKING),
 	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
 	HCTX_FLAG_NAME(MANAGED_IRQ),
 };
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1d45d2922ca7..e672ebd93342 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2636,7 +2636,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 
 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
 {
-	if (!(hctx->flags & BLK_MQ_F_STACKING))
+	if (hctx->flags & BLK_MQ_F_MANAGED_IRQ)
 		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
 						    &hctx->cpuhp_online);
 	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
@@ -2731,7 +2731,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 {
 	hctx->queue_num = hctx_idx;
 
-	if (!(hctx->flags & BLK_MQ_F_STACKING))
+	if (hctx->flags & BLK_MQ_F_MANAGED_IRQ)
 		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
 				&hctx->cpuhp_online);
 	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cc0e8c39a48b..02509bc54242 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -2268,7 +2268,7 @@ static int loop_add(struct loop_device **l, int i)
 	lo->tag_set.queue_depth = 128;
 	lo->tag_set.numa_node = NUMA_NO_NODE;
 	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
-	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
+	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	lo->tag_set.driver_data = lo;
 
 	err = blk_mq_alloc_tag_set(&lo->tag_set);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 0dbd48cbdff9..294d4858c067 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -540,7 +540,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 	md->tag_set->ops = &dm_mq_ops;
 	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
 	md->tag_set->numa_node = md->numa_node_id;
-	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
+	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
 	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
 	md->tag_set->driver_data = md;
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 62fc0393cc3a..d75ab9fd64fc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -397,11 +397,6 @@ struct blk_mq_ops {
 enum {
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
-	/*
-	 * Set when this device requires underlying blk-mq device for
-	 * completing IO:
-	 */
-	BLK_MQ_F_STACKING	= 1 << 2,
 	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
 	BLK_MQ_F_MANAGED_IRQ	= 1 << 4,
 	BLK_MQ_F_BLOCKING	= 1 << 5,
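
For context, a sketch of where managed irqs come from in an underlying
controller driver (the my_ctrl_* names are hypothetical again;
pci_alloc_irq_vectors(), PCI_IRQ_AFFINITY, blk_mq_pci_map_queues() and
HCTX_TYPE_DEFAULT are real kernel APIs): PCI_IRQ_AFFINITY spreads the
vectors across the CPUs and marks them managed, so a vector is shut
down rather than migrated once all CPUs in its affinity mask go
offline, which is exactly why only such drivers need the
CPUHP_AP_BLK_MQ_ONLINE hctx deactivation that this patch keeps:

	/* Hypothetical underlying controller: allocate managed irqs */
	static int my_ctrl_setup_irqs(struct my_ctrl *ctrl)
	{
		int nr;

		/* returns the number of vectors allocated, or a negative errno */
		nr = pci_alloc_irq_vectors(ctrl->pdev, 1, ctrl->nr_queues,
					   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (nr < 0)
			return nr;

		ctrl->nr_queues = nr;
		return 0;
	}

	/* ->map_queues() then just follows the irq affinity */
	static int my_ctrl_map_queues(struct blk_mq_tag_set *set)
	{
		struct my_ctrl *ctrl = set->driver_data;

		return blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					     ctrl->pdev, 0);
	}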