diff mbox series

[v2,05/11] blk-mq-sched: Rename blk_mq_sched_alloc_{tags -> map_and_request}()

Message ID 1628519378-211232-6-git-send-email-john.garry@huawei.com
State Superseded
Headers show
Series blk-mq: Reduce static requests memory footprint for shared sbitmap | expand

Commit Message

John Garry Aug. 9, 2021, 2:29 p.m. UTC
Function blk_mq_sched_alloc_tags() does the same as
__blk_mq_alloc_map_and_request(), so give it a similar name to be consistent.

Similarly rename label err_free_tags -> err_free_map_and_request.

Signed-off-by: John Garry <john.garry@huawei.com>

---
 block/blk-mq-sched.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

-- 
2.26.2

Comments

Ming Lei Aug. 18, 2021, 3:55 a.m. UTC | #1
On Mon, Aug 09, 2021 at 10:29:32PM +0800, John Garry wrote:
> Function blk_mq_sched_alloc_tags() does same as

> __blk_mq_alloc_map_and_request(), so give a similar name to be consistent.

> 

> Similarly rename label err_free_tags -> err_free_map_and_request.

> 

> Signed-off-by: John Garry <john.garry@huawei.com>


Reviewed-by: Ming Lei <ming.lei@redhat.com>


-- 
Ming
diff mbox series

Patch

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 2231fb0d4c35..b4d7ad9a7a60 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -515,9 +515,9 @@  void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 	percpu_ref_put(&q->q_usage_counter);
 }
 
-static int blk_mq_sched_alloc_tags(struct request_queue *q,
-				   struct blk_mq_hw_ctx *hctx,
-				   unsigned int hctx_idx)
+static int blk_mq_sched_alloc_map_and_request(struct request_queue *q,
+					      struct blk_mq_hw_ctx *hctx,
+					      unsigned int hctx_idx)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 	int ret;
@@ -609,15 +609,15 @@  int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 				   BLKDEV_DEFAULT_RQ);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		ret = blk_mq_sched_alloc_tags(q, hctx, i);
+		ret = blk_mq_sched_alloc_map_and_request(q, hctx, i);
 		if (ret)
-			goto err_free_tags;
+			goto err_free_map_and_request;
 	}
 
 	if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
 		ret = blk_mq_init_sched_shared_sbitmap(q);
 		if (ret)
-			goto err_free_tags;
+			goto err_free_map_and_request;
 	}
 
 	ret = e->ops.init_sched(q, e);
@@ -645,7 +645,7 @@  int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 err_free_sbitmap:
 	if (blk_mq_is_sbitmap_shared(q->tag_set->flags))
 		blk_mq_exit_sched_shared_sbitmap(q);
-err_free_tags:
+err_free_map_and_request:
 	blk_mq_sched_free_requests(q);
 	blk_mq_sched_tags_teardown(q);
 	q->elevator = NULL;