[BUGFIX/IMPROVEMENT,2/4] block, bfq: add missing invocations of bfqg_stats_update_io_add/remove

Message ID 20171113063410.3029-3-paolo.valente@linaro.org
State: New
Series: block, bfq: increase sustainable IOPS and fix a bug

Commit Message

Paolo Valente Nov. 13, 2017, 6:34 a.m. UTC
From: Luca Miccio <lucmiccio@gmail.com>

bfqg_stats_update_io_add and bfqg_stats_update_io_remove are to be
invoked, respectively, when an I/O request enters and when an I/O
request exits the scheduler. Unfortunately, bfq does not fully comply
with this scheme, because it does not invoke these functions for
requests that are inserted into or extracted from its priority
dispatch list. This commit fixes this mistake.
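
To make the accounting rule concrete, here is a minimal, self-contained userspace sketch, plain C and not kernel code: the helper and path names only mirror bfqg_stats_update_io_add/remove and BFQ's insertion paths, they are illustrative stand-ins. The point it models is that a request entering via the priority dispatch list still enters the scheduler, so it must be counted like any other.

/*
 * Userspace model, not kernel code: a per-group in-flight counter
 * that must be balanced on every path a request can take through
 * the scheduler.
 */
#include <assert.h>
#include <stdio.h>

struct group_stats { int in_flight; };

static void stats_io_add(struct group_stats *g)    { g->in_flight++; }
static void stats_io_remove(struct group_stats *g) { g->in_flight--; }

/* Normal path: request is queued in the scheduler proper. */
static void insert_request(struct group_stats *g)  { stats_io_add(g); }

/*
 * Bypass path: request goes straight onto the priority dispatch
 * list. It still enters the scheduler, so it must be counted too;
 * skipping this add models the bug this patch fixes.
 */
static void insert_on_dispatch_list(struct group_stats *g)
{
	stats_io_add(g);
}

/* Every exit (dispatch, merge, completion) pairs with a remove. */
static void dispatch_request(struct group_stats *g) { stats_io_remove(g); }

int main(void)
{
	struct group_stats g = { 0 };

	insert_request(&g);
	insert_on_dispatch_list(&g);
	dispatch_request(&g);
	dispatch_request(&g);

	assert(g.in_flight == 0); /* balances only if both paths count */
	printf("in_flight = %d\n", g.in_flight);
	return 0;
}

The final assert holds only because both insertion paths increment the counter; dropping the add on the bypass path leaves in_flight permanently skewed, which is the unbalanced-stats symptom the patch removes.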

Tested-by: Lee Tibbert <lee.tibbert@gmail.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Luca Miccio <lucmiccio@gmail.com>

---
 block/bfq-iosched.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

-- 
2.10.0

Patch

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 889a854..91703eb 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1359,7 +1359,6 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
 			bfqq->ttime.last_end_request +
 			bfqd->bfq_slice_idle * 3;
 
-	bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);
 
 	/*
 	 * bfqq deserves to be weight-raised if:
@@ -1633,7 +1632,6 @@ static void bfq_remove_request(struct request_queue *q,
 	if (rq->cmd_flags & REQ_META)
 		bfqq->meta_pending--;
 
-	bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
 }
 
 static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
@@ -1746,6 +1744,7 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
 		bfqq->next_rq = rq;
 
 	bfq_remove_request(q, next);
+	bfqg_stats_update_io_remove(bfqq_group(bfqq), next->cmd_flags);
 
 	spin_unlock_irq(&bfqq->bfqd->lock);
 end:
@@ -3700,6 +3699,9 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	spin_lock_irq(&bfqd->lock);
 
 	rq = __bfq_dispatch_request(hctx);
+	if (rq && RQ_BFQQ(rq))
+		bfqg_stats_update_io_remove(bfqq_group(RQ_BFQQ(rq)),
+					    rq->cmd_flags);
 	spin_unlock_irq(&bfqd->lock);
 
 	return rq;
@@ -4224,6 +4226,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 {
 	struct request_queue *q = hctx->queue;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
+	struct bfq_queue *bfqq = RQ_BFQQ(rq);
 
 	spin_lock_irq(&bfqd->lock);
 	if (blk_mq_sched_try_insert_merge(q, rq)) {
@@ -4243,6 +4246,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 			list_add_tail(&rq->queuelist, &bfqd->dispatch);
 	} else {
 		__bfq_insert_request(bfqd, rq);
+		/*
+		 * Update bfqq, because, if a queue merge has occurred
+		 * in __bfq_insert_request, then rq has been
+		 * redirected into a new queue.
+		 */
+		bfqq = RQ_BFQQ(rq);
 
 		if (rq_mergeable(rq)) {
 			elv_rqhash_add(q, rq);
@@ -4251,6 +4260,9 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		}
 	}
 
+	if (bfqq)
+		bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, rq->cmd_flags);
+
 	spin_unlock_irq(&bfqd->lock);
 }
 
@@ -4428,8 +4440,11 @@ static void bfq_finish_request(struct request *rq)
 		 * lock is held.
 		 */
 
-		if (!RB_EMPTY_NODE(&rq->rb_node))
+		if (!RB_EMPTY_NODE(&rq->rb_node)) {
 			bfq_remove_request(rq->q, rq);
+			bfqg_stats_update_io_remove(bfqq_group(bfqq),
+						    rq->cmd_flags);
+		}
 		bfq_put_rq_priv_body(bfqq);
 	}