diff mbox series

[3/3] mmc: block: Requeue on block size restrictions

Message ID f3b05a9103ba4c46ae78a96f8cdc700d@hyperstone.com
State New
Headers show
Series [1/3] block: Requeue req as head if driver touched it | expand

Commit Message

Christian Loehle Oct. 26, 2022, 7:30 a.m. UTC
The block layer does not conform to all of our sector count restrictions, so
requeue the request if we had to modify the number of blocks sent, instead
of going through the normal completion.

Note that the normal completion path used before does not lead to a bug;
this change is simply the nicer thing to do.
An example of such a restriction is max_blk_count = 1 and 512 blksz,
but the block layer continues to use requests of size PAGE_SIZE.

Signed-off-by: Christian Loehle <cloehle@hyperstone.com>
---
 drivers/mmc/core/block.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

Comments

Adrian Hunter Nov. 18, 2022, 11:34 a.m. UTC | #1
On 26/10/22 10:30, Christian Löhle wrote:
> The block layer does not conform to all our sector count restrictions, so
> requeue in case we had to modify the number of blocks sent instead of
> going through the normal completion.
> 
> Note that the normal completion used before does not lead to a bug,
> this change is just the nicer thing to do.

Can you elaborate on why it is "nicer"?

> An example of such a restriction is max_blk_count = 1 and 512 blksz,
> but the block layer continues to use requests of size PAGE_SIZE.
> 
> Signed-off-by: Christian Loehle <cloehle@hyperstone.com>
> ---
>  drivers/mmc/core/block.c | 12 +++++++++---
>  1 file changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
> index 54cd009aee50..c434d3964880 100644
> --- a/drivers/mmc/core/block.c
> +++ b/drivers/mmc/core/block.c
> @@ -1519,8 +1519,10 @@ static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
>  	/*
>  	 * Block layer timeouts race with completions which means the normal
>  	 * completion path cannot be used during recovery.
> +	 * Also do not use it if we had to modify the block count to satisfy
> +	 * host controller needs.
>  	 */
> -	if (mq->in_recovery)
> +	if (mq->in_recovery || mrq->data->blocks != blk_rq_sectors(req))
>  		mmc_blk_cqe_complete_rq(mq, req);
>  	else if (likely(!blk_should_fake_timeout(req->q)))
>  		blk_mq_complete_request(req);
> @@ -2051,8 +2053,10 @@ static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
>  	/*
>  	 * Block layer timeouts race with completions which means the normal
>  	 * completion path cannot be used during recovery.
> +	 * Also do not use it if we had to modify the block count to satisfy
> +	 * host controller needs.
>  	 */
> -	if (mq->in_recovery)
> +	if (mq->in_recovery || mrq->data->blocks != blk_rq_sectors(req))
>  		mmc_blk_cqe_complete_rq(mq, req);
>  	else if (likely(!blk_should_fake_timeout(req->q)))
>  		blk_mq_complete_request(req);
> @@ -2115,8 +2119,10 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
>  	/*
>  	 * Block layer timeouts race with completions which means the normal
>  	 * completion path cannot be used during recovery.
> +	 * Also do not use it if we had to modify the block count to satisfy
> +	 * host controller needs.
>  	 */
> -	if (mq->in_recovery) {
> +	if (mq->in_recovery || mrq->data->blocks != blk_rq_sectors(req)) {
>  		mmc_blk_mq_complete_rq(mq, req);
>  	} else if (likely(!blk_should_fake_timeout(req->q))) {
>  		if (can_sleep)
diff mbox series

Patch

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 54cd009aee50..c434d3964880 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1519,8 +1519,10 @@  static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
 	/*
 	 * Block layer timeouts race with completions which means the normal
 	 * completion path cannot be used during recovery.
+	 * Also do not use it if we had to modify the block count to satisfy
+	 * host controller needs.
 	 */
-	if (mq->in_recovery)
+	if (mq->in_recovery || mrq->data->blocks != blk_rq_sectors(req))
 		mmc_blk_cqe_complete_rq(mq, req);
 	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
@@ -2051,8 +2053,10 @@  static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
 	/*
 	 * Block layer timeouts race with completions which means the normal
 	 * completion path cannot be used during recovery.
+	 * Also do not use it if we had to modify the block count to satisfy
+	 * host controller needs.
 	 */
-	if (mq->in_recovery)
+	if (mq->in_recovery || mrq->data->blocks != blk_rq_sectors(req))
 		mmc_blk_cqe_complete_rq(mq, req);
 	else if (likely(!blk_should_fake_timeout(req->q)))
 		blk_mq_complete_request(req);
@@ -2115,8 +2119,10 @@  static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
 	/*
 	 * Block layer timeouts race with completions which means the normal
 	 * completion path cannot be used during recovery.
+	 * Also do not use it if we had to modify the block count to satisfy
+	 * host controller needs.
 	 */
-	if (mq->in_recovery) {
+	if (mq->in_recovery || mrq->data->blocks != blk_rq_sectors(req)) {
 		mmc_blk_mq_complete_rq(mq, req);
 	} else if (likely(!blk_should_fake_timeout(req->q))) {
 		if (can_sleep)