diff mbox series

[V2,2/2] mmc: hsq: dynamic adjustment of hsq->depth

Message ID 20230823061734.27479-3-wenchao.chen@unisoc.com
State Superseded
Headers show
Series mmc: hsq: Dynamically adjust hsq_depth to improve performance | expand

Commit Message

Wenchao Chen Aug. 23, 2023, 6:17 a.m. UTC
For 4K random write workloads, increasing hsq_depth to 5 allows more requests to be prepared ahead of time, which improves performance.

Signed-off-by: Wenchao Chen <wenchao.chen@unisoc.com>
---
 drivers/mmc/host/mmc_hsq.c | 26 ++++++++++++++++++++++++++
 drivers/mmc/host/mmc_hsq.h |  2 ++
 2 files changed, 28 insertions(+)

Comments

Ulf Hansson Aug. 24, 2023, 10:36 a.m. UTC | #1
On Wed, 23 Aug 2023 at 08:18, Wenchao Chen <wenchao.chen@unisoc.com> wrote:
>
> Increasing hsq_depth improves random write performance.
>
> Signed-off-by: Wenchao Chen <wenchao.chen@unisoc.com>
> ---
>  drivers/mmc/host/mmc_hsq.c | 26 ++++++++++++++++++++++++++
>  drivers/mmc/host/mmc_hsq.h |  2 ++
>  2 files changed, 28 insertions(+)
>
> diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
> index 8556cacb21a1..8682a3d16a76 100644
> --- a/drivers/mmc/host/mmc_hsq.c
> +++ b/drivers/mmc/host/mmc_hsq.c
> @@ -21,6 +21,30 @@ static void mmc_hsq_retry_handler(struct work_struct *work)
>         mmc->ops->request(mmc, hsq->mrq);
>  }
>
> +static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
> +{
> +       struct mmc_host *mmc = hsq->mmc;
> +       struct mmc_request *mrq;
> +       struct hsq_slot *slot;
> +       int need_change = 0;
> +       int tag;
> +
> +       for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
> +               slot = &hsq->slot[tag];
> +               mrq = slot->mrq;
> +               if (mrq && mrq->data && (mrq->data->blocks == HSQ_DATA_IS_4K)

This assumes mrq->data->blksz is 512 (which at least for now is always
the case), but perhaps better to compute the request size instead?
Hence:

"mrq->data->blksz * mrq->data->blocks == 4096"

> +                               && (mrq->data->flags & MMC_DATA_WRITE))
> +                       need_change++;
> +               else
> +                       break;
> +       }
> +
> +       if (need_change > 1)
> +               mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
> +       else
> +               mmc->hsq_depth = HSQ_NORMAL_DEPTH;
> +}
> +
>  static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
>  {
>         struct mmc_host *mmc = hsq->mmc;
> @@ -42,6 +66,8 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
>                 return;
>         }
>
> +       mmc_hsq_modify_threshold(hsq);
> +
>         slot = &hsq->slot[hsq->next_tag];
>         hsq->mrq = slot->mrq;
>         hsq->qcnt--;
> diff --git a/drivers/mmc/host/mmc_hsq.h b/drivers/mmc/host/mmc_hsq.h
> index aa5c4543b55f..fc031e38f1e0 100644
> --- a/drivers/mmc/host/mmc_hsq.h
> +++ b/drivers/mmc/host/mmc_hsq.h
> @@ -10,6 +10,8 @@
>   * flight to avoid a long latency.
>   */
>  #define HSQ_NORMAL_DEPTH       2
> +#define HSQ_PERFORMANCE_DEPTH  5
> +#define HSQ_DATA_IS_4K 8

Perhaps re-phrase the comment a few lines above to explain why/when
'5' can be good too.

>
>  struct hsq_slot {
>         struct mmc_request *mrq;

Kind regards
Uffe
Wenchao Chen Aug. 28, 2023, 8:59 a.m. UTC | #2
On Thu, Aug 24, 2023 at 6:37 PM Ulf Hansson <ulf.hansson@linaro.org> wrote:
>
> On Wed, 23 Aug 2023 at 08:18, Wenchao Chen <wenchao.chen@unisoc.com> wrote:
> >
> > Increasing hsq_depth improves random write performance.
> >
> > Signed-off-by: Wenchao Chen <wenchao.chen@unisoc.com>
> > ---
> >  drivers/mmc/host/mmc_hsq.c | 26 ++++++++++++++++++++++++++
> >  drivers/mmc/host/mmc_hsq.h |  2 ++
> >  2 files changed, 28 insertions(+)
> >
> > diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
> > index 8556cacb21a1..8682a3d16a76 100644
> > --- a/drivers/mmc/host/mmc_hsq.c
> > +++ b/drivers/mmc/host/mmc_hsq.c
> > @@ -21,6 +21,30 @@ static void mmc_hsq_retry_handler(struct work_struct *work)
> >         mmc->ops->request(mmc, hsq->mrq);
> >  }
> >
> > +static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
> > +{
> > +       struct mmc_host *mmc = hsq->mmc;
> > +       struct mmc_request *mrq;
> > +       struct hsq_slot *slot;
> > +       int need_change = 0;
> > +       int tag;
> > +
> > +       for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
> > +               slot = &hsq->slot[tag];
> > +               mrq = slot->mrq;
> > +               if (mrq && mrq->data && (mrq->data->blocks == HSQ_DATA_IS_4K)
>
> This assumes mrq->data->blksz is 512 (which at least for now is always
> the case), but perhaps better to compute the request size instead?
> Hence:
>
> "mrq->data->blksz * mrq->data->blocks == 4096"
>

I will update it in the next version. Thanks.

> > +                               && (mrq->data->flags & MMC_DATA_WRITE))
> > +                       need_change++;
> > +               else
> > +                       break;
> > +       }
> > +
> > +       if (need_change > 1)
> > +               mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
> > +       else
> > +               mmc->hsq_depth = HSQ_NORMAL_DEPTH;
> > +}
> > +
> >  static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
> >  {
> >         struct mmc_host *mmc = hsq->mmc;
> > @@ -42,6 +66,8 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
> >                 return;
> >         }
> >
> > +       mmc_hsq_modify_threshold(hsq);
> > +
> >         slot = &hsq->slot[hsq->next_tag];
> >         hsq->mrq = slot->mrq;
> >         hsq->qcnt--;
> > diff --git a/drivers/mmc/host/mmc_hsq.h b/drivers/mmc/host/mmc_hsq.h
> > index aa5c4543b55f..fc031e38f1e0 100644
> > --- a/drivers/mmc/host/mmc_hsq.h
> > +++ b/drivers/mmc/host/mmc_hsq.h
> > @@ -10,6 +10,8 @@
> >   * flight to avoid a long latency.
> >   */
> >  #define HSQ_NORMAL_DEPTH       2
> > +#define HSQ_PERFORMANCE_DEPTH  5
> > +#define HSQ_DATA_IS_4K 8
>
> Perhaps re-phrase the comment a few lines above to explain why/when
> '5' can be good too.
>

Ok, I'll add that in the next version. Thanks.

For 4k random writes, we allow hsq_depth to increase to 5 for better
performance.

> >
> >  struct hsq_slot {
> >         struct mmc_request *mrq;
>
> Kind regards
> Uffe
diff mbox series

Patch

diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
index 8556cacb21a1..8682a3d16a76 100644
--- a/drivers/mmc/host/mmc_hsq.c
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -21,6 +21,37 @@  static void mmc_hsq_retry_handler(struct work_struct *work)
 	mmc->ops->request(mmc, hsq->mrq);
 }
 
+/*
+ * Adjust mmc->hsq_depth based on the queued requests: when at least two
+ * consecutive slots hold 4K write requests (a random write pattern),
+ * raise the depth to HSQ_PERFORMANCE_DEPTH; otherwise fall back to
+ * HSQ_NORMAL_DEPTH.
+ */
+static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
+{
+	struct mmc_host *mmc = hsq->mmc;
+	struct mmc_request *mrq;
+	struct hsq_slot *slot;
+	int need_change = 0;
+	int tag;
+
+	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
+		slot = &hsq->slot[tag];
+		mrq = slot->mrq;
+		/* Compute the request size; do not assume a 512 byte blksz */
+		if (mrq && mrq->data && (mrq->data->blksz * mrq->data->blocks == 4096)
+				&& (mrq->data->flags & MMC_DATA_WRITE))
+			need_change++;
+		else
+			break;
+	}
+
+	if (need_change > 1)
+		mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
+	else
+		mmc->hsq_depth = HSQ_NORMAL_DEPTH;
+}
+
 static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
 {
 	struct mmc_host *mmc = hsq->mmc;
@@ -42,6 +73,8 @@  static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
 		return;
 	}
 
+	mmc_hsq_modify_threshold(hsq);
+
 	slot = &hsq->slot[hsq->next_tag];
 	hsq->mrq = slot->mrq;
 	hsq->qcnt--;
diff --git a/drivers/mmc/host/mmc_hsq.h b/drivers/mmc/host/mmc_hsq.h
index aa5c4543b55f..fc031e38f1e0 100644
--- a/drivers/mmc/host/mmc_hsq.h
+++ b/drivers/mmc/host/mmc_hsq.h
@@ -10,6 +10,8 @@ 
  * flight to avoid a long latency.
  */
 #define HSQ_NORMAL_DEPTH	2
+/* For 4K random writes, a deeper queue of 5 improves performance */
+#define HSQ_PERFORMANCE_DEPTH	5
 
 struct hsq_slot {
 	struct mmc_request *mrq;