[v2,3/4] scsi: ufs: Cleanup and refactor clk-scaling feature

Message ID 20201216131639.4128-4-stanley.chu@mediatek.com
State New

Commit Message

Stanley Chu Dec. 16, 2020, 1:16 p.m. UTC
Manipulate clock scaling related stuff only if the host capability
supports the clock scaling feature, to avoid redundant code execution.

Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
---
 drivers/scsi/ufs/ufshcd.c | 64 ++++++++++++++++++++-------------------
 1 file changed, 33 insertions(+), 31 deletions(-)
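
For quick reference, below is a condensed view of the two helpers this patch
introduces, taken from the diff further down. They rely on driver context
(struct ufs_hba, clk_scaling_lock, ufshcd_suspend_clkscaling() and friends)
from drivers/scsi/ufs/ufshcd.c, so treat this as a reading aid rather than
standalone code:

static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
	/* is_allowed is serialized by the clk_scaling_lock rwsem */
	down_write(&hba->clk_scaling_lock);
	hba->clk_scaling.is_allowed = allow;
	up_write(&hba->clk_scaling_lock);
}

static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
{
	if (suspend) {
		/* park the scaling work first, then forbid further scaling */
		if (hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	} else {
		/* allow scaling again before restarting the work */
		ufshcd_clk_scaling_allow(hba, true);
		if (hba->clk_scaling.is_enabled)
			ufshcd_resume_clkscaling(hba);
	}
}

Callers now gate these sequences on ufshcd_is_clkscaling_supported(hba), which
is what avoids the redundant work on hosts without the capability.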

Comments

Can Guo Dec. 18, 2020, 6:16 a.m. UTC | #1
On 2020-12-16 21:16, Stanley Chu wrote:
> Manipulate clock scaling related stuff only if the host capability
> supports the clock scaling feature, to avoid redundant code execution.
> 
> Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
> ---
>  drivers/scsi/ufs/ufshcd.c | 64 ++++++++++++++++++++-------------------
>  1 file changed, 33 insertions(+), 31 deletions(-)

Thanks for the cleanup

Reviewed-by: Can Guo <cang@codeaurora.org>

Patch

diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9cc16598136d..ce0528f2e2ed 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1448,9 +1448,6 @@  static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 	unsigned long flags;
 	bool suspend = false;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
 	cancel_work_sync(&hba->clk_scaling.suspend_work);
 	cancel_work_sync(&hba->clk_scaling.resume_work);
 
@@ -1470,9 +1467,6 @@  static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 	unsigned long flags;
 	bool resume = false;
 
-	if (!ufshcd_is_clkscaling_supported(hba))
-		return;
-
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->clk_scaling.is_suspended) {
 		resume = true;
@@ -5642,6 +5636,26 @@  static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
 	}
 }
 
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+{
+	down_write(&hba->clk_scaling_lock);
+	hba->clk_scaling.is_allowed = allow;
+	up_write(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+{
+	if (suspend) {
+		if (hba->clk_scaling.is_enabled)
+			ufshcd_suspend_clkscaling(hba);
+		ufshcd_clk_scaling_allow(hba, false);
+	} else {
+		ufshcd_clk_scaling_allow(hba, true);
+		if (hba->clk_scaling.is_enabled)
+			ufshcd_resume_clkscaling(hba);
+	}
+}
+
 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 {
 	pm_runtime_get_sync(hba->dev);
@@ -5663,23 +5677,20 @@  static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
 	} else {
 		ufshcd_hold(hba, false);
-		if (hba->clk_scaling.is_enabled)
+		if (ufshcd_is_clkscaling_supported(hba) &&
+		    hba->clk_scaling.is_enabled)
 			ufshcd_suspend_clkscaling(hba);
 	}
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = false;
-	up_write(&hba->clk_scaling_lock);
+	ufshcd_clk_scaling_allow(hba, false);
 }
 
 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
 {
 	ufshcd_release(hba);
 
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
+
 	pm_runtime_put(hba->dev);
 }
 
@@ -8507,12 +8518,8 @@  static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_hold(hba, false);
 	hba->clk_gating.is_suspended = true;
 
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_suspend_clkscaling(hba);
-
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = false;
-	up_write(&hba->clk_scaling_lock);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, true);
 
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 			req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -8618,11 +8625,9 @@  static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
 enable_gating:
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
+
 	hba->clk_gating.is_suspended = false;
 	hba->dev_info.b_rpm_dev_flush_capable = false;
 	ufshcd_release(hba);
@@ -8719,11 +8724,8 @@  static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	hba->clk_gating.is_suspended = false;
 
-	down_write(&hba->clk_scaling_lock);
-	hba->clk_scaling.is_allowed = true;
-	up_write(&hba->clk_scaling_lock);
-	if (hba->clk_scaling.is_enabled)
-		ufshcd_resume_clkscaling(hba);
+	if (ufshcd_is_clkscaling_supported(hba))
+		ufshcd_clk_scaling_suspend(hba, false);
 
 	/* Enable Auto-Hibernate if configured */
 	ufshcd_auto_hibern8_enable(hba);