diff mbox series

[v3,2/4] soc: qcom: rpmhpd: Do proper power off when state synced

Message ID 20230327193829.3756640-3-abel.vesa@linaro.org
State New
Headers show
Series None | expand

Commit Message

Abel Vesa March 27, 2023, 7:38 p.m. UTC
Instead of aggregating different corner values on sync state callback,
call the genpd API for queuing up the power off. This will also mark the
domain as powered off in the debugfs genpd summary. Also, until sync
state has been reached, return busy on power off request, in order to
allow genpd core to know that the actual domain hasn't been powered off
from the "disable unused" late initcall.

Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
---
 drivers/soc/qcom/rpmhpd.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

Comments

Bjorn Andersson April 11, 2023, 3:06 a.m. UTC | #1
On Mon, Mar 27, 2023 at 10:38:27PM +0300, Abel Vesa wrote:
> Instead of aggregating different corner values on sync state callback,
> call the genpd API for queuing up the power off. This will also mark the
> domain as powered off in the debugfs genpd summary. Also, until sync
> state has been reached, return busy on power off request, in order to
> allow genpd core to know that the actual domain hasn't been powered off
> from the "disable unused" late initcall.
> 
> Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
> Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
> ---
>  drivers/soc/qcom/rpmhpd.c | 19 +++++++------------
>  1 file changed, 7 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
> index f20e2a49a669..ec7926820772 100644
> --- a/drivers/soc/qcom/rpmhpd.c
> +++ b/drivers/soc/qcom/rpmhpd.c
> @@ -649,8 +649,12 @@ static int rpmhpd_power_off(struct generic_pm_domain *domain)
>  	mutex_lock(&rpmhpd_lock);
>  
>  	ret = rpmhpd_aggregate_corner(pd, 0);
> -	if (!ret)
> -		pd->enabled = false;
> +	if (!ret) {
> +		if (!pd->state_synced)
> +			ret = -EBUSY;
> +		else
> +			pd->enabled = false;
> +	}
>  
>  	mutex_unlock(&rpmhpd_lock);
>  
> @@ -810,10 +814,8 @@ static void rpmhpd_sync_state(struct device *dev)
>  {
>  	const struct rpmhpd_desc *desc = of_device_get_match_data(dev);
>  	struct rpmhpd **rpmhpds = desc->rpmhpds;
> -	unsigned int corner;
>  	struct rpmhpd *pd;
>  	unsigned int i;
> -	int ret;
>  
>  	mutex_lock(&rpmhpd_lock);
>  	for (i = 0; i < desc->num_pds; i++) {
> @@ -822,14 +824,7 @@ static void rpmhpd_sync_state(struct device *dev)
>  			continue;
>  
>  		pd->state_synced = true;
> -		if (pd->enabled)
> -			corner = max(pd->corner, pd->enable_corner);

Note that the intent of this line is to lower the corner from max to
either a requested performance_state or the lowest non-disabled corner.
I don't think your solution maintains this behavior?

> -		else
> -			corner = 0;
> -
> -		ret = rpmhpd_aggregate_corner(pd, corner);
> -		if (ret)
> -			dev_err(dev, "failed to sync %s\n", pd->res_name);
> +		pm_genpd_queue_power_off(&pd->pd);

In the event that the power-domain has a single device attached, and no
subdomains, wouldn't pm_genpd_queue_power_off() pass straight through
all checks and turn off the power domain? Perhaps I'm just missing
something?

Regards,
Bjorn

>  	}
>  	mutex_unlock(&rpmhpd_lock);
>  }
> -- 
> 2.34.1
>
diff mbox series

Patch

diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index f20e2a49a669..ec7926820772 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -649,8 +649,12 @@  static int rpmhpd_power_off(struct generic_pm_domain *domain)
 	mutex_lock(&rpmhpd_lock);
 
 	ret = rpmhpd_aggregate_corner(pd, 0);
-	if (!ret)
-		pd->enabled = false;
+	if (!ret) {
+		if (!pd->state_synced)
+			ret = -EBUSY;
+		else
+			pd->enabled = false;
+	}
 
 	mutex_unlock(&rpmhpd_lock);
 
@@ -810,10 +814,8 @@  static void rpmhpd_sync_state(struct device *dev)
 {
 	const struct rpmhpd_desc *desc = of_device_get_match_data(dev);
 	struct rpmhpd **rpmhpds = desc->rpmhpds;
-	unsigned int corner;
 	struct rpmhpd *pd;
 	unsigned int i;
-	int ret;
 
 	mutex_lock(&rpmhpd_lock);
 	for (i = 0; i < desc->num_pds; i++) {
@@ -822,14 +824,7 @@  static void rpmhpd_sync_state(struct device *dev)
 			continue;
 
 		pd->state_synced = true;
-		if (pd->enabled)
-			corner = max(pd->corner, pd->enable_corner);
-		else
-			corner = 0;
-
-		ret = rpmhpd_aggregate_corner(pd, corner);
-		if (ret)
-			dev_err(dev, "failed to sync %s\n", pd->res_name);
+		pm_genpd_queue_power_off(&pd->pd);
 	}
 	mutex_unlock(&rpmhpd_lock);
 }