[4/4] PM / Domains: Propagate performance state updates

Message ID 7e1ea283f9eebce081af80ddb8d3ca5c9c76cd3b.1541399301.git.viresh.kumar@linaro.org
State New
Headers show
Series
  • Untitled series #16018
Related show

Commit Message

Viresh Kumar Nov. 5, 2018, 6:36 a.m.
This commit updates genpd core to start propagating performance state
updates to master domains that have their set_performance_state()
callback set.

A genpd handles two types of performance states now. The first one is the
performance state requirement put on the genpd by the devices and
sub-domains under it, which is already represented by
genpd->performance_state. The second type, introduced in this commit, is
the performance state requirement(s) put by the genpd on its master
domain(s). There is a separate value required for each master that the
genpd has and so a new field is added to the struct gpd_link
(link->performance_state), which represents the link between a genpd and
its master. The struct gpd_link also got another field
prev_performance_state, which is used by genpd core as a temporary
variable during transitions.

We need to propagate setting performance state while powering-on a genpd
as well, as we ignore performance state requirements from sub-domains
which are powered-off. For this reason _genpd_power_on() also received
the additional parameter, depth, which is used for hierarchical locking
within genpd.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

---
 drivers/base/power/domain.c | 107 +++++++++++++++++++++++++++++-------
 include/linux/pm_domain.h   |   4 ++
 2 files changed, 92 insertions(+), 19 deletions(-)

-- 
2.19.1.568.g152ad8e3369a

Comments

Rajendra Nayak Nov. 21, 2018, 5:03 a.m. | #1
Hi Viresh,

On 11/5/2018 12:06 PM, Viresh Kumar wrote:
> This commit updates genpd core to start propagating performance state

> updates to master domains that have their set_performance_state()

> callback set.

> 

> A genpd handles two types of performance states now. The first one is the

> performance state requirement put on the genpd by the devices and

> sub-domains under it, which is already represented by

> genpd->performance_state. The second type, introduced in this commit, is

> the performance state requirement(s) put by the genpd on its master

> domain(s). There is a separate value required for each master that the

> genpd has and so a new field is added to the struct gpd_link

> (link->performance_state), which represents the link between a genpd and

> its master. The struct gpd_link also got another field

> prev_performance_state, which is used by genpd core as a temporary

> variable during transitions.

> 

> We need to propagate setting performance state while powering-on a genpd

> as well, as we ignore performance state requirements from sub-domains

> which are powered-off. For this reason _genpd_power_on() also received

> the additional parameter, depth, which is used for hierarchical locking

> within genpd.

> 

> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

> ---

>   drivers/base/power/domain.c | 107 +++++++++++++++++++++++++++++-------

>   include/linux/pm_domain.h   |   4 ++

>   2 files changed, 92 insertions(+), 19 deletions(-)

> 

> diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c

> index 6d2e9b3406f1..81e02c5f753f 100644

> --- a/drivers/base/power/domain.c

> +++ b/drivers/base/power/domain.c

> @@ -239,28 +239,86 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)

>   static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}

>   #endif

>   

> +static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,

> +					   unsigned int state, int depth);

> +

>   static int _genpd_set_performance_state(struct generic_pm_domain *genpd,

> -					unsigned int state)

> +					unsigned int state, int depth)

>   {

> +	struct generic_pm_domain *master;

> +	struct gpd_link *link;

> +	unsigned int mstate;

>   	int ret;

>   

>   	if (!genpd_status_on(genpd))

>   		goto out;


This check here would mean we only propagate performance state
to masters if the genpd is ON?

thanks,
Rajendra

>   

> +	/* Propagate to masters of genpd */

> +	list_for_each_entry(link, &genpd->slave_links, slave_node) {

> +		master = link->master;

> +

> +		if (!master->set_performance_state)

> +			continue;

> +

> +		/* Find master's performance state */

> +		mstate = dev_pm_opp_xlate_performance_state(genpd->opp_table,

> +				master->opp_table, state);

> +		if (unlikely(!mstate))

> +			goto err;

> +

> +		genpd_lock_nested(master, depth + 1);

> +

> +		link->prev_performance_state = link->performance_state;

> +		link->performance_state = mstate;

> +		ret = _genpd_reeval_performance_state(master, mstate, depth + 1);

> +		if (ret)

> +			link->performance_state = link->prev_performance_state;

> +

> +		genpd_unlock(master);

> +

> +		if (ret)

> +			goto err;

> +	}

> +

>   	ret = genpd->set_performance_state(genpd, state);

>   	if (ret)

> -		return ret;

> +		goto err;

>   

>   out:

>   	genpd->performance_state = state;

>   	return 0;

> +

> +err:

> +	/* Encountered an error, lets rollback */

> +	list_for_each_entry_continue_reverse(link, &genpd->slave_links,

> +					     slave_node) {

> +		master = link->master;

> +

> +		if (!master->set_performance_state)

> +			continue;

> +

> +		genpd_lock_nested(master, depth + 1);

> +

> +		mstate = link->prev_performance_state;

> +		link->performance_state = mstate;

> +

> +		if (_genpd_reeval_performance_state(master, mstate, depth + 1)) {

> +			pr_err("%s: Failed to roll back to %d performance state\n",

> +			       master->name, mstate);

> +		}

> +

> +		genpd_unlock(master);

> +	}

> +

> +	return ret;

>   }

>   

>   static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,

> -					   unsigned int state)

> +					   unsigned int state, int depth)

>   {

>   	struct generic_pm_domain_data *pd_data;

>   	struct pm_domain_data *pdd;

> +	struct gpd_link *link;

>   

>   	/* New requested state is same as Max requested state */

>   	if (state == genpd->performance_state)

> @@ -278,21 +336,30 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,

>   			state = pd_data->performance_state;

>   	}

>   

> -	if (state == genpd->performance_state)

> -		return 0;

> -

>   	/*

> -	 * We aren't propagating performance state changes of a subdomain to its

> -	 * masters as we don't have hardware that needs it. Over that, the

> -	 * performance states of subdomain and its masters may not have

> -	 * one-to-one mapping and would require additional information. We can

> -	 * get back to this once we have hardware that needs it. For that

> -	 * reason, we don't have to consider performance state of the subdomains

> -	 * of genpd here.

> +	 * Traverse all powered-on subdomains within the domain. This can be

> +	 * done without any additional locking as the link->performance_state

> +	 * field is protected by the master genpd->lock, which is already taken.

> +	 *

> +	 * Also note that link->performance_state (subdomain's performance state

> +	 * requirement to master domain) is different from

> +	 * link->slave->performance_state (current performance state requirement

> +	 * of the devices/sub-domains of the subdomain) and so can have a

> +	 * different value.

>   	 */

> +	list_for_each_entry(link, &genpd->master_links, master_node) {

> +		if (!genpd_status_on(link->slave))

> +			continue;

> +

> +		if (link->performance_state > state)

> +			state = link->performance_state;

> +	}

> +

> +	if (state == genpd->performance_state)

> +		return 0;

>   

>   update_state:

> -	return _genpd_set_performance_state(genpd, state);

> +	return _genpd_set_performance_state(genpd, state, depth);

>   }

>   

>   /**

> @@ -336,7 +403,7 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)

>   	prev = gpd_data->performance_state;

>   	gpd_data->performance_state = state;

>   

> -	ret = _genpd_reeval_performance_state(genpd, state);

> +	ret = _genpd_reeval_performance_state(genpd, state, 0);

>   	if (ret)

>   		gpd_data->performance_state = prev;

>   

> @@ -346,7 +413,8 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)

>   }

>   EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);

>   

> -static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)

> +static int

> +_genpd_power_on(struct generic_pm_domain *genpd, bool timed, int depth)

>   {

>   	unsigned int state_idx = genpd->state_idx;

>   	ktime_t time_start;

> @@ -367,7 +435,8 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)

>   	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));

>   

>   	if (unlikely(genpd->set_performance_state)) {

> -		ret = genpd->set_performance_state(genpd, genpd->performance_state);

> +		ret = _genpd_set_performance_state(genpd,

> +					genpd->performance_state, depth);

>   		if (ret) {

>   			pr_warn("%s: Failed to set performance state %d (%d)\n",

>   				genpd->name, genpd->performance_state, ret);

> @@ -557,7 +626,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)

>   		}

>   	}

>   

> -	ret = _genpd_power_on(genpd, true);

> +	ret = _genpd_power_on(genpd, true, depth);

>   	if (ret)

>   		goto err;

>   

> @@ -962,7 +1031,7 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,

>   			genpd_unlock(link->master);

>   	}

>   

> -	_genpd_power_on(genpd, false);

> +	_genpd_power_on(genpd, false, depth);

>   

>   	genpd->status = GPD_STATE_ACTIVE;

>   }

> diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h

> index 9ad101362aef..dd364abb649a 100644

> --- a/include/linux/pm_domain.h

> +++ b/include/linux/pm_domain.h

> @@ -136,6 +136,10 @@ struct gpd_link {

>   	struct list_head master_node;

>   	struct generic_pm_domain *slave;

>   	struct list_head slave_node;

> +

> +	/* Sub-domain's per-master domain performance state */

> +	unsigned int performance_state;

> +	unsigned int prev_performance_state;

>   };

>   

>   struct gpd_timing_data {

>
Rajendra Nayak Nov. 21, 2018, 5:31 a.m. | #2
On 11/21/2018 10:46 AM, Viresh Kumar wrote:
> On 21-11-18, 10:33, Rajendra Nayak wrote:

>> Hi Viresh,

>>

>> On 11/5/2018 12:06 PM, Viresh Kumar wrote:

>>> This commit updates genpd core to start propagating performance state

>>> updates to master domains that have their set_performance_state()

>>> callback set.

>>>

>>> A genpd handles two types of performance states now. The first one is the

>>> performance state requirement put on the genpd by the devices and

>>> sub-domains under it, which is already represented by

>>> genpd->performance_state. The second type, introduced in this commit, is

>>> the performance state requirement(s) put by the genpd on its master

>>> domain(s). There is a separate value required for each master that the

>>> genpd has and so a new field is added to the struct gpd_link

>>> (link->performance_state), which represents the link between a genpd and

>>> its master. The struct gpd_link also got another field

>>> prev_performance_state, which is used by genpd core as a temporary

>>> variable during transitions.

>>>

>>> We need to propagate setting performance state while powering-on a genpd

>>> as well, as we ignore performance state requirements from sub-domains

>>> which are powered-off. For this reason _genpd_power_on() also received

>>> the additional parameter, depth, which is used for hierarchical locking

>>> within genpd.

>>>

>>> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

>>> ---

>>>    drivers/base/power/domain.c | 107 +++++++++++++++++++++++++++++-------

>>>    include/linux/pm_domain.h   |   4 ++

>>>    2 files changed, 92 insertions(+), 19 deletions(-)

>>>

>>> diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c

>>> index 6d2e9b3406f1..81e02c5f753f 100644

>>> --- a/drivers/base/power/domain.c

>>> +++ b/drivers/base/power/domain.c

>>> @@ -239,28 +239,86 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)

>>>    static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}

>>>    #endif

>>> +static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,

>>> +					   unsigned int state, int depth);

>>> +

>>>    static int _genpd_set_performance_state(struct generic_pm_domain *genpd,

>>> -					unsigned int state)

>>> +					unsigned int state, int depth)

>>>    {

>>> +	struct generic_pm_domain *master;

>>> +	struct gpd_link *link;

>>> +	unsigned int mstate;

>>>    	int ret;

>>>    	if (!genpd_status_on(genpd))

>>>    		goto out;

>>

>> This check here would mean we only propagate performance state

>> to masters if the genpd is ON?

> 

> Yeah, isn't that what should we do anyway? It is similar to clk-enable

> etc. Why increase the reference count if the device isn't enabled and

> isn't using the clock ?


I would think this is analogous to a driver calling clk_set_rate() first and
then a clk_enable(), which is certainly valid.
So my question is, if calling a dev_pm_genpd_set_performance_state()
and then runtime enabling the device would work (and take care of propagating the performance
state). In my testing I found it does not.

> 

> Also you can see that I have updated the genpd power-on code to start

> propagation to make sure we don't miss anything.

>
Viresh Kumar Nov. 21, 2018, 5:41 a.m. | #3
On 21-11-18, 11:01, Rajendra Nayak wrote:
> I would think this is analogous to a driver calling clk_set_rate() first and

> then a clk_enable(), which is certainly valid.

> So my question is, if calling a dev_pm_genpd_set_performance_state()

> and then runtime enabling the device would work (and take care of propagating the performance

> state).


Two things here..

- First, it should work.

- Second, we don't look at device RPM states but only if a genpd is on
  or off. Maybe we should look at device states as well, but then we
  need to initiate set-performance-state routine from runtime enable
  as well. But that is completely different work and would require its
  own series.

> In my testing I found it does not.


As I said, we don't do anything in RPM enable of the device currently,
but only during genpd_power_on(). So if the genpd was previously
disabled, RPM enable of the device should have done propagation of
states as well. Otherwise, we should already have take care of the
vote from the device. So, it should have worked.

Needs some investigation on why this isn't working for you.

-- 
viresh
Rajendra Nayak Nov. 21, 2018, 5:42 a.m. | #4
On 11/21/2018 11:01 AM, Rajendra Nayak wrote:
> 

> 

> On 11/21/2018 10:46 AM, Viresh Kumar wrote:

>> On 21-11-18, 10:33, Rajendra Nayak wrote:

>>> Hi Viresh,

>>>

>>> On 11/5/2018 12:06 PM, Viresh Kumar wrote:

>>>> This commit updates genpd core to start propagating performance state

>>>> updates to master domains that have their set_performance_state()

>>>> callback set.

>>>>

>>>> A genpd handles two types of performance states now. The first one is the

>>>> performance state requirement put on the genpd by the devices and

>>>> sub-domains under it, which is already represented by

>>>> genpd->performance_state. The second type, introduced in this commit, is

>>>> the performance state requirement(s) put by the genpd on its master

>>>> domain(s). There is a separate value required for each master that the

>>>> genpd has and so a new field is added to the struct gpd_link

>>>> (link->performance_state), which represents the link between a genpd and

>>>> its master. The struct gpd_link also got another field

>>>> prev_performance_state, which is used by genpd core as a temporary

>>>> variable during transitions.

>>>>

>>>> We need to propagate setting performance state while powering-on a genpd

>>>> as well, as we ignore performance state requirements from sub-domains

>>>> which are powered-off. For this reason _genpd_power_on() also received

>>>> the additional parameter, depth, which is used for hierarchical locking

>>>> within genpd.

>>>>

>>>> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

>>>> ---

>>>>    drivers/base/power/domain.c | 107 +++++++++++++++++++++++++++++-------

>>>>    include/linux/pm_domain.h   |   4 ++

>>>>    2 files changed, 92 insertions(+), 19 deletions(-)

>>>>

>>>> diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c

>>>> index 6d2e9b3406f1..81e02c5f753f 100644

>>>> --- a/drivers/base/power/domain.c

>>>> +++ b/drivers/base/power/domain.c

>>>> @@ -239,28 +239,86 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)

>>>>    static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}

>>>>    #endif

>>>> +static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,

>>>> +                       unsigned int state, int depth);

>>>> +

>>>>    static int _genpd_set_performance_state(struct generic_pm_domain *genpd,

>>>> -                    unsigned int state)

>>>> +                    unsigned int state, int depth)

>>>>    {

>>>> +    struct generic_pm_domain *master;

>>>> +    struct gpd_link *link;

>>>> +    unsigned int mstate;

>>>>        int ret;

>>>>        if (!genpd_status_on(genpd))

>>>>            goto out;

>>>

>>> This check here would mean we only propagate performance state

>>> to masters if the genpd is ON?

>>

>> Yeah, isn't that what should we do anyway? It is similar to clk-enable

>> etc. Why increase the reference count if the device isn't enabled and

>> isn't using the clock ?

> 

> I would think this is analogous to a driver calling clk_set_rate() first and

> then a clk_enable(), which is certainly valid.

> So my question is, if calling a dev_pm_genpd_set_performance_state()

> and then runtime enabling the device would work (and take care of propagating the performance

> state). In my testing I found it does not.


And the reason for that seems to be that we update the genpd status to GPD_STATE_ACTIVE
*after* we try to set the performance state, so we always hit this check which bails out
thinking the genpd is not ON.

> 

>>

>> Also you can see that I have updated the genpd power-on code to start

>> propagation to make sure we don't miss anything.

>>
Rajendra Nayak Nov. 21, 2018, 6:51 a.m. | #5
On 11/21/2018 12:06 PM, Viresh Kumar wrote:
> On 21-11-18, 11:12, Rajendra Nayak wrote:

>> And the reason for that seems to be that we update the genpd status to GPD_STATE_ACTIVE

>> *after* we try to set the performance state, so we always hit this check which bails out

>> thinking the genpd is not ON.

> 

> Thanks for looking at it. Here is the (untested) fix, please try it

> out.


Thanks, yes, this does seem to work.

> 

> diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c

> index 84c13695af65..92be4a224b45 100644

> --- a/drivers/base/power/domain.c

> +++ b/drivers/base/power/domain.c

> @@ -250,9 +250,6 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,

>          unsigned int mstate;

>          int ret;

>   

> -       if (!genpd_status_on(genpd))

> -               goto out;

> -

>          /* Propagate to masters of genpd */

>          list_for_each_entry(link, &genpd->slave_links, slave_node) {

>                  master = link->master;

> @@ -286,7 +283,6 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,

>          if (ret)

>                  goto err;

>   

> -out:

>          genpd->performance_state = state;

>          return 0;

>   

> @@ -361,6 +357,11 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,

>                  return 0;

>   

>   update_state:

> +       if (!genpd_status_on(genpd)) {

> +               genpd->performance_state = state;

> +               return 0;

> +       }

> +

>          return _genpd_set_performance_state(genpd, state, depth);

>   }

>

Patch

diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6d2e9b3406f1..81e02c5f753f 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -239,28 +239,86 @@  static void genpd_update_accounting(struct generic_pm_domain *genpd)
 static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
 #endif
 
+static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
+					   unsigned int state, int depth);
+
 static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
-					unsigned int state)
+					unsigned int state, int depth)
 {
+	struct generic_pm_domain *master;
+	struct gpd_link *link;
+	unsigned int mstate;
 	int ret;
 
 	if (!genpd_status_on(genpd))
 		goto out;
 
+	/* Propagate to masters of genpd */
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		master = link->master;
+
+		if (!master->set_performance_state)
+			continue;
+
+		/* Find master's performance state */
+		mstate = dev_pm_opp_xlate_performance_state(genpd->opp_table,
+				master->opp_table, state);
+		if (unlikely(!mstate))
+			goto err;
+
+		genpd_lock_nested(master, depth + 1);
+
+		link->prev_performance_state = link->performance_state;
+		link->performance_state = mstate;
+		ret = _genpd_reeval_performance_state(master, mstate, depth + 1);
+		if (ret)
+			link->performance_state = link->prev_performance_state;
+
+		genpd_unlock(master);
+
+		if (ret)
+			goto err;
+	}
+
 	ret = genpd->set_performance_state(genpd, state);
 	if (ret)
-		return ret;
+		goto err;
 
 out:
 	genpd->performance_state = state;
 	return 0;
+
+err:
+	/* Encountered an error, lets rollback */
+	list_for_each_entry_continue_reverse(link, &genpd->slave_links,
+					     slave_node) {
+		master = link->master;
+
+		if (!master->set_performance_state)
+			continue;
+
+		genpd_lock_nested(master, depth + 1);
+
+		mstate = link->prev_performance_state;
+		link->performance_state = mstate;
+
+		if (_genpd_reeval_performance_state(master, mstate, depth + 1)) {
+			pr_err("%s: Failed to roll back to %d performance state\n",
+			       master->name, mstate);
+		}
+
+		genpd_unlock(master);
+	}
+
+	return ret;
 }
 
 static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
-					   unsigned int state)
+					   unsigned int state, int depth)
 {
 	struct generic_pm_domain_data *pd_data;
 	struct pm_domain_data *pdd;
+	struct gpd_link *link;
 
 	/* New requested state is same as Max requested state */
 	if (state == genpd->performance_state)
@@ -278,21 +336,30 @@  static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
 			state = pd_data->performance_state;
 	}
 
-	if (state == genpd->performance_state)
-		return 0;
-
 	/*
-	 * We aren't propagating performance state changes of a subdomain to its
-	 * masters as we don't have hardware that needs it. Over that, the
-	 * performance states of subdomain and its masters may not have
-	 * one-to-one mapping and would require additional information. We can
-	 * get back to this once we have hardware that needs it. For that
-	 * reason, we don't have to consider performance state of the subdomains
-	 * of genpd here.
+	 * Traverse all powered-on subdomains within the domain. This can be
+	 * done without any additional locking as the link->performance_state
+	 * field is protected by the master genpd->lock, which is already taken.
+	 *
+	 * Also note that link->performance_state (subdomain's performance state
+	 * requirement to master domain) is different from
+	 * link->slave->performance_state (current performance state requirement
+	 * of the devices/sub-domains of the subdomain) and so can have a
+	 * different value.
 	 */
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		if (!genpd_status_on(link->slave))
+			continue;
+
+		if (link->performance_state > state)
+			state = link->performance_state;
+	}
+
+	if (state == genpd->performance_state)
+		return 0;
 
 update_state:
-	return _genpd_set_performance_state(genpd, state);
+	return _genpd_set_performance_state(genpd, state, depth);
 }
 
 /**
@@ -336,7 +403,7 @@  int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
 	prev = gpd_data->performance_state;
 	gpd_data->performance_state = state;
 
-	ret = _genpd_reeval_performance_state(genpd, state);
+	ret = _genpd_reeval_performance_state(genpd, state, 0);
 	if (ret)
 		gpd_data->performance_state = prev;
 
@@ -346,7 +413,8 @@  int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
 }
 EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
 
-static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
+static int
+_genpd_power_on(struct generic_pm_domain *genpd, bool timed, int depth)
 {
 	unsigned int state_idx = genpd->state_idx;
 	ktime_t time_start;
@@ -367,7 +435,8 @@  static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
 
 	if (unlikely(genpd->set_performance_state)) {
-		ret = genpd->set_performance_state(genpd, genpd->performance_state);
+		ret = _genpd_set_performance_state(genpd,
+					genpd->performance_state, depth);
 		if (ret) {
 			pr_warn("%s: Failed to set performance state %d (%d)\n",
 				genpd->name, genpd->performance_state, ret);
@@ -557,7 +626,7 @@  static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 		}
 	}
 
-	ret = _genpd_power_on(genpd, true);
+	ret = _genpd_power_on(genpd, true, depth);
 	if (ret)
 		goto err;
 
@@ -962,7 +1031,7 @@  static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
 			genpd_unlock(link->master);
 	}
 
-	_genpd_power_on(genpd, false);
+	_genpd_power_on(genpd, false, depth);
 
 	genpd->status = GPD_STATE_ACTIVE;
 }
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 9ad101362aef..dd364abb649a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -136,6 +136,10 @@  struct gpd_link {
 	struct list_head master_node;
 	struct generic_pm_domain *slave;
 	struct list_head slave_node;
+
+	/* Sub-domain's per-master domain performance state */
+	unsigned int performance_state;
+	unsigned int prev_performance_state;
 };
 
 struct gpd_timing_data {