[V3,4/5] cpufreq: Register notifiers with the PM QoS framework

Message ID a275fdd9325f1b2cba046c79930ad59653674455.1560163748.git.viresh.kumar@linaro.org
State New
Series [V3,1/5] PM / QOS: Pass request type to dev_pm_qos_{add|remove}_notifier()

Commit Message

Viresh Kumar June 10, 2019, 10:51 a.m. UTC
This registers the notifiers for min/max frequency constraints with the
PM QoS framework. The constraints are also taken into consideration in
cpufreq_set_policy().

This also relocates cpufreq_policy_put_kobj() as it is required to be
called from cpufreq_policy_alloc() now.

No constraints are added by this patch yet, though.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

---
 drivers/cpufreq/cpufreq.c | 139 +++++++++++++++++++++++++++++++-------
 include/linux/cpufreq.h   |   4 ++
 2 files changed, 120 insertions(+), 23 deletions(-)

-- 
2.21.0.rc0.269.g1a574e7a288b
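
For context, a minimal sketch (not part of this patch) of how a consumer would add a frequency constraint that these notifiers react to. It assumes the DEV_PM_QOS_{MIN|MAX}_FREQUENCY request types introduced earlier in this series; the CPU, value and function names are purely illustrative:

	#include <linux/cpu.h>
	#include <linux/pm_qos.h>

	/* Illustrative only: cap CPU0 at 1.4 GHz through device PM QoS. */
	static struct dev_pm_qos_request cpu0_max_freq_req;

	static int example_cap_cpu0(void)
	{
		struct device *cpu_dev = get_cpu_device(0);

		if (!cpu_dev)
			return -ENODEV;

		/* Value in kHz, same unit as policy->min/max. */
		return dev_pm_qos_add_request(cpu_dev, &cpu0_max_freq_req,
					      DEV_PM_QOS_MAX_FREQUENCY, 1400000);
	}

	/* Dropping the request lets cpufreq restore its previous max limit. */
	static void example_uncap_cpu0(void)
	{
		dev_pm_qos_remove_request(&cpu0_max_freq_req);
	}

When such a request is added, updated or removed, the PM QoS framework aggregates all requests for that CPU device and calls the min/max notifiers registered by this patch, which schedule req_work to re-run cpufreq_set_policy() with the new limits.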

Comments

Viresh Kumar June 17, 2019, 3:02 a.m. UTC | #1
On 14-06-19, 09:46, Matthias Kaehlcke wrote:
> Hi Viresh,
> 
> On Mon, Jun 10, 2019 at 04:21:35PM +0530, Viresh Kumar wrote:
> > This registers the notifiers for min/max frequency constraints with the
> > PM QoS framework. The constraints are also taken into consideration in
> > cpufreq_set_policy().
> > 
> > This also relocates cpufreq_policy_put_kobj() as it is required to be
> > called from cpufreq_policy_alloc() now.
> > 
> > No constraints are added until now though.
> > 
> > Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
> > ---
> >  drivers/cpufreq/cpufreq.c | 139 +++++++++++++++++++++++++++++++-------
> >  include/linux/cpufreq.h   |   4 ++
> >  2 files changed, 120 insertions(+), 23 deletions(-)
> > 
> > diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
> > index 85ff958e01f1..547d221b2ff2 100644
> > --- a/drivers/cpufreq/cpufreq.c
> > +++ b/drivers/cpufreq/cpufreq.c
> > @@ -26,6 +26,7 @@
> >  #include <linux/kernel_stat.h>
> >  #include <linux/module.h>
> >  #include <linux/mutex.h>
> > +#include <linux/pm_qos.h>
> >  #include <linux/slab.h>
> >  #include <linux/suspend.h>
> >  #include <linux/syscore_ops.h>
> > @@ -1126,11 +1127,77 @@ static void handle_update(struct work_struct *work)
> >  	cpufreq_update_policy(cpu);
> >  }
> >  
> > +static void cpufreq_update_freq_work(struct work_struct *work)
> > +{
> > +	struct cpufreq_policy *policy =
> > +		container_of(work, struct cpufreq_policy, req_work);
> > +	struct cpufreq_policy new_policy = *policy;
> > +
> > +	/* We should read constraint values from QoS layer */
> > +	new_policy.min = 0;
> > +	new_policy.max = UINT_MAX;
> > +
> > +	down_write(&policy->rwsem);
> > +
> > +	if (!policy_is_inactive(policy))
> > +		cpufreq_set_policy(policy, &new_policy);
> > +
> > +	up_write(&policy->rwsem);
> > +}
> > +
> > +static int cpufreq_update_freq(struct cpufreq_policy *policy)
> > +{
> > +	schedule_work(&policy->req_work);
> 
> I think you need to add a cancel_work_sync() in cpufreq_policy_free()
> to make sure the work doesn't run after the policy has been freed.

Right, added this to the commit.

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 547d221b2ff2..878add2cac3c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1289,6 +1289,8 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
                                   DEV_PM_QOS_MAX_FREQUENCY);
        dev_pm_qos_remove_notifier(dev, &policy->nb_min,
                                   DEV_PM_QOS_MIN_FREQUENCY);
+       cancel_work_sync(&policy->req_work);
+
        cpufreq_policy_put_kobj(policy);
        free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);

-- 
viresh
Ulf Hansson June 17, 2019, 9:23 a.m. UTC | #2
On Mon, 10 Jun 2019 at 12:52, Viresh Kumar <viresh.kumar@linaro.org> wrote:
>
> This registers the notifiers for min/max frequency constraints with the
> PM QoS framework. The constraints are also taken into consideration in
> cpufreq_set_policy().
>
> This also relocates cpufreq_policy_put_kobj() as it is required to be
> called from cpufreq_policy_alloc() now.
>
> No constraints are added until now though.
>
> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>

Kind regards
Uffe
Viresh Kumar June 18, 2019, 11:25 a.m. UTC | #3
On 18-06-19, 01:26, Rafael J. Wysocki wrote:
> On Monday, June 10, 2019 12:51:35 PM CEST Viresh Kumar wrote:
> > +static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
> > +				void *data)
> > +{
> > +	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
> > +
> > +	return cpufreq_update_freq(policy);
> > +}
> > +
> > +static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
> > +				void *data)
> > +{
> > +	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
> > +
> > +	return cpufreq_update_freq(policy);
> > +}
> 
> This is a bit convoluted.
> 
> Two different notifiers are registered basically for the same thing.
> 
> Any chance to use just one?

The way QoS is designed, it handles only one value at a time, and we need two:
min and max. I thought about this a lot earlier and this is what I came up
with :(

Do you have any suggestions here?
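
For reference, the dual registration being discussed is the one in the patch below (excerpted here): each constraint type gets its own notifier_block embedded in the policy, and the callbacks recover the policy with container_of(), since a notifier is only told the new aggregated value, not which request type changed.

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
				      DEV_PM_QOS_MIN_FREQUENCY);
	...
	ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
				      DEV_PM_QOS_MAX_FREQUENCY);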

-- 
viresh
Patch

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 85ff958e01f1..547d221b2ff2 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,6 +26,7 @@ 
 #include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/pm_qos.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
@@ -1126,11 +1127,77 @@  static void handle_update(struct work_struct *work)
 	cpufreq_update_policy(cpu);
 }
 
+static void cpufreq_update_freq_work(struct work_struct *work)
+{
+	struct cpufreq_policy *policy =
+		container_of(work, struct cpufreq_policy, req_work);
+	struct cpufreq_policy new_policy = *policy;
+
+	/* We should read constraint values from QoS layer */
+	new_policy.min = 0;
+	new_policy.max = UINT_MAX;
+
+	down_write(&policy->rwsem);
+
+	if (!policy_is_inactive(policy))
+		cpufreq_set_policy(policy, &new_policy);
+
+	up_write(&policy->rwsem);
+}
+
+static int cpufreq_update_freq(struct cpufreq_policy *policy)
+{
+	schedule_work(&policy->req_work);
+	return 0;
+}
+
+static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
+				void *data)
+{
+	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
+
+	return cpufreq_update_freq(policy);
+}
+
+static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
+				void *data)
+{
+	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
+
+	return cpufreq_update_freq(policy);
+}
+
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+	struct kobject *kobj;
+	struct completion *cmp;
+
+	down_write(&policy->rwsem);
+	cpufreq_stats_free_table(policy);
+	kobj = &policy->kobj;
+	cmp = &policy->kobj_unregister;
+	up_write(&policy->rwsem);
+	kobject_put(kobj);
+
+	/*
+	 * We need to make sure that the underlying kobj is
+	 * actually not referenced anymore by anybody before we
+	 * proceed with unloading.
+	 */
+	pr_debug("waiting for dropping of refcount\n");
+	wait_for_completion(cmp);
+	pr_debug("wait complete\n");
+}
+
 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 {
 	struct cpufreq_policy *policy;
+	struct device *dev = get_cpu_device(cpu);
 	int ret;
 
+	if (!dev)
+		return NULL;
+
 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
 	if (!policy)
 		return NULL;
@@ -1147,7 +1214,7 @@  static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
 				   cpufreq_global_kobject, "policy%u", cpu);
 	if (ret) {
-		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
 		/*
 		 * The entire policy object will be freed below, but the extra
 		 * memory allocated for the kobject name needs to be freed by
@@ -1157,16 +1224,41 @@  static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 		goto err_free_real_cpus;
 	}
 
+	policy->nb_min.notifier_call = cpufreq_notifier_min;
+	policy->nb_max.notifier_call = cpufreq_notifier_max;
+
+	ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
+				      DEV_PM_QOS_MIN_FREQUENCY);
+	if (ret) {
+		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
+			ret, cpumask_pr_args(policy->cpus));
+		goto err_kobj_remove;
+	}
+
+	ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
+				      DEV_PM_QOS_MAX_FREQUENCY);
+	if (ret) {
+		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
+			ret, cpumask_pr_args(policy->cpus));
+		goto err_min_qos_notifier;
+	}
+
 	INIT_LIST_HEAD(&policy->policy_list);
 	init_rwsem(&policy->rwsem);
 	spin_lock_init(&policy->transition_lock);
 	init_waitqueue_head(&policy->transition_wait);
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
+	INIT_WORK(&policy->req_work, cpufreq_update_freq_work);
 
 	policy->cpu = cpu;
 	return policy;
 
+err_min_qos_notifier:
+	dev_pm_qos_remove_notifier(dev, &policy->nb_min,
+				   DEV_PM_QOS_MIN_FREQUENCY);
+err_kobj_remove:
+	cpufreq_policy_put_kobj(policy);
 err_free_real_cpus:
 	free_cpumask_var(policy->real_cpus);
 err_free_rcpumask:
@@ -1179,30 +1271,9 @@  static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 	return NULL;
 }
 
-static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
-{
-	struct kobject *kobj;
-	struct completion *cmp;
-
-	down_write(&policy->rwsem);
-	cpufreq_stats_free_table(policy);
-	kobj = &policy->kobj;
-	cmp = &policy->kobj_unregister;
-	up_write(&policy->rwsem);
-	kobject_put(kobj);
-
-	/*
-	 * We need to make sure that the underlying kobj is
-	 * actually not referenced anymore by anybody before we
-	 * proceed with unloading.
-	 */
-	pr_debug("waiting for dropping of refcount\n");
-	wait_for_completion(cmp);
-	pr_debug("wait complete\n");
-}
-
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
+	struct device *dev = get_cpu_device(policy->cpu);
 	unsigned long flags;
 	int cpu;
 
@@ -1214,6 +1285,10 @@  static void cpufreq_policy_free(struct cpufreq_policy *policy)
 		per_cpu(cpufreq_cpu_data, cpu) = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+	dev_pm_qos_remove_notifier(dev, &policy->nb_max,
+				   DEV_PM_QOS_MAX_FREQUENCY);
+	dev_pm_qos_remove_notifier(dev, &policy->nb_min,
+				   DEV_PM_QOS_MIN_FREQUENCY);
 	cpufreq_policy_put_kobj(policy);
 	free_cpumask_var(policy->real_cpus);
 	free_cpumask_var(policy->related_cpus);
@@ -2290,6 +2365,8 @@  int cpufreq_set_policy(struct cpufreq_policy *policy,
 		       struct cpufreq_policy *new_policy)
 {
 	struct cpufreq_governor *old_gov;
+	struct device *cpu_dev = get_cpu_device(policy->cpu);
+	unsigned long min, max;
 	int ret;
 
 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
@@ -2304,11 +2381,27 @@  int cpufreq_set_policy(struct cpufreq_policy *policy,
 	if (new_policy->min > new_policy->max)
 		return -EINVAL;
 
+	/*
+	 * PM QoS framework collects all the requests from users and provide us
+	 * the final aggregated value here.
+	 */
+	min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
+	max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
+
+	if (min > new_policy->min)
+		new_policy->min = min;
+	if (max < new_policy->max)
+		new_policy->max = max;
+
 	/* verify the cpu speed can be set within this limit */
 	ret = cpufreq_driver->verify(new_policy);
 	if (ret)
 		return ret;
 
+	/*
+	 * The notifier-chain shall be removed once all the users of
+	 * CPUFREQ_ADJUST are moved to use the QoS framework.
+	 */
 	/* adjust if necessary - all reasons */
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_ADJUST, new_policy);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index d01a74fbc4db..0fe7678da9c2 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -83,6 +83,7 @@  struct cpufreq_policy {
 
 	struct work_struct	update; /* if update_policy() needs to be
 					 * called, but you're in IRQ context */
+	struct work_struct	req_work;
 
 	struct cpufreq_user_policy user_policy;
 	struct cpufreq_frequency_table	*freq_table;
@@ -147,6 +148,9 @@  struct cpufreq_policy {
 
 	/* Pointer to the cooling device if used for thermal mitigation */
 	struct thermal_cooling_device *cdev;
+
+	struct notifier_block nb_min;
+	struct notifier_block nb_max;
 };
 
 struct cpufreq_freqs {