@@ -88,6 +88,17 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	return delta_ns >= sg_policy->freq_update_delay_ns;
 }
 
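+/*
+ * Request @next_freq via the driver's fast switch callback. If the driver
+ * returns CPUFREQ_ENTRY_INVALID, the switch did not take place, so leave
+ * policy->cur unchanged and skip the trace event. Otherwise record the
+ * frequency actually set and trace it.
+ */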
+static void sugov_fast_switch(struct cpufreq_policy *policy,
+			      unsigned int next_freq)
+{
+	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
+	if (next_freq == CPUFREQ_ENTRY_INVALID)
+		return;
+
+	policy->cur = next_freq;
+	trace_cpu_frequency(next_freq, smp_processor_id());
+}
+
 static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 				unsigned int next_freq)
 {
@@ -100,12 +111,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 		}
 		sg_policy->next_freq = next_freq;
 		sg_policy->last_freq_update_time = time;
-		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
-		if (next_freq == CPUFREQ_ENTRY_INVALID)
-			return;
-
-		policy->cur = next_freq;
-		trace_cpu_frequency(next_freq, smp_processor_id());
+		sugov_fast_switch(policy, next_freq);
 	} else if (sg_policy->next_freq != next_freq) {
 		sg_policy->next_freq = next_freq;
 		sg_policy->last_freq_update_time = time;
@@ -303,9 +309,15 @@ static void sugov_work(struct kthread_work *work)
 
 static void sugov_irq_work(struct irq_work *irq_work)
 {
-	struct sugov_policy *sg_policy;
+	struct sugov_policy *sg_policy =
+			container_of(irq_work, struct sugov_policy, irq_work);
+	struct cpufreq_policy *policy = sg_policy->policy;
 
-	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
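+	/*
+	 * If fast switching is enabled, the frequency can be updated right
+	 * here: do the switch and clear work_in_progress, then return early
+	 * instead of falling through to the slow path below.
+	 */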
+	if (policy->fast_switch_enabled) {
+		sugov_fast_switch(policy, sg_policy->next_freq);
+		sg_policy->work_in_progress = false;
+		return;
+	}
 
 	/*
 	 * For RT and deadline tasks, the schedutil governor shoots the