[v5,1/3] cpuidle: psci: Call cpu_cluster_pm_enter() on the last CPU

Message ID 20220216132830.32490-2-shawn.guo@linaro.org
State New
Series Add Qualcomm MPM irqchip driver support

Commit Message

Shawn Guo Feb. 16, 2022, 1:28 p.m. UTC
Make a call to cpu_cluster_pm_enter() on the last CPU going to low power
state (and cpu_cluster_pm_exit() on the first CPU coming back), so that
platforms can be notified to set up hardware for getting into the cluster
low power state.

Signed-off-by: Shawn Guo <shawn.guo@linaro.org>
---
 drivers/cpuidle/cpuidle-psci.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)
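
For illustration only (not part of this patch): a platform driver that needs to
react to these cluster transitions could observe them through the existing CPU PM
notifier chain, handling the CPU_CLUSTER_PM_ENTER/EXIT events this change starts
emitting. The function and notifier names in this sketch are made up.

/*
 * Hypothetical consumer sketch: register on the CPU PM notifier chain
 * and act on the cluster enter/exit events raised by the last/first CPU.
 */
#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int example_cluster_pm_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	switch (action) {
	case CPU_CLUSTER_PM_ENTER:
		/* Last CPU is entering idle: set up wakeup hardware here. */
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		/* First CPU is back: restore normal interrupt delivery, etc. */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_cluster_pm_nb = {
	.notifier_call = example_cluster_pm_notify,
};

/* Called from the platform driver's probe path (name is illustrative). */
static int example_register_cluster_pm(void)
{
	return cpu_pm_register_notifier(&example_cluster_pm_nb);
}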

Patch

diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index b51b5df08450..c748c1a7d7b1 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -37,6 +37,7 @@  struct psci_cpuidle_data {
 static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
 static DEFINE_PER_CPU(u32, domain_state);
 static bool psci_cpuidle_use_cpuhp;
+static atomic_t cpus_in_idle;
 
 void psci_set_domain_state(u32 state)
 {
@@ -67,6 +68,14 @@  static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
 	if (ret)
 		return -1;
 
+	if (atomic_inc_return(&cpus_in_idle) == num_online_cpus()) {
+		ret = cpu_cluster_pm_enter();
+		if (ret) {
+			ret = -1;
+			goto dec_atomic;
+		}
+	}
+
 	/* Do runtime PM to manage a hierarchical CPU toplogy. */
 	rcu_irq_enter_irqson();
 	if (s2idle)
@@ -88,6 +97,10 @@  static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
 		pm_runtime_get_sync(pd_dev);
 	rcu_irq_exit_irqson();
 
+	if (atomic_read(&cpus_in_idle) == num_online_cpus())
+		cpu_cluster_pm_exit();
+dec_atomic:
+	atomic_dec(&cpus_in_idle);
 	cpu_pm_exit();
 
 	/* Clear the domain state to start fresh when back from idle. */