[1/1] PM: Start asynchronous suspend threads upfront

Message ID 20240618093507.2143-2-kaiyen.chang@intel.com
State New
Series [1/1] PM: Start asynchronous suspend threads upfront

Commit Message

Kaiyen Chang June 18, 2024, 9:35 a.m. UTC
Currently, when performing a suspend operation, every device on the
dpm_list must wait for the preceding "synchronous" devices to
complete before the main suspend thread can start its suspend
routine, even if the device itself is "asynchronous". If the suspend
routine of a synchronous device has to wait for some reason, the
main suspend thread is blocked as well, which delays the processing
of all subsequent devices, including asynchronous ones, and
ultimately extends the overall device suspend time.

By starting the suspend threads of asynchronous devices upfront, we
effectively move those devices towards the beginning of dpm_list
without breaking their ordering with respect to their parents and
children. As a result, even if the main suspend thread enters a
waiting state, the asynchronous threads can continue to run without
delay.
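
For illustration only (not part of the patch), the simplified
userspace analogue below mimics the restructuring: it kicks off
worker threads for the "async" entries first, then walks the list
and handles only the entries no worker has claimed, and finally
waits for all workers. The fake_device structure, suspend_all() and
the other names are made up for this sketch and do not exist in the
kernel; build with "cc -pthread".

/* Illustrative userspace analogue only, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_device {
	const char *name;
	bool is_async;		/* analogous to power.async_enabled */
	bool async_in_progress;	/* analogous to power.async_in_progress */
	pthread_t thread;
};

static void *async_suspend_thread(void *data)
{
	struct fake_device *dev = data;

	/* The device's suspend callback would run here. */
	printf("async suspend: %s\n", dev->name);
	return NULL;
}

static void suspend_all(struct fake_device *devs, int n)
{
	int i;

	/* Pass 1: start the async suspends upfront, as the patch does. */
	for (i = n - 1; i >= 0; i--) {
		if (devs[i].is_async) {
			devs[i].async_in_progress = true;
			pthread_create(&devs[i].thread, NULL,
				       async_suspend_thread, &devs[i]);
		}
	}

	/* Pass 2: walk the list in order, handling only the sync devices. */
	for (i = n - 1; i >= 0; i--) {
		if (!devs[i].async_in_progress)
			printf("sync suspend: %s\n", devs[i].name);
	}

	/* Wait for the async suspends (async_synchronize_full() in the kernel). */
	for (i = 0; i < n; i++) {
		if (devs[i].async_in_progress)
			pthread_join(devs[i].thread, NULL);
	}
}

int main(void)
{
	struct fake_device devs[] = {
		{ .name = "devA", .is_async = false },
		{ .name = "devB", .is_async = true  },
		{ .name = "devC", .is_async = true  },
	};

	suspend_all(devs, 3);
	return 0;
}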

Signed-off-by: Kaiyen Chang <kaiyen.chang@intel.com>
---
 drivers/base/power/main.c | 90 +++++++++++++++++++++++++--------------
 1 file changed, 57 insertions(+), 33 deletions(-)
Patch

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4a67e83300e1..6ddd6ef36625 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1283,6 +1283,7 @@  static void async_suspend_noirq(void *data, async_cookie_t cookie)
 
 static int dpm_noirq_suspend_devices(pm_message_t state)
 {
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
@@ -1293,26 +1294,33 @@  static int dpm_noirq_suspend_devices(pm_message_t state)
 
 	mutex_lock(&dpm_list_mtx);
 
+	/*
+	 * Trigger the suspend of "async" devices upfront so they don't have to
+	 * wait for the "non-async" ones that don't depend on them.
+	 */
+
+	list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry)
+		dpm_async_fn(dev, async_suspend_noirq);
+
 	while (!list_empty(&dpm_late_early_list)) {
-		struct device *dev = to_device(dpm_late_early_list.prev);
+		dev = to_device(dpm_late_early_list.prev);
 
 		list_move(&dev->power.entry, &dpm_noirq_list);
 
-		if (dpm_async_fn(dev, async_suspend_noirq))
-			continue;
-
-		get_device(dev);
+		if (!dev->power.async_in_progress) {
+			get_device(dev);
 
-		mutex_unlock(&dpm_list_mtx);
+			mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend_noirq(dev, state, false);
+			error = device_suspend_noirq(dev, state, false);
 
-		put_device(dev);
+			put_device(dev);
 
-		mutex_lock(&dpm_list_mtx);
+			mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error)
-			break;
+			if (error || async_error)
+				break;
+		}
 	}
 
 	mutex_unlock(&dpm_list_mtx);
@@ -1454,6 +1462,7 @@  static void async_suspend_late(void *data, async_cookie_t cookie)
  */
 int dpm_suspend_late(pm_message_t state)
 {
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
@@ -1466,26 +1475,33 @@  int dpm_suspend_late(pm_message_t state)
 
 	mutex_lock(&dpm_list_mtx);
 
+	/*
+	 * Trigger the suspend of "async" devices upfront so they don't have to
+	 * wait for the "non-async" ones that don't depend on them.
+	 */
+
+	list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry)
+		dpm_async_fn(dev, async_suspend_late);
+
 	while (!list_empty(&dpm_suspended_list)) {
-		struct device *dev = to_device(dpm_suspended_list.prev);
+		dev = to_device(dpm_suspended_list.prev);
 
 		list_move(&dev->power.entry, &dpm_late_early_list);
 
-		if (dpm_async_fn(dev, async_suspend_late))
-			continue;
-
-		get_device(dev);
+		if (!dev->power.async_in_progress) {
+			get_device(dev);
 
-		mutex_unlock(&dpm_list_mtx);
+			mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend_late(dev, state, false);
+			error = device_suspend_late(dev, state, false);
 
-		put_device(dev);
+			put_device(dev);
 
-		mutex_lock(&dpm_list_mtx);
+			mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error)
-			break;
+			if (error || async_error)
+				break;
+		}
 	}
 
 	mutex_unlock(&dpm_list_mtx);
@@ -1719,6 +1735,7 @@  static void async_suspend(void *data, async_cookie_t cookie)
  */
 int dpm_suspend(pm_message_t state)
 {
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
@@ -1733,26 +1750,33 @@  int dpm_suspend(pm_message_t state)
 
 	mutex_lock(&dpm_list_mtx);
 
+	/*
+	 * Trigger the suspend of "async" devices upfront so they don't have to
+	 * wait for the "non-async" ones that don't depend on them.
+	 */
+
+	list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry)
+		dpm_async_fn(dev, async_suspend);
+
 	while (!list_empty(&dpm_prepared_list)) {
-		struct device *dev = to_device(dpm_prepared_list.prev);
+		dev = to_device(dpm_prepared_list.prev);
 
 		list_move(&dev->power.entry, &dpm_suspended_list);
 
-		if (dpm_async_fn(dev, async_suspend))
-			continue;
-
-		get_device(dev);
+		if (!dev->power.async_in_progress) {
+			get_device(dev);
 
-		mutex_unlock(&dpm_list_mtx);
+			mutex_unlock(&dpm_list_mtx);
 
-		error = device_suspend(dev, state, false);
+			error = device_suspend(dev, state, false);
 
-		put_device(dev);
+			put_device(dev);
 
-		mutex_lock(&dpm_list_mtx);
+			mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error)
-			break;
+			if (error || async_error)
+				break;
+		}
 	}
 
 	mutex_unlock(&dpm_list_mtx);