
[v2] interconnect: Use rt_mutex for icc_bw_lock

Message ID 20250506145159.1951159-1-quic_mdtipton@quicinc.com

Commit Message

Mike Tipton May 6, 2025, 2:51 p.m. UTC
The icc_set_bw() function is often used in latency-sensitive paths by
high-priority clients such as GPU and display, which scale bandwidth on
a per-frame basis. However, icc_set_bw() also has many low-priority
clients. This can lead to priority inversion and unacceptable delays
for the high-priority clients, which in the case of GPU and display
result in frame drops and visual glitches.
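
A rough sketch of the inversion with the current plain mutex (the
thread roles here are illustrative, not taken from a specific trace):

	/* Low-priority client, e.g. a background driver: */
	icc_set_bw(path, avg_bw, peak_bw);	/* takes icc_bw_lock and is
						 * then preempted while
						 * holding it */

	/* High-priority display thread, once per frame: */
	icc_set_bw(path, avg_bw, peak_bw);	/* blocks on icc_bw_lock; a
						 * plain mutex leaves the wait
						 * bounded only by the (low)
						 * holder's progress, while an
						 * rt_mutex boosts the holder
						 * to the waiter's priority */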

To prevent this priority inversion, switch icc_bw_lock to an rt_mutex.
Unlike a plain mutex, an rt_mutex supports priority inheritance: a
low-priority lock holder is boosted to the priority of its
highest-priority waiter, so it can't be starved by medium-priority
work. This isn't needed for icc_lock, since that lock isn't taken in
the critical, latency-sensitive voting paths.
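
For reference, the change reduces to the lock definition and its
lock/unlock calls; the pattern (exactly as in the hunks below) is:

	#include <linux/rtmutex.h>

	static DEFINE_RT_MUTEX(icc_bw_lock);

	rt_mutex_lock(&icc_bw_lock);
	/* ... aggregate requests and apply constraints ... */
	rt_mutex_unlock(&icc_bw_lock);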

Signed-off-by: Mike Tipton <quic_mdtipton@quicinc.com>
---

Since the original patch was posted a couple of years ago, we've
continued to hit this for display and now for GPU as well. How often it
occurs depends heavily on the specific chip, product, and use case, and
some configurations hit it more easily than others. But in both cases
it results in obvious visual glitches.

The paths being voted for (primarily DDR) are fundamentally shared
between clients of all types and priority levels. We can't control
those clients' priorities, so short of having the priorities inherited,
we're always subject to these sorts of inversions.

The motivation isn't general performance improvement, but rather fixing
the rare cases of visual glitches and artifacts.

A similar patch was posted last year [1] to address the same class of
problems.

[1] https://lore.kernel.org/all/20240220074300.10805-1-wangrumeng@xiaomi.corp-partner.google.com/

Changes in v2:
- Rebase onto linux-next.
- Select RT_MUTEXES in Kconfig.
- Only use rt_mutex for icc_bw_lock, since there are now separate
  locks and icc_lock isn't in the critical path.
- Reword commit text.
- Link to v1: https://lore.kernel.org/all/20220906191423.30109-1-quic_mdtipton@quicinc.com/

 drivers/interconnect/Kconfig |  1 +
 drivers/interconnect/core.c  | 23 ++++++++++++-----------
 2 files changed, 13 insertions(+), 11 deletions(-)

Patch

diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
index f2e49bd97d31..f6fd5f2d7d40 100644
--- a/drivers/interconnect/Kconfig
+++ b/drivers/interconnect/Kconfig
@@ -1,6 +1,7 @@ 
 # SPDX-License-Identifier: GPL-2.0-only
 menuconfig INTERCONNECT
 	bool "On-Chip Interconnect management support"
+	select RT_MUTEXES
 	help
 	  Support for management of the on-chip interconnects.
 
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 1a41e59c77f8..2e86a3c95d1a 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -14,6 +14,7 @@ 
 #include <linux/interconnect-provider.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/rtmutex.h>
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/overflow.h>
@@ -30,7 +31,7 @@  static LIST_HEAD(icc_providers);
 static int providers_count;
 static bool synced_state;
 static DEFINE_MUTEX(icc_lock);
-static DEFINE_MUTEX(icc_bw_lock);
+static DEFINE_RT_MUTEX(icc_bw_lock);
 static struct dentry *icc_debugfs_dir;
 
 static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
@@ -178,7 +179,7 @@  static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
 
 	path->num_nodes = num_nodes;
 
-	mutex_lock(&icc_bw_lock);
+	rt_mutex_lock(&icc_bw_lock);
 
 	for (i = num_nodes - 1; i >= 0; i--) {
 		node->provider->users++;
@@ -190,7 +191,7 @@  static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
 		node = node->reverse;
 	}
 
-	mutex_unlock(&icc_bw_lock);
+	rt_mutex_unlock(&icc_bw_lock);
 
 	return path;
 }
@@ -704,7 +705,7 @@  int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
 	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
 		return -EINVAL;
 
-	mutex_lock(&icc_bw_lock);
+	rt_mutex_lock(&icc_bw_lock);
 
 	old_avg = path->reqs[0].avg_bw;
 	old_peak = path->reqs[0].peak_bw;
@@ -736,7 +737,7 @@  int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
 		apply_constraints(path);
 	}
 
-	mutex_unlock(&icc_bw_lock);
+	rt_mutex_unlock(&icc_bw_lock);
 
 	trace_icc_set_bw_end(path, ret);
 
@@ -798,7 +799,7 @@  void icc_put(struct icc_path *path)
 		pr_err("%s: error (%d)\n", __func__, ret);
 
 	mutex_lock(&icc_lock);
-	mutex_lock(&icc_bw_lock);
+	rt_mutex_lock(&icc_bw_lock);
 
 	for (i = 0; i < path->num_nodes; i++) {
 		node = path->reqs[i].node;
@@ -807,7 +808,7 @@  void icc_put(struct icc_path *path)
 			node->provider->users--;
 	}
 
-	mutex_unlock(&icc_bw_lock);
+	rt_mutex_unlock(&icc_bw_lock);
 	mutex_unlock(&icc_lock);
 
 	kfree(path->name);
@@ -1023,7 +1024,7 @@  void icc_node_add(struct icc_node *node, struct icc_provider *provider)
 		return;
 
 	mutex_lock(&icc_lock);
-	mutex_lock(&icc_bw_lock);
+	rt_mutex_lock(&icc_bw_lock);
 
 	node->provider = provider;
 	list_add_tail(&node->node_list, &provider->nodes);
@@ -1056,7 +1057,7 @@  void icc_node_add(struct icc_node *node, struct icc_provider *provider)
 	node->avg_bw = 0;
 	node->peak_bw = 0;
 
-	mutex_unlock(&icc_bw_lock);
+	rt_mutex_unlock(&icc_bw_lock);
 	mutex_unlock(&icc_lock);
 }
 EXPORT_SYMBOL_GPL(icc_node_add);
@@ -1182,7 +1183,7 @@  void icc_sync_state(struct device *dev)
 		return;
 
 	mutex_lock(&icc_lock);
-	mutex_lock(&icc_bw_lock);
+	rt_mutex_lock(&icc_bw_lock);
 	synced_state = true;
 	list_for_each_entry(p, &icc_providers, provider_list) {
 		dev_dbg(p->dev, "interconnect provider is in synced state\n");
@@ -1195,7 +1196,7 @@  void icc_sync_state(struct device *dev)
 			}
 		}
 	}
-	mutex_unlock(&icc_bw_lock);
+	rt_mutex_unlock(&icc_bw_lock);
 	mutex_unlock(&icc_lock);
 }
 EXPORT_SYMBOL_GPL(icc_sync_state);