@@ -119,6 +119,48 @@ static const struct genpd_lock_ops genpd_spin_ops = {
.unlock = genpd_unlock_spin,
};

+static void genpd_lock_rawspin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->rslock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&genpd->rslock, flags);
+ genpd->rlock_flags = flags;
+}
+
+static void genpd_lock_nested_rawspin(struct generic_pm_domain *genpd,
+ int depth)
+ __acquires(&genpd->rslock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave_nested(&genpd->rslock, flags, depth);
+ genpd->rlock_flags = flags;
+}
+
+static int genpd_lock_interruptible_rawspin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->rslock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&genpd->rslock, flags);
+ genpd->rlock_flags = flags;
+ return 0;
+}
+
+static void genpd_unlock_rawspin(struct generic_pm_domain *genpd)
+ __releases(&genpd->rslock)
+{
+ raw_spin_unlock_irqrestore(&genpd->rslock, genpd->rlock_flags);
+}
+
+static const struct genpd_lock_ops genpd_rawspin_ops = {
+ .lock = genpd_lock_rawspin,
+ .lock_nested = genpd_lock_nested_rawspin,
+ .lock_interruptible = genpd_lock_interruptible_rawspin,
+ .unlock = genpd_unlock_rawspin,
+};
+
#define genpd_lock(p) p->lock_ops->lock(p)
#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
@@ -2048,8 +2090,13 @@ static void genpd_free_data(struct generic_pm_domain *genpd)
static void genpd_lock_init(struct generic_pm_domain *genpd)
{
if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
- spin_lock_init(&genpd->slock);
- genpd->lock_ops = &genpd_spin_ops;
+ if (genpd->flags & GENPD_FLAG_RT_SAFE) {
+ raw_spin_lock_init(&genpd->rslock);
+ genpd->lock_ops = &genpd_rawspin_ops;
+ } else {
+ spin_lock_init(&genpd->slock);
+ genpd->lock_ops = &genpd_spin_ops;
+ }
} else {
mutex_init(&genpd->mlock);
genpd->lock_ops = &genpd_mtx_ops;
@@ -61,6 +61,14 @@
* GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its
* components' next wakeup when determining the
* optimal idle state.
+ *
+ * GENPD_FLAG_RT_SAFE: When used with GENPD_FLAG_IRQ_SAFE, this informs
+ * genpd that its backend callbacks, ->power_on|off(),
+ * do not use regular spinlocks (which can sleep on
+ * PREEMPT_RT). They may use raw_spinlocks or other
+ * preemption-disabling methods, all of which are
+ * PREEMPT_RT safe. Note that a genpd having this flag
+ * set requires its masterdomains to also have it set.
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
@@ -69,6 +77,7 @@
#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
+#define GENPD_FLAG_RT_SAFE (1U << 7)

enum gpd_status {
GENPD_STATE_ON = 0, /* PM domain is on */
@@ -164,6 +173,10 @@ struct generic_pm_domain {
spinlock_t slock;
unsigned long lock_flags;
};
+ struct {
+ raw_spinlock_t rslock;
+ unsigned long rlock_flags;
+ };
};
};
On PREEMPT_RT kernels, a regular spinlock_t becomes a sleeping lock, so an
IRQ-safe genpd whose lock may be taken in atomic context must use a
raw_spinlock_t instead. Add a flag allowing a power domain provider to
indicate it is RT safe. The flag is meant to be used together with the
existing GENPD_FLAG_IRQ_SAFE.

Cc: Adrien Thierry <athierry@redhat.com>
Cc: Brian Masney <bmasney@redhat.com>
Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
---
Independently from Adrien, I encountered the same problem around genpd
when using a PREEMPT_RT kernel.

Previous patch by Adrien:
https://lore.kernel.org/all/20220615203605.1068453-1-athierry@redhat.com/
---
 drivers/base/power/domain.c | 51 +++++++++++++++++++++++++++++++++++--
 include/linux/pm_domain.h   | 13 ++++++++++
 2 files changed, 62 insertions(+), 2 deletions(-)
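
For reviewers, a rough usage sketch, assuming a hypothetical "foo" provider
with MMIO-based power control (the foo_* names, the FOO_PD_CTRL offset and
the NULL governor are made up for illustration and are not part of this
patch):

#include <linux/io.h>
#include <linux/pm_domain.h>
#include <linux/spinlock.h>

#define FOO_PD_CTRL	0x0		/* hypothetical power-control register */

static void __iomem *foo_base;		/* mapped by the (omitted) probe */
static DEFINE_RAW_SPINLOCK(foo_hw_lock);	/* raw lock, fine on PREEMPT_RT */

static int foo_pd_power_on(struct generic_pm_domain *pd)
{
	unsigned long flags;

	/* Only raw_spinlocks / preemption-disabled sections in here. */
	raw_spin_lock_irqsave(&foo_hw_lock, flags);
	writel_relaxed(1, foo_base + FOO_PD_CTRL);
	raw_spin_unlock_irqrestore(&foo_hw_lock, flags);

	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *pd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&foo_hw_lock, flags);
	writel_relaxed(0, foo_base + FOO_PD_CTRL);
	raw_spin_unlock_irqrestore(&foo_hw_lock, flags);

	return 0;
}

static struct generic_pm_domain foo_pd = {
	.name		= "foo-pd",
	/* RT_SAFE is only meaningful together with IRQ_SAFE. */
	.flags		= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_RT_SAFE,
	.power_on	= foo_pd_power_on,
	.power_off	= foo_pd_power_off,
};

static int foo_pd_setup(void)
{
	/* genpd_lock_init() picks genpd_rawspin_ops for this domain. */
	return pm_genpd_init(&foo_pd, NULL, true);
}

With both flags set, the domain's internal lock is the new raw_spinlock_t,
so genpd_lock()/genpd_unlock() around ->power_on|off() never hits a
sleeping lock on PREEMPT_RT.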
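
Since the documentation above requires the masterdomains of an RT-safe
genpd to carry the flag as well, a short sketch of the hierarchy side,
continuing the hypothetical foo example:

static struct generic_pm_domain foo_parent_pd = {
	.name	= "foo-parent-pd",
	/* Parent must be at least as restricted as its RT-safe child. */
	.flags	= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_RT_SAFE,
};

static int foo_pd_link(void)
{
	int ret;

	ret = pm_genpd_init(&foo_parent_pd, NULL, true);
	if (ret)
		return ret;

	/* foo_pd from the sketch above becomes a child (sub)domain. */
	return pm_genpd_add_subdomain(&foo_parent_pd, &foo_pd);
}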