@@ -12,6 +12,8 @@
#ifndef __LINUX_RT_MUTEX_H
#define __LINUX_RT_MUTEX_H
+#ifdef CONFIG_SCHED_RT
+
#include <linux/linkage.h>
#include <linux/rbtree.h>
#include <linux/spinlock_types.h>
@@ -98,4 +100,71 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
+#else /* CONFIG_SCHED_RT */
+
+/*
+ * We have no realtime task support and therefore no priority inversion
+ * may occur. Let's map RT mutexes to regular mutexes.
+ */
+
+#include <linux/mutex.h>
+
+struct rt_mutex {
+ struct mutex m;
+};
+
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+	{ .m = __MUTEX_INITIALIZER(mutexname.m) }
+
+#define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+
+static inline void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+{
+ static struct lock_class_key __key;
+ __mutex_init(&lock->m, name, &__key);
+}
+
+#define rt_mutex_init(mutex) __rt_mutex_init(mutex, #mutex)
+
+static inline int rt_mutex_is_locked(struct rt_mutex *lock)
+{
+ return mutex_is_locked(&lock->m);
+}
+
+static inline void rt_mutex_destroy(struct rt_mutex *lock)
+{
+ mutex_destroy(&lock->m);
+}
+
+static inline void rt_mutex_lock(struct rt_mutex *lock)
+{
+ mutex_lock(&lock->m);
+}
+
+static inline int rt_mutex_lock_interruptible(struct rt_mutex *lock)
+{
+ return mutex_lock_interruptible(&lock->m);
+}
+
+static inline int rt_mutex_trylock(struct rt_mutex *lock)
+{
+ return mutex_trylock(&lock->m);
+}
+
+static inline void rt_mutex_unlock(struct rt_mutex *lock)
+{
+ mutex_unlock(&lock->m);
+}
+
+static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len)
+{
+ return 0;
+}
+#define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+#define rt_mutex_debug_task_free(t) do { } while (0)
+
+#endif /* CONFIG_SCHED_RT */
+
#endif
@@ -20,8 +20,10 @@ obj-$(CONFIG_SMP) += spinlock.o
obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
+ifeq ($(CONFIG_SCHED_RT),y)
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+endif
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
@@ -1008,7 +1008,7 @@ menu "Lock Debugging (spinlocks, mutexes, etc...)"
config DEBUG_RT_MUTEXES
bool "RT Mutex debugging, deadlock detection"
- depends on DEBUG_KERNEL && RT_MUTEXES
+ depends on DEBUG_KERNEL && RT_MUTEXES && SCHED_RT
help
This allows rt mutex semantics violations and rt mutex related
deadlocks (lockups) to be detected and reported automatically.
With no actual RT task, there are no priority inversion issues to care
about. We can therefore map RT mutexes to regular mutexes in that case
and remain compatible with most users.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
---
 include/linux/rtmutex.h | 69 +++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/locking/Makefile |  2 ++
 lib/Kconfig.debug       |  2 +-
 3 files changed, 72 insertions(+), 1 deletion(-)

-- 
2.9.4