[RFC,6/7] rcu: Inline preemptible RCU __rcu_read_lock()

Message ID: 1334420437-19264-6-git-send-email-paulmck@linux.vnet.ibm.com
State: New

Commit Message

Paul E. McKenney April 14, 2012, 4:20 p.m. UTC
From: "Paul E. McKenney" <paul.mckenney@linaro.org>

Move __rcu_read_lock() from kernel/rcupdate.c to include/linux/rcupdate.h,
allowing the compiler to inline it.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 include/linux/rcupdate.h |   13 ++++++++++++-
 kernel/rcupdate.c        |   12 ------------
 2 files changed, 12 insertions(+), 13 deletions(-)
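
For context, a minimal sketch (not part of this patch) of how the caller side picks up the change. The rcu_read_lock() wrapper below is assumed to match the include/linux/rcupdate.h of kernels of this vintage and is unchanged by the patch; the point is that with __rcu_read_lock() now static inline, entering a read-side critical section reduces to a per-CPU increment plus a compiler barrier, with no function call:

	/* Sketch of the existing caller, shown for illustration only. */
	static inline void rcu_read_lock(void)
	{
		__rcu_read_lock();		  /* now inlines to __this_cpu_inc() + barrier() */
		__acquire(RCU);			  /* sparse annotation, no generated code */
		rcu_lock_acquire(&rcu_lock_map);  /* lockdep tracking of the critical section */
	}

Note that __rcu_read_unlock() stays out of line, since its slow path (rcu_read_unlock_special handling) does not benefit from inlining.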

Patch

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 89f7e97..9967b2b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -43,6 +43,7 @@ 
 #include <linux/completion.h>
 #include <linux/debugobjects.h>
 #include <linux/compiler.h>
+#include <linux/percpu.h>
 
 #ifdef CONFIG_RCU_TORTURE_TEST
 extern int rcutorture_runnable; /* for sysctl */
@@ -150,7 +151,17 @@  DECLARE_PER_CPU(int, rcu_read_unlock_special);
 DECLARE_PER_CPU(struct task_struct *, rcu_current_task);
 #endif /* #ifdef CONFIG_PROVE_RCU */
 
-extern void __rcu_read_lock(void);
+/*
+ * Preemptible-RCU implementation for rcu_read_lock().  Just increment
+ * the per-CPU rcu_read_lock_nesting: Shared state and per-task state will
+ * be updated if we block.
+ */
+static inline void __rcu_read_lock(void)
+{
+	__this_cpu_inc(rcu_read_lock_nesting);
+	barrier(); /* Keep code within RCU read-side critical section. */
+}
+
 extern void __rcu_read_unlock(void);
 void synchronize_rcu(void);
 
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f77a5fc..d52c68e 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -59,18 +59,6 @@  DEFINE_PER_CPU(struct task_struct *, rcu_current_task);
 #endif /* #ifdef CONFIG_PROVE_RCU */
 
 /*
- * Preemptible-RCU implementation for rcu_read_lock().  Just increment
- * the per-CPU rcu_read_lock_nesting: Shared state and per-task state will
- * be updated if we block.
- */
-void __rcu_read_lock(void)
-{
-	__this_cpu_inc(rcu_read_lock_nesting);
-	barrier(); /* Keep code within RCU read-side critical section. */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
  * Tree-preemptible RCU implementation for rcu_read_unlock().
  * Decrement rcu_read_lock_nesting.  If the result is zero (outermost
  * rcu_read_unlock()) and rcu_read_unlock_special is non-zero, then