author	Paul E. McKenney <paul.mckenney@linaro.org>	2012-03-27 19:02:08 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-05-02 17:43:23 -0400
commit	616c310e83b872024271c915c1b9ab505b9efad9
tree	1339bc7b3bef920b4641a5af2f182e9dfa2a6632	/include/linux/sched.h
parent	66f75a5d028beaf67c931435fdc3e7823125730c
rcu: Move PREEMPT_RCU preemption to switch_to() invocation
Currently, PREEMPT_RCU readers are enqueued upon entry to the scheduler.
This is inefficient because enqueuing is required only if there is a
context switch, and entry to the scheduler does not guarantee a context
switch. The commit therefore moves the enqueuing to immediately precede
the call to switch_to() from the scheduler.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Linus Torvalds <torvalds@linux-foundation.org>
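For context, the caller side of rcu_switch_from() lives in the scheduler's
context_switch() path in kernel/sched/core.c. The sketch below illustrates
the shape of that call site; the mm handoff and post-switch steps are
elided, and the body shown is an illustrative reconstruction rather than
the literal kernel source:

/*
 * Illustrative sketch of the scheduler call site (kernel/sched/core.c);
 * surrounding details elided, not the literal kernel function body.
 */
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	/* ... mm handoff and architecture-specific preparation ... */

	/*
	 * A context switch is now certain, so enqueue a preempted
	 * PREEMPT_RCU reader here rather than on every scheduler
	 * entry.  rcu_switch_from() is a no-op unless prev is inside
	 * an RCU read-side critical section (nonzero
	 * rcu_read_lock_nesting).
	 */
	rcu_switch_from(prev);

	/* Switch register state and stack over to the next task. */
	switch_to(prev, next, prev);

	/* ... post-switch cleanup (finish_task_switch(), etc.) ... */
}

Because rcu_switch_from() checks rcu_read_lock_nesting before calling
rcu_preempt_note_context_switch(), tasks outside any RCU read-side
critical section pay only the cost of a single compare on this path.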
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	| 10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 81a173c0897d..8f3fd945070f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1905,12 +1905,22 @@ static inline void rcu_copy_process(struct task_struct *p)
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+	if (prev->rcu_read_lock_nesting != 0)
+		rcu_preempt_note_context_switch();
+}
+
 #else
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
 }
 
+static inline void rcu_switch_from(struct task_struct *prev)
+{
+}
+
 #endif
 
 #ifdef CONFIG_SMP