author    Frederic Weisbecker <fweisbec@gmail.com>    2009-12-16 14:21:05 -0500
committer Ingo Molnar <mingo@elte.hu>                 2009-12-17 03:46:44 -0500
commit    234da7bcdc7aaa935846534c3b726dbc79a9cdd5 (patch)
tree      a391afd465d6493a9f1bb274c225bab4d303aad0
parent    416eb39556a03d1c7e52b0791e9052ccd71db241 (diff)
sched: Teach might_sleep() about preemptible RCU
In practice, it is harmless to voluntarily sleep in an rcu_read_lock() section if we are running under preemptible RCU, but it is illegal if we build a kernel running non-preemptible RCU.

Currently, might_sleep() doesn't notice sleepable operations under rcu_read_lock() sections if we are running under preemptible RCU, because preempt_count() is left untouched after rcu_read_lock() in this case. But we want developers who test their changes under such a config to notice the "sleeping while atomic" issues.

So we add rcu_read_lock_nesting to preempt_count() in might_sleep() checks.

[ v2: Handle rcu-tiny ]

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <1260991265-8451-1-git-send-regression-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
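For illustration only (not part of the patch; the function and mutex names below are hypothetical), a minimal sketch of the pattern this change makes visible: under CONFIG_TREE_PREEMPT_RCU, rcu_read_lock() leaves preempt_count() untouched, so a blocking call like the one below used to slip past might_sleep(); with rcu_preempt_depth() folded into the check, it is now reported as "sleeping while atomic" when CONFIG_DEBUG_SPINLOCK_SLEEP is enabled.

/*
 * Illustrative sketch, not from this patch: example_mutex and
 * example_reader() are made-up names.  mutex_lock() may sleep, so under
 * non-preemptible RCU the debug check already fired here; with this patch
 * the same warning now also fires under preemptible RCU.
 */
#include <linux/rcupdate.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

static void example_reader(void)
{
	rcu_read_lock();
	mutex_lock(&example_mutex);	/* may sleep inside an RCU read-side section */
	mutex_unlock(&example_mutex);
	rcu_read_unlock();
}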
-rw-r--r--  include/linux/rcutiny.h   5
-rw-r--r--  include/linux/rcutree.h  11
-rw-r--r--  kernel/sched.c            2
3 files changed, 17 insertions, 1 deletion
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index c4ba9a78721e..96cc307ed9f4 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -101,4 +101,9 @@ static inline void exit_rcu(void)
 {
 }
 
+static inline int rcu_preempt_depth(void)
+{
+	return 0;
+}
+
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index c93eee5911b0..8044b1b94333 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -45,6 +45,12 @@ extern void __rcu_read_unlock(void);
 extern void synchronize_rcu(void);
 extern void exit_rcu(void);
 
+/*
+ * Defined as macro as it is a very low level header
+ * included from areas that don't even know about current
+ */
+#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 static inline void __rcu_read_lock(void)
@@ -63,6 +69,11 @@ static inline void exit_rcu(void)
 {
 }
 
+static inline int rcu_preempt_depth(void)
+{
+	return 0;
+}
+
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 static inline void __rcu_read_lock_bh(void)
diff --git a/kernel/sched.c b/kernel/sched.c
index af7dfa74e6bb..7be88a7be047 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -9682,7 +9682,7 @@ void __init sched_init(void)
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
 static inline int preempt_count_equals(int preempt_offset)
 {
-	int nested = preempt_count() & ~PREEMPT_ACTIVE;
+	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
 
 	return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
 }