| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2010-03-03 10:46:57 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-03-04 06:09:19 -0500 |
| commit | 54dbf96c921513bf98484a20ef366d51944a4c4d (patch) | |
| tree | cb88fa12a8ac0e7cc8c6380781c46d1c6149c027 /include/linux | |
| parent | 8d53dd546f36073e0d29b0cfc24c665db301e3e7 (diff) | |
rcu: Suppress RCU lockdep warnings during early boot
RCU is used during very early boot, before RCU and lockdep have
been initialized. So make the underlying primitives
(rcu_read_lock_held(), rcu_read_lock_bh_held(),
rcu_read_lock_sched_held(), and rcu_dereference_check()) check
for early boot via the rcu_scheduler_active flag. This will
suppress false positives.
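For illustration, here is a minimal sketch of the kind of early-boot reader that could trip the checks; the structure, function, and lock names are hypothetical and not part of this patch:

```c
/* Hypothetical early-boot reader; names are made up for illustration. */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct foo {
	int val;
};

static struct foo *foo_ptr;		/* updated under foo_lock, read under RCU */
static DEFINE_SPINLOCK(foo_lock);

static int foo_early_read(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();
	/*
	 * If this runs before the scheduler (and lockdep) are fully up,
	 * the condition below cannot be validated and could previously
	 * produce a spurious lockdep-RCU warning.  With this patch, the
	 * check is skipped while rcu_scheduler_active == 0.
	 */
	p = rcu_dereference_check(foo_ptr,
				  rcu_read_lock_held() ||
				  lockdep_is_held(&foo_lock));
	if (p)
		val = p->val;
	rcu_read_unlock();
	return val;
}
```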
Also introduce a debug_lockdep_rcu_enabled() static inline
helper function, which tags the CONFIG_PROVE_RCU case as
likely(), as suggested by Ingo Molnar.
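The helper itself appears in the diff below; an annotated copy follows, with explanatory comments that are editorial rather than part of the patch:

```c
static inline int debug_lockdep_rcu_enabled(void)
{
	/*
	 * rcu_scheduler_active stays 0 until boot is far enough along for
	 * RCU and lockdep to be initialized; debug_locks is cleared once
	 * lockdep disables itself after reporting a problem.  Checking is
	 * done only when both are set, and likely() hints to the compiler
	 * that this is the common case.
	 */
	return likely(rcu_scheduler_active && debug_locks);
}
```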
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267631219-8713-2-git-send-email-paulmck@linux.vnet.ibm.com>
[ v2: removed incomplete debug_lockdep_rcu_update() bits ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/rcupdate.h | 31 |
1 file changed, 22 insertions(+), 9 deletions(-)
```diff
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e22960ecb71a..75921b83c0ab 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -97,6 +97,11 @@ extern struct lockdep_map rcu_sched_lock_map;
 # define rcu_read_release_sched() \
 		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
 
+static inline int debug_lockdep_rcu_enabled(void)
+{
+	return likely(rcu_scheduler_active && debug_locks);
+}
+
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
@@ -104,12 +109,14 @@ extern struct lockdep_map rcu_sched_lock_map;
  * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_held(void)
 {
-	if (debug_locks)
-		return lock_is_held(&rcu_lock_map);
-	return 1;
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_lock_map);
 }
 
 /**
@@ -119,12 +126,14 @@ static inline int rcu_read_lock_held(void)
  * an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU-bh read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_bh_held(void)
 {
-	if (debug_locks)
-		return lock_is_held(&rcu_bh_lock_map);
-	return 1;
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_bh_lock_map);
 }
 
 /**
@@ -135,15 +144,19 @@ static inline int rcu_read_lock_bh_held(void)
  * this assumes we are in an RCU-sched read-side critical section unless it
  * can prove otherwise. Note that disabling of preemption (including
  * disabling irqs) counts as an RCU-sched read-side critical section.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
 	int lockdep_opinion = 0;
 
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+	return lockdep_opinion || preempt_count() != 0;
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -174,7 +187,7 @@ static inline int rcu_read_lock_bh_held(void)
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-	return preempt_count() != 0 || !rcu_scheduler_active;
+	return !rcu_scheduler_active || preempt_count() != 0;
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -198,7 +211,7 @@ static inline int rcu_read_lock_sched_held(void)
  */
 #define rcu_dereference_check(p, c) \
 	({ \
-		if (debug_locks && !(c)) \
+		if (debug_lockdep_rcu_enabled() && !(c)) \
 			lockdep_rcu_dereference(__FILE__, __LINE__); \
 		rcu_dereference_raw(p); \
 	})
```
