diff options
author    | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2010-02-22 20:04:45 -0500
committer | Ingo Molnar <mingo@elte.hu>                   | 2010-02-25 03:40:59 -0500
commit    | 632ee200130899252508c478ad0e808222573fbc (patch)
tree      | e3f44ca9daf2a2da2186419f703bc9cec8faa058 /include/linux/rcupdate.h
parent    | 996de8c6fe95c5a9fc524241cc8f142ef0605d3d (diff)
rcu: Introduce lockdep-based checking to RCU read-side primitives
Inspection is proving insufficient to catch all RCU misuses,
which is understandable given that rcu_dereference() might be
protected by any of four different flavors of RCU (RCU, RCU-bh,
RCU-sched, and SRCU), and might also/instead be protected by any
of a number of locking primitives. It is therefore time to
enlist the aid of lockdep.
This set of patches is inspired by earlier work by Peter
Zijlstra and Thomas Gleixner, and takes the following approach:
o Set up separate lockdep classes for RCU, RCU-bh, and RCU-sched.
o Set up separate lockdep classes for each instance of SRCU.
o Create primitives that check for being in an RCU read-side
critical section. These return exact answers if lockdep is
fully enabled, but if unsure, report being in an RCU read-side
critical section. (We want to avoid false positives!)
The primitives are:
For RCU: rcu_read_lock_held(void)
For RCU-bh: rcu_read_lock_bh_held(void)
For RCU-sched: rcu_read_lock_sched_held(void)
For SRCU: srcu_read_lock_held(struct srcu_struct *sp)
o Add rcu_dereference_check(), which takes a second argument
in which one places a boolean expression based on the above
primitives and/or lockdep_is_held().
o A new kernel configuration parameter, CONFIG_PROVE_RCU, enables
rcu_dereference_check(). This depends on CONFIG_PROVE_LOCKING,
and should be quite helpful during the transition period while
CONFIG_PROVE_RCU-unaware patches are in flight.
The existing rcu_dereference() primitive does no checking, but
upcoming patches will change that.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-1-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r-- | include/linux/rcupdate.h | 126 |
1 file changed, 116 insertions(+), 10 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 24440f4bf476..e3d37efe2703 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -78,14 +78,120 @@ extern void rcu_init(void); | |||
78 | } while (0) | 78 | } while (0) |
79 | 79 | ||
80 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 80 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
81 | |||
81 | extern struct lockdep_map rcu_lock_map; | 82 | extern struct lockdep_map rcu_lock_map; |
82 | # define rcu_read_acquire() \ | 83 | # define rcu_read_acquire() \ |
83 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | 84 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) |
84 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | 85 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) |
85 | #else | 86 | |
86 | # define rcu_read_acquire() do { } while (0) | 87 | extern struct lockdep_map rcu_bh_lock_map; |
87 | # define rcu_read_release() do { } while (0) | 88 | # define rcu_read_acquire_bh() \ |
88 | #endif | 89 | lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) |
90 | # define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_) | ||
91 | |||
92 | extern struct lockdep_map rcu_sched_lock_map; | ||
93 | # define rcu_read_acquire_sched() \ | ||
94 | lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) | ||
95 | # define rcu_read_release_sched() \ | ||
96 | lock_release(&rcu_sched_lock_map, 1, _THIS_IP_) | ||
97 | |||
98 | /** | ||
99 | * rcu_read_lock_held - might we be in RCU read-side critical section? | ||
100 | * | ||
101 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in | ||
102 | * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING, | ||
103 | * this assumes we are in an RCU read-side critical section unless it can | ||
104 | * prove otherwise. | ||
105 | */ | ||
106 | static inline int rcu_read_lock_held(void) | ||
107 | { | ||
108 | if (debug_locks) | ||
109 | return lock_is_held(&rcu_lock_map); | ||
110 | return 1; | ||
111 | } | ||
112 | |||
113 | /** | ||
114 | * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? | ||
115 | * | ||
116 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in | ||
117 | * an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING, | ||
118 | * this assumes we are in an RCU-bh read-side critical section unless it can | ||
119 | * prove otherwise. | ||
120 | */ | ||
121 | static inline int rcu_read_lock_bh_held(void) | ||
122 | { | ||
123 | if (debug_locks) | ||
124 | return lock_is_held(&rcu_bh_lock_map); | ||
125 | return 1; | ||
126 | } | ||
127 | |||
128 | /** | ||
129 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? | ||
130 | * | ||
131 | * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an | ||
132 | * RCU-sched read-side critical section. In absence of CONFIG_PROVE_LOCKING, | ||
133 | * this assumes we are in an RCU-sched read-side critical section unless it | ||
134 | * can prove otherwise. Note that disabling of preemption (including | ||
135 | * disabling irqs) counts as an RCU-sched read-side critical section. | ||
136 | */ | ||
137 | static inline int rcu_read_lock_sched_held(void) | ||
138 | { | ||
139 | int lockdep_opinion = 0; | ||
140 | |||
141 | if (debug_locks) | ||
142 | lockdep_opinion = lock_is_held(&rcu_sched_lock_map); | ||
143 | return lockdep_opinion || preempt_count() != 0; | ||
144 | } | ||
145 | |||
146 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
147 | |||
148 | # define rcu_read_acquire() do { } while (0) | ||
149 | # define rcu_read_release() do { } while (0) | ||
150 | # define rcu_read_acquire_bh() do { } while (0) | ||
151 | # define rcu_read_release_bh() do { } while (0) | ||
152 | # define rcu_read_acquire_sched() do { } while (0) | ||
153 | # define rcu_read_release_sched() do { } while (0) | ||
154 | |||
155 | static inline int rcu_read_lock_held(void) | ||
156 | { | ||
157 | return 1; | ||
158 | } | ||
159 | |||
160 | static inline int rcu_read_lock_bh_held(void) | ||
161 | { | ||
162 | return 1; | ||
163 | } | ||
164 | |||
165 | static inline int rcu_read_lock_sched_held(void) | ||
166 | { | ||
167 | return preempt_count() != 0; | ||
168 | } | ||
169 | |||
170 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
171 | |||
172 | #ifdef CONFIG_PROVE_RCU | ||
173 | |||
174 | /** | ||
175 | * rcu_dereference_check - rcu_dereference with debug checking | ||
176 | * | ||
177 | * Do an rcu_dereference(), but check that the context is correct. | ||
178 | * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to | ||
179 | * ensure that the rcu_dereference_check() executes within an RCU | ||
180 | * read-side critical section. It is also possible to check for | ||
181 | * locks being held, for example, by using lockdep_is_held(). | ||
182 | */ | ||
183 | #define rcu_dereference_check(p, c) \ | ||
184 | ({ \ | ||
185 | if (debug_locks) \ | ||
186 | WARN_ON_ONCE(!(c)); \ | ||
187 | rcu_dereference(p); \ | ||
188 | }) | ||
189 | |||
190 | #else /* #ifdef CONFIG_PROVE_RCU */ | ||
191 | |||
192 | #define rcu_dereference_check(p, c) rcu_dereference(p) | ||
193 | |||
194 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | ||
89 | 195 | ||
90 | /** | 196 | /** |
91 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. | 197 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. |
@@ -160,7 +266,7 @@ static inline void rcu_read_lock_bh(void) | |||
160 | { | 266 | { |
161 | __rcu_read_lock_bh(); | 267 | __rcu_read_lock_bh(); |
162 | __acquire(RCU_BH); | 268 | __acquire(RCU_BH); |
163 | rcu_read_acquire(); | 269 | rcu_read_acquire_bh(); |
164 | } | 270 | } |
165 | 271 | ||
166 | /* | 272 | /* |
@@ -170,7 +276,7 @@ static inline void rcu_read_lock_bh(void) | |||
170 | */ | 276 | */ |
171 | static inline void rcu_read_unlock_bh(void) | 277 | static inline void rcu_read_unlock_bh(void) |
172 | { | 278 | { |
173 | rcu_read_release(); | 279 | rcu_read_release_bh(); |
174 | __release(RCU_BH); | 280 | __release(RCU_BH); |
175 | __rcu_read_unlock_bh(); | 281 | __rcu_read_unlock_bh(); |
176 | } | 282 | } |
@@ -188,7 +294,7 @@ static inline void rcu_read_lock_sched(void) | |||
188 | { | 294 | { |
189 | preempt_disable(); | 295 | preempt_disable(); |
190 | __acquire(RCU_SCHED); | 296 | __acquire(RCU_SCHED); |
191 | rcu_read_acquire(); | 297 | rcu_read_acquire_sched(); |
192 | } | 298 | } |
193 | 299 | ||
194 | /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ | 300 | /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ |
@@ -205,7 +311,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void) | |||
205 | */ | 311 | */ |
206 | static inline void rcu_read_unlock_sched(void) | 312 | static inline void rcu_read_unlock_sched(void) |
207 | { | 313 | { |
208 | rcu_read_release(); | 314 | rcu_read_release_sched(); |
209 | __release(RCU_SCHED); | 315 | __release(RCU_SCHED); |
210 | preempt_enable(); | 316 | preempt_enable(); |
211 | } | 317 | } |