Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--	include/linux/rcupdate.h | 90
1 file changed, 64 insertions(+), 26 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 3024050c82a1..db266bbed23f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -101,10 +101,7 @@ extern struct lockdep_map rcu_sched_lock_map;
 # define rcu_read_release_sched() \
 		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
 
-static inline int debug_lockdep_rcu_enabled(void)
-{
-	return likely(rcu_scheduler_active && debug_locks);
-}
+extern int debug_lockdep_rcu_enabled(void);
 
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
@@ -123,22 +120,11 @@ static inline int rcu_read_lock_held(void)
 	return lock_is_held(&rcu_lock_map);
 }
 
-/**
- * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
- *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-bh read-side critical section unless it can
- * prove otherwise.
- *
- * Check rcu_scheduler_active to prevent false positives during boot.
+/*
+ * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
+ * hell.
  */
-static inline int rcu_read_lock_bh_held(void)
-{
-	if (!debug_lockdep_rcu_enabled())
-		return 1;
-	return lock_is_held(&rcu_bh_lock_map);
-}
+extern int rcu_read_lock_bh_held(void);
 
 /**
  * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
@@ -160,7 +146,7 @@ static inline int rcu_read_lock_sched_held(void)
 		return 1;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-	return lockdep_opinion || preempt_count() != 0;
+	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -191,7 +177,7 @@ static inline int rcu_read_lock_bh_held(void)
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-	return !rcu_scheduler_active || preempt_count() != 0;
+	return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
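The two hunks above widen rcu_read_lock_sched_held() so that running with irqs disabled now also counts as an RCU-sched read-side critical section, in addition to having preemption disabled. A rough, hypothetical sketch of a reader that relies on this (not part of this patch; the cfg structure and names are invented for illustration) might look like:

	#include <linux/rcupdate.h>
	#include <linux/preempt.h>

	struct cfg {
		int threshold;
	};

	/* Published by the update side with rcu_assign_pointer(). */
	static struct cfg *global_cfg;

	static int read_threshold(void)
	{
		struct cfg *c;
		int t = 0;

		preempt_disable();	/* RCU-sched read side; an irqs-disabled region would now also satisfy the check */
		c = rcu_dereference_check(global_cfg, rcu_read_lock_sched_held());
		if (c)
			t = c->threshold;
		preempt_enable();
		return t;
	}
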
@@ -204,14 +190,34 @@ static inline int rcu_read_lock_sched_held(void)
 
 #ifdef CONFIG_PROVE_RCU
 
+extern int rcu_my_thread_group_empty(void);
+
 /**
  * rcu_dereference_check - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
  *
- * Do an rcu_dereference(), but check that the context is correct.
- * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
- * ensure that the rcu_dereference_check() executes within an RCU
- * read-side critical section.  It is also possible to check for
- * locks being held, for example, by using lockdep_is_held().
+ * Do an rcu_dereference(), but check that the conditions under which the
+ * dereference will take place are correct.  Typically the conditions indicate
+ * the various locking conditions that should be held at that point.  The check
+ * should return true if the conditions are satisfied.
+ *
+ * For example:
+ *
+ *	bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
+ *					      lockdep_is_held(&foo->lock));
+ *
+ * could be used to indicate to lockdep that foo->bar may only be dereferenced
+ * if either the RCU read lock is held, or that the lock required to replace
+ * the bar struct at foo->bar is held.
+ *
+ * Note that the list of conditions may also include indications of when a lock
+ * need not be held, for example during initialisation or destruction of the
+ * target struct:
+ *
+ *	bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
+ *					      lockdep_is_held(&foo->lock) ||
+ *					      atomic_read(&foo->usage) == 0);
  */
 #define rcu_dereference_check(p, c) \
 	({ \
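Spelling out the foo/bar pattern from the new kernel-doc above, a minimal hypothetical reader plus the structures it assumes might look like the following sketch (foo, bar and foo->lock are the illustrative names used in the comment, not real kernel objects):

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct bar {
		int value;
	};

	struct foo {
		struct bar *bar;	/* RCU-protected; replaced only under foo->lock */
		spinlock_t lock;
		atomic_t usage;
	};

	static int read_bar_value(struct foo *foo)
	{
		struct bar *bar;
		int val = 0;

		rcu_read_lock();
		/* Either the RCU read lock (held here) or foo->lock satisfies the check. */
		bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
						      lockdep_is_held(&foo->lock));
		if (bar)
			val = bar->value;
		rcu_read_unlock();
		return val;
	}
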
@@ -220,13 +226,45 @@ static inline int rcu_read_lock_sched_held(void)
 		rcu_dereference_raw(p); \
 	})
 
+/**
+ * rcu_dereference_protected - fetch RCU pointer when updates prevented
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
+ * is useful in cases where update-side locks prevent the value of the
+ * pointer from changing.  Please note that this primitive does -not-
+ * prevent the compiler from repeating this reference or combining it
+ * with other references, so it should not be used without protection
+ * of appropriate locks.
+ */
+#define rcu_dereference_protected(p, c) \
+	({ \
+		if (debug_lockdep_rcu_enabled() && !(c)) \
+			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		(p); \
+	})
+
 #else /* #ifdef CONFIG_PROVE_RCU */
 
 #define rcu_dereference_check(p, c) rcu_dereference_raw(p)
+#define rcu_dereference_protected(p, c) (p)
 
 #endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
+ * rcu_access_pointer - fetch RCU pointer with no dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL.  This may also be used in cases where update-side locks prevent
+ * the value of the pointer from changing, but rcu_dereference_protected()
+ * is a lighter-weight primitive for this use case.
+ */
+#define rcu_access_pointer(p) ACCESS_ONCE(p)
+
+/**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
  * When synchronize_rcu() is invoked on one CPU while other CPUs
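To make the intended division of labour between the two new primitives concrete, here is a small hypothetical update-side sketch, again not part of the patch and reusing the illustrative struct foo/struct bar from the reader sketch above: rcu_dereference_protected() is used where foo->lock already excludes concurrent updates, and rcu_access_pointer() is used where the pointer is only tested against NULL and never dereferenced.

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>
	#include <linux/slab.h>

	static void replace_bar(struct foo *foo, struct bar *new_bar)
	{
		struct bar *old;

		spin_lock(&foo->lock);
		/* Updates are excluded by foo->lock, so no rcu_read_lock() is needed here. */
		old = rcu_dereference_protected(foo->bar, lockdep_is_held(&foo->lock));
		rcu_assign_pointer(foo->bar, new_bar);
		spin_unlock(&foo->lock);

		if (old) {
			synchronize_rcu();	/* wait for pre-existing readers to finish */
			kfree(old);
		}
	}

	static bool foo_has_bar(struct foo *foo)
	{
		/* Only a NULL test: nothing is dereferenced, so rcu_access_pointer() suffices. */
		return rcu_access_pointer(foo->bar) != NULL;
	}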