Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r-- | include/linux/rcupdate.h | 115
1 file changed, 73 insertions, 42 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 2cf4226ade7e..81c04f4348ec 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,6 +51,8 @@ extern int rcutorture_runnable; /* for sysctl */
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 extern void rcutorture_record_test_transition(void);
 extern void rcutorture_record_progress(unsigned long vernum);
+extern void do_trace_rcu_torture_read(char *rcutorturename,
+				      struct rcu_head *rhp);
 #else
 static inline void rcutorture_record_test_transition(void)
 {
@@ -58,6 +60,12 @@ static inline void rcutorture_record_test_transition(void)
 static inline void rcutorture_record_progress(unsigned long vernum)
 {
 }
+#ifdef CONFIG_RCU_TRACE
+extern void do_trace_rcu_torture_read(char *rcutorturename,
+				      struct rcu_head *rhp);
+#else
+#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#endif
 #endif
 
 #define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
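
The do { } while (0) stub added above is the usual kernel idiom for making a call site vanish when a config option is off while still parsing as a single statement, so it stays safe inside an unbraced if. A minimal userspace sketch of the same idiom, with MY_TRACE and trace_event() as hypothetical stand-ins for CONFIG_RCU_TRACE and do_trace_rcu_torture_read(); this is illustration, not kernel code:

#include <stdio.h>

#ifdef MY_TRACE
static void trace_event(const char *name, void *obj)
{
	printf("trace: %s at %p\n", name, obj);
}
#else
/* Compiles to nothing, but still behaves as one statement. */
#define trace_event(name, obj) do { } while (0)
#endif

int main(void)
{
	int dummy = 0;

	if (1)
		trace_event("reader", &dummy);	/* safe without braces */
	return dummy;
}
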
@@ -177,23 +185,10 @@ extern void rcu_sched_qs(int cpu);
 extern void rcu_bh_qs(int cpu);
 extern void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
-
-#ifdef CONFIG_NO_HZ
-
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-
-#else /* #ifdef CONFIG_NO_HZ */
-
-static inline void rcu_enter_nohz(void)
-{
-}
-
-static inline void rcu_exit_nohz(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_NO_HZ */
+extern void rcu_idle_enter(void);
+extern void rcu_idle_exit(void);
+extern void rcu_irq_enter(void);
+extern void rcu_irq_exit(void);
 
 /*
  * Infrastructure to implement the synchronize_() primitives in
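
The hunk above replaces the CONFIG_NO_HZ-only rcu_enter_nohz()/rcu_exit_nohz() pair with four unconditional hooks: the idle loop brackets its low-power wait with rcu_idle_enter()/rcu_idle_exit(), and interrupt entry/exit use rcu_irq_enter()/rcu_irq_exit() so an irq taken from idle is still visible to RCU. A compilable sketch of the pairing discipline, with stub bodies standing in for the real RCU-core implementations:

#include <stdio.h>

/* Stubs for illustration only; the real functions live in the RCU core. */
static void rcu_idle_enter(void) { puts("enter extended quiescent state"); }
static void rcu_idle_exit(void)  { puts("exit extended quiescent state"); }
static void rcu_irq_enter(void)  { puts("irq: CPU visible to RCU again"); }
static void rcu_irq_exit(void)   { puts("irq done: quiescent again"); }

static void handle_interrupt(void)
{
	rcu_irq_enter();	/* the handler may now use RCU */
	/* ... handler body ... */
	rcu_irq_exit();
}

int main(void)
{
	rcu_idle_enter();	/* RCU ignores this CPU from here on */
	handle_interrupt();	/* an irq arriving in idle re-announces us */
	rcu_idle_exit();	/* CPU participates in grace periods again */
	return 0;
}
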
@@ -233,22 +228,30 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire() \
-		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#ifdef CONFIG_PROVE_RCU
+extern int rcu_is_cpu_idle(void);
+#else /* !CONFIG_PROVE_RCU */
+static inline int rcu_is_cpu_idle(void)
+{
+	return 0;
+}
+#endif /* else !CONFIG_PROVE_RCU */
 
-extern struct lockdep_map rcu_bh_lock_map;
-# define rcu_read_acquire_bh() \
-		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
+static inline void rcu_lock_acquire(struct lockdep_map *map)
+{
+	WARN_ON_ONCE(rcu_is_cpu_idle());
+	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
+}
 
-extern struct lockdep_map rcu_sched_lock_map;
-# define rcu_read_acquire_sched() \
-		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release_sched() \
-		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
+static inline void rcu_lock_release(struct lockdep_map *map)
+{
+	WARN_ON_ONCE(rcu_is_cpu_idle());
+	lock_release(map, 1, _THIS_IP_);
+}
 
+extern struct lockdep_map rcu_lock_map;
+extern struct lockdep_map rcu_bh_lock_map;
+extern struct lockdep_map rcu_sched_lock_map;
 extern int debug_lockdep_rcu_enabled(void);
 
 /**
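
The net effect of this hunk is that six per-flavor acquire/release macros collapse into two helpers parameterized by the lockdep_map, which gives the new rcu_is_cpu_idle() check exactly one home. A userspace mock of that consolidation, with drastically simplified types and assert() standing in for WARN_ON_ONCE():

#include <assert.h>
#include <stdio.h>

struct lockdep_map { const char *name; };	/* simplified stand-in */

static struct lockdep_map rcu_map    = { "rcu" };
static struct lockdep_map rcu_bh_map = { "rcu_bh" };
static int cpu_is_idle;		/* rcu_is_cpu_idle() stand-in */

static void lock_acquire_mock(struct lockdep_map *map)
{
	assert(!cpu_is_idle);	/* one check covers every RCU flavor */
	printf("acquire %s\n", map->name);
}

int main(void)
{
	lock_acquire_mock(&rcu_map);	/* same helper, different map... */
	lock_acquire_mock(&rcu_bh_map);	/* ...instead of one macro each */
	return 0;
}
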
@@ -262,11 +265,18 @@ extern int debug_lockdep_rcu_enabled(void);
  *
  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
+ *
+ * Note that rcu_read_lock() and the matching rcu_read_unlock() must
+ * occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock() in process context if the matching rcu_read_lock()
+ * was invoked from within an irq handler.
  */
 static inline int rcu_read_lock_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
+	if (rcu_is_cpu_idle())
+		return 0;
 	return lock_is_held(&rcu_lock_map);
 }
 
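
rcu_read_lock_held() now answers in a fixed order: default to 1 when the debug machinery cannot be trusted, force 0 on an idle CPU, and only then consult lockdep. A self-contained sketch of that decision order, with the three inputs stubbed out as plain flags:

#include <stdio.h>

static int debug_enabled = 1;	/* debug_lockdep_rcu_enabled() stand-in */
static int cpu_is_idle;		/* rcu_is_cpu_idle() stand-in */
static int lockdep_held;	/* lock_is_held(&rcu_lock_map) stand-in */

static int read_lock_held(void)
{
	if (!debug_enabled)
		return 1;	/* can't tell: never raise a false alarm */
	if (cpu_is_idle)
		return 0;	/* idle CPUs are never legitimate readers */
	return lockdep_held;	/* otherwise lockdep's opinion wins */
}

int main(void)
{
	lockdep_held = 1;
	cpu_is_idle = 1;	/* even "holding" the lock doesn't count */
	printf("held while idle? %d\n", read_lock_held());	/* prints 0 */
	return 0;
}
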
@@ -290,6 +300,19 @@ extern int rcu_read_lock_bh_held(void);
  *
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of
+ * view (ie: that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
+ * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
+ * that are in such a section, considering these as in extended quiescent
+ * state, so such a CPU is effectively never in an RCU read-side critical
+ * section regardless of what RCU primitives it invokes.  This state of
+ * affairs is required --- we need to keep an RCU-free window in idle
+ * where the CPU may possibly enter into low power mode. This way we can
+ * notice an extended quiescent state to other CPUs that started a grace
+ * period. Otherwise we would delay any grace period as long as we run in
+ * the idle task.
  */
 #ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
@@ -298,6 +321,8 @@ static inline int rcu_read_lock_sched_held(void)
 
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
+	if (rcu_is_cpu_idle())
+		return 0;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
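
rcu_read_lock_sched_held() applies the same idle override, but a sched-flavor reader can be entered by anything that disables preemption or interrupts, so lockdep's opinion is OR-ed with the preempt count and the irq state. A sketch of that widened test, again with stubbed inputs:

#include <stdio.h>

static int debug_enabled = 1;	/* debug_lockdep_rcu_enabled() stand-in */
static int cpu_is_idle;		/* rcu_is_cpu_idle() stand-in */
static int lockdep_opinion;	/* lock_is_held(&rcu_sched_lock_map) */
static int preempt_count_mock;	/* nonzero while preemption is off */
static int irqs_off;		/* irqs_disabled() stand-in */

static int sched_held(void)
{
	if (!debug_enabled)
		return 1;
	if (cpu_is_idle)
		return 0;
	/* any of the three is enough to make a sched-flavor reader */
	return lockdep_opinion || preempt_count_mock != 0 || irqs_off;
}

int main(void)
{
	preempt_count_mock = 1;	/* e.g. inside preempt_disable() */
	printf("sched reader? %d\n", sched_held());	/* prints 1 */
	return 0;
}
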
@@ -311,12 +336,8 @@ static inline int rcu_read_lock_sched_held(void)
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-# define rcu_read_acquire()		do { } while (0)
-# define rcu_read_release()		do { } while (0)
-# define rcu_read_acquire_bh()		do { } while (0)
-# define rcu_read_release_bh()		do { } while (0)
-# define rcu_read_acquire_sched()	do { } while (0)
-# define rcu_read_release_sched()	do { } while (0)
+# define rcu_lock_acquire(a)		do { } while (0)
+# define rcu_lock_release(a)		do { } while (0)
 
 static inline int rcu_read_lock_held(void)
 {
@@ -637,7 +658,7 @@ static inline void rcu_read_lock(void)
 {
 	__rcu_read_lock();
 	__acquire(RCU);
-	rcu_read_acquire();
+	rcu_lock_acquire(&rcu_lock_map);
 }
 
 /*
@@ -657,7 +678,7 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
-	rcu_read_release();
+	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);
 	__rcu_read_unlock();
 }
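
At the call sites a plain reader looks unchanged; what the comment added earlier demands is that lock and unlock happen in the same context. A runnable mock of a well-formed reader, where the rcu_* functions are no-op stand-ins and gp is a hypothetical protected pointer (real code would fetch it with rcu_dereference()):

#include <stdio.h>

static void rcu_read_lock(void)   { }	/* no-op stand-in */
static void rcu_read_unlock(void) { }	/* must pair in the same context */

static int global_val = 42;
static int *gp = &global_val;	/* hypothetical RCU-protected pointer */

int main(void)
{
	int *p;

	rcu_read_lock();	/* begin read-side critical section */
	p = gp;			/* real code: p = rcu_dereference(gp) */
	if (p)
		printf("val = %d\n", *p);
	rcu_read_unlock();	/* end it in the same context it began */
	return 0;
}
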
@@ -673,12 +694,17 @@ static inline void rcu_read_unlock(void)
  * critical sections in interrupt context can use just rcu_read_lock(),
  * though this should at least be commented to avoid confusing people
  * reading the code.
+ *
+ * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
+ * must occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
+ * was invoked from some other task.
  */
 static inline void rcu_read_lock_bh(void)
 {
 	local_bh_disable();
 	__acquire(RCU_BH);
-	rcu_read_acquire_bh();
+	rcu_lock_acquire(&rcu_bh_lock_map);
 }
 
 /*
@@ -688,7 +714,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_read_release_bh();
+	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
 	local_bh_enable();
 }
@@ -700,12 +726,17 @@ static inline void rcu_read_unlock_bh(void)
  * are being done using call_rcu_sched() or synchronize_rcu_sched().
  * Read-side critical sections can also be introduced by anything that
  * disables preemption, including local_irq_disable() and friends.
+ *
+ * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
+ * must occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock_sched() from process context if the matching
+ * rcu_read_lock_sched() was invoked from an NMI handler.
  */
 static inline void rcu_read_lock_sched(void)
 {
 	preempt_disable();
 	__acquire(RCU_SCHED);
-	rcu_read_acquire_sched();
+	rcu_lock_acquire(&rcu_sched_lock_map);
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -722,7 +753,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_read_release_sched();
+	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
 	preempt_enable();
 }
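
The sched flavor follows the same shape, except the critical section is delimited by disabling preemption, which is why the comment above forbids, for example, locking in an NMI handler and unlocking in process context. A final mock showing the pairing around a preemption-off region, with a plain counter standing in for the preempt counter:

#include <stdio.h>

static int preempt_count_mock;	/* stand-in for the preempt counter */

static void rcu_read_lock_sched(void)   { preempt_count_mock++; }
static void rcu_read_unlock_sched(void) { preempt_count_mock--; }

int main(void)
{
	rcu_read_lock_sched();	/* preemption off: sched GP must wait */
	printf("in sched reader, preempt_count=%d\n", preempt_count_mock);
	rcu_read_unlock_sched();	/* same context as the lock */
	return 0;
}
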