author		Heiko Carstens <heiko.carstens@de.ibm.com>	2012-02-01 13:30:46 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-02-21 12:06:09 -0500
commit		bde23c6892878e48f64de668660778991bc2fb56
tree		50d3ed77d58acc9098a2030be66728bc8c59f63e	/include/linux/rcupdate.h
parent		7129d383d9f46eb8276bee5fb46df63f09a70130
rcu: Convert WARN_ON_ONCE() in rcu_lock_acquire() to lockdep
The WARN_ON_ONCE() in rcu_lock_acquire() results in infinite recursion
on S390, and it also does not print very much information. Remove it,
and instead add lockdep-RCU assertions to RCU's read-side primitives.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--	include/linux/rcupdate.h | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 146d37d31778..6ee663c8745a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -248,13 +248,11 @@ static inline int rcu_is_cpu_idle(void)
 
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-	WARN_ON_ONCE(rcu_is_cpu_idle());
 	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
 }
 
 static inline void rcu_lock_release(struct lockdep_map *map)
 {
-	WARN_ON_ONCE(rcu_is_cpu_idle());
 	lock_release(map, 1, _THIS_IP_);
 }
 
@@ -699,6 +697,8 @@ static inline void rcu_read_lock(void)
 	__rcu_read_lock();
 	__acquire(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_lock() used illegally while idle");
 }
 
 /*
@@ -718,6 +718,8 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_unlock() used illegally while idle");
 	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);
 	__rcu_read_unlock();
@@ -745,6 +747,8 @@ static inline void rcu_read_lock_bh(void)
 	local_bh_disable();
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_lock_bh() used illegally while idle");
 }
 
 /*
@@ -754,6 +758,8 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
 	local_bh_enable();
@@ -777,6 +783,8 @@ static inline void rcu_read_lock_sched(void)
 	preempt_disable();
 	__acquire(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_lock_sched() used illegally while idle");
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -793,6 +801,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
 	preempt_enable();
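
For reference, the assertions added above go through the kernel's lockdep-RCU
machinery rather than a bare WARN_ON_ONCE(). The fragment below is only an
approximate sketch of how rcu_lockdep_assert() behaves in kernels of this era;
the authoritative definition lives elsewhere in include/linux/rcupdate.h and
may differ in detail. With CONFIG_PROVE_RCU enabled, a false condition is
reported once through lockdep_rcu_suspicious(); without it, the macro compiles
away entirely.

/*
 * Approximate sketch of rcu_lockdep_assert() -- not copied from this
 * commit; see include/linux/rcupdate.h in the corresponding kernel
 * tree for the authoritative definition.
 */
#ifdef CONFIG_PROVE_RCU
#define rcu_lockdep_assert(c, s)					\
	do {								\
		static bool __warned;					\
		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)
#else /* #ifdef CONFIG_PROVE_RCU */
#define rcu_lockdep_assert(c, s) do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */

Routing the check through lockdep avoids the self-recursion seen with
WARN_ON_ONCE() on S390 and produces a lockdep report (stack trace and
held-lock state) rather than a bare warning, which is the additional
information the commit message alludes to.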