author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2010-02-22 20:04:45 -0500
committer  Ingo Molnar <mingo@elte.hu>                      2010-02-25 03:40:59 -0500
commit     632ee200130899252508c478ad0e808222573fbc (patch)
tree       e3f44ca9daf2a2da2186419f703bc9cec8faa058 /include/linux
parent     996de8c6fe95c5a9fc524241cc8f142ef0605d3d (diff)
rcu: Introduce lockdep-based checking to RCU read-side primitives
Inspection is proving insufficient to catch all RCU misuses,
which is understandable given that rcu_dereference() might be
protected by any of four different flavors of RCU (RCU, RCU-bh,
RCU-sched, and SRCU), and might also/instead be protected by any
of a number of locking primitives. It is therefore time to
enlist the aid of lockdep.
This set of patches is inspired by earlier work by Peter
Zijlstra and Thomas Gleixner, and takes the following approach:
o Set up separate lockdep classes for RCU, RCU-bh, and RCU-sched.
o Set up separate lockdep classes for each instance of SRCU.
o Create primitives that check for being in an RCU read-side
critical section. These return exact answers if lockdep is
fully enabled, but if unsure, report being in an RCU read-side
critical section. (We want to avoid false positives!)
The primitives are:
For RCU: rcu_read_lock_held(void)
For RCU-bh: rcu_read_lock_bh_held(void)
For RCU-sched: rcu_read_lock_sched_held(void)
For SRCU: srcu_read_lock_held(struct srcu_struct *sp)
o Add rcu_dereference_check(), which takes a second argument
in which one places a boolean expression based on the above
primitives and/or lockdep_is_held().
o A new kernel configuration parameter, CONFIG_PROVE_RCU, enables
rcu_dereference_check(). This depends on CONFIG_PROVE_LOCKING,
and should be quite helpful during the transition period while
CONFIG_PROVE_RCU-unaware patches are in flight.
The existing rcu_dereference() primitive does no checking, but
upcoming patches will change that.
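As a usage sketch of the new interface (not part of this patch; struct foo,
gp, foo_mutex, and __foo_get_a() are invented for illustration), a helper
that may be called either from within an RCU reader or with the update-side
mutex held can document both legal contexts in the check expression:

#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct foo {
        int a;
};

static struct foo *gp;                  /* protected by RCU or foo_mutex */
static DEFINE_MUTEX(foo_mutex);

/* Caller must hold either rcu_read_lock() or foo_mutex. */
static int __foo_get_a(void)
{
        struct foo *p;

        p = rcu_dereference_check(gp,
                                  rcu_read_lock_held() ||
                                  lockdep_is_held(&foo_mutex));
        return p ? p->a : -1;
}

With CONFIG_PROVE_RCU enabled, calling __foo_get_a() while holding neither
rcu_read_lock() nor foo_mutex triggers a one-shot WARN_ON_ONCE(); with it
disabled, the macro degenerates to plain rcu_dereference().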
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-1-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/rcupdate.h   126
-rw-r--r--   include/linux/srcu.h        87
2 files changed, 201 insertions(+), 12 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 24440f4bf476..e3d37efe2703 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -78,14 +78,120 @@ extern void rcu_init(void);
 } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
 extern struct lockdep_map rcu_lock_map;
 # define rcu_read_acquire() \
                 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire() do { } while (0)
-# define rcu_read_release() do { } while (0)
-#endif
+
+extern struct lockdep_map rcu_bh_lock_map;
+# define rcu_read_acquire_bh() \
+                lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_bh() lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
+
+extern struct lockdep_map rcu_sched_lock_map;
+# define rcu_read_acquire_sched() \
+                lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_sched() \
+                lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
+
+/**
+ * rcu_read_lock_held - might we be in RCU read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int rcu_read_lock_held(void)
+{
+        if (debug_locks)
+                return lock_is_held(&rcu_lock_map);
+        return 1;
+}
+
+/**
+ * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU-bh read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int rcu_read_lock_bh_held(void)
+{
+        if (debug_locks)
+                return lock_is_held(&rcu_bh_lock_map);
+        return 1;
+}
+
+/**
+ * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU-sched read-side critical section unless it
+ * can prove otherwise.  Note that disabling of preemption (including
+ * disabling irqs) counts as an RCU-sched read-side critical section.
+ */
+static inline int rcu_read_lock_sched_held(void)
+{
+        int lockdep_opinion = 0;
+
+        if (debug_locks)
+                lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
+        return lockdep_opinion || preempt_count() != 0;
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+# define rcu_read_acquire() do { } while (0)
+# define rcu_read_release() do { } while (0)
+# define rcu_read_acquire_bh() do { } while (0)
+# define rcu_read_release_bh() do { } while (0)
+# define rcu_read_acquire_sched() do { } while (0)
+# define rcu_read_release_sched() do { } while (0)
+
+static inline int rcu_read_lock_held(void)
+{
+        return 1;
+}
+
+static inline int rcu_read_lock_bh_held(void)
+{
+        return 1;
+}
+
+static inline int rcu_read_lock_sched_held(void)
+{
+        return preempt_count() != 0;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#ifdef CONFIG_PROVE_RCU
+
+/**
+ * rcu_dereference_check - rcu_dereference with debug checking
+ *
+ * Do an rcu_dereference(), but check that the context is correct.
+ * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
+ * ensure that the rcu_dereference_check() executes within an RCU
+ * read-side critical section.  It is also possible to check for
+ * locks being held, for example, by using lockdep_is_held().
+ */
+#define rcu_dereference_check(p, c) \
+        ({ \
+                if (debug_locks) \
+                        WARN_ON_ONCE(!(c)); \
+                rcu_dereference(p); \
+        })
+
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_dereference_check(p, c) rcu_dereference(p)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
@@ -160,7 +266,7 @@ static inline void rcu_read_lock_bh(void)
 {
         __rcu_read_lock_bh();
         __acquire(RCU_BH);
-        rcu_read_acquire();
+        rcu_read_acquire_bh();
 }
 
 /*
@@ -170,7 +276,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-        rcu_read_release();
+        rcu_read_release_bh();
         __release(RCU_BH);
         __rcu_read_unlock_bh();
 }
@@ -188,7 +294,7 @@ static inline void rcu_read_lock_sched(void)
 {
         preempt_disable();
         __acquire(RCU_SCHED);
-        rcu_read_acquire();
+        rcu_read_acquire_sched();
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -205,7 +311,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-        rcu_read_release();
+        rcu_read_release_sched();
         __release(RCU_SCHED);
         preempt_enable();
 }
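The rcu_read_lock_sched_held() helper above reports an RCU-sched reader
either when lockdep has seen rcu_read_lock_sched() (via the new
rcu_read_acquire_sched()) or when the preempt count is nonzero. A minimal
sketch using the explicit form (struct item, gp_sched, and consume_item()
are hypothetical names, not from this patch):

#include <linux/rcupdate.h>

struct item;
extern struct item *gp_sched;           /* updaters use synchronize_sched() */
extern void consume_item(struct item *p);

static void process_item(void)
{
        struct item *p;

        rcu_read_lock_sched();          /* disables preemption, informs lockdep */
        p = rcu_dereference_check(gp_sched,
                                  rcu_read_lock_sched_held());
        if (p)
                consume_item(p);
        rcu_read_unlock_sched();
}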
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 4765d97dcafb..adbe1670b366 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -35,6 +35,9 @@ struct srcu_struct {
         int completed;
         struct srcu_struct_array *per_cpu_ref;
         struct mutex mutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 };
 
 #ifndef CONFIG_PREEMPT
@@ -43,12 +46,92 @@ struct srcu_struct {
 #define srcu_barrier()
 #endif /* #else #ifndef CONFIG_PREEMPT */
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+                       struct lock_class_key *key);
+
+#define init_srcu_struct(sp) \
+({ \
+        static struct lock_class_key __srcu_key; \
+\
+        __init_srcu_struct((sp), #sp, &__srcu_key); \
+})
+
+# define srcu_read_acquire(sp) \
+                lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define srcu_read_release(sp) \
+                lock_release(&(sp)->dep_map, 1, _THIS_IP_)
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 int init_srcu_struct(struct srcu_struct *sp);
+
+# define srcu_read_acquire(sp) do { } while (0)
+# define srcu_read_release(sp) do { } while (0)
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 void cleanup_srcu_struct(struct srcu_struct *sp);
-int srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
-void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
+int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
+void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
 void synchronize_srcu(struct srcu_struct *sp);
 void synchronize_srcu_expedited(struct srcu_struct *sp);
 long srcu_batches_completed(struct srcu_struct *sp);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+/**
+ * srcu_read_lock_held - might we be in SRCU read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an SRCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an SRCU read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int srcu_read_lock_held(struct srcu_struct *sp)
+{
+        if (debug_locks)
+                return lock_is_held(&sp->dep_map);
+        return 1;
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline int srcu_read_lock_held(struct srcu_struct *sp)
+{
+        return 1;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+/**
+ * srcu_read_lock - register a new reader for an SRCU-protected structure.
+ * @sp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section.  Note that SRCU read-side
+ * critical sections may be nested.
+ */
+static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+{
+        int retval = __srcu_read_lock(sp);
+
+        srcu_read_acquire(sp);
+        return retval;
+}
+
+/**
+ * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
+ * @sp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock().
+ *
+ * Exit an SRCU read-side critical section.
+ */
+static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
+        __releases(sp)
+{
+        srcu_read_release(sp);
+        __srcu_read_unlock(sp, idx);
+}
+
 #endif
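Since each srcu_struct now carries its own dep_map, and the init_srcu_struct()
wrapper above supplies a distinct lock_class_key per instance, readers can be
checked per SRCU domain. A minimal sketch (struct blob, my_srcu, my_ptr, and
use_blob() are invented for illustration):

#include <linux/rcupdate.h>
#include <linux/srcu.h>

struct blob;
extern void use_blob(struct blob *p);

static struct srcu_struct my_srcu;      /* init_srcu_struct(&my_srcu) at init time */
static struct blob *my_ptr;

static void blob_reader(void)
{
        struct blob *p;
        int idx;

        idx = srcu_read_lock(&my_srcu);
        p = rcu_dereference_check(my_ptr,
                                  srcu_read_lock_held(&my_srcu));
        if (p)
                use_blob(p);
        srcu_read_unlock(&my_srcu, idx);
}

Because every init_srcu_struct() call registers its own lockdep class,
srcu_read_lock_held(&my_srcu) is satisfied only by a reader of this
particular srcu_struct, not by some unrelated SRCU domain.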