path: root/include/linux/rcupdate.h
Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--  include/linux/rcupdate.h | 117
1 file changed, 93 insertions(+), 24 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c84373626336..db266bbed23f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -41,6 +41,10 @@
 #include <linux/lockdep.h>
 #include <linux/completion.h>
 
+#ifdef CONFIG_RCU_TORTURE_TEST
+extern int rcutorture_runnable; /* for sysctl */
+#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
+
 /**
  * struct rcu_head - callback structure for use with RCU
  * @next: next update requests in a list
@@ -97,6 +101,8 @@ extern struct lockdep_map rcu_sched_lock_map;
 # define rcu_read_release_sched() \
                 lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
 
+extern int debug_lockdep_rcu_enabled(void);
+
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
@@ -104,28 +110,21 @@ extern struct lockdep_map rcu_sched_lock_map;
  * an RCU read-side critical section. In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_held(void)
 {
-        if (debug_locks)
-                return lock_is_held(&rcu_lock_map);
-        return 1;
+        if (!debug_lockdep_rcu_enabled())
+                return 1;
+        return lock_is_held(&rcu_lock_map);
 }
 
-/**
- * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
- *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-bh read-side critical section unless it can
- * prove otherwise.
+/*
+ * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
+ * hell.
  */
-static inline int rcu_read_lock_bh_held(void)
-{
-        if (debug_locks)
-                return lock_is_held(&rcu_bh_lock_map);
-        return 1;
-}
+extern int rcu_read_lock_bh_held(void);
 
 /**
  * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
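For illustration only (not part of this patch), a minimal sketch of how the lockdep-backed rcu_read_lock_held() above can flag readers that forgot rcu_read_lock(); struct foo and __global_foo_ptr are hypothetical names:

struct foo {
        int field;
};
extern struct foo *__global_foo_ptr;          /* hypothetical RCU-protected pointer */

static inline struct foo *get_global_foo(void)
{
        /* Fires only when lockdep can prove no rcu_read_lock() is held. */
        WARN_ON_ONCE(!rcu_read_lock_held());
        return rcu_dereference(__global_foo_ptr);
}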
@@ -135,15 +134,26 @@ static inline int rcu_read_lock_bh_held(void)
  * this assumes we are in an RCU-sched read-side critical section unless it
  * can prove otherwise. Note that disabling of preemption (including
  * disabling irqs) counts as an RCU-sched read-side critical section.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
+#ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
         int lockdep_opinion = 0;
 
+        if (!debug_lockdep_rcu_enabled())
+                return 1;
         if (debug_locks)
                 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-        return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+        return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
 }
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+        return 1;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT */
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
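Likewise, a minimal sketch (not from this patch; gp echoes the pointer name used in the old comment text, and use_foo() is a hypothetical consumer) of the RCU-sched reader that rcu_read_lock_sched_held() is meant to vouch for:

struct foo *p;

preempt_disable();      /* disabled preemption is an RCU-sched read-side critical section */
p = rcu_dereference_check(gp, rcu_read_lock_sched_held());
if (p)
        use_foo(p);     /* hypothetical consumer */
preempt_enable();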
@@ -164,38 +174,97 @@ static inline int rcu_read_lock_bh_held(void)
         return 1;
 }
 
+#ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-        return preempt_count() != 0 || !rcu_scheduler_active;
+        return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
 }
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+        return 1;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT */
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 #ifdef CONFIG_PROVE_RCU
 
+extern int rcu_my_thread_group_empty(void);
+
 /**
  * rcu_dereference_check - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Do an rcu_dereference(), but check that the conditions under which the
+ * dereference will take place are correct. Typically the conditions indicate
+ * the various locking conditions that should be held at that point. The check
+ * should return true if the conditions are satisfied.
  *
- * Do an rcu_dereference(), but check that the context is correct.
- * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
- * ensure that the rcu_dereference_check() executes within an RCU
- * read-side critical section. It is also possible to check for
- * locks being held, for example, by using lockdep_is_held().
+ * For example:
+ *
+ *      bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
+ *                                            lockdep_is_held(&foo->lock));
+ *
+ * could be used to indicate to lockdep that foo->bar may only be dereferenced
+ * if either the RCU read lock is held, or that the lock required to replace
+ * the bar struct at foo->bar is held.
+ *
+ * Note that the list of conditions may also include indications of when a lock
+ * need not be held, for example during initialisation or destruction of the
+ * target struct:
+ *
+ *      bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
+ *                                            lockdep_is_held(&foo->lock) ||
+ *                                            atomic_read(&foo->usage) == 0);
  */
 #define rcu_dereference_check(p, c) \
         ({ \
-                if (debug_locks && !(c)) \
+                if (debug_lockdep_rcu_enabled() && !(c)) \
                         lockdep_rcu_dereference(__FILE__, __LINE__); \
                 rcu_dereference_raw(p); \
         })
 
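To make the usage above concrete in a full reader, here is a sketch reusing the hypothetical foo/bar names from the comment (consume_bar() is likewise hypothetical; not part of this patch):

struct bar *b;

rcu_read_lock();
b = rcu_dereference_check(foo->bar,
                          rcu_read_lock_held() ||
                          lockdep_is_held(&foo->lock));
if (b)
        consume_bar(b);         /* hypothetical consumer of the protected data */
rcu_read_unlock();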
+/**
+ * rcu_dereference_protected - fetch RCU pointer when updates prevented
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
+ * is useful in cases where update-side locks prevent the value of the
+ * pointer from changing. Please note that this primitive does -not-
+ * prevent the compiler from repeating this reference or combining it
+ * with other references, so it should not be used without protection
+ * of appropriate locks.
+ */
+#define rcu_dereference_protected(p, c) \
+        ({ \
+                if (debug_lockdep_rcu_enabled() && !(c)) \
+                        lockdep_rcu_dereference(__FILE__, __LINE__); \
+                (p); \
+        })
+
 #else /* #ifdef CONFIG_PROVE_RCU */
 
 #define rcu_dereference_check(p, c)     rcu_dereference_raw(p)
+#define rcu_dereference_protected(p, c) (p)
 
 #endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
+ * rcu_access_pointer - fetch RCU pointer with no dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL. This may also be used in cases where update-side locks prevent
+ * the value of the pointer from changing, but rcu_dereference_protected()
+ * is a lighter-weight primitive for this use case.
+ */
+#define rcu_access_pointer(p)   ACCESS_ONCE(p)
+
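For illustration, a minimal update-side sketch (hypothetical foo->bar, foo->lock, and newbar; not part of this patch) contrasting the two new primitives: rcu_access_pointer() tests the pointer without dereferencing it, while rcu_dereference_protected() fetches it once the update-side lock rules out concurrent updates:

struct bar *old;

spin_lock(&foo->lock);
if (rcu_access_pointer(foo->bar)) {     /* NULL test only, no dereference */
        old = rcu_dereference_protected(foo->bar,
                                        lockdep_is_held(&foo->lock));
        rcu_assign_pointer(foo->bar, newbar);
        spin_unlock(&foo->lock);
        synchronize_rcu();              /* wait for pre-existing readers */
        kfree(old);
} else {
        rcu_assign_pointer(foo->bar, newbar);
        spin_unlock(&foo->lock);
}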
+/**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
  * When synchronize_rcu() is invoked on one CPU while other CPUs