Diffstat (limited to 'include/linux/rcupdate.h')
-rw-r--r--  include/linux/rcupdate.h | 165
1 file changed, 150 insertions(+), 15 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 24440f4bf476..c84373626336 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -62,6 +62,8 @@ extern int sched_expedited_torture_stats(char *page);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern int rcu_scheduler_active;
+extern void rcu_scheduler_starting(void);
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
@@ -78,14 +80,120 @@ extern void rcu_init(void);
 } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
 extern struct lockdep_map rcu_lock_map;
 # define rcu_read_acquire() \
 		lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire()	do { } while (0)
-# define rcu_read_release()	do { } while (0)
-#endif
+
+extern struct lockdep_map rcu_bh_lock_map;
+# define rcu_read_acquire_bh() \
+		lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_bh()	lock_release(&rcu_bh_lock_map, 1, _THIS_IP_)
+
+extern struct lockdep_map rcu_sched_lock_map;
+# define rcu_read_acquire_sched() \
+		lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release_sched() \
+		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
+
+/**
+ * rcu_read_lock_held - might we be in RCU read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an RCU read-side critical section.  In the absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int rcu_read_lock_held(void)
+{
+	if (debug_locks)
+		return lock_is_held(&rcu_lock_map);
+	return 1;
+}
+
+/**
+ * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
+ * an RCU-bh read-side critical section.  In the absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU-bh read-side critical section unless it can
+ * prove otherwise.
+ */
+static inline int rcu_read_lock_bh_held(void)
+{
+	if (debug_locks)
+		return lock_is_held(&rcu_bh_lock_map);
+	return 1;
+}
+
+/**
+ * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ *
+ * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In the absence of CONFIG_PROVE_LOCKING,
+ * this assumes we are in an RCU-sched read-side critical section unless it
+ * can prove otherwise.  Note that disabling of preemption (including
+ * disabling irqs) counts as an RCU-sched read-side critical section.
+ */
+static inline int rcu_read_lock_sched_held(void)
+{
+	int lockdep_opinion = 0;
+
+	if (debug_locks)
+		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
+	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+# define rcu_read_acquire()		do { } while (0)
+# define rcu_read_release()		do { } while (0)
+# define rcu_read_acquire_bh()		do { } while (0)
+# define rcu_read_release_bh()		do { } while (0)
+# define rcu_read_acquire_sched()	do { } while (0)
+# define rcu_read_release_sched()	do { } while (0)
+
+static inline int rcu_read_lock_held(void)
+{
+	return 1;
+}
+
+static inline int rcu_read_lock_bh_held(void)
+{
+	return 1;
+}
+
+static inline int rcu_read_lock_sched_held(void)
+{
+	return preempt_count() != 0 || !rcu_scheduler_active;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#ifdef CONFIG_PROVE_RCU
+
+/**
+ * rcu_dereference_check - rcu_dereference with debug checking
+ *
+ * Do an rcu_dereference(), but check that the context is correct.
+ * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
+ * ensure that the rcu_dereference_check() executes within an RCU
+ * read-side critical section.  It is also possible to check for
+ * locks being held, for example, by using lockdep_is_held().
+ */
+#define rcu_dereference_check(p, c)				\
+	({							\
+		if (debug_locks && !(c))			\
+			lockdep_rcu_dereference(__FILE__, __LINE__); \
+		rcu_dereference_raw(p);				\
+	})
+
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_dereference_check(p, c)	rcu_dereference_raw(p)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
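
The kerneldoc above sketches the intended use of rcu_dereference_check(). As a concrete illustration (struct foo, gp, foo_lock, and do_something() are hypothetical names, not part of this patch), a reader that may be protected either by rcu_read_lock() or by holding foo_lock would write:

	struct foo *p;

	rcu_read_lock();
	p = rcu_dereference_check(gp,
				  rcu_read_lock_held() ||
				  lockdep_is_held(&foo_lock));
	if (p)
		do_something(p->a);
	rcu_read_unlock();

Under CONFIG_PROVE_RCU, lockdep complains only if neither condition holds at the point of the dereference.
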
@@ -160,7 +268,7 @@ static inline void rcu_read_lock_bh(void)
 {
 	__rcu_read_lock_bh();
 	__acquire(RCU_BH);
-	rcu_read_acquire();
+	rcu_read_acquire_bh();
 }
 
 /*
@@ -170,7 +278,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_read_release();
+	rcu_read_release_bh();
 	__release(RCU_BH);
 	__rcu_read_unlock_bh();
 }
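
For illustration (gp and struct foo are again hypothetical), a softirq-context reader pairs these _bh primitives with the matching checked accessor that this patch adds further down:

	struct foo *p;

	rcu_read_lock_bh();
	p = rcu_dereference_bh(gp);	/* checked against rcu_read_lock_bh_held() */
	if (p)
		do_something(p->a);
	rcu_read_unlock_bh();
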
@@ -188,7 +296,7 @@ static inline void rcu_read_lock_sched(void)
 {
 	preempt_disable();
 	__acquire(RCU_SCHED);
-	rcu_read_acquire();
+	rcu_read_acquire_sched();
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -205,7 +313,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_read_release();
+	rcu_read_release_sched();
 	__release(RCU_SCHED);
 	preempt_enable();
 }
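
The RCU-sched flavor follows the same pattern (hypothetical gp and struct foo as before); note that, per rcu_read_lock_sched_held() above, a bare preempt_disable() region would also satisfy the check:

	struct foo *p;

	rcu_read_lock_sched();		/* or preempt_disable() */
	p = rcu_dereference_sched(gp);	/* checked accessor added later in this patch */
	if (p)
		do_something(p->a);
	rcu_read_unlock_sched();
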
@@ -219,22 +327,49 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 
 /**
- * rcu_dereference - fetch an RCU-protected pointer in an
- * RCU read-side critical section.  This pointer may later
- * be safely dereferenced.
+ * rcu_dereference_raw - fetch an RCU-protected pointer
+ *
+ * The caller must be within some flavor of RCU read-side critical
+ * section, or must be otherwise preventing the pointer from changing,
+ * for example, by holding an appropriate lock.  This pointer may later
+ * be safely dereferenced.  It is the caller's responsibility to have
+ * done the right thing, as this primitive does no checking of any kind.
  *
  * Inserts memory barriers on architectures that require them
  * (currently only the Alpha), and, more importantly, documents
  * exactly which pointers are protected by RCU.
  */
-
-#define rcu_dereference(p)	({ \
+#define rcu_dereference_raw(p)	({ \
 				typeof(p) _________p1 = ACCESS_ONCE(p); \
 				smp_read_barrier_depends(); \
 				(_________p1); \
 				})
 
 /**
+ * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference(p) \
+	rcu_dereference_check(p, rcu_read_lock_held())
+
+/**
+ * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_bh(p) \
+	rcu_dereference_check(p, rcu_read_lock_bh_held())
+
+/**
+ * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_sched(p) \
+	rcu_dereference_check(p, rcu_read_lock_sched_held())
+
+/**
  * rcu_assign_pointer - assign (publicize) a pointer to a newly
  * initialized structure that will be dereferenced by RCU read-side
  * critical sections.  Returns the value assigned.
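
To close the loop, a minimal update-side sketch (gp, foo_lock, and struct foo remain hypothetical) that publishes a new structure for the checked readers shown earlier:

	struct foo *newp, *old;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (newp) {
		newp->a = 1;
		spin_lock(&foo_lock);
		old = rcu_dereference_check(gp, lockdep_is_held(&foo_lock));
		rcu_assign_pointer(gp, newp);	/* orders initialization before publication */
		spin_unlock(&foo_lock);
		synchronize_rcu();		/* wait for pre-existing readers */
		kfree(old);
	}

Holding foo_lock satisfies rcu_dereference_check() on the update side even though no RCU read-side critical section is in effect.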