Diffstat (limited to 'include/linux/rcupdate.h')

-rw-r--r--    include/linux/rcupdate.h | 574 +-
1 file changed, 420 insertions(+), 154 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 83af1f8d8b74..99f9aa7c2804 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -41,11 +41,29 @@
 #include <linux/lockdep.h>
 #include <linux/completion.h>
 #include <linux/debugobjects.h>
+#include <linux/compiler.h>
 
 #ifdef CONFIG_RCU_TORTURE_TEST
 extern int rcutorture_runnable; /* for sysctl */
 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
 
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+extern void rcutorture_record_test_transition(void);
+extern void rcutorture_record_progress(unsigned long vernum);
+#else
+static inline void rcutorture_record_test_transition(void)
+{
+}
+static inline void rcutorture_record_progress(unsigned long vernum)
+{
+}
+#endif
+
+#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
+#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
+#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
+#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
+
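A note for readers on the new comparison macros above: they compare free-running counters that are allowed to wrap, where a plain a >= b gives the wrong answer at the wrap point. A minimal sketch with made-up counter values:

        unsigned long snap = ULONG_MAX - 1;     /* counter just before wrap */
        unsigned long cur = snap + 3;           /* has wrapped around to 1 */

        /* cur is logically 3 ticks past snap, yet cur >= snap is false here. */
        /* ULONG_CMP_GE() still answers correctly: (cur - snap) == 3, which */
        /* is well below ULONG_MAX / 2, so the counters are "close" and GE holds. */
        WARN_ON(!ULONG_CMP_GE(cur, snap));
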
 /**
  * struct rcu_head - callback structure for use with RCU
  * @next: next update requests in a list
@@ -57,29 +75,91 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
-extern void rcu_barrier(void);
+extern void call_rcu_sched(struct rcu_head *head,
+                           void (*func)(struct rcu_head *rcu));
+extern void synchronize_sched(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
-extern void synchronize_sched_expedited(void);
-extern int sched_expedited_torture_stats(char *page);
+
+static inline void __rcu_read_lock_bh(void)
+{
+        local_bh_disable();
+}
+
+static inline void __rcu_read_unlock_bh(void)
+{
+        local_bh_enable();
+}
+
+#ifdef CONFIG_PREEMPT_RCU
+
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+void synchronize_rcu(void);
+
+/*
+ * Defined as a macro as it is a very low level header included from
+ * areas that don't even know about current.  This gives the rcu_read_lock()
+ * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
+ * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+ */
+#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+static inline void __rcu_read_lock(void)
+{
+        preempt_disable();
+}
+
+static inline void __rcu_read_unlock(void)
+{
+        preempt_enable();
+}
+
+static inline void synchronize_rcu(void)
+{
+        synchronize_sched();
+}
+
+static inline int rcu_preempt_depth(void)
+{
+        return 0;
+}
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 /* Internal to kernel */
-extern void rcu_init(void);
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
+extern void rcu_check_callbacks(int cpu, int user);
+struct notifier_block;
+
+#ifdef CONFIG_NO_HZ
+
+extern void rcu_enter_nohz(void);
+extern void rcu_exit_nohz(void);
+
+#else /* #ifdef CONFIG_NO_HZ */
+
+static inline void rcu_enter_nohz(void)
+{
+}
+
+static inline void rcu_exit_nohz(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_NO_HZ */
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
-#elif defined(CONFIG_TINY_RCU)
+#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
 #include <linux/rcutiny.h>
 #else
 #error "Unknown RCU implementation specified to kernel configuration"
 #endif
 
-#define RCU_HEAD_INIT   { .next = NULL, .func = NULL }
-#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
-#define INIT_RCU_HEAD(ptr) do { \
-       (ptr)->next = NULL; (ptr)->func = NULL; \
-} while (0)
-
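Worth noting for code tracking this change: callers of the deleted INIT_RCU_HEAD() can simply drop the call, since an rcu_head needs no initialization before being handed to call_rcu() (the on-stack helpers discussed just below exist only for CONFIG_DEBUG_OBJECTS_RCU_HEAD). A hypothetical before/after sketch:

        struct foo {
                int data;
                struct rcu_head rcu;            /* no initializer required */
        };

        /* Before: INIT_RCU_HEAD(&p->rcu); call_rcu(&p->rcu, foo_cb); */
        /* After: the initializer is simply gone; foo_cb is a hypothetical callback. */
        call_rcu(&p->rcu, foo_cb);
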
 /*
  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
  * initialization and destruction of rcu_head on the stack. rcu_head structures
@@ -120,14 +200,15 @@ extern struct lockdep_map rcu_sched_lock_map;
 extern int debug_lockdep_rcu_enabled(void);
 
 /**
- * rcu_read_lock_held - might we be in RCU read-side critical section?
+ * rcu_read_lock_held() - might we be in RCU read-side critical section?
  *
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
  * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
  * this assumes we are in an RCU read-side critical section unless it can
- * prove otherwise.
+ * prove otherwise.  This is useful for debug checks in functions that
+ * require that they be called within an RCU read-side critical section.
  *
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
  */
 static inline int rcu_read_lock_held(void)
@@ -144,14 +225,16 @@ static inline int rcu_read_lock_held(void)
 extern int rcu_read_lock_bh_held(void);
 
 /**
- * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
  *
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
  * RCU-sched read-side critical section.  In absence of
  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
  * critical section unless it can prove otherwise.  Note that disabling
  * of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section.
+ * read-side critical section.  This is useful for debug checks in functions
+ * that require that they be called within an RCU-sched read-side
+ * critical section.
  *
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
@@ -211,7 +294,11 @@ static inline int rcu_read_lock_sched_held(void)
 
 extern int rcu_my_thread_group_empty(void);
 
-#define __do_rcu_dereference_check(c) \
+/**
+ * rcu_lockdep_assert - emit lockdep splat if specified condition not met
+ * @c: condition to check
+ */
+#define rcu_lockdep_assert(c) \
         do { \
                 static bool __warned; \
                 if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
@@ -220,41 +307,183 @@ extern int rcu_my_thread_group_empty(void);
                 } \
         } while (0)
 
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_lockdep_assert(c) do { } while (0)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
+/*
+ * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
+ * and rcu_assign_pointer().  Some of these could be folded into their
+ * callers, but they are left separate in order to ease introduction of
+ * multiple flavors of pointers to match the multiple flavors of RCU
+ * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
+ * the future.
+ */
+
+#ifdef __CHECKER__
+#define rcu_dereference_sparse(p, space) \
+        ((void)(((typeof(*p) space *)p) == p))
+#else /* #ifdef __CHECKER__ */
+#define rcu_dereference_sparse(p, space)
+#endif /* #else #ifdef __CHECKER__ */
+
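For context on what rcu_dereference_sparse() is checking: when run under sparse (__CHECKER__), it verifies that the pointer carries the expected address-space annotation. A sketch of the annotation in use, with invented names:

        struct foo __rcu *gp;           /* publishable only via rcu_assign_pointer() */

        p = gp;                         /* sparse: warning, incompatible address spaces */
        p = rcu_dereference(gp);        /* OK: the accessor strips the __rcu annotation */
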
+#define __rcu_access_pointer(p, space) \
+        ({ \
+                typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
+                rcu_dereference_sparse(p, space); \
+                ((typeof(*p) __force __kernel *)(_________p1)); \
+        })
+#define __rcu_dereference_check(p, c, space) \
+        ({ \
+                typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
+                rcu_lockdep_assert(c); \
+                rcu_dereference_sparse(p, space); \
+                smp_read_barrier_depends(); \
+                ((typeof(*p) __force __kernel *)(_________p1)); \
+        })
+#define __rcu_dereference_protected(p, c, space) \
+        ({ \
+                rcu_lockdep_assert(c); \
+                rcu_dereference_sparse(p, space); \
+                ((typeof(*p) __force __kernel *)(p)); \
+        })
+
+#define __rcu_access_index(p, space) \
+        ({ \
+                typeof(p) _________p1 = ACCESS_ONCE(p); \
+                rcu_dereference_sparse(p, space); \
+                (_________p1); \
+        })
+#define __rcu_dereference_index_check(p, c) \
+        ({ \
+                typeof(p) _________p1 = ACCESS_ONCE(p); \
+                rcu_lockdep_assert(c); \
+                smp_read_barrier_depends(); \
+                (_________p1); \
+        })
+#define __rcu_assign_pointer(p, v, space) \
+        ({ \
+                if (!__builtin_constant_p(v) || \
+                    ((v) != NULL)) \
+                        smp_wmb(); \
+                (p) = (typeof(*v) __force space *)(v); \
+        })
+
+
+/**
+ * rcu_access_pointer() - fetch RCU pointer with no dereferencing
+ * @p: The pointer to read
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL.  Although rcu_access_pointer() may also be used in cases where
+ * update-side locks prevent the value of the pointer from changing, you
+ * should instead use rcu_dereference_protected() for this use case.
+ */
+#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
+
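A concrete form of the NULL-test case described above (a sketch; gp is the same hypothetical __rcu-annotated pointer):

        /* No dereference follows, so the barrier-free accessor suffices. */
        if (rcu_access_pointer(gp) == NULL)
                return -ENOENT;         /* nothing published yet */
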
 /**
- * rcu_dereference_check - rcu_dereference with debug checking
+ * rcu_dereference_check() - rcu_dereference with debug checking
  * @p: The pointer to read, prior to dereferencing
  * @c: The conditions under which the dereference will take place
  *
  * Do an rcu_dereference(), but check that the conditions under which the
- * dereference will take place are correct.  Typically the conditions indicate
- * the various locking conditions that should be held at that point.  The check
- * should return true if the conditions are satisfied.
+ * dereference will take place are correct.  Typically the conditions
+ * indicate the various locking conditions that should be held at that
+ * point.  The check should return true if the conditions are satisfied.
+ * An implicit check for being in an RCU read-side critical section
+ * (rcu_read_lock()) is included.
  *
  * For example:
  *
- *      bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
- *                                            lockdep_is_held(&foo->lock));
+ *      bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
  *
  * could be used to indicate to lockdep that foo->bar may only be dereferenced
- * if either the RCU read lock is held, or that the lock required to replace
+ * if either rcu_read_lock() is held, or that the lock required to replace
  * the bar struct at foo->bar is held.
  *
  * Note that the list of conditions may also include indications of when a lock
  * need not be held, for example during initialisation or destruction of the
  * target struct:
  *
- *      bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
- *                                            lockdep_is_held(&foo->lock) ||
+ *      bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
  *                                            atomic_read(&foo->usage) == 0);
+ *
+ * Inserts memory barriers on architectures that require them
+ * (currently only the Alpha), prevents the compiler from refetching
+ * (and from merging fetches), and, more importantly, documents exactly
+ * which pointers are protected by RCU and checks that the pointer is
+ * annotated as __rcu.
  */
 #define rcu_dereference_check(p, c) \
-        ({ \
-                __do_rcu_dereference_check(c); \
-                rcu_dereference_raw(p); \
-        })
+        __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
+
+/**
+ * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-bh counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_bh_check(p, c) \
+        __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
+
+/**
+ * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_sched_check(p, c) \
+        __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
+                                __rcu)
+
+#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
 
 /**
- * rcu_dereference_protected - fetch RCU pointer when updates prevented
+ * rcu_access_index() - fetch RCU index with no dereferencing
+ * @p: The index to read
+ *
+ * Return the value of the specified RCU-protected index, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
+ * when the value of this index is accessed, but the index is not
+ * dereferenced, for example, when testing an RCU-protected index against
+ * -1.  Although rcu_access_index() may also be used in cases where
+ * update-side locks prevent the value of the index from changing, you
+ * should instead use rcu_dereference_index_protected() for this use case.
+ */
+#define rcu_access_index(p) __rcu_access_index((p), __rcu)
+
+/**
+ * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Similar to rcu_dereference_check(), but omits the sparse checking.
+ * This allows rcu_dereference_index_check() to be used on integers,
+ * which can then be used as array indices.  Attempting to use
+ * rcu_dereference_check() on an integer will give compiler warnings
+ * because the sparse address-space mechanism relies on dereferencing
+ * the RCU-protected pointer.  Dereferencing integers is not something
+ * that even gcc will put up with.
+ *
+ * Note that this function does not implicitly check for RCU read-side
+ * critical sections.  If this function gains lots of uses, it might
+ * make sense to provide versions for each flavor of RCU, but it does
+ * not make sense as of early 2010.
+ */
+#define rcu_dereference_index_check(p, c) \
+        __rcu_dereference_index_check((p), (c))
+
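To make the index use case concrete, a sketch of an RCU-protected array index (names invented; note the explicit condition, since no RCU check is implied here):

        static struct foo table[NR_SLOTS];      /* hypothetical table */
        static int cur_slot;                    /* published index, updated by writers */

        rcu_read_lock();
        idx = rcu_dereference_index_check(cur_slot, rcu_read_lock_held());
        use_foo(&table[idx]);                   /* idx fetched once, with ordering */
        rcu_read_unlock();
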
+/**
+ * rcu_dereference_protected() - fetch RCU pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
  *
  * Return the value of the specified RCU-protected pointer, but omit
  * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
@@ -263,35 +492,61 @@ extern int rcu_my_thread_group_empty(void);
  * prevent the compiler from repeating this reference or combining it
  * with other references, so it should not be used without protection
  * of appropriate locks.
+ *
+ * This function is only for update-side use.  Using this function
+ * when protected only by rcu_read_lock() will result in infrequent
+ * but very ugly failures.
  */
 #define rcu_dereference_protected(p, c) \
-        ({ \
-                __do_rcu_dereference_check(c); \
-                (p); \
-        })
+        __rcu_dereference_protected((p), (c), __rcu)
 
-#else /* #ifdef CONFIG_PROVE_RCU */
+/**
+ * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-bh counterpart to rcu_dereference_protected().
+ */
+#define rcu_dereference_bh_protected(p, c) \
+        __rcu_dereference_protected((p), (c), __rcu)
 
-#define rcu_dereference_check(p, c)     rcu_dereference_raw(p)
-#define rcu_dereference_protected(p, c) (p)
+/**
+ * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_protected().
+ */
+#define rcu_dereference_sched_protected(p, c) \
+        __rcu_dereference_protected((p), (c), __rcu)
 
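A sketch of the update-side pattern these *_protected() accessors are for, reusing the hypothetical foo/bar example from the rcu_dereference_check() comment:

        spin_lock(&foo->lock);
        bar = rcu_dereference_protected(foo->bar,
                                        lockdep_is_held(&foo->lock));
        bar->count++;                   /* updates excluded, so no barrier needed */
        spin_unlock(&foo->lock);
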
-#endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
- * rcu_access_pointer - fetch RCU pointer with no dereferencing
+ * rcu_dereference() - fetch RCU-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
  *
- * Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
- * when the value of this pointer is accessed, but the pointer is not
- * dereferenced, for example, when testing an RCU-protected pointer against
- * NULL.  This may also be used in cases where update-side locks prevent
- * the value of the pointer from changing, but rcu_dereference_protected()
- * is a lighter-weight primitive for this use case.
+ * This is a simple wrapper around rcu_dereference_check().
+ */
+#define rcu_dereference(p) rcu_dereference_check(p, 0)
+
+/**
+ * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
+
+/**
+ * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
  */
-#define rcu_access_pointer(p)   ACCESS_ONCE(p)
+#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
 
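Putting the reader-side pieces together, the canonical pattern now reads as follows (a sketch; gp and struct foo are hypothetical):

        rcu_read_lock();
        p = rcu_dereference(gp);        /* implicitly checks rcu_read_lock_held() */
        if (p)
                do_something(p->a);     /* must not block in a !PREEMPT kernel */
        rcu_read_unlock();
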
 /**
- * rcu_read_lock - mark the beginning of an RCU read-side critical section.
+ * rcu_read_lock() - mark the beginning of an RCU read-side critical section
  *
  * When synchronize_rcu() is invoked on one CPU while other CPUs
  * are within RCU read-side critical sections, then the
@@ -302,7 +557,7 @@
  * until after all the other CPUs exit their critical sections.
  *
  * Note, however, that RCU callbacks are permitted to run concurrently
- * with RCU read-side critical sections.  One way that this can happen
+ * with new RCU read-side critical sections.  One way that this can happen
  * is via the following sequence of events: (1) CPU 0 enters an RCU
  * read-side critical section, (2) CPU 1 invokes call_rcu() to register
  * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
@@ -317,7 +572,20 @@
  * will be deferred until the outermost RCU read-side critical section
  * completes.
  *
- * It is illegal to block while in an RCU read-side critical section.
+ * You can avoid reading and understanding the next paragraph by
+ * following this rule: don't put anything in an rcu_read_lock() RCU
+ * read-side critical section that would block in a !PREEMPT kernel.
+ * But if you want the full story, read on!
+ *
+ * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
+ * is illegal to block while in an RCU read-side critical section.  In
+ * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
+ * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
+ * be preempted, but explicit blocking is illegal.  Finally, in preemptible
+ * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
+ * RCU read-side critical sections may be preempted and they may also
+ * block, but only when acquiring spinlocks that are subject to priority
+ * inheritance.
  */
 static inline void rcu_read_lock(void)
 {
@@ -337,7 +605,7 @@ static inline void rcu_read_lock(void)
  */
 
 /**
- * rcu_read_unlock - marks the end of an RCU read-side critical section.
+ * rcu_read_unlock() - marks the end of an RCU read-side critical section.
  *
  * See rcu_read_lock() for more information.
  */
@@ -349,15 +617,16 @@ static inline void rcu_read_unlock(void)
 }
 
 /**
- * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
+ * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  *
  * This is equivalent to rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh().  Since call_rcu_bh() callbacks
- * consider completion of a softirq handler to be a quiescent state,
- * a process in RCU read-side critical section must be protected by
- * disabling softirqs. Read-side critical sections in interrupt context
- * can use just rcu_read_lock().
- *
+ * are being done using call_rcu_bh() or synchronize_rcu_bh().  Since
+ * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
+ * softirq handler to be a quiescent state, a process in RCU read-side
+ * critical section must be protected by disabling softirqs. Read-side
+ * critical sections in interrupt context can use just rcu_read_lock(),
+ * though this should at least be commented to avoid confusing people
+ * reading the code.
  */
 static inline void rcu_read_lock_bh(void)
 {
@@ -379,13 +648,12 @@ static inline void rcu_read_unlock_bh(void)
 }
 
 /**
- * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
+ * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
  *
- * Should be used with either
- * - synchronize_sched()
- * or
- * - call_rcu_sched() and rcu_barrier_sched()
- * on the write-side to insure proper synchronization.
+ * This is equivalent to rcu_read_lock(), but to be used when updates
+ * are being done using call_rcu_sched() or synchronize_rcu_sched().
+ * Read-side critical sections can also be introduced by anything that
+ * disables preemption, including local_irq_disable() and friends.
  */
 static inline void rcu_read_lock_sched(void)
 {
@@ -420,54 +688,14 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
         preempt_enable_notrace();
 }
 
-
 /**
- * rcu_dereference_raw - fetch an RCU-protected pointer
+ * rcu_assign_pointer() - assign to RCU-protected pointer
+ * @p: pointer to assign to
+ * @v: value to assign (publish)
  *
- * The caller must be within some flavor of RCU read-side critical
- * section, or must be otherwise preventing the pointer from changing,
- * for example, by holding an appropriate lock.  This pointer may later
- * be safely dereferenced.  It is the caller's responsibility to have
- * done the right thing, as this primitive does no checking of any kind.
- *
- * Inserts memory barriers on architectures that require them
- * (currently only the Alpha), and, more importantly, documents
- * exactly which pointers are protected by RCU.
- */
-#define rcu_dereference_raw(p)  ({ \
-                                typeof(p) _________p1 = ACCESS_ONCE(p); \
-                                smp_read_barrier_depends(); \
-                                (_________p1); \
-                                })
-
-/**
- * rcu_dereference - fetch an RCU-protected pointer, checking for RCU
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference(p) \
-        rcu_dereference_check(p, rcu_read_lock_held())
-
-/**
- * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference_bh(p) \
-        rcu_dereference_check(p, rcu_read_lock_bh_held() || irqs_disabled())
-
-/**
- * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched
- *
- * Makes rcu_dereference_check() do the dirty work.
- */
-#define rcu_dereference_sched(p) \
-        rcu_dereference_check(p, rcu_read_lock_sched_held())
-
-/**
- * rcu_assign_pointer - assign (publicize) a pointer to a newly
- * initialized structure that will be dereferenced by RCU read-side
- * critical sections.  Returns the value assigned.
+ * Assigns the specified value to the specified RCU-protected
+ * pointer, ensuring that any concurrent RCU readers will see
+ * any prior initialization.  Returns the value assigned.
  *
  * Inserts memory barriers on architectures that require them
  * (pretty much all of them other than x86), and also prevents
@@ -476,14 +704,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
  * call documents which pointers will be dereferenced by RCU read-side
  * code.
  */
-
 #define rcu_assign_pointer(p, v) \
-        ({ \
-                if (!__builtin_constant_p(v) || \
-                    ((v) != NULL)) \
-                        smp_wmb(); \
-                (p) = (v); \
-        })
+        __rcu_assign_pointer((p), (v), __rcu)
+
+/**
+ * RCU_INIT_POINTER() - initialize an RCU protected pointer
+ *
+ * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep
+ * splats.
+ */
+#define RCU_INIT_POINTER(p, v) \
+                p = (typeof(*v) __force __rcu *)(v)
 
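And the matching publish side, a sketch using the same hypothetical gp:

        struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

        p->a = 1;                       /* fully initialize... */
        rcu_assign_pointer(gp, p);      /* ...then publish, with smp_wmb() ordering */

        /* At init time, before readers can run, the barrier would be overkill: */
        RCU_INIT_POINTER(gp, NULL);     /* plain annotated assignment, no splat */
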
 /* Infrastructure to implement the synchronize_() primitives. */
 
@@ -494,26 +725,37 @@ struct rcu_synchronize {
 
 extern void wakeme_after_rcu(struct rcu_head *head);
 
+#ifdef CONFIG_PREEMPT_RCU
+
 /**
- * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
+ * @func: actual callback function to be invoked after the grace period
  *
- * The update function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed.  RCU read-side critical
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed.  However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked.  RCU read-side critical
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
  */
 extern void call_rcu(struct rcu_head *head,
                         void (*func)(struct rcu_head *head));
 
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/* In classic RCU, call_rcu() is just call_rcu_sched(). */
+#define call_rcu        call_rcu_sched
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
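A minimal call_rcu() usage sketch, with the structure and callback invented for illustration:

        struct foo {
                struct list_head list;
                struct rcu_head rcu;
        };

        static void foo_reclaim(struct rcu_head *rp)
        {
                kfree(container_of(rp, struct foo, rcu));
        }

        list_del_rcu(&p->list);         /* unlink from the readers' view... */
        call_rcu(&p->rcu, foo_reclaim); /* ...and free after a grace period */
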
 /**
- * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
+ * @func: actual callback function to be invoked after the grace period
  *
- * The update function will be invoked some time after a full grace
+ * The callback function will be invoked some time after a full grace
  * period elapses, in other words after all currently executing RCU
  * read-side critical sections have completed. call_rcu_bh() assumes
  * that the read-side critical sections end on completion of a softirq
@@ -543,6 +785,7 @@ extern struct debug_obj_descr rcuhead_debug_descr;
 
 static inline void debug_rcu_head_queue(struct rcu_head *head)
 {
+        WARN_ON_ONCE((unsigned long)head & 0x3);
         debug_object_activate(head, &rcuhead_debug_descr);
         debug_object_active_state(head, &rcuhead_debug_descr,
                                   STATE_RCU_HEAD_READY,
@@ -566,37 +809,60 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
-#ifndef CONFIG_PROVE_RCU
-#define __do_rcu_dereference_check(c) do { } while (0)
-#endif /* #ifdef CONFIG_PROVE_RCU */
+static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
+{
+        return offset < 4096;
+}
 
-#define __rcu_dereference_index_check(p, c) \
-        ({ \
-        typeof(p) _________p1 = ACCESS_ONCE(p); \
-        __do_rcu_dereference_check(c); \
-        smp_read_barrier_depends(); \
-        (_________p1); \
-        })
+static __always_inline
+void __kfree_rcu(struct rcu_head *head, unsigned long offset)
+{
+        typedef void (*rcu_callback)(struct rcu_head *);
+
+        BUILD_BUG_ON(!__builtin_constant_p(offset));
+
+        /* See the kfree_rcu() header comment. */
+        BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
+
+        call_rcu(head, (rcu_callback)offset);
+}
+
+extern void kfree(const void *);
+
+static inline void __rcu_reclaim(struct rcu_head *head)
+{
+        unsigned long offset = (unsigned long)head->func;
+
+        if (__is_kfree_rcu_offset(offset))
+                kfree((void *)head - offset);
+        else
+                head->func(head);
+}
 
 /**
- * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * Similar to rcu_dereference_check(), but omits the sparse checking.
- * This allows rcu_dereference_index_check() to be used on integers,
- * which can then be used as array indices.  Attempting to use
- * rcu_dereference_check() on an integer will give compiler warnings
- * because the sparse address-space mechanism relies on dereferencing
- * the RCU-protected pointer.  Dereferencing integers is not something
- * that even gcc will put up with.
- *
- * Note that this function does not implicitly check for RCU read-side
- * critical sections.  If this function gains lots of uses, it might
- * make sense to provide versions for each flavor of RCU, but it does
- * not make sense as of early 2010.
+ * kfree_rcu() - kfree an object after a grace period.
+ * @ptr:        pointer to kfree
+ * @rcu_head:   the name of the struct rcu_head within the type of @ptr.
+ *
+ * Many rcu callback functions just call kfree() on the base structure.
+ * These functions are trivial, but their size adds up, and furthermore
+ * when they are used in a kernel module, that module must invoke the
+ * high-latency rcu_barrier() function at module-unload time.
+ *
+ * The kfree_rcu() function handles this issue.  Rather than encoding a
+ * function address in the embedded rcu_head structure, kfree_rcu() instead
+ * encodes the offset of the rcu_head structure within the base structure.
+ * Because the functions are not allowed in the low-order 4096 bytes of
+ * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
+ * If the offset is larger than 4095 bytes, a compile-time error will
+ * be generated in __kfree_rcu().  If this error is triggered, you can
+ * either fall back to use of call_rcu() or rearrange the structure to
+ * position the rcu_head structure into the first 4096 bytes.
+ *
+ * Note that the allowable offset might decrease in the future, for example,
+ * to allow something like kmem_cache_free_rcu().
  */
-#define rcu_dereference_index_check(p, c) \
-        __rcu_dereference_index_check((p), (c))
+#define kfree_rcu(ptr, rcu_head) \
+        __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
 
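Finally, a usage sketch for the new kfree_rcu() (hypothetical structure; note that the second argument is the name of the rcu_head field, not a pointer). It replaces exactly the kind of trivial foo_reclaim() callback shown earlier:

        struct foo {
                int a;
                struct rcu_head rcu;    /* must sit within the first 4096 bytes */
        };

        kfree_rcu(p, rcu);              /* p is a struct foo *; no callback needed */
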
 #endif /* __LINUX_RCUPDATE_H */