diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-10-26 10:26:53 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-10-26 10:26:53 -0400 |
| commit | 19b4a8d520a6e0176dd52aaa429261ad4fcaa545 (patch) | |
| tree | 6dcf5a780718fc50b9cd79cc803daa7c7e080a02 /include | |
| parent | 3cfef9524677a4ecb392d6fbffe6ebce6302f1d4 (diff) | |
| parent | 048b718029033af117870d3da47da12995be14a3 (diff) | |
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits)
rcu: Move propagation of ->completed from rcu_start_gp() to rcu_report_qs_rsp()
rcu: Remove rcu_needs_cpu_flush() to avoid false quiescent states
rcu: Wire up RCU_BOOST_PRIO for rcutree
rcu: Make rcu_torture_boost() exit loops at end of test
rcu: Make rcu_torture_fqs() exit loops at end of test
rcu: Permit rt_mutex_unlock() with irqs disabled
rcu: Avoid having just-onlined CPU resched itself when RCU is idle
rcu: Suppress NMI backtraces when stall ends before dump
rcu: Prohibit grace periods during early boot
rcu: Simplify unboosting checks
rcu: Prevent early boot set_need_resched() from __rcu_pending()
rcu: Dump local stack if cannot dump all CPUs' stacks
rcu: Move __rcu_read_unlock()'s barrier() within if-statement
rcu: Improve rcu_assign_pointer() and RCU_INIT_POINTER() documentation
rcu: Make rcu_assign_pointer() unconditionally insert a memory barrier
rcu: Make rcu_implicit_dynticks_qs() locals be correct size
rcu: Eliminate in_irq() checks in rcu_enter_nohz()
nohz: Remove nohz_cpu_mask
rcu: Document interpretation of RCU-lockdep splats
rcu: Allow rcutorture's stat_interval parameter to be changed at runtime
...
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/lockdep.h | 2 | ||||
| -rw-r--r-- | include/linux/rcupdate.h | 300 | ||||
| -rw-r--r-- | include/linux/rcutiny.h | 20 | ||||
| -rw-r--r-- | include/linux/rcutree.h | 2 | ||||
| -rw-r--r-- | include/linux/sched.h | 4 | ||||
| -rw-r--r-- | include/linux/types.h | 10 | ||||
| -rw-r--r-- | include/trace/events/rcu.h | 459 |
7 files changed, 628 insertions(+), 169 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index ef820a3c378b..b6a56e37284c 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
| @@ -548,7 +548,7 @@ do { \ | |||
| 548 | #endif | 548 | #endif |
| 549 | 549 | ||
| 550 | #ifdef CONFIG_PROVE_RCU | 550 | #ifdef CONFIG_PROVE_RCU |
| 551 | extern void lockdep_rcu_dereference(const char *file, const int line); | 551 | void lockdep_rcu_suspicious(const char *file, const int line, const char *s); |
| 552 | #endif | 552 | #endif |
| 553 | 553 | ||
| 554 | #endif /* __LINUX_LOCKDEP_H */ | 554 | #endif /* __LINUX_LOCKDEP_H */ |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 8f4f881a0ad8..2cf4226ade7e 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #ifndef __LINUX_RCUPDATE_H | 33 | #ifndef __LINUX_RCUPDATE_H |
| 34 | #define __LINUX_RCUPDATE_H | 34 | #define __LINUX_RCUPDATE_H |
| 35 | 35 | ||
| 36 | #include <linux/types.h> | ||
| 36 | #include <linux/cache.h> | 37 | #include <linux/cache.h> |
| 37 | #include <linux/spinlock.h> | 38 | #include <linux/spinlock.h> |
| 38 | #include <linux/threads.h> | 39 | #include <linux/threads.h> |
| @@ -64,32 +65,74 @@ static inline void rcutorture_record_progress(unsigned long vernum) | |||
| 64 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) | 65 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) |
| 65 | #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) | 66 | #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) |
| 66 | 67 | ||
| 68 | /* Exported common interfaces */ | ||
| 69 | |||
| 70 | #ifdef CONFIG_PREEMPT_RCU | ||
| 71 | |||
| 67 | /** | 72 | /** |
| 68 | * struct rcu_head - callback structure for use with RCU | 73 | * call_rcu() - Queue an RCU callback for invocation after a grace period. |
| 69 | * @next: next update requests in a list | 74 | * @head: structure to be used for queueing the RCU updates. |
| 70 | * @func: actual update function to call after the grace period. | 75 | * @func: actual callback function to be invoked after the grace period |
| 76 | * | ||
| 77 | * The callback function will be invoked some time after a full grace | ||
| 78 | * period elapses, in other words after all pre-existing RCU read-side | ||
| 79 | * critical sections have completed. However, the callback function | ||
| 80 | * might well execute concurrently with RCU read-side critical sections | ||
| 81 | * that started after call_rcu() was invoked. RCU read-side critical | ||
| 82 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | ||
| 83 | * and may be nested. | ||
| 71 | */ | 84 | */ |
| 72 | struct rcu_head { | 85 | extern void call_rcu(struct rcu_head *head, |
| 73 | struct rcu_head *next; | 86 | void (*func)(struct rcu_head *head)); |
| 74 | void (*func)(struct rcu_head *head); | ||
| 75 | }; | ||
| 76 | 87 | ||
| 77 | /* Exported common interfaces */ | 88 | #else /* #ifdef CONFIG_PREEMPT_RCU */ |
| 89 | |||
| 90 | /* In classic RCU, call_rcu() is just call_rcu_sched(). */ | ||
| 91 | #define call_rcu call_rcu_sched | ||
| 92 | |||
| 93 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | ||
| 94 | |||
| 95 | /** | ||
| 96 | * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. | ||
| 97 | * @head: structure to be used for queueing the RCU updates. | ||
| 98 | * @func: actual callback function to be invoked after the grace period | ||
| 99 | * | ||
| 100 | * The callback function will be invoked some time after a full grace | ||
| 101 | * period elapses, in other words after all currently executing RCU | ||
| 102 | * read-side critical sections have completed. call_rcu_bh() assumes | ||
| 103 | * that the read-side critical sections end on completion of a softirq | ||
| 104 | * handler. This means that read-side critical sections in process | ||
| 105 | * context must not be interrupted by softirqs. This interface is to be | ||
| 106 | * used when most of the read-side critical sections are in softirq context. | ||
| 107 | * RCU read-side critical sections are delimited by : | ||
| 108 | * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. | ||
| 109 | * OR | ||
| 110 | * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. | ||
| 111 | * These may be nested. | ||
| 112 | */ | ||
| 113 | extern void call_rcu_bh(struct rcu_head *head, | ||
| 114 | void (*func)(struct rcu_head *head)); | ||
| 115 | |||
| 116 | /** | ||
| 117 | * call_rcu_sched() - Queue an RCU for invocation after sched grace period. | ||
| 118 | * @head: structure to be used for queueing the RCU updates. | ||
| 119 | * @func: actual callback function to be invoked after the grace period | ||
| 120 | * | ||
| 121 | * The callback function will be invoked some time after a full grace | ||
| 122 | * period elapses, in other words after all currently executing RCU | ||
| 123 | * read-side critical sections have completed. call_rcu_sched() assumes | ||
| 124 | * that the read-side critical sections end on enabling of preemption | ||
| 125 | * or on voluntary preemption. | ||
| 126 | * RCU read-side critical sections are delimited by : | ||
| 127 | * - rcu_read_lock_sched() and rcu_read_unlock_sched(), | ||
| 128 | * OR | ||
| 129 | * anything that disables preemption. | ||
| 130 | * These may be nested. | ||
| 131 | */ | ||
| 78 | extern void call_rcu_sched(struct rcu_head *head, | 132 | extern void call_rcu_sched(struct rcu_head *head, |
| 79 | void (*func)(struct rcu_head *rcu)); | 133 | void (*func)(struct rcu_head *rcu)); |
| 80 | extern void synchronize_sched(void); | ||
| 81 | extern void rcu_barrier_bh(void); | ||
| 82 | extern void rcu_barrier_sched(void); | ||
| 83 | |||
| 84 | static inline void __rcu_read_lock_bh(void) | ||
| 85 | { | ||
| 86 | local_bh_disable(); | ||
| 87 | } | ||
| 88 | 134 | ||
| 89 | static inline void __rcu_read_unlock_bh(void) | 135 | extern void synchronize_sched(void); |
| 90 | { | ||
| 91 | local_bh_enable(); | ||
| 92 | } | ||
| 93 | 136 | ||
| 94 | #ifdef CONFIG_PREEMPT_RCU | 137 | #ifdef CONFIG_PREEMPT_RCU |
| 95 | 138 | ||
| @@ -152,6 +195,15 @@ static inline void rcu_exit_nohz(void) | |||
| 152 | 195 | ||
| 153 | #endif /* #else #ifdef CONFIG_NO_HZ */ | 196 | #endif /* #else #ifdef CONFIG_NO_HZ */ |
| 154 | 197 | ||
| 198 | /* | ||
| 199 | * Infrastructure to implement the synchronize_() primitives in | ||
| 200 | * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. | ||
| 201 | */ | ||
| 202 | |||
| 203 | typedef void call_rcu_func_t(struct rcu_head *head, | ||
| 204 | void (*func)(struct rcu_head *head)); | ||
| 205 | void wait_rcu_gp(call_rcu_func_t crf); | ||
| 206 | |||
| 155 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 207 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
| 156 | #include <linux/rcutree.h> | 208 | #include <linux/rcutree.h> |
| 157 | #elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) | 209 | #elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) |
| @@ -297,19 +349,31 @@ extern int rcu_my_thread_group_empty(void); | |||
| 297 | /** | 349 | /** |
| 298 | * rcu_lockdep_assert - emit lockdep splat if specified condition not met | 350 | * rcu_lockdep_assert - emit lockdep splat if specified condition not met |
| 299 | * @c: condition to check | 351 | * @c: condition to check |
| 352 | * @s: informative message | ||
| 300 | */ | 353 | */ |
| 301 | #define rcu_lockdep_assert(c) \ | 354 | #define rcu_lockdep_assert(c, s) \ |
| 302 | do { \ | 355 | do { \ |
| 303 | static bool __warned; \ | 356 | static bool __warned; \ |
| 304 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ | 357 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ |
| 305 | __warned = true; \ | 358 | __warned = true; \ |
| 306 | lockdep_rcu_dereference(__FILE__, __LINE__); \ | 359 | lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ |
| 307 | } \ | 360 | } \ |
| 308 | } while (0) | 361 | } while (0) |
| 309 | 362 | ||
| 363 | #define rcu_sleep_check() \ | ||
| 364 | do { \ | ||
| 365 | rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \ | ||
| 366 | "Illegal context switch in RCU-bh" \ | ||
| 367 | " read-side critical section"); \ | ||
| 368 | rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \ | ||
| 369 | "Illegal context switch in RCU-sched"\ | ||
| 370 | " read-side critical section"); \ | ||
| 371 | } while (0) | ||
| 372 | |||
| 310 | #else /* #ifdef CONFIG_PROVE_RCU */ | 373 | #else /* #ifdef CONFIG_PROVE_RCU */ |
| 311 | 374 | ||
| 312 | #define rcu_lockdep_assert(c) do { } while (0) | 375 | #define rcu_lockdep_assert(c, s) do { } while (0) |
| 376 | #define rcu_sleep_check() do { } while (0) | ||
| 313 | 377 | ||
| 314 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | 378 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ |
| 315 | 379 | ||
| @@ -338,14 +402,16 @@ extern int rcu_my_thread_group_empty(void); | |||
| 338 | #define __rcu_dereference_check(p, c, space) \ | 402 | #define __rcu_dereference_check(p, c, space) \ |
| 339 | ({ \ | 403 | ({ \ |
| 340 | typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ | 404 | typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ |
| 341 | rcu_lockdep_assert(c); \ | 405 | rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \ |
| 406 | " usage"); \ | ||
| 342 | rcu_dereference_sparse(p, space); \ | 407 | rcu_dereference_sparse(p, space); \ |
| 343 | smp_read_barrier_depends(); \ | 408 | smp_read_barrier_depends(); \ |
| 344 | ((typeof(*p) __force __kernel *)(_________p1)); \ | 409 | ((typeof(*p) __force __kernel *)(_________p1)); \ |
| 345 | }) | 410 | }) |
| 346 | #define __rcu_dereference_protected(p, c, space) \ | 411 | #define __rcu_dereference_protected(p, c, space) \ |
| 347 | ({ \ | 412 | ({ \ |
| 348 | rcu_lockdep_assert(c); \ | 413 | rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \ |
| 414 | " usage"); \ | ||
| 349 | rcu_dereference_sparse(p, space); \ | 415 | rcu_dereference_sparse(p, space); \ |
| 350 | ((typeof(*p) __force __kernel *)(p)); \ | 416 | ((typeof(*p) __force __kernel *)(p)); \ |
| 351 | }) | 417 | }) |
| @@ -359,15 +425,15 @@ extern int rcu_my_thread_group_empty(void); | |||
| 359 | #define __rcu_dereference_index_check(p, c) \ | 425 | #define __rcu_dereference_index_check(p, c) \ |
| 360 | ({ \ | 426 | ({ \ |
| 361 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | 427 | typeof(p) _________p1 = ACCESS_ONCE(p); \ |
| 362 | rcu_lockdep_assert(c); \ | 428 | rcu_lockdep_assert(c, \ |
| 429 | "suspicious rcu_dereference_index_check()" \ | ||
| 430 | " usage"); \ | ||
| 363 | smp_read_barrier_depends(); \ | 431 | smp_read_barrier_depends(); \ |
| 364 | (_________p1); \ | 432 | (_________p1); \ |
| 365 | }) | 433 | }) |
| 366 | #define __rcu_assign_pointer(p, v, space) \ | 434 | #define __rcu_assign_pointer(p, v, space) \ |
| 367 | ({ \ | 435 | ({ \ |
| 368 | if (!__builtin_constant_p(v) || \ | 436 | smp_wmb(); \ |
| 369 | ((v) != NULL)) \ | ||
| 370 | smp_wmb(); \ | ||
| 371 | (p) = (typeof(*v) __force space *)(v); \ | 437 | (p) = (typeof(*v) __force space *)(v); \ |
| 372 | }) | 438 | }) |
| 373 | 439 | ||
| @@ -500,26 +566,6 @@ extern int rcu_my_thread_group_empty(void); | |||
| 500 | #define rcu_dereference_protected(p, c) \ | 566 | #define rcu_dereference_protected(p, c) \ |
| 501 | __rcu_dereference_protected((p), (c), __rcu) | 567 | __rcu_dereference_protected((p), (c), __rcu) |
| 502 | 568 | ||
| 503 | /** | ||
| 504 | * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented | ||
| 505 | * @p: The pointer to read, prior to dereferencing | ||
| 506 | * @c: The conditions under which the dereference will take place | ||
| 507 | * | ||
| 508 | * This is the RCU-bh counterpart to rcu_dereference_protected(). | ||
| 509 | */ | ||
| 510 | #define rcu_dereference_bh_protected(p, c) \ | ||
| 511 | __rcu_dereference_protected((p), (c), __rcu) | ||
| 512 | |||
| 513 | /** | ||
| 514 | * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented | ||
| 515 | * @p: The pointer to read, prior to dereferencing | ||
| 516 | * @c: The conditions under which the dereference will take place | ||
| 517 | * | ||
| 518 | * This is the RCU-sched counterpart to rcu_dereference_protected(). | ||
| 519 | */ | ||
| 520 | #define rcu_dereference_sched_protected(p, c) \ | ||
| 521 | __rcu_dereference_protected((p), (c), __rcu) | ||
| 522 | |||
| 523 | 569 | ||
| 524 | /** | 570 | /** |
| 525 | * rcu_dereference() - fetch RCU-protected pointer for dereferencing | 571 | * rcu_dereference() - fetch RCU-protected pointer for dereferencing |
| @@ -630,7 +676,7 @@ static inline void rcu_read_unlock(void) | |||
| 630 | */ | 676 | */ |
| 631 | static inline void rcu_read_lock_bh(void) | 677 | static inline void rcu_read_lock_bh(void) |
| 632 | { | 678 | { |
| 633 | __rcu_read_lock_bh(); | 679 | local_bh_disable(); |
| 634 | __acquire(RCU_BH); | 680 | __acquire(RCU_BH); |
| 635 | rcu_read_acquire_bh(); | 681 | rcu_read_acquire_bh(); |
| 636 | } | 682 | } |
| @@ -644,7 +690,7 @@ static inline void rcu_read_unlock_bh(void) | |||
| 644 | { | 690 | { |
| 645 | rcu_read_release_bh(); | 691 | rcu_read_release_bh(); |
| 646 | __release(RCU_BH); | 692 | __release(RCU_BH); |
| 647 | __rcu_read_unlock_bh(); | 693 | local_bh_enable(); |
| 648 | } | 694 | } |
| 649 | 695 | ||
| 650 | /** | 696 | /** |
| @@ -698,11 +744,18 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
| 698 | * any prior initialization. Returns the value assigned. | 744 | * any prior initialization. Returns the value assigned. |
| 699 | * | 745 | * |
| 700 | * Inserts memory barriers on architectures that require them | 746 | * Inserts memory barriers on architectures that require them |
| 701 | * (pretty much all of them other than x86), and also prevents | 747 | * (which is most of them), and also prevents the compiler from |
| 702 | * the compiler from reordering the code that initializes the | 748 | * reordering the code that initializes the structure after the pointer |
| 703 | * structure after the pointer assignment. More importantly, this | 749 | * assignment. More importantly, this call documents which pointers |
| 704 | * call documents which pointers will be dereferenced by RCU read-side | 750 | * will be dereferenced by RCU read-side code. |
| 705 | * code. | 751 | * |
| 752 | * In some special cases, you may use RCU_INIT_POINTER() instead | ||
| 753 | * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due | ||
| 754 | * to the fact that it does not constrain either the CPU or the compiler. | ||
| 755 | * That said, using RCU_INIT_POINTER() when you should have used | ||
| 756 | * rcu_assign_pointer() is a very bad thing that results in | ||
| 757 | * impossible-to-diagnose memory corruption. So please be careful. | ||
| 758 | * See the RCU_INIT_POINTER() comment header for details. | ||
| 706 | */ | 759 | */ |
| 707 | #define rcu_assign_pointer(p, v) \ | 760 | #define rcu_assign_pointer(p, v) \ |
| 708 | __rcu_assign_pointer((p), (v), __rcu) | 761 | __rcu_assign_pointer((p), (v), __rcu) |
| @@ -710,105 +763,38 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
| 710 | /** | 763 | /** |
| 711 | * RCU_INIT_POINTER() - initialize an RCU protected pointer | 764 | * RCU_INIT_POINTER() - initialize an RCU protected pointer |
| 712 | * | 765 | * |
| 713 | * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep | 766 | * Initialize an RCU-protected pointer in special cases where readers |
| 714 | * splats. | 767 | * do not need ordering constraints on the CPU or the compiler. These |
| 768 | * special cases are: | ||
| 769 | * | ||
| 770 | * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or- | ||
| 771 | * 2. The caller has taken whatever steps are required to prevent | ||
| 772 | * RCU readers from concurrently accessing this pointer -or- | ||
| 773 | * 3. The referenced data structure has already been exposed to | ||
| 774 | * readers either at compile time or via rcu_assign_pointer() -and- | ||
| 775 | * a. You have not made -any- reader-visible changes to | ||
| 776 | * this structure since then -or- | ||
| 777 | * b. It is OK for readers accessing this structure from its | ||
| 778 | * new location to see the old state of the structure. (For | ||
| 779 | * example, the changes were to statistical counters or to | ||
| 780 | * other state where exact synchronization is not required.) | ||
| 781 | * | ||
| 782 | * Failure to follow these rules governing use of RCU_INIT_POINTER() will | ||
| 783 | * result in impossible-to-diagnose memory corruption. As in the structures | ||
| 784 | * will look OK in crash dumps, but any concurrent RCU readers might | ||
| 785 | * see pre-initialized values of the referenced data structure. So | ||
| 786 | * please be very careful how you use RCU_INIT_POINTER()!!! | ||
| 787 | * | ||
| 788 | * If you are creating an RCU-protected linked structure that is accessed | ||
| 789 | * by a single external-to-structure RCU-protected pointer, then you may | ||
| 790 | * use RCU_INIT_POINTER() to initialize the internal RCU-protected | ||
| 791 | * pointers, but you must use rcu_assign_pointer() to initialize the | ||
| 792 | * external-to-structure pointer -after- you have completely initialized | ||
| 793 | * the reader-accessible portions of the linked structure. | ||
| 715 | */ | 794 | */ |
| 716 | #define RCU_INIT_POINTER(p, v) \ | 795 | #define RCU_INIT_POINTER(p, v) \ |
| 717 | p = (typeof(*v) __force __rcu *)(v) | 796 | p = (typeof(*v) __force __rcu *)(v) |
| 718 | 797 | ||
| 719 | /* Infrastructure to implement the synchronize_() primitives. */ | ||
| 720 | |||
| 721 | struct rcu_synchronize { | ||
| 722 | struct rcu_head head; | ||
| 723 | struct completion completion; | ||
| 724 | }; | ||
| 725 | |||
| 726 | extern void wakeme_after_rcu(struct rcu_head *head); | ||
| 727 | |||
| 728 | #ifdef CONFIG_PREEMPT_RCU | ||
| 729 | |||
| 730 | /** | ||
| 731 | * call_rcu() - Queue an RCU callback for invocation after a grace period. | ||
| 732 | * @head: structure to be used for queueing the RCU updates. | ||
| 733 | * @func: actual callback function to be invoked after the grace period | ||
| 734 | * | ||
| 735 | * The callback function will be invoked some time after a full grace | ||
| 736 | * period elapses, in other words after all pre-existing RCU read-side | ||
| 737 | * critical sections have completed. However, the callback function | ||
| 738 | * might well execute concurrently with RCU read-side critical sections | ||
| 739 | * that started after call_rcu() was invoked. RCU read-side critical | ||
| 740 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | ||
| 741 | * and may be nested. | ||
| 742 | */ | ||
| 743 | extern void call_rcu(struct rcu_head *head, | ||
| 744 | void (*func)(struct rcu_head *head)); | ||
| 745 | |||
| 746 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | ||
| 747 | |||
| 748 | /* In classic RCU, call_rcu() is just call_rcu_sched(). */ | ||
| 749 | #define call_rcu call_rcu_sched | ||
| 750 | |||
| 751 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | ||
| 752 | |||
| 753 | /** | ||
| 754 | * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. | ||
| 755 | * @head: structure to be used for queueing the RCU updates. | ||
| 756 | * @func: actual callback function to be invoked after the grace period | ||
| 757 | * | ||
| 758 | * The callback function will be invoked some time after a full grace | ||
| 759 | * period elapses, in other words after all currently executing RCU | ||
| 760 | * read-side critical sections have completed. call_rcu_bh() assumes | ||
| 761 | * that the read-side critical sections end on completion of a softirq | ||
| 762 | * handler. This means that read-side critical sections in process | ||
| 763 | * context must not be interrupted by softirqs. This interface is to be | ||
| 764 | * used when most of the read-side critical sections are in softirq context. | ||
| 765 | * RCU read-side critical sections are delimited by : | ||
| 766 | * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. | ||
| 767 | * OR | ||
| 768 | * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. | ||
| 769 | * These may be nested. | ||
| 770 | */ | ||
| 771 | extern void call_rcu_bh(struct rcu_head *head, | ||
| 772 | void (*func)(struct rcu_head *head)); | ||
| 773 | |||
| 774 | /* | ||
| 775 | * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally | ||
| 776 | * by call_rcu() and rcu callback execution, and are therefore not part of the | ||
| 777 | * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors. | ||
| 778 | */ | ||
| 779 | |||
| 780 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD | ||
| 781 | # define STATE_RCU_HEAD_READY 0 | ||
| 782 | # define STATE_RCU_HEAD_QUEUED 1 | ||
| 783 | |||
| 784 | extern struct debug_obj_descr rcuhead_debug_descr; | ||
| 785 | |||
| 786 | static inline void debug_rcu_head_queue(struct rcu_head *head) | ||
| 787 | { | ||
| 788 | WARN_ON_ONCE((unsigned long)head & 0x3); | ||
| 789 | debug_object_activate(head, &rcuhead_debug_descr); | ||
| 790 | debug_object_active_state(head, &rcuhead_debug_descr, | ||
| 791 | STATE_RCU_HEAD_READY, | ||
| 792 | STATE_RCU_HEAD_QUEUED); | ||
| 793 | } | ||
| 794 | |||
| 795 | static inline void debug_rcu_head_unqueue(struct rcu_head *head) | ||
| 796 | { | ||
| 797 | debug_object_active_state(head, &rcuhead_debug_descr, | ||
| 798 | STATE_RCU_HEAD_QUEUED, | ||
| 799 | STATE_RCU_HEAD_READY); | ||
| 800 | debug_object_deactivate(head, &rcuhead_debug_descr); | ||
| 801 | } | ||
| 802 | #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | ||
| 803 | static inline void debug_rcu_head_queue(struct rcu_head *head) | ||
| 804 | { | ||
| 805 | } | ||
| 806 | |||
| 807 | static inline void debug_rcu_head_unqueue(struct rcu_head *head) | ||
| 808 | { | ||
| 809 | } | ||
| 810 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | ||
| 811 | |||
| 812 | static __always_inline bool __is_kfree_rcu_offset(unsigned long offset) | 798 | static __always_inline bool __is_kfree_rcu_offset(unsigned long offset) |
| 813 | { | 799 | { |
| 814 | return offset < 4096; | 800 | return offset < 4096; |
| @@ -827,18 +813,6 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset) | |||
| 827 | call_rcu(head, (rcu_callback)offset); | 813 | call_rcu(head, (rcu_callback)offset); |
| 828 | } | 814 | } |
| 829 | 815 | ||
| 830 | extern void kfree(const void *); | ||
| 831 | |||
| 832 | static inline void __rcu_reclaim(struct rcu_head *head) | ||
| 833 | { | ||
| 834 | unsigned long offset = (unsigned long)head->func; | ||
| 835 | |||
| 836 | if (__is_kfree_rcu_offset(offset)) | ||
| 837 | kfree((void *)head - offset); | ||
| 838 | else | ||
| 839 | head->func(head); | ||
| 840 | } | ||
| 841 | |||
| 842 | /** | 816 | /** |
| 843 | * kfree_rcu() - kfree an object after a grace period. | 817 | * kfree_rcu() - kfree an object after a grace period. |
| 844 | * @ptr: pointer to kfree | 818 | * @ptr: pointer to kfree |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 52b3e0281fd0..00b7a5e493d2 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
| @@ -27,9 +27,23 @@ | |||
| 27 | 27 | ||
| 28 | #include <linux/cache.h> | 28 | #include <linux/cache.h> |
| 29 | 29 | ||
| 30 | #ifdef CONFIG_RCU_BOOST | ||
| 30 | static inline void rcu_init(void) | 31 | static inline void rcu_init(void) |
| 31 | { | 32 | { |
| 32 | } | 33 | } |
| 34 | #else /* #ifdef CONFIG_RCU_BOOST */ | ||
| 35 | void rcu_init(void); | ||
| 36 | #endif /* #else #ifdef CONFIG_RCU_BOOST */ | ||
| 37 | |||
| 38 | static inline void rcu_barrier_bh(void) | ||
| 39 | { | ||
| 40 | wait_rcu_gp(call_rcu_bh); | ||
| 41 | } | ||
| 42 | |||
| 43 | static inline void rcu_barrier_sched(void) | ||
| 44 | { | ||
| 45 | wait_rcu_gp(call_rcu_sched); | ||
| 46 | } | ||
| 33 | 47 | ||
| 34 | #ifdef CONFIG_TINY_RCU | 48 | #ifdef CONFIG_TINY_RCU |
| 35 | 49 | ||
| @@ -45,9 +59,13 @@ static inline void rcu_barrier(void) | |||
| 45 | 59 | ||
| 46 | #else /* #ifdef CONFIG_TINY_RCU */ | 60 | #else /* #ifdef CONFIG_TINY_RCU */ |
| 47 | 61 | ||
| 48 | void rcu_barrier(void); | ||
| 49 | void synchronize_rcu_expedited(void); | 62 | void synchronize_rcu_expedited(void); |
| 50 | 63 | ||
| 64 | static inline void rcu_barrier(void) | ||
| 65 | { | ||
| 66 | wait_rcu_gp(call_rcu); | ||
| 67 | } | ||
| 68 | |||
| 51 | #endif /* #else #ifdef CONFIG_TINY_RCU */ | 69 | #endif /* #else #ifdef CONFIG_TINY_RCU */ |
| 52 | 70 | ||
| 53 | static inline void synchronize_rcu_bh(void) | 71 | static inline void synchronize_rcu_bh(void) |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index e65d06634dd8..67458468f1a8 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
| @@ -67,6 +67,8 @@ static inline void synchronize_rcu_bh_expedited(void) | |||
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | extern void rcu_barrier(void); | 69 | extern void rcu_barrier(void); |
| 70 | extern void rcu_barrier_bh(void); | ||
| 71 | extern void rcu_barrier_sched(void); | ||
| 70 | 72 | ||
| 71 | extern unsigned long rcutorture_testseq; | 73 | extern unsigned long rcutorture_testseq; |
| 72 | extern unsigned long rcutorture_vernum; | 74 | extern unsigned long rcutorture_vernum; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 1be699dd32a5..ede8a6585e38 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -270,7 +270,6 @@ extern void init_idle_bootup_task(struct task_struct *idle); | |||
| 270 | 270 | ||
| 271 | extern int runqueue_is_locked(int cpu); | 271 | extern int runqueue_is_locked(int cpu); |
| 272 | 272 | ||
| 273 | extern cpumask_var_t nohz_cpu_mask; | ||
| 274 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) | 273 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) |
| 275 | extern void select_nohz_load_balancer(int stop_tick); | 274 | extern void select_nohz_load_balancer(int stop_tick); |
| 276 | extern int get_nohz_timer_target(void); | 275 | extern int get_nohz_timer_target(void); |
| @@ -1260,9 +1259,6 @@ struct task_struct { | |||
| 1260 | #ifdef CONFIG_PREEMPT_RCU | 1259 | #ifdef CONFIG_PREEMPT_RCU |
| 1261 | int rcu_read_lock_nesting; | 1260 | int rcu_read_lock_nesting; |
| 1262 | char rcu_read_unlock_special; | 1261 | char rcu_read_unlock_special; |
| 1263 | #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) | ||
| 1264 | int rcu_boosted; | ||
| 1265 | #endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */ | ||
| 1266 | struct list_head rcu_node_entry; | 1262 | struct list_head rcu_node_entry; |
| 1267 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | 1263 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ |
| 1268 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1264 | #ifdef CONFIG_TREE_PREEMPT_RCU |
diff --git a/include/linux/types.h b/include/linux/types.h index 176da8c1fbb1..57a97234bec1 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
| @@ -238,6 +238,16 @@ struct ustat { | |||
| 238 | char f_fpack[6]; | 238 | char f_fpack[6]; |
| 239 | }; | 239 | }; |
| 240 | 240 | ||
| 241 | /** | ||
| 242 | * struct rcu_head - callback structure for use with RCU | ||
| 243 | * @next: next update requests in a list | ||
| 244 | * @func: actual update function to call after the grace period. | ||
| 245 | */ | ||
| 246 | struct rcu_head { | ||
| 247 | struct rcu_head *next; | ||
| 248 | void (*func)(struct rcu_head *head); | ||
| 249 | }; | ||
| 250 | |||
| 241 | #endif /* __KERNEL__ */ | 251 | #endif /* __KERNEL__ */ |
| 242 | #endif /* __ASSEMBLY__ */ | 252 | #endif /* __ASSEMBLY__ */ |
| 243 | #endif /* _LINUX_TYPES_H */ | 253 | #endif /* _LINUX_TYPES_H */ |
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h new file mode 100644 index 000000000000..669fbd62ec25 --- /dev/null +++ b/include/trace/events/rcu.h | |||
| @@ -0,0 +1,459 @@ | |||
| 1 | #undef TRACE_SYSTEM | ||
| 2 | #define TRACE_SYSTEM rcu | ||
| 3 | |||
| 4 | #if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ) | ||
| 5 | #define _TRACE_RCU_H | ||
| 6 | |||
| 7 | #include <linux/tracepoint.h> | ||
| 8 | |||
| 9 | /* | ||
| 10 | * Tracepoint for start/end markers used for utilization calculations. | ||
| 11 | * By convention, the string is of the following forms: | ||
| 12 | * | ||
| 13 | * "Start <activity>" -- Mark the start of the specified activity, | ||
| 14 | * such as "context switch". Nesting is permitted. | ||
| 15 | * "End <activity>" -- Mark the end of the specified activity. | ||
| 16 | * | ||
| 17 | * An "@" character within "<activity>" is a comment character: Data | ||
| 18 | * reduction scripts will ignore the "@" and the remainder of the line. | ||
| 19 | */ | ||
| 20 | TRACE_EVENT(rcu_utilization, | ||
| 21 | |||
| 22 | TP_PROTO(char *s), | ||
| 23 | |||
| 24 | TP_ARGS(s), | ||
| 25 | |||
| 26 | TP_STRUCT__entry( | ||
| 27 | __field(char *, s) | ||
| 28 | ), | ||
| 29 | |||
| 30 | TP_fast_assign( | ||
| 31 | __entry->s = s; | ||
| 32 | ), | ||
| 33 | |||
| 34 | TP_printk("%s", __entry->s) | ||
| 35 | ); | ||
| 36 | |||
| 37 | #ifdef CONFIG_RCU_TRACE | ||
| 38 | |||
| 39 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | ||
| 40 | |||
| 41 | /* | ||
| 42 | * Tracepoint for grace-period events: starting and ending a grace | ||
| 43 | * period ("start" and "end", respectively), a CPU noting the start | ||
| 44 | * of a new grace period or the end of an old grace period ("cpustart" | ||
| 45 | * and "cpuend", respectively), a CPU passing through a quiescent | ||
| 46 | * state ("cpuqs"), a CPU coming online or going offline ("cpuonl" | ||
| 47 | * and "cpuofl", respectively), and a CPU being kicked for being too | ||
| 48 | * long in dyntick-idle mode ("kick"). | ||
| 49 | */ | ||
| 50 | TRACE_EVENT(rcu_grace_period, | ||
| 51 | |||
| 52 | TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent), | ||
| 53 | |||
| 54 | TP_ARGS(rcuname, gpnum, gpevent), | ||
| 55 | |||
| 56 | TP_STRUCT__entry( | ||
| 57 | __field(char *, rcuname) | ||
| 58 | __field(unsigned long, gpnum) | ||
| 59 | __field(char *, gpevent) | ||
| 60 | ), | ||
| 61 | |||
| 62 | TP_fast_assign( | ||
| 63 | __entry->rcuname = rcuname; | ||
| 64 | __entry->gpnum = gpnum; | ||
| 65 | __entry->gpevent = gpevent; | ||
| 66 | ), | ||
| 67 | |||
| 68 | TP_printk("%s %lu %s", | ||
| 69 | __entry->rcuname, __entry->gpnum, __entry->gpevent) | ||
| 70 | ); | ||
| 71 | |||
| 72 | /* | ||
| 73 | * Tracepoint for grace-period-initialization events. These are | ||
| 74 | * distinguished by the type of RCU, the new grace-period number, the | ||
| 75 | * rcu_node structure level, the starting and ending CPU covered by the | ||
| 76 | * rcu_node structure, and the mask of CPUs that will be waited for. | ||
| 77 | * All but the type of RCU are extracted from the rcu_node structure. | ||
| 78 | */ | ||
| 79 | TRACE_EVENT(rcu_grace_period_init, | ||
| 80 | |||
| 81 | TP_PROTO(char *rcuname, unsigned long gpnum, u8 level, | ||
| 82 | int grplo, int grphi, unsigned long qsmask), | ||
| 83 | |||
| 84 | TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask), | ||
| 85 | |||
| 86 | TP_STRUCT__entry( | ||
| 87 | __field(char *, rcuname) | ||
| 88 | __field(unsigned long, gpnum) | ||
| 89 | __field(u8, level) | ||
| 90 | __field(int, grplo) | ||
| 91 | __field(int, grphi) | ||
| 92 | __field(unsigned long, qsmask) | ||
| 93 | ), | ||
| 94 | |||
| 95 | TP_fast_assign( | ||
| 96 | __entry->rcuname = rcuname; | ||
| 97 | __entry->gpnum = gpnum; | ||
| 98 | __entry->level = level; | ||
| 99 | __entry->grplo = grplo; | ||
| 100 | __entry->grphi = grphi; | ||
| 101 | __entry->qsmask = qsmask; | ||
| 102 | ), | ||
| 103 | |||
| 104 | TP_printk("%s %lu %u %d %d %lx", | ||
| 105 | __entry->rcuname, __entry->gpnum, __entry->level, | ||
| 106 | __entry->grplo, __entry->grphi, __entry->qsmask) | ||
| 107 | ); | ||
| 108 | |||
| 109 | /* | ||
| 110 | * Tracepoint for tasks blocking within preemptible-RCU read-side | ||
| 111 | * critical sections. Track the type of RCU (which one day might | ||
| 112 | * include SRCU), the grace-period number that the task is blocking | ||
| 113 | * (the current or the next), and the task's PID. | ||
| 114 | */ | ||
| 115 | TRACE_EVENT(rcu_preempt_task, | ||
| 116 | |||
| 117 | TP_PROTO(char *rcuname, int pid, unsigned long gpnum), | ||
| 118 | |||
| 119 | TP_ARGS(rcuname, pid, gpnum), | ||
| 120 | |||
| 121 | TP_STRUCT__entry( | ||
| 122 | __field(char *, rcuname) | ||
| 123 | __field(unsigned long, gpnum) | ||
| 124 | __field(int, pid) | ||
| 125 | ), | ||
| 126 | |||
| 127 | TP_fast_assign( | ||
| 128 | __entry->rcuname = rcuname; | ||
| 129 | __entry->gpnum = gpnum; | ||
| 130 | __entry->pid = pid; | ||
| 131 | ), | ||
| 132 | |||
| 133 | TP_printk("%s %lu %d", | ||
| 134 | __entry->rcuname, __entry->gpnum, __entry->pid) | ||
| 135 | ); | ||
| 136 | |||
| 137 | /* | ||
| 138 | * Tracepoint for tasks that blocked within a given preemptible-RCU | ||
| 139 | * read-side critical section exiting that critical section. Track the | ||
| 140 | * type of RCU (which one day might include SRCU) and the task's PID. | ||
| 141 | */ | ||
| 142 | TRACE_EVENT(rcu_unlock_preempted_task, | ||
| 143 | |||
| 144 | TP_PROTO(char *rcuname, unsigned long gpnum, int pid), | ||
| 145 | |||
| 146 | TP_ARGS(rcuname, gpnum, pid), | ||
| 147 | |||
| 148 | TP_STRUCT__entry( | ||
| 149 | __field(char *, rcuname) | ||
| 150 | __field(unsigned long, gpnum) | ||
| 151 | __field(int, pid) | ||
| 152 | ), | ||
| 153 | |||
| 154 | TP_fast_assign( | ||
| 155 | __entry->rcuname = rcuname; | ||
| 156 | __entry->gpnum = gpnum; | ||
| 157 | __entry->pid = pid; | ||
| 158 | ), | ||
| 159 | |||
| 160 | TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid) | ||
| 161 | ); | ||
| 162 | |||
| 163 | /* | ||
| 164 | * Tracepoint for quiescent-state-reporting events. These are | ||
| 165 | * distinguished by the type of RCU, the grace-period number, the | ||
| 166 | * mask of quiescent lower-level entities, the rcu_node structure level, | ||
| 167 | * the starting and ending CPU covered by the rcu_node structure, and | ||
| 168 | * whether there are any blocked tasks blocking the current grace period. | ||
| 169 | * All but the type of RCU are extracted from the rcu_node structure. | ||
| 170 | */ | ||
| 171 | TRACE_EVENT(rcu_quiescent_state_report, | ||
| 172 | |||
| 173 | TP_PROTO(char *rcuname, unsigned long gpnum, | ||
| 174 | unsigned long mask, unsigned long qsmask, | ||
| 175 | u8 level, int grplo, int grphi, int gp_tasks), | ||
| 176 | |||
| 177 | TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks), | ||
| 178 | |||
| 179 | TP_STRUCT__entry( | ||
| 180 | __field(char *, rcuname) | ||
| 181 | __field(unsigned long, gpnum) | ||
| 182 | __field(unsigned long, mask) | ||
| 183 | __field(unsigned long, qsmask) | ||
| 184 | __field(u8, level) | ||
| 185 | __field(int, grplo) | ||
| 186 | __field(int, grphi) | ||
| 187 | __field(u8, gp_tasks) | ||
| 188 | ), | ||
| 189 | |||
| 190 | TP_fast_assign( | ||
| 191 | __entry->rcuname = rcuname; | ||
| 192 | __entry->gpnum = gpnum; | ||
| 193 | __entry->mask = mask; | ||
| 194 | __entry->qsmask = qsmask; | ||
| 195 | __entry->level = level; | ||
| 196 | __entry->grplo = grplo; | ||
| 197 | __entry->grphi = grphi; | ||
| 198 | __entry->gp_tasks = gp_tasks; | ||
| 199 | ), | ||
| 200 | |||
| 201 | TP_printk("%s %lu %lx>%lx %u %d %d %u", | ||
| 202 | __entry->rcuname, __entry->gpnum, | ||
| 203 | __entry->mask, __entry->qsmask, __entry->level, | ||
| 204 | __entry->grplo, __entry->grphi, __entry->gp_tasks) | ||
| 205 | ); | ||
| 206 | |||
| 207 | /* | ||
| 208 | * Tracepoint for quiescent states detected by force_quiescent_state(). | ||
| 209 | * These trace events include the type of RCU, the grace-period number | ||
| 210 | * that was blocked by the CPU, the CPU itself, and the type of quiescent | ||
| 211 | * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, | ||
| 212 | * or "kick" when kicking a CPU that has been in dyntick-idle mode for | ||
| 213 | * too long. | ||
| 214 | */ | ||
| 215 | TRACE_EVENT(rcu_fqs, | ||
| 216 | |||
| 217 | TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent), | ||
| 218 | |||
| 219 | TP_ARGS(rcuname, gpnum, cpu, qsevent), | ||
| 220 | |||
| 221 | TP_STRUCT__entry( | ||
| 222 | __field(char *, rcuname) | ||
| 223 | __field(unsigned long, gpnum) | ||
| 224 | __field(int, cpu) | ||
| 225 | __field(char *, qsevent) | ||
| 226 | ), | ||
| 227 | |||
| 228 | TP_fast_assign( | ||
| 229 | __entry->rcuname = rcuname; | ||
| 230 | __entry->gpnum = gpnum; | ||
| 231 | __entry->cpu = cpu; | ||
| 232 | __entry->qsevent = qsevent; | ||
| 233 | ), | ||
| 234 | |||
| 235 | TP_printk("%s %lu %d %s", | ||
| 236 | __entry->rcuname, __entry->gpnum, | ||
| 237 | __entry->cpu, __entry->qsevent) | ||
| 238 | ); | ||
| 239 | |||
| 240 | #endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */ | ||
| 241 | |||
| 242 | /* | ||
| 243 | * Tracepoint for dyntick-idle entry/exit events. These take a string | ||
| 244 | * as argument: "Start" for entering dyntick-idle mode and "End" for | ||
| 245 | * leaving it. | ||
| 246 | */ | ||
| 247 | TRACE_EVENT(rcu_dyntick, | ||
| 248 | |||
| 249 | TP_PROTO(char *polarity), | ||
| 250 | |||
| 251 | TP_ARGS(polarity), | ||
| 252 | |||
| 253 | TP_STRUCT__entry( | ||
| 254 | __field(char *, polarity) | ||
| 255 | ), | ||
| 256 | |||
| 257 | TP_fast_assign( | ||
| 258 | __entry->polarity = polarity; | ||
| 259 | ), | ||
| 260 | |||
| 261 | TP_printk("%s", __entry->polarity) | ||
| 262 | ); | ||
| 263 | |||
| 264 | /* | ||
| 265 | * Tracepoint for the registration of a single RCU callback function. | ||
| 266 | * The first argument is the type of RCU, the second argument is | ||
| 267 | * a pointer to the RCU callback itself, and the third element is the | ||
| 268 | * new RCU callback queue length for the current CPU. | ||
| 269 | */ | ||
| 270 | TRACE_EVENT(rcu_callback, | ||
| 271 | |||
| 272 | TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen), | ||
| 273 | |||
| 274 | TP_ARGS(rcuname, rhp, qlen), | ||
| 275 | |||
| 276 | TP_STRUCT__entry( | ||
| 277 | __field(char *, rcuname) | ||
| 278 | __field(void *, rhp) | ||
| 279 | __field(void *, func) | ||
| 280 | __field(long, qlen) | ||
| 281 | ), | ||
| 282 | |||
| 283 | TP_fast_assign( | ||
| 284 | __entry->rcuname = rcuname; | ||
| 285 | __entry->rhp = rhp; | ||
| 286 | __entry->func = rhp->func; | ||
| 287 | __entry->qlen = qlen; | ||
| 288 | ), | ||
| 289 | |||
| 290 | TP_printk("%s rhp=%p func=%pf %ld", | ||
| 291 | __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen) | ||
| 292 | ); | ||
| 293 | |||
| 294 | /* | ||
| 295 | * Tracepoint for the registration of a single RCU callback of the special | ||
| 296 | * kfree() form. The first argument is the RCU type, the second argument | ||
| 297 | * is a pointer to the RCU callback, the third argument is the offset | ||
| 298 | * of the callback within the enclosing RCU-protected data structure, | ||
| 299 | * and the fourth argument is the new RCU callback queue length for the | ||
| 300 | * current CPU. | ||
| 301 | */ | ||
| 302 | TRACE_EVENT(rcu_kfree_callback, | ||
| 303 | |||
| 304 | TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset, | ||
| 305 | long qlen), | ||
| 306 | |||
| 307 | TP_ARGS(rcuname, rhp, offset, qlen), | ||
| 308 | |||
| 309 | TP_STRUCT__entry( | ||
| 310 | __field(char *, rcuname) | ||
| 311 | __field(void *, rhp) | ||
| 312 | __field(unsigned long, offset) | ||
| 313 | __field(long, qlen) | ||
| 314 | ), | ||
| 315 | |||
| 316 | TP_fast_assign( | ||
| 317 | __entry->rcuname = rcuname; | ||
| 318 | __entry->rhp = rhp; | ||
| 319 | __entry->offset = offset; | ||
| 320 | __entry->qlen = qlen; | ||
| 321 | ), | ||
| 322 | |||
| 323 | TP_printk("%s rhp=%p func=%ld %ld", | ||
| 324 | __entry->rcuname, __entry->rhp, __entry->offset, | ||
| 325 | __entry->qlen) | ||
| 326 | ); | ||
| 327 | |||
| 328 | /* | ||
| 329 | * Tracepoint for marking the beginning rcu_do_batch, performed to start | ||
| 330 | * RCU callback invocation. The first argument is the RCU flavor, | ||
| 331 | * the second is the total number of callbacks (including those that | ||
| 332 | * are not yet ready to be invoked), and the third argument is the | ||
| 333 | * current RCU-callback batch limit. | ||
| 334 | */ | ||
| 335 | TRACE_EVENT(rcu_batch_start, | ||
| 336 | |||
| 337 | TP_PROTO(char *rcuname, long qlen, int blimit), | ||
| 338 | |||
| 339 | TP_ARGS(rcuname, qlen, blimit), | ||
| 340 | |||
| 341 | TP_STRUCT__entry( | ||
| 342 | __field(char *, rcuname) | ||
| 343 | __field(long, qlen) | ||
| 344 | __field(int, blimit) | ||
| 345 | ), | ||
| 346 | |||
| 347 | TP_fast_assign( | ||
| 348 | __entry->rcuname = rcuname; | ||
| 349 | __entry->qlen = qlen; | ||
| 350 | __entry->blimit = blimit; | ||
| 351 | ), | ||
| 352 | |||
| 353 | TP_printk("%s CBs=%ld bl=%d", | ||
| 354 | __entry->rcuname, __entry->qlen, __entry->blimit) | ||
| 355 | ); | ||
| 356 | |||
| 357 | /* | ||
| 358 | * Tracepoint for the invocation of a single RCU callback function. | ||
| 359 | * The first argument is the type of RCU, and the second argument is | ||
| 360 | * a pointer to the RCU callback itself. | ||
| 361 | */ | ||
| 362 | TRACE_EVENT(rcu_invoke_callback, | ||
| 363 | |||
| 364 | TP_PROTO(char *rcuname, struct rcu_head *rhp), | ||
| 365 | |||
| 366 | TP_ARGS(rcuname, rhp), | ||
| 367 | |||
| 368 | TP_STRUCT__entry( | ||
| 369 | __field(char *, rcuname) | ||
| 370 | __field(void *, rhp) | ||
| 371 | __field(void *, func) | ||
| 372 | ), | ||
| 373 | |||
| 374 | TP_fast_assign( | ||
| 375 | __entry->rcuname = rcuname; | ||
| 376 | __entry->rhp = rhp; | ||
| 377 | __entry->func = rhp->func; | ||
| 378 | ), | ||
| 379 | |||
| 380 | TP_printk("%s rhp=%p func=%pf", | ||
| 381 | __entry->rcuname, __entry->rhp, __entry->func) | ||
| 382 | ); | ||
| 383 | |||
| 384 | /* | ||
| 385 | * Tracepoint for the invocation of a single RCU callback of the special | ||
| 386 | * kfree() form. The first argument is the RCU flavor, the second | ||
| 387 | * argument is a pointer to the RCU callback, and the third argument | ||
| 388 | * is the offset of the callback within the enclosing RCU-protected | ||
| 389 | * data structure. | ||
| 390 | */ | ||
| 391 | TRACE_EVENT(rcu_invoke_kfree_callback, | ||
| 392 | |||
| 393 | TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset), | ||
| 394 | |||
| 395 | TP_ARGS(rcuname, rhp, offset), | ||
| 396 | |||
| 397 | TP_STRUCT__entry( | ||
| 398 | __field(char *, rcuname) | ||
| 399 | __field(void *, rhp) | ||
| 400 | __field(unsigned long, offset) | ||
| 401 | ), | ||
| 402 | |||
| 403 | TP_fast_assign( | ||
| 404 | __entry->rcuname = rcuname; | ||
| 405 | __entry->rhp = rhp; | ||
| 406 | __entry->offset = offset; | ||
| 407 | ), | ||
| 408 | |||
| 409 | TP_printk("%s rhp=%p func=%ld", | ||
| 410 | __entry->rcuname, __entry->rhp, __entry->offset) | ||
| 411 | ); | ||
| 412 | |||
| 413 | /* | ||
| 414 | * Tracepoint for exiting rcu_do_batch after RCU callbacks have been | ||
| 415 | * invoked. The first argument is the name of the RCU flavor and | ||
| 416 | * the second argument is number of callbacks actually invoked. | ||
| 417 | */ | ||
| 418 | TRACE_EVENT(rcu_batch_end, | ||
| 419 | |||
| 420 | TP_PROTO(char *rcuname, int callbacks_invoked), | ||
| 421 | |||
| 422 | TP_ARGS(rcuname, callbacks_invoked), | ||
| 423 | |||
| 424 | TP_STRUCT__entry( | ||
| 425 | __field(char *, rcuname) | ||
| 426 | __field(int, callbacks_invoked) | ||
| 427 | ), | ||
| 428 | |||
| 429 | TP_fast_assign( | ||
| 430 | __entry->rcuname = rcuname; | ||
| 431 | __entry->callbacks_invoked = callbacks_invoked; | ||
| 432 | ), | ||
| 433 | |||
| 434 | TP_printk("%s CBs-invoked=%d", | ||
| 435 | __entry->rcuname, __entry->callbacks_invoked) | ||
| 436 | ); | ||
| 437 | |||
| 438 | #else /* #ifdef CONFIG_RCU_TRACE */ | ||
| 439 | |||
| 440 | #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0) | ||
| 441 | #define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, qsmask) do { } while (0) | ||
| 442 | #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) | ||
| 443 | #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) | ||
| 444 | #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0) | ||
| 445 | #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) | ||
| 446 | #define trace_rcu_dyntick(polarity) do { } while (0) | ||
| 447 | #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0) | ||
| 448 | #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0) | ||
| 449 | #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0) | ||
| 450 | #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0) | ||
| 451 | #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) | ||
| 452 | #define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0) | ||
| 453 | |||
| 454 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | ||
| 455 | |||
| 456 | #endif /* _TRACE_RCU_H */ | ||
| 457 | |||
| 458 | /* This part must be outside protection */ | ||
| 459 | #include <trace/define_trace.h> | ||
