diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-11 20:00:04 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-11 20:00:04 -0500 |
| commit | 70fdcb83db15c85a0495b07dc55d9347a4c2efd9 (patch) | |
| tree | 5a7f073e87e14aacfc77a21a3bf37bc470143871 /include | |
| parent | edae583a6d4d1ad2eb73981787790993fef1bbad (diff) | |
| parent | 0e95c69bde1a5bf22acd53b356fe10d7bec6e2be (diff) | |
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:
"The main RCU changes in this cycle are:
- Idle entry/exit changes, to throttle callback execution and other
refinements to speed up kbuild, primarily to address performance
issues located by Tibor Billes.
- Grace-period related changes, primarily to aid in debugging,
inspired by an -rt debugging session.
- Code reorganization moving RCU's source files into its own
kernel/rcu/ directory.
- RCU documentation updates
- Miscellaneous fixes.
Note, the following commit:
5c889690aa08 mm: Place preemption point in do_mlockall() loop
is identical to the commit already in your tree via email:
22356f447ceb mm: Place preemption point in do_mlockall() loop
[ Your version of the changelog nicely demonstrates how kernel oops
messages should be trimmed properly :-/ ]"
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (30 commits)
rcu: Move RCU-related source code to kernel/rcu directory
rcu: Fix occurrence of "the the" in checklist.txt
kthread: Add pointer to vmstat-avoidance patch
rcu: Update stall-warning documentation
rcu: Consistent rcu_is_watching() naming
rcu: Change EXPORT_SYMBOL() to EXPORT_SYMBOL_GPL()
rcu: Is it safe to enter an RCU read-side critical section?
rcu: Throttle invoke_rcu_core() invocations due to non-lazy callbacks
rcu: Throttle rcu_try_advance_all_cbs() execution
rcu: Remove redundant code from rcu_cleanup_after_idle()
rcu: Fix CONFIG_RCU_NOCB_CPU_ALL panic on machines with sparse CPU mask
rcu: Avoid sparse warnings in rcu_nocb_wake trace event
rcu: Track rcu_nocb_kthread()'s sleeping and awakening
rcu: Distinguish between NOCB and non-NOCB rcu_callback trace events
rcu: Add tracing for rcuo no-CBs CPU wakeup handshake
rcu: Add tracing of normal (non-NOCB) grace-period requests
rcu: Add tracing to rcu_gp_kthread()
rcu: Flag lockless access to ->gp_flags with ACCESS_ONCE()
rcu: Prevent spurious-wakeup DoS attack on rcu_gp_kthread()
rcu: Improve grace-period start logic
...
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/rculist.h | 23 | ||||
| -rw-r--r-- | include/linux/rcupdate.h | 24 | ||||
| -rw-r--r-- | include/linux/rcutiny.h | 17 | ||||
| -rw-r--r-- | include/linux/rcutree.h | 2 | ||||
| -rw-r--r-- | include/trace/events/rcu.h | 80 |
5 files changed, 118 insertions, 28 deletions
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 4106721c4e5e..45a0a9e81478 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
| @@ -19,6 +19,21 @@ | |||
| 19 | */ | 19 | */ |
| 20 | 20 | ||
| 21 | /* | 21 | /* |
| 22 | * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers | ||
| 23 | * @list: list to be initialized | ||
| 24 | * | ||
| 25 | * You should instead use INIT_LIST_HEAD() for normal initialization and | ||
| 26 | * cleanup tasks, when readers have no access to the list being initialized. | ||
| 27 | * However, if the list being initialized is visible to readers, you | ||
| 28 | * need to keep the compiler from being too mischievous. | ||
| 29 | */ | ||
| 30 | static inline void INIT_LIST_HEAD_RCU(struct list_head *list) | ||
| 31 | { | ||
| 32 | ACCESS_ONCE(list->next) = list; | ||
| 33 | ACCESS_ONCE(list->prev) = list; | ||
| 34 | } | ||
| 35 | |||
| 36 | /* | ||
| 22 | * return the ->next pointer of a list_head in an rcu safe | 37 | * return the ->next pointer of a list_head in an rcu safe |
| 23 | * way, we must not access it directly | 38 | * way, we must not access it directly |
| 24 | */ | 39 | */ |
| @@ -191,9 +206,13 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
| 191 | if (list_empty(list)) | 206 | if (list_empty(list)) |
| 192 | return; | 207 | return; |
| 193 | 208 | ||
| 194 | /* "first" and "last" tracking list, so initialize it. */ | 209 | /* |
| 210 | * "first" and "last" tracking list, so initialize it. RCU readers | ||
| 211 | * have access to this list, so we must use INIT_LIST_HEAD_RCU() | ||
| 212 | * instead of INIT_LIST_HEAD(). | ||
| 213 | */ | ||
| 195 | 214 | ||
| 196 | INIT_LIST_HEAD(list); | 215 | INIT_LIST_HEAD_RCU(list); |
| 197 | 216 | ||
| 198 | /* | 217 | /* |
| 199 | * At this point, the list body still points to the source list. | 218 | * At this point, the list body still points to the source list. |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index f1f1bc39346b..39cbb889e20d 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev, | |||
| 261 | rcu_irq_exit(); \ | 261 | rcu_irq_exit(); \ |
| 262 | } while (0) | 262 | } while (0) |
| 263 | 263 | ||
| 264 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) | ||
| 265 | extern bool __rcu_is_watching(void); | ||
| 266 | #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ | ||
| 267 | |||
| 264 | /* | 268 | /* |
| 265 | * Infrastructure to implement the synchronize_() primitives in | 269 | * Infrastructure to implement the synchronize_() primitives in |
| 266 | * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. | 270 | * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. |
| @@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head) | |||
| 297 | } | 301 | } |
| 298 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 302 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
| 299 | 303 | ||
| 300 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) | ||
| 301 | extern int rcu_is_cpu_idle(void); | ||
| 302 | #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */ | ||
| 303 | |||
| 304 | #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) | 304 | #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) |
| 305 | bool rcu_lockdep_current_cpu_online(void); | 305 | bool rcu_lockdep_current_cpu_online(void); |
| 306 | #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ | 306 | #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ |
| @@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void) | |||
| 351 | { | 351 | { |
| 352 | if (!debug_lockdep_rcu_enabled()) | 352 | if (!debug_lockdep_rcu_enabled()) |
| 353 | return 1; | 353 | return 1; |
| 354 | if (rcu_is_cpu_idle()) | 354 | if (!rcu_is_watching()) |
| 355 | return 0; | 355 | return 0; |
| 356 | if (!rcu_lockdep_current_cpu_online()) | 356 | if (!rcu_lockdep_current_cpu_online()) |
| 357 | return 0; | 357 | return 0; |
| @@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 402 | 402 | ||
| 403 | if (!debug_lockdep_rcu_enabled()) | 403 | if (!debug_lockdep_rcu_enabled()) |
| 404 | return 1; | 404 | return 1; |
| 405 | if (rcu_is_cpu_idle()) | 405 | if (!rcu_is_watching()) |
| 406 | return 0; | 406 | return 0; |
| 407 | if (!rcu_lockdep_current_cpu_online()) | 407 | if (!rcu_lockdep_current_cpu_online()) |
| 408 | return 0; | 408 | return 0; |
| @@ -771,7 +771,7 @@ static inline void rcu_read_lock(void) | |||
| 771 | __rcu_read_lock(); | 771 | __rcu_read_lock(); |
| 772 | __acquire(RCU); | 772 | __acquire(RCU); |
| 773 | rcu_lock_acquire(&rcu_lock_map); | 773 | rcu_lock_acquire(&rcu_lock_map); |
| 774 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 774 | rcu_lockdep_assert(rcu_is_watching(), |
| 775 | "rcu_read_lock() used illegally while idle"); | 775 | "rcu_read_lock() used illegally while idle"); |
| 776 | } | 776 | } |
| 777 | 777 | ||
| @@ -792,7 +792,7 @@ static inline void rcu_read_lock(void) | |||
| 792 | */ | 792 | */ |
| 793 | static inline void rcu_read_unlock(void) | 793 | static inline void rcu_read_unlock(void) |
| 794 | { | 794 | { |
| 795 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 795 | rcu_lockdep_assert(rcu_is_watching(), |
| 796 | "rcu_read_unlock() used illegally while idle"); | 796 | "rcu_read_unlock() used illegally while idle"); |
| 797 | rcu_lock_release(&rcu_lock_map); | 797 | rcu_lock_release(&rcu_lock_map); |
| 798 | __release(RCU); | 798 | __release(RCU); |
| @@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void) | |||
| 821 | local_bh_disable(); | 821 | local_bh_disable(); |
| 822 | __acquire(RCU_BH); | 822 | __acquire(RCU_BH); |
| 823 | rcu_lock_acquire(&rcu_bh_lock_map); | 823 | rcu_lock_acquire(&rcu_bh_lock_map); |
| 824 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 824 | rcu_lockdep_assert(rcu_is_watching(), |
| 825 | "rcu_read_lock_bh() used illegally while idle"); | 825 | "rcu_read_lock_bh() used illegally while idle"); |
| 826 | } | 826 | } |
| 827 | 827 | ||
| @@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void) | |||
| 832 | */ | 832 | */ |
| 833 | static inline void rcu_read_unlock_bh(void) | 833 | static inline void rcu_read_unlock_bh(void) |
| 834 | { | 834 | { |
| 835 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 835 | rcu_lockdep_assert(rcu_is_watching(), |
| 836 | "rcu_read_unlock_bh() used illegally while idle"); | 836 | "rcu_read_unlock_bh() used illegally while idle"); |
| 837 | rcu_lock_release(&rcu_bh_lock_map); | 837 | rcu_lock_release(&rcu_bh_lock_map); |
| 838 | __release(RCU_BH); | 838 | __release(RCU_BH); |
| @@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void) | |||
| 857 | preempt_disable(); | 857 | preempt_disable(); |
| 858 | __acquire(RCU_SCHED); | 858 | __acquire(RCU_SCHED); |
| 859 | rcu_lock_acquire(&rcu_sched_lock_map); | 859 | rcu_lock_acquire(&rcu_sched_lock_map); |
| 860 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 860 | rcu_lockdep_assert(rcu_is_watching(), |
| 861 | "rcu_read_lock_sched() used illegally while idle"); | 861 | "rcu_read_lock_sched() used illegally while idle"); |
| 862 | } | 862 | } |
| 863 | 863 | ||
| @@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void) | |||
| 875 | */ | 875 | */ |
| 876 | static inline void rcu_read_unlock_sched(void) | 876 | static inline void rcu_read_unlock_sched(void) |
| 877 | { | 877 | { |
| 878 | rcu_lockdep_assert(!rcu_is_cpu_idle(), | 878 | rcu_lockdep_assert(rcu_is_watching(), |
| 879 | "rcu_read_unlock_sched() used illegally while idle"); | 879 | "rcu_read_unlock_sched() used illegally while idle"); |
| 880 | rcu_lock_release(&rcu_sched_lock_map); | 880 | rcu_lock_release(&rcu_sched_lock_map); |
| 881 | __release(RCU_SCHED); | 881 | __release(RCU_SCHED); |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index e31005ee339e..09ebcbe9fd78 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
| @@ -132,4 +132,21 @@ static inline void rcu_scheduler_starting(void) | |||
| 132 | } | 132 | } |
| 133 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 133 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 134 | 134 | ||
| 135 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) | ||
| 136 | |||
| 137 | static inline bool rcu_is_watching(void) | ||
| 138 | { | ||
| 139 | return __rcu_is_watching(); | ||
| 140 | } | ||
| 141 | |||
| 142 | #else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ | ||
| 143 | |||
| 144 | static inline bool rcu_is_watching(void) | ||
| 145 | { | ||
| 146 | return true; | ||
| 147 | } | ||
| 148 | |||
| 149 | |||
| 150 | #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ | ||
| 151 | |||
| 135 | #endif /* __LINUX_RCUTINY_H */ | 152 | #endif /* __LINUX_RCUTINY_H */ |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 226169d1bd2b..4b9c81548742 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
| @@ -90,4 +90,6 @@ extern void exit_rcu(void); | |||
| 90 | extern void rcu_scheduler_starting(void); | 90 | extern void rcu_scheduler_starting(void); |
| 91 | extern int rcu_scheduler_active __read_mostly; | 91 | extern int rcu_scheduler_active __read_mostly; |
| 92 | 92 | ||
| 93 | extern bool rcu_is_watching(void); | ||
| 94 | |||
| 93 | #endif /* __LINUX_RCUTREE_H */ | 95 | #endif /* __LINUX_RCUTREE_H */ |
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index ee2376cfaab3..aca382266411 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h | |||
| @@ -39,15 +39,26 @@ TRACE_EVENT(rcu_utilization, | |||
| 39 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 39 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
| 40 | 40 | ||
| 41 | /* | 41 | /* |
| 42 | * Tracepoint for grace-period events: starting and ending a grace | 42 | * Tracepoint for grace-period events. Takes a string identifying the |
| 43 | * period ("start" and "end", respectively), a CPU noting the start | 43 | * RCU flavor, the grace-period number, and a string identifying the |
| 44 | * of a new grace period or the end of an old grace period ("cpustart" | 44 | * grace-period-related event as follows: |
| 45 | * and "cpuend", respectively), a CPU passing through a quiescent | 45 | * |
| 46 | * state ("cpuqs"), a CPU coming online or going offline ("cpuonl" | 46 | * "AccReadyCB": CPU acclerates new callbacks to RCU_NEXT_READY_TAIL. |
| 47 | * and "cpuofl", respectively), a CPU being kicked for being too | 47 | * "AccWaitCB": CPU accelerates new callbacks to RCU_WAIT_TAIL. |
| 48 | * long in dyntick-idle mode ("kick"), a CPU accelerating its new | 48 | * "newreq": Request a new grace period. |
| 49 | * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU | 49 | * "start": Start a grace period. |
| 50 | * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB"). | 50 | * "cpustart": CPU first notices a grace-period start. |
| 51 | * "cpuqs": CPU passes through a quiescent state. | ||
| 52 | * "cpuonl": CPU comes online. | ||
| 53 | * "cpuofl": CPU goes offline. | ||
| 54 | * "reqwait": GP kthread sleeps waiting for grace-period request. | ||
| 55 | * "reqwaitsig": GP kthread awakened by signal from reqwait state. | ||
| 56 | * "fqswait": GP kthread waiting until time to force quiescent states. | ||
| 57 | * "fqsstart": GP kthread starts forcing quiescent states. | ||
| 58 | * "fqsend": GP kthread done forcing quiescent states. | ||
| 59 | * "fqswaitsig": GP kthread awakened by signal from fqswait state. | ||
| 60 | * "end": End a grace period. | ||
| 61 | * "cpuend": CPU first notices a grace-period end. | ||
| 51 | */ | 62 | */ |
| 52 | TRACE_EVENT(rcu_grace_period, | 63 | TRACE_EVENT(rcu_grace_period, |
| 53 | 64 | ||
| @@ -161,6 +172,46 @@ TRACE_EVENT(rcu_grace_period_init, | |||
| 161 | ); | 172 | ); |
| 162 | 173 | ||
| 163 | /* | 174 | /* |
| 175 | * Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended | ||
| 176 | * to assist debugging of these handoffs. | ||
| 177 | * | ||
| 178 | * The first argument is the name of the RCU flavor, and the second is | ||
| 179 | * the number of the offloaded CPU are extracted. The third and final | ||
| 180 | * argument is a string as follows: | ||
| 181 | * | ||
| 182 | * "WakeEmpty": Wake rcuo kthread, first CB to empty list. | ||
| 183 | * "WakeOvf": Wake rcuo kthread, CB list is huge. | ||
| 184 | * "WakeNot": Don't wake rcuo kthread. | ||
| 185 | * "WakeNotPoll": Don't wake rcuo kthread because it is polling. | ||
| 186 | * "Poll": Start of new polling cycle for rcu_nocb_poll. | ||
| 187 | * "Sleep": Sleep waiting for CBs for !rcu_nocb_poll. | ||
| 188 | * "WokeEmpty": rcuo kthread woke to find empty list. | ||
| 189 | * "WokeNonEmpty": rcuo kthread woke to find non-empty list. | ||
| 190 | * "WaitQueue": Enqueue partially done, timed wait for it to complete. | ||
| 191 | * "WokeQueue": Partial enqueue now complete. | ||
| 192 | */ | ||
| 193 | TRACE_EVENT(rcu_nocb_wake, | ||
| 194 | |||
| 195 | TP_PROTO(const char *rcuname, int cpu, const char *reason), | ||
| 196 | |||
| 197 | TP_ARGS(rcuname, cpu, reason), | ||
| 198 | |||
| 199 | TP_STRUCT__entry( | ||
| 200 | __field(const char *, rcuname) | ||
| 201 | __field(int, cpu) | ||
| 202 | __field(const char *, reason) | ||
| 203 | ), | ||
| 204 | |||
| 205 | TP_fast_assign( | ||
| 206 | __entry->rcuname = rcuname; | ||
| 207 | __entry->cpu = cpu; | ||
| 208 | __entry->reason = reason; | ||
| 209 | ), | ||
| 210 | |||
| 211 | TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason) | ||
| 212 | ); | ||
| 213 | |||
| 214 | /* | ||
| 164 | * Tracepoint for tasks blocking within preemptible-RCU read-side | 215 | * Tracepoint for tasks blocking within preemptible-RCU read-side |
| 165 | * critical sections. Track the type of RCU (which one day might | 216 | * critical sections. Track the type of RCU (which one day might |
| 166 | * include SRCU), the grace-period number that the task is blocking | 217 | * include SRCU), the grace-period number that the task is blocking |
| @@ -540,17 +591,17 @@ TRACE_EVENT(rcu_invoke_kfree_callback, | |||
| 540 | TRACE_EVENT(rcu_batch_end, | 591 | TRACE_EVENT(rcu_batch_end, |
| 541 | 592 | ||
| 542 | TP_PROTO(const char *rcuname, int callbacks_invoked, | 593 | TP_PROTO(const char *rcuname, int callbacks_invoked, |
| 543 | bool cb, bool nr, bool iit, bool risk), | 594 | char cb, char nr, char iit, char risk), |
| 544 | 595 | ||
| 545 | TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), | 596 | TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk), |
| 546 | 597 | ||
| 547 | TP_STRUCT__entry( | 598 | TP_STRUCT__entry( |
| 548 | __field(const char *, rcuname) | 599 | __field(const char *, rcuname) |
| 549 | __field(int, callbacks_invoked) | 600 | __field(int, callbacks_invoked) |
| 550 | __field(bool, cb) | 601 | __field(char, cb) |
| 551 | __field(bool, nr) | 602 | __field(char, nr) |
| 552 | __field(bool, iit) | 603 | __field(char, iit) |
| 553 | __field(bool, risk) | 604 | __field(char, risk) |
| 554 | ), | 605 | ), |
| 555 | 606 | ||
| 556 | TP_fast_assign( | 607 | TP_fast_assign( |
| @@ -656,6 +707,7 @@ TRACE_EVENT(rcu_barrier, | |||
| 656 | #define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \ | 707 | #define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \ |
| 657 | level, grplo, grphi, event) \ | 708 | level, grplo, grphi, event) \ |
| 658 | do { } while (0) | 709 | do { } while (0) |
| 710 | #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0) | ||
| 659 | #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) | 711 | #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) |
| 660 | #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) | 712 | #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) |
| 661 | #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \ | 713 | #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \ |
