author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-19 20:12:34 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-20 13:10:18 -0400
commit    5928a2b60cfdbad730f93696acab142d0b607280 (patch)
tree      49bb21c9219673e61bad7a7c9202c7f25f5fe1be /include
parent    5ed59af85077d28875a3a137b21933aaf1b4cd50 (diff)
parent    bdd4431c8d071491a68a65d9457996f222b5ecd3 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull RCU changes for v3.4 from Ingo Molnar. The major features of this
series are:

 - making RCU more aggressive about entering dyntick-idle mode in order
   to improve energy efficiency
 - converting a few more call_rcu()s to kfree_rcu()s
 - applying a number of rcutree fixes and cleanups to rcutiny
 - removing CONFIG_SMP #ifdefs from treercu
 - allowing RCU CPU stall times to be set via sysfs
 - adding CPU-stall capability to rcutorture
 - adding more RCU-abuse diagnostics
 - updating documentation
 - fixing yet more issues located by the still-ongoing top-to-bottom
   inspection of RCU, this time with a special focus on the CPU-hotplug
   code path.

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
  rcu: Stop spurious warnings from synchronize_sched_expedited
  rcu: Hold off RCU_FAST_NO_HZ after timer posted
  rcu: Eliminate softirq-mediated RCU_FAST_NO_HZ idle-entry loop
  rcu: Add RCU_NONIDLE() for idle-loop RCU read-side critical sections
  rcu: Allow nesting of rcu_idle_enter() and rcu_idle_exit()
  rcu: Remove redundant check for rcu_head misalignment
  PTR_ERR should be called before its argument is cleared.
  rcu: Convert WARN_ON_ONCE() in rcu_lock_acquire() to lockdep
  rcu: Trace only after NULL-pointer check
  rcu: Call out dangers of expedited RCU primitives
  rcu: Rework detection of use of RCU by offline CPUs
  lockdep: Add CPU-idle/offline warning to lockdep-RCU splat
  rcu: No interrupt disabling for rcu_prepare_for_idle()
  rcu: Move synchronize_sched_expedited() to rcutree.c
  rcu: Check for illegal use of RCU from offlined CPUs
  rcu: Update stall-warning documentation
  rcu: Add CPU-stall capability to rcutorture
  rcu: Make documentation give more realistic rcutorture duration
  rcutorture: Permit holding off CPU-hotplug operations during boot
  rcu: Print scheduling-clock information on RCU CPU stall-warning messages
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/rcupdate.h    |  83
-rw-r--r--  include/linux/rcutiny.h     |  10
-rw-r--r--  include/linux/rcutree.h     |  19
-rw-r--r--  include/linux/sched.h       |   3
-rw-r--r--  include/linux/srcu.h        |  15
-rw-r--r--  include/trace/events/rcu.h  |  63
6 files changed, 156 insertions(+), 37 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 81c04f4348ec..937217425c47 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -190,6 +190,33 @@ extern void rcu_idle_exit(void);
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
 
+/**
+ * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
+ * @a: Code that RCU needs to pay attention to.
+ *
+ * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
+ * in the inner idle loop, that is, between the rcu_idle_enter() and
+ * the rcu_idle_exit() -- RCU will happily ignore any such read-side
+ * critical sections.  However, things like powertop need tracepoints
+ * in the inner idle loop.
+ *
+ * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
+ * will tell RCU that it needs to pay attention, invoke its argument
+ * (in this example, a call to the do_something_with_RCU() function),
+ * and then tell RCU to go back to ignoring this CPU.  It is permissible
+ * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
+ * quite limited.  If deeper nesting is required, it will be necessary
+ * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
+ *
+ * This macro may be used from process-level code only.
+ */
+#define RCU_NONIDLE(a) \
+        do { \
+                rcu_idle_exit(); \
+                do { a; } while (0); \
+                rcu_idle_enter(); \
+        } while (0)
+
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
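
Note: a minimal sketch (not part of the patch) of how RCU_NONIDLE() is meant
to be used from the idle loop; do_something_with_RCU() is the hypothetical
helper named in the kernel-doc above:

        static void do_something_with_RCU(void)         /* assumed helper */
        {
                rcu_read_lock();
                /* ... read RCU-protected state, e.g. for a tracepoint ... */
                rcu_read_unlock();
        }

        static void idle_inner_loop_fragment(void)      /* assumed caller */
        {
                /*
                 * A bare rcu_read_lock() here would be silently ignored,
                 * because RCU is not watching an idle CPU.  RCU_NONIDLE()
                 * makes RCU watch just for the wrapped statement.
                 */
                RCU_NONIDLE(do_something_with_RCU());
        }
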
@@ -226,6 +253,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
+bool rcu_lockdep_current_cpu_online(void);
+#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+static inline bool rcu_lockdep_current_cpu_online(void)
+{
+        return 1;
+}
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 #ifdef CONFIG_PROVE_RCU
@@ -239,13 +275,11 @@ static inline int rcu_is_cpu_idle(void)
 
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-        WARN_ON_ONCE(rcu_is_cpu_idle());
         lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
 }
 
 static inline void rcu_lock_release(struct lockdep_map *map)
 {
-        WARN_ON_ONCE(rcu_is_cpu_idle());
         lock_release(map, 1, _THIS_IP_);
 }
 
@@ -270,6 +304,9 @@ extern int debug_lockdep_rcu_enabled(void);
  * occur in the same context, for example, it is illegal to invoke
  * rcu_read_unlock() in process context if the matching rcu_read_lock()
  * was invoked from within an irq handler.
+ *
+ * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * offline from an RCU perspective, so check for those as well.
  */
 static inline int rcu_read_lock_held(void)
 {
@@ -277,6 +314,8 @@ static inline int rcu_read_lock_held(void)
                 return 1;
         if (rcu_is_cpu_idle())
                 return 0;
+        if (!rcu_lockdep_current_cpu_online())
+                return 0;
         return lock_is_held(&rcu_lock_map);
 }
 
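
Note: rcu_read_lock_held() feeds lockdep-based checking such as
rcu_dereference_check(), which ORs the caller-supplied condition with
rcu_read_lock_held().  A minimal sketch of a consumer under that assumption;
gbl_foo and gbl_foo_lock are hypothetical:

        struct foo {
                int val;
        };
        static struct foo __rcu *gbl_foo;       /* hypothetical RCU pointer */
        static DEFINE_SPINLOCK(gbl_foo_lock);   /* hypothetical update lock */

        static int foo_get_val(void)
        {
                struct foo *p;
                int val;

                rcu_read_lock();
                /* Splats unless in a reader or holding gbl_foo_lock. */
                p = rcu_dereference_check(gbl_foo,
                                          lockdep_is_held(&gbl_foo_lock));
                val = p ? p->val : -1;
                rcu_read_unlock();
                return val;
        }
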
@@ -313,6 +352,9 @@ extern int rcu_read_lock_bh_held(void);
  * notice an extended quiescent state to other CPUs that started a grace
  * period. Otherwise we would delay any grace period as long as we run in
  * the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
  */
 #ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
@@ -323,6 +365,8 @@ static inline int rcu_read_lock_sched_held(void)
                 return 1;
         if (rcu_is_cpu_idle())
                 return 0;
+        if (!rcu_lockdep_current_cpu_online())
+                return 0;
         if (debug_locks)
                 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
         return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -381,8 +425,22 @@ extern int rcu_my_thread_group_empty(void);
                 } \
         } while (0)
 
+#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
+static inline void rcu_preempt_sleep_check(void)
+{
+        rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
+                           "Illegal context switch in RCU read-side "
+                           "critical section");
+}
+#else /* #ifdef CONFIG_PROVE_RCU */
+static inline void rcu_preempt_sleep_check(void)
+{
+}
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
 #define rcu_sleep_check() \
         do { \
+                rcu_preempt_sleep_check(); \
                 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
                                    "Illegal context switch in RCU-bh" \
                                    " read-side critical section"); \
@@ -470,6 +528,13 @@ extern int rcu_my_thread_group_empty(void);
  * NULL.  Although rcu_access_pointer() may also be used in cases where
  * update-side locks prevent the value of the pointer from changing, you
  * should instead use rcu_dereference_protected() for this use case.
+ *
+ * It is also permissible to use rcu_access_pointer() when read-side
+ * access to the pointer was removed at least one grace period ago, as
+ * is the case in the context of the RCU callback that is freeing up
+ * the data, or after a synchronize_rcu() returns.  This can be useful
+ * when tearing down multi-linked structures after a grace period
+ * has elapsed.
  */
 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
 
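
Note: a minimal sketch of the teardown pattern the new paragraph describes,
using a hypothetical two-level structure.  Once the structure has been made
unreachable to readers and synchronize_rcu() has returned, no readers remain,
so rcu_access_pointer() suffices and triggers no lockdep complaints:

        struct inner {
                struct inner __rcu *next;
        };
        struct outer {
                struct inner __rcu *head;       /* hypothetical structure */
        };

        static void outer_teardown(struct outer *op)
        {
                struct inner *p;

                /* op is assumed already unpublished from all readers. */
                synchronize_rcu();      /* pre-existing readers are done */
                p = rcu_access_pointer(op->head);
                while (p) {
                        struct inner *next = rcu_access_pointer(p->next);

                        kfree(p);
                        p = next;
                }
        }
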
@@ -659,6 +724,8 @@ static inline void rcu_read_lock(void)
         __rcu_read_lock();
         __acquire(RCU);
         rcu_lock_acquire(&rcu_lock_map);
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_lock() used illegally while idle");
 }
 
 /*
@@ -678,6 +745,8 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_unlock() used illegally while idle");
         rcu_lock_release(&rcu_lock_map);
         __release(RCU);
         __rcu_read_unlock();
@@ -705,6 +774,8 @@ static inline void rcu_read_lock_bh(void)
         local_bh_disable();
         __acquire(RCU_BH);
         rcu_lock_acquire(&rcu_bh_lock_map);
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_lock_bh() used illegally while idle");
 }
 
 /*
@@ -714,6 +785,8 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_unlock_bh() used illegally while idle");
         rcu_lock_release(&rcu_bh_lock_map);
         __release(RCU_BH);
         local_bh_enable();
@@ -737,6 +810,8 @@ static inline void rcu_read_lock_sched(void)
         preempt_disable();
         __acquire(RCU_SCHED);
         rcu_lock_acquire(&rcu_sched_lock_map);
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_lock_sched() used illegally while idle");
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -753,6 +828,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "rcu_read_unlock_sched() used illegally while idle");
         rcu_lock_release(&rcu_sched_lock_map);
         __release(RCU_SCHED);
         preempt_enable();
@@ -841,7 +918,7 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
         /* See the kfree_rcu() header comment. */
         BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
 
-        call_rcu(head, (rcu_callback)offset);
+        kfree_call_rcu(head, (rcu_callback)offset);
 }
 
 /**
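
Note: routing __kfree_rcu() through kfree_call_rcu() lets the RCU core treat
kfree-only callbacks as "lazy" for the energy-efficiency work mentioned in the
merge message; the caller-visible API is unchanged.  A minimal sketch of
kfree_rcu() usage with a hypothetical structure:

        struct foo_entry {
                int key;
                struct rcu_head rcu;    /* required by kfree_rcu() */
        };

        static void foo_entry_release(struct foo_entry *fp)
        {
                /*
                 * Queues a kfree() of fp after a grace period; via
                 * __kfree_rcu() this now lands in kfree_call_rcu()
                 * rather than call_rcu().
                 */
                kfree_rcu(fp, rcu);
        }
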
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 00b7a5e493d2..e93df77176d1 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,13 +27,9 @@
 
 #include <linux/cache.h>
 
-#ifdef CONFIG_RCU_BOOST
 static inline void rcu_init(void)
 {
 }
-#else /* #ifdef CONFIG_RCU_BOOST */
-void rcu_init(void);
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 static inline void rcu_barrier_bh(void)
 {
@@ -83,6 +79,12 @@ static inline void synchronize_sched_expedited(void)
         synchronize_sched();
 }
 
+static inline void kfree_call_rcu(struct rcu_head *head,
+                                  void (*func)(struct rcu_head *rcu))
+{
+        call_rcu(head, func);
+}
+
 #ifdef CONFIG_TINY_RCU
 
 static inline void rcu_preempt_note_context_switch(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 67458468f1a8..e8ee5dd0854c 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -61,6 +61,24 @@ extern void synchronize_rcu_bh(void);
 extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
 
+void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+
+/**
+ * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
+ *
+ * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly.  This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * so is thus not recommended for any sort of common-case code.  In fact,
+ * if you are using synchronize_rcu_bh_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_rcu_bh() instead.
+ *
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier.  Failing to observe
+ * these restrictions will result in deadlock.
+ */
 static inline void synchronize_rcu_bh_expedited(void)
 {
         synchronize_sched_expedited();
@@ -83,6 +101,7 @@ extern void rcu_sched_force_quiescent_state(void);
 /* A context switch is a grace period for RCU-sched and RCU-bh. */
 static inline int rcu_blocking_is_gp(void)
 {
+        might_sleep();  /* Check for RCU read-side critical section. */
         return num_online_cpus() == 1;
 }
 
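
Note: the new kernel-doc advises batching updates rather than forcing one
expedited grace period per update.  A hedged sketch of that restructuring;
struct entry and remove_entry() are hypothetical:

        /* Discouraged: one brute-force grace period per removed entry. */
        static void teardown_slow(struct entry *entries[], int n)
        {
                int i;

                for (i = 0; i < n; i++) {
                        remove_entry(entries[i]);       /* unlink */
                        synchronize_rcu_bh_expedited(); /* hammer, n times */
                        kfree(entries[i]);
                }
        }

        /* Preferred: batch the unlinks, then wait once. */
        static void teardown_batched(struct entry *entries[], int n)
        {
                int i;

                for (i = 0; i < n; i++)
                        remove_entry(entries[i]);
                synchronize_rcu_bh();           /* single grace period */
                for (i = 0; i < n; i++)
                        kfree(entries[i]);
        }
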
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0657368bd78f..f58889b8a608 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1863,8 +1863,7 @@ extern void task_clear_jobctl_pending(struct task_struct *task,
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
-#define RCU_READ_UNLOCK_BOOSTED (1 << 1) /* boosted while in RCU read-side. */
-#define RCU_READ_UNLOCK_NEED_QS (1 << 2) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e1b005918bbb..d3d5fa54f25e 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -99,15 +99,18 @@ long srcu_batches_completed(struct srcu_struct *sp);
  * power mode.  This way we can notice an extended quiescent state to
  * other CPUs that started a grace period. Otherwise we would delay any
  * grace period as long as we run in the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
  */
 static inline int srcu_read_lock_held(struct srcu_struct *sp)
 {
-        if (rcu_is_cpu_idle())
-                return 0;
-
         if (!debug_lockdep_rcu_enabled())
                 return 1;
-
+        if (rcu_is_cpu_idle())
+                return 0;
+        if (!rcu_lockdep_current_cpu_online())
+                return 0;
         return lock_is_held(&sp->dep_map);
 }
 
@@ -169,6 +172,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
         int retval = __srcu_read_lock(sp);
 
         rcu_lock_acquire(&(sp)->dep_map);
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "srcu_read_lock() used illegally while idle");
         return retval;
 }
 
@@ -182,6 +187,8 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
         __releases(sp)
 {
+        rcu_lockdep_assert(!rcu_is_cpu_idle(),
+                           "srcu_read_unlock() used illegally while idle");
         rcu_lock_release(&(sp)->dep_map);
         __srcu_read_unlock(sp, idx);
 }
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d2d88bed891b..337099783f37 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -313,19 +313,22 @@ TRACE_EVENT(rcu_prep_idle,
 /*
  * Tracepoint for the registration of a single RCU callback function.
  * The first argument is the type of RCU, the second argument is
- * a pointer to the RCU callback itself, and the third element is the
- * new RCU callback queue length for the current CPU.
+ * a pointer to the RCU callback itself, the third element is the
+ * number of lazy callbacks queued, and the fourth element is the
+ * total number of callbacks queued.
  */
 TRACE_EVENT(rcu_callback,
 
-        TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
+        TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
+                 long qlen),
 
-        TP_ARGS(rcuname, rhp, qlen),
+        TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
 
         TP_STRUCT__entry(
                 __field(char *, rcuname)
                 __field(void *, rhp)
                 __field(void *, func)
+                __field(long, qlen_lazy)
                 __field(long, qlen)
         ),
 
@@ -333,11 +336,13 @@ TRACE_EVENT(rcu_callback,
                 __entry->rcuname = rcuname;
                 __entry->rhp = rhp;
                 __entry->func = rhp->func;
+                __entry->qlen_lazy = qlen_lazy;
                 __entry->qlen = qlen;
         ),
 
-        TP_printk("%s rhp=%p func=%pf %ld",
-                  __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen)
+        TP_printk("%s rhp=%p func=%pf %ld/%ld",
+                  __entry->rcuname, __entry->rhp, __entry->func,
+                  __entry->qlen_lazy, __entry->qlen)
 );
 
 /*
@@ -345,20 +350,21 @@ TRACE_EVENT(rcu_callback,
  * kfree() form.  The first argument is the RCU type, the second argument
  * is a pointer to the RCU callback, the third argument is the offset
  * of the callback within the enclosing RCU-protected data structure,
- * and the fourth argument is the new RCU callback queue length for the
- * current CPU.
+ * the fourth argument is the number of lazy callbacks queued, and the
+ * fifth argument is the total number of callbacks queued.
  */
 TRACE_EVENT(rcu_kfree_callback,
 
         TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
-                 long qlen),
+                 long qlen_lazy, long qlen),
 
-        TP_ARGS(rcuname, rhp, offset, qlen),
+        TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
 
         TP_STRUCT__entry(
                 __field(char *, rcuname)
                 __field(void *, rhp)
                 __field(unsigned long, offset)
+                __field(long, qlen_lazy)
                 __field(long, qlen)
         ),
 
@@ -366,41 +372,45 @@ TRACE_EVENT(rcu_kfree_callback,
                 __entry->rcuname = rcuname;
                 __entry->rhp = rhp;
                 __entry->offset = offset;
+                __entry->qlen_lazy = qlen_lazy;
                 __entry->qlen = qlen;
         ),
 
-        TP_printk("%s rhp=%p func=%ld %ld",
+        TP_printk("%s rhp=%p func=%ld %ld/%ld",
                   __entry->rcuname, __entry->rhp, __entry->offset,
-                  __entry->qlen)
+                  __entry->qlen_lazy, __entry->qlen)
 );
 
 /*
  * Tracepoint for marking the beginning rcu_do_batch, performed to start
  * RCU callback invocation.  The first argument is the RCU flavor,
- * the second is the total number of callbacks (including those that
- * are not yet ready to be invoked), and the third argument is the
- * current RCU-callback batch limit.
+ * the second is the number of lazy callbacks queued, the third is
+ * the total number of callbacks queued, and the fourth argument is
+ * the current RCU-callback batch limit.
  */
 TRACE_EVENT(rcu_batch_start,
 
-        TP_PROTO(char *rcuname, long qlen, int blimit),
+        TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
 
-        TP_ARGS(rcuname, qlen, blimit),
+        TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
 
         TP_STRUCT__entry(
                 __field(char *, rcuname)
+                __field(long, qlen_lazy)
                 __field(long, qlen)
                 __field(int, blimit)
         ),
 
         TP_fast_assign(
                 __entry->rcuname = rcuname;
+                __entry->qlen_lazy = qlen_lazy;
                 __entry->qlen = qlen;
                 __entry->blimit = blimit;
         ),
 
-        TP_printk("%s CBs=%ld bl=%d",
-                  __entry->rcuname, __entry->qlen, __entry->blimit)
+        TP_printk("%s CBs=%ld/%ld bl=%d",
+                  __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
+                  __entry->blimit)
 );
 
 /*
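
Note: with the lazy-callback accounting above, the callback-queue tracepoints
now print counts as lazy/total.  Going by the new TP_printk format string, an
rcu_batch_start event would render along these lines (flavor name and numbers
are made up for illustration):

        rcu_batch_start: rcu_sched CBs=5/42 bl=10

that is, 5 of the 42 queued callbacks are lazy (kfree-only), with a
callback-batch limit of 10.
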
@@ -531,16 +541,21 @@ TRACE_EVENT(rcu_torture_read,
531#else /* #ifdef CONFIG_RCU_TRACE */ 541#else /* #ifdef CONFIG_RCU_TRACE */
532 542
533#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0) 543#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
534#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, qsmask) do { } while (0) 544#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
545 qsmask) do { } while (0)
535#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) 546#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
536#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) 547#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
537#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0) 548#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
549 grplo, grphi, gp_tasks) do { } \
550 while (0)
538#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) 551#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
539#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0) 552#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
540#define trace_rcu_prep_idle(reason) do { } while (0) 553#define trace_rcu_prep_idle(reason) do { } while (0)
541#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0) 554#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
542#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0) 555#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
543#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0) 556 do { } while (0)
557#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
558 do { } while (0)
544#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0) 559#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
545#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) 560#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
546#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ 561#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \