author     Paul E. McKenney <paul.mckenney@linaro.org>      2011-10-31 18:01:54 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2011-12-11 13:31:42 -0500
commit     4145fa7fbee3ec1e61c52825b146192885d9759f (patch)
tree       58e0767a59bf110acb99da3fb5c1f0f0b5170be8
parent     9ceae0e248fb553c702d51d5275167d462f4efd2 (diff)
rcu: Deconfuse dynticks entry-exit tracing
The trace_rcu_dyntick() trace event did not print both the old and the
new value of the nesting level, and furthermore printed only the
low-order 32 bits of it.  This could result in some confusion when
interpreting trace-event dumps, so this commit prints both the old and
the new value, prints the full 64 bits, and also selects the
process-entry/exit increment to print nicely in hexadecimal.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
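[Editor's illustration, not part of the original changelog: the new calling convention is to snapshot the nesting counter before updating it and then trace both the old and the new value. A condensed sketch based on the rcu_idle_enter() path in kernel/rcutiny.c as modified by this patch (the pass through rcu_idle_enter_common() is folded in for brevity):

    static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;

    void rcu_idle_enter(void)                   /* condensed sketch, not the full function */
    {
            unsigned long flags;
            long long oldval;

            local_irq_save(flags);
            oldval = rcu_dynticks_nesting;      /* old value, e.g. DYNTICK_TASK_NESTING */
            rcu_dynticks_nesting = 0;           /* new value: entering idle */
            /* Both values are traced; TP_printk() emits them as full 64-bit hex. */
            RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
            local_irq_restore(flags);
    }
]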
-rw-r--r--   include/trace/events/rcu.h | 15
-rw-r--r--   kernel/rcu.h               |  7
-rw-r--r--   kernel/rcutiny.c           | 28
-rw-r--r--   kernel/rcutree.c           | 35
4 files changed, 53 insertions(+), 32 deletions(-)
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 172620a92b1a..c29fb2f55909 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -246,21 +246,24 @@ TRACE_EVENT(rcu_fqs,
  */
 TRACE_EVENT(rcu_dyntick,
 
-	TP_PROTO(char *polarity, int nesting),
+	TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
 
-	TP_ARGS(polarity, nesting),
+	TP_ARGS(polarity, oldnesting, newnesting),
 
 	TP_STRUCT__entry(
 		__field(char *, polarity)
-		__field(int, nesting)
+		__field(long long, oldnesting)
+		__field(long long, newnesting)
 	),
 
 	TP_fast_assign(
 		__entry->polarity = polarity;
-		__entry->nesting = nesting;
+		__entry->oldnesting = oldnesting;
+		__entry->newnesting = newnesting;
 	),
 
-	TP_printk("%s %d", __entry->polarity, __entry->nesting)
+	TP_printk("%s %llx %llx", __entry->polarity,
+		  __entry->oldnesting, __entry->newnesting)
 );
 
 /*
@@ -470,7 +473,7 @@ TRACE_EVENT(rcu_torture_read,
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
 #define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity, nesting) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
 #define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
 #define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
diff --git a/kernel/rcu.h b/kernel/rcu.h
index f600868d550d..aa88baab5f78 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -30,6 +30,13 @@
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
 /*
+ * Process-level increment to ->dynticks_nesting field.  This allows for
+ * architectures that use half-interrupts and half-exceptions from
+ * process context.
+ */
+#define DYNTICK_TASK_NESTING (LLONG_MAX / 2 - 1)
+
+/*
  * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
  * by call_rcu() and rcu callback execution, and are therefore not part of the
  * RCU API.  Leaving in rcupdate.h because they are used by all RCU flavors.
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index b4e0b4981768..9b9bdf666fb5 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -53,20 +53,21 @@ static void __call_rcu(struct rcu_head *head,
 
 #include "rcutiny_plugin.h"
 
-static long long rcu_dynticks_nesting = LLONG_MAX / 2;
+static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
 
 /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
-static void rcu_idle_enter_common(void)
+static void rcu_idle_enter_common(long long oldval)
 {
 	if (rcu_dynticks_nesting) {
-		RCU_TRACE(trace_rcu_dyntick("--=", rcu_dynticks_nesting));
+		RCU_TRACE(trace_rcu_dyntick("--=",
+					    oldval, rcu_dynticks_nesting));
 		return;
 	}
-	RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting));
+	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
 	if (!idle_cpu(smp_processor_id())) {
 		WARN_ON_ONCE(1);	/* must be idle task! */
 		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
-			   rcu_dynticks_nesting));
+			   oldval, rcu_dynticks_nesting));
 		ftrace_dump(DUMP_ALL);
 	}
 	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
@@ -79,10 +80,12 @@ static void rcu_idle_enter_common(void)
 void rcu_idle_enter(void)
 {
 	unsigned long flags;
+	long long oldval;
 
 	local_irq_save(flags);
+	oldval = rcu_dynticks_nesting;
 	rcu_dynticks_nesting = 0;
-	rcu_idle_enter_common();
+	rcu_idle_enter_common(oldval);
 	local_irq_restore(flags);
 }
 
@@ -92,11 +95,13 @@ void rcu_idle_enter(void)
 void rcu_irq_exit(void)
 {
 	unsigned long flags;
+	long long oldval;
 
 	local_irq_save(flags);
+	oldval = rcu_dynticks_nesting;
 	rcu_dynticks_nesting--;
 	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
-	rcu_idle_enter_common();
+	rcu_idle_enter_common(oldval);
 	local_irq_restore(flags);
 }
 
@@ -104,14 +109,15 @@ void rcu_irq_exit(void)
 static void rcu_idle_exit_common(long long oldval)
 {
 	if (oldval) {
-		RCU_TRACE(trace_rcu_dyntick("++=", rcu_dynticks_nesting));
+		RCU_TRACE(trace_rcu_dyntick("++=",
+					    oldval, rcu_dynticks_nesting));
 		return;
 	}
-	RCU_TRACE(trace_rcu_dyntick("End", oldval));
+	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
 	if (!idle_cpu(smp_processor_id())) {
 		WARN_ON_ONCE(1);	/* must be idle task! */
 		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
-			   oldval));
+			   oldval, rcu_dynticks_nesting));
 		ftrace_dump(DUMP_ALL);
 	}
 }
@@ -127,7 +133,7 @@ void rcu_idle_exit(void)
 	local_irq_save(flags);
 	oldval = rcu_dynticks_nesting;
 	WARN_ON_ONCE(oldval != 0);
-	rcu_dynticks_nesting = LLONG_MAX / 2;
+	rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
 	rcu_idle_exit_common(oldval);
 	local_irq_restore(flags);
 }
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 489b62a67d35..06e40dd53b23 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -196,7 +196,7 @@ void rcu_note_context_switch(int cpu)
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-	.dynticks_nesting = LLONG_MAX / 2,
+	.dynticks_nesting = DYNTICK_TASK_NESTING,
 	.dynticks = ATOMIC_INIT(1),
 };
 
@@ -348,17 +348,17 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_enter_common(struct rcu_dynticks *rdtp)
+static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 {
 	if (rdtp->dynticks_nesting) {
-		trace_rcu_dyntick("--=", rdtp->dynticks_nesting);
+		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
 		return;
 	}
-	trace_rcu_dyntick("Start", rdtp->dynticks_nesting);
+	trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
 	if (!idle_cpu(smp_processor_id())) {
 		WARN_ON_ONCE(1);	/* must be idle task! */
 		trace_rcu_dyntick("Error on entry: not idle task",
-				  rdtp->dynticks_nesting);
+				  oldval, rdtp->dynticks_nesting);
 		ftrace_dump(DUMP_ALL);
 	}
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
@@ -383,12 +383,14 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp)
 void rcu_idle_enter(void)
 {
 	unsigned long flags;
+	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting = 0;
-	rcu_idle_enter_common(rdtp);
+	rcu_idle_enter_common(rdtp, oldval);
 	local_irq_restore(flags);
 }
 
@@ -411,13 +413,15 @@ void rcu_idle_enter(void)
 void rcu_irq_exit(void)
 {
 	unsigned long flags;
+	long long oldval;
 	struct rcu_dynticks *rdtp;
 
 	local_irq_save(flags);
 	rdtp = &__get_cpu_var(rcu_dynticks);
+	oldval = rdtp->dynticks_nesting;
 	rdtp->dynticks_nesting--;
 	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
-	rcu_idle_enter_common(rdtp);
+	rcu_idle_enter_common(rdtp, oldval);
 	local_irq_restore(flags);
 }
 
@@ -431,7 +435,7 @@ void rcu_irq_exit(void)
 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 {
 	if (oldval) {
-		trace_rcu_dyntick("++=", rdtp->dynticks_nesting);
+		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
 		return;
 	}
 	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
@@ -439,10 +443,11 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 	smp_mb__after_atomic_inc();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
-	trace_rcu_dyntick("End", oldval);
+	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
 	if (!idle_cpu(smp_processor_id())) {
 		WARN_ON_ONCE(1);	/* must be idle task! */
-		trace_rcu_dyntick("Error on exit: not idle task", oldval);
+		trace_rcu_dyntick("Error on exit: not idle task",
+				  oldval, rdtp->dynticks_nesting);
 		ftrace_dump(DUMP_ALL);
 	}
 }
@@ -453,8 +458,8 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
  * Exit idle mode, in other words, -enter- the mode in which RCU
  * read-side critical sections can occur.
  *
- * We crowbar the ->dynticks_nesting field to LLONG_MAX/2 to allow for
- * the possibility of usermode upcalls messing up our count
+ * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
+ * allow for the possibility of usermode upcalls messing up our count
  * of interrupt nesting level during the busy period that is just
  * now starting.
  */
@@ -468,7 +473,7 @@ void rcu_idle_exit(void)
 	rdtp = &__get_cpu_var(rcu_dynticks);
 	oldval = rdtp->dynticks_nesting;
 	WARN_ON_ONCE(oldval != 0);
-	rdtp->dynticks_nesting = LLONG_MAX / 2;
+	rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
 	rcu_idle_exit_common(rdtp, oldval);
 	local_irq_restore(flags);
 }
@@ -2012,7 +2017,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 		rdp->nxttail[i] = &rdp->nxtlist;
 	rdp->qlen = 0;
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != LLONG_MAX / 2);
+	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
@@ -2040,7 +2045,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	rdp->qlen_last_fqs_check = 0;
 	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
-	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != LLONG_MAX / 2);
+	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
 	WARN_ON_ONCE((atomic_read(&rdp->dynticks->dynticks) & 0x1) != 1);
 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 