author     Paul E. McKenney <paul.mckenney@linaro.org>    2011-06-25 09:36:56 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-09-29 00:38:21 -0400
commit     d4c08f2ac311a360230eef7e5395b0ec8d8f0670 (patch)
tree       06e425b8153e076fbe43b037cf4497ac50afddb7 /kernel/rcutree.c
parent     965a002b4f1a458c5dcb334ec29f48a0046faa25 (diff)
rcu: Add grace-period, quiescent-state, and call_rcu trace events
Add trace events to record grace-period start and end, quiescent states, CPUs noticing grace-period start and end, grace-period initialization, call_rcu() invocation, tasks blocking in RCU read-side critical sections, tasks exiting those same critical sections, force_quiescent_state() detection of dyntick-idle and offline CPUs, CPUs entering and leaving dyntick-idle mode (except from NMIs), CPUs coming online and going offline, and CPUs being kicked for staying in dyntick-idle mode for too long (as in many weeks, even on 32-bit systems).

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

rcu: Add the rcu flavor to callback trace events

The earlier trace events for registering RCU callbacks and for invoking them did not include the RCU flavor (rcu_bh, rcu_preempt, or rcu_sched). This commit adds the RCU flavor to those trace events.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
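[Editor's note: the trace events invoked in the diff below are defined in include/trace/events/rcu.h by a companion change that is outside this file's diffstat. As a rough sketch of what a call such as trace_rcu_grace_period(rsp->name, rsp->gpnum, "start") expands from, assuming the usual TRACE_EVENT() machinery; field layout and format string are approximate, not the verbatim definition:]

/*
 * Minimal sketch of the tracepoint behind trace_rcu_grace_period().
 * Approximates the companion change to include/trace/events/rcu.h;
 * illustrative only, not the exact upstream definition.
 */
TRACE_EVENT(rcu_grace_period,

	TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),

	TP_ARGS(rcuname, gpnum, gpevent),

	TP_STRUCT__entry(
		__field(char *, rcuname)
		__field(unsigned long, gpnum)
		__field(char *, gpevent)
	),

	TP_fast_assign(
		__entry->rcuname = rcuname;
		__entry->gpnum = gpnum;
		__entry->gpevent = gpevent;
	),

	TP_printk("%s %lu %s",
		  __entry->rcuname, __entry->gpnum, __entry->gpevent)
);

[With CONFIG_RCU_TRACE=y, the resulting events appear under the rcu: group in tracefs (for example /sys/kernel/debug/tracing/events/rcu/) and can be enabled and read through the standard ftrace interface.]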
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c | 45
1 files changed, 42 insertions(+), 3 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 4e24399cabcf..7e0282949f8a 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -166,6 +166,8 @@ void rcu_sched_qs(int cpu)
 
         rdp->passed_quiesc_completed = rdp->gpnum - 1;
         barrier();
+        if (rdp->passed_quiesc == 0)
+                trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
         rdp->passed_quiesc = 1;
 }
 
@@ -175,6 +177,8 @@ void rcu_bh_qs(int cpu)
 
         rdp->passed_quiesc_completed = rdp->gpnum - 1;
         barrier();
+        if (rdp->passed_quiesc == 0)
+                trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
         rdp->passed_quiesc = 1;
 }
 
@@ -319,6 +323,7 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
          * trust its state not to change because interrupts are disabled.
          */
         if (cpu_is_offline(rdp->cpu)) {
+                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
                 rdp->offline_fqs++;
                 return 1;
         }
@@ -359,6 +364,7 @@ void rcu_enter_nohz(void)
                 local_irq_restore(flags);
                 return;
         }
+        trace_rcu_dyntick("Start");
         /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
         smp_mb__before_atomic_inc();  /* See above. */
         atomic_inc(&rdtp->dynticks);
@@ -396,6 +402,7 @@ void rcu_exit_nohz(void)
         /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
         smp_mb__after_atomic_inc();  /* See above. */
         WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+        trace_rcu_dyntick("End");
         local_irq_restore(flags);
 }
 
@@ -501,6 +508,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
          * of the current RCU grace period.
          */
         if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
+                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
                 rdp->dynticks_fqs++;
                 return 1;
         }
@@ -683,6 +691,7 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct
          * go looking for one.
          */
         rdp->gpnum = rnp->gpnum;
+        trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
         if (rnp->qsmask & rdp->grpmask) {
                 rdp->qs_pending = 1;
                 rdp->passed_quiesc = 0;
@@ -746,6 +755,7 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
 
         /* Remember that we saw this grace-period completion. */
         rdp->completed = rnp->completed;
+        trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
 
         /*
          * If we were in an extended quiescent state, we may have
@@ -856,6 +866,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 
         /* Advance to a new grace period and initialize state. */
         rsp->gpnum++;
+        trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
         WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
         rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
         rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
@@ -870,6 +881,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
                 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
                 rcu_start_gp_per_cpu(rsp, rnp, rdp);
                 rcu_preempt_boost_start_gp(rnp);
+                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+                                            rnp->level, rnp->grplo,
+                                            rnp->grphi, rnp->qsmask);
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 return;
         }
@@ -906,6 +920,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
                 if (rnp == rdp->mynode)
                         rcu_start_gp_per_cpu(rsp, rnp, rdp);
                 rcu_preempt_boost_start_gp(rnp);
+                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+                                            rnp->level, rnp->grplo,
+                                            rnp->grphi, rnp->qsmask);
                 raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
         }
 
@@ -939,6 +956,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
         if (gp_duration > rsp->gp_max)
                 rsp->gp_max = gp_duration;
         rsp->completed = rsp->gpnum;
+        trace_rcu_grace_period(rsp->name, rsp->completed, "end");
         rsp->signaled = RCU_GP_IDLE;
         rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
 }
@@ -967,6 +985,10 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                         return;
                 }
                 rnp->qsmask &= ~mask;
+                trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
+                                                 mask, rnp->qsmask, rnp->level,
+                                                 rnp->grplo, rnp->grphi,
+                                                 !!rnp->gp_tasks);
                 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
                         /* Other bits still set at this level, so done. */
@@ -1135,11 +1157,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
                 if (rnp->qsmaskinit != 0) {
                         if (rnp != rdp->mynode)
                                 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                        else
+                                trace_rcu_grace_period(rsp->name,
+                                                       rnp->gpnum + 1 -
+                                                       !!(rnp->qsmask & mask),
+                                                       "cpuofl");
                         break;
                 }
-                if (rnp == rdp->mynode)
+                if (rnp == rdp->mynode) {
+                        trace_rcu_grace_period(rsp->name,
+                                               rnp->gpnum + 1 -
+                                               !!(rnp->qsmask & mask),
+                                               "cpuofl");
                         need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
-                else
+                } else
                         raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                 mask = rnp->grpmask;
                 rnp = rnp->parent;
@@ -1226,7 +1257,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
                 next = list->next;
                 prefetch(next);
                 debug_rcu_head_unqueue(list);
-                __rcu_reclaim(list);
+                __rcu_reclaim(rsp->name, list);
                 list = next;
                 if (++count >= bl)
                         break;
@@ -1552,6 +1583,12 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
         rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
         rdp->qlen++;
 
+        if (__is_kfree_rcu_offset((unsigned long)func))
+                trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
+                                         rdp->qlen);
+        else
+                trace_rcu_callback(rsp->name, head, rdp->qlen);
+
         /* If interrupts were disabled, don't dive into RCU core. */
         if (irqs_disabled_flags(flags)) {
                 local_irq_restore(flags);
@@ -1850,6 +1887,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
         rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 #endif /* #ifdef CONFIG_NO_HZ */
         rdp->cpu = cpu;
+        rdp->rsp = rsp;
         raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
@@ -1898,6 +1936,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
                 rdp->gpnum = rnp->completed; /* if GP in progress... */
                 rdp->completed = rnp->completed;
                 rdp->passed_quiesc_completed = rnp->completed - 1;
+                trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
         }
         raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
         rnp = rnp->parent;
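
[Editor's note: the one-line change in rcu_do_batch() above depends on __rcu_reclaim() in kernel/rcu.h gaining a flavor-name parameter, a change made by this series but outside this file's diffstat. A minimal sketch of the reworked helper, assuming the rcu_invoke_callback/rcu_invoke_kfree_callback events from the same series; details are approximate:]

/*
 * Sketch of the reworked __rcu_reclaim() from kernel/rcu.h. The "rn"
 * argument carries rsp->name so the callback-invocation trace events
 * can report which RCU flavor ran the callback; illustrative only.
 */
static inline void __rcu_reclaim(char *rn, struct rcu_head *head)
{
	unsigned long offset = (unsigned long)head->func;

	if (__is_kfree_rcu_offset(offset)) {
		trace_rcu_invoke_kfree_callback(rn, head, offset);
		kfree((void *)head - offset);	/* kfree_rcu() case */
	} else {
		trace_rcu_invoke_callback(rn, head);
		head->func(head);		/* ordinary call_rcu() case */
	}
}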