about summary refs log tree commit diff stats
path: root/kernel/rcutree_plugin.h
diff options
context:
space:
mode:
authorPaul E. McKenney <paul.mckenney@linaro.org>2011-06-25 09:36:56 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2011-09-29 00:38:21 -0400
commitd4c08f2ac311a360230eef7e5395b0ec8d8f0670 (patch)
tree06e425b8153e076fbe43b037cf4497ac50afddb7 /kernel/rcutree_plugin.h
parent965a002b4f1a458c5dcb334ec29f48a0046faa25 (diff)
rcu: Add grace-period, quiescent-state, and call_rcu trace events
Add trace events to record grace-period start and end, quiescent states, CPUs noticing grace-period start and end, grace-period initialization, call_rcu() invocation, tasks blocking in RCU read-side critical sections, tasks exiting those same critical sections, force_quiescent_state() detection of dyntick-idle and offline CPUs, CPUs entering and leaving dyntick-idle mode (except from NMIs), CPUs coming online and going offline, and CPUs being kicked for staying in dyntick-idle mode for too long (as in many weeks, even on 32-bit systems).

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

rcu: Add the rcu flavor to callback trace events

The earlier trace events for registering RCU callbacks and for invoking them did not include the RCU flavor (rcu_bh, rcu_preempt, or rcu_sched). This commit adds the RCU flavor to those trace events.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r-- kernel/rcutree_plugin.h | 22
1 files changed, 19 insertions, 3 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 94d9ca1e4061..bdb2e82f78d3 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -124,6 +124,8 @@ static void rcu_preempt_qs(int cpu)
124 124
125 rdp->passed_quiesc_completed = rdp->gpnum - 1; 125 rdp->passed_quiesc_completed = rdp->gpnum - 1;
126 barrier(); 126 barrier();
127 if (rdp->passed_quiesc == 0)
128 trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
127 rdp->passed_quiesc = 1; 129 rdp->passed_quiesc = 1;
128 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; 130 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
129} 131}
@@ -190,6 +192,11 @@ static void rcu_preempt_note_context_switch(int cpu)
190 if (rnp->qsmask & rdp->grpmask) 192 if (rnp->qsmask & rdp->grpmask)
191 rnp->gp_tasks = &t->rcu_node_entry; 193 rnp->gp_tasks = &t->rcu_node_entry;
192 } 194 }
195 trace_rcu_preempt_task(rdp->rsp->name,
196 t->pid,
197 (rnp->qsmask & rdp->grpmask)
198 ? rnp->gpnum
199 : rnp->gpnum + 1);
193 raw_spin_unlock_irqrestore(&rnp->lock, flags); 200 raw_spin_unlock_irqrestore(&rnp->lock, flags);
194 } else if (t->rcu_read_lock_nesting < 0 && 201 } else if (t->rcu_read_lock_nesting < 0 &&
195 t->rcu_read_unlock_special) { 202 t->rcu_read_unlock_special) {
@@ -344,6 +351,8 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
344 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ 351 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
345 np = rcu_next_node_entry(t, rnp); 352 np = rcu_next_node_entry(t, rnp);
346 list_del_init(&t->rcu_node_entry); 353 list_del_init(&t->rcu_node_entry);
354 trace_rcu_unlock_preempted_task("rcu_preempt",
355 rnp->gpnum, t->pid);
347 if (&t->rcu_node_entry == rnp->gp_tasks) 356 if (&t->rcu_node_entry == rnp->gp_tasks)
348 rnp->gp_tasks = np; 357 rnp->gp_tasks = np;
349 if (&t->rcu_node_entry == rnp->exp_tasks) 358 if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -364,10 +373,17 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
364 * we aren't waiting on any CPUs, report the quiescent state. 373 * we aren't waiting on any CPUs, report the quiescent state.
365 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock. 374 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
366 */ 375 */
367 if (empty) 376 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
368 raw_spin_unlock_irqrestore(&rnp->lock, flags); 377 trace_rcu_quiescent_state_report("preempt_rcu",
369 else 378 rnp->gpnum,
379 0, rnp->qsmask,
380 rnp->level,
381 rnp->grplo,
382 rnp->grphi,
383 !!rnp->gp_tasks);
370 rcu_report_unblock_qs_rnp(rnp, flags); 384 rcu_report_unblock_qs_rnp(rnp, flags);
385 } else
386 raw_spin_unlock_irqrestore(&rnp->lock, flags);
371 387
372#ifdef CONFIG_RCU_BOOST 388#ifdef CONFIG_RCU_BOOST
373 /* Unboost if we were boosted. */ 389 /* Unboost if we were boosted. */