author     Paul E. McKenney <paul.mckenney@linaro.org>    2011-06-19 01:26:31 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-09-29 00:38:13 -0400
commit     300df91ca9358f7f09298eec9503c12b32054ef7 (patch)
tree       8917e0c2ee94a5385f4968094c0e1d9a7fdc0055 /kernel
parent     29c00b4a1d9e277786120032aa8364631820d863 (diff)
rcu: Event-trace markers for computing RCU CPU utilization
This commit adds the trace_rcu_utilization() marker, which allows postprocessing scripts to compute RCU's CPU utilization, give or take event-trace overhead. Note that RCU's dyntick-idle interface is not instrumented, because event tracing requires RCU protection, which is not available in dyntick-idle mode.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
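For context: the rcu_utilization event itself lives in include/trace/events/rcu.h, which falls outside this page's 'kernel' path filter and therefore does not appear in the diff below. A minimal sketch of such a definition, using the kernel's standard TRACE_EVENT() machinery, might look like the following. This is an illustration of the general shape, not a verbatim copy of the actual header:

/* Sketch of a trace-events header defining rcu_utilization.
 * The exact field layout is an assumption; see the real
 * include/trace/events/rcu.h for the authoritative version.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu

#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RCU_H

#include <linux/tracepoint.h>

TRACE_EVENT(rcu_utilization,

	TP_PROTO(char *s),

	TP_ARGS(s),

	TP_STRUCT__entry(
		__field(char *, s)
	),

	TP_fast_assign(
		__entry->s = s;
	),

	/* Each event records just a "Start ..." or "End ..." string. */
	TP_printk("%s", s)
);

#endif /* _TRACE_RCU_H */

#include <trace/define_trace.h>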
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/rcutree.c    16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 45dcc2036a1e..2a9643bd6ae9 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -184,8 +184,10 @@ void rcu_bh_qs(int cpu)
  */
 void rcu_note_context_switch(int cpu)
 {
+	trace_rcu_utilization("Start context switch");
 	rcu_sched_qs(cpu);
 	rcu_preempt_note_context_switch(cpu);
+	trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
@@ -1275,6 +1277,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
+	trace_rcu_utilization("Start scheduler-tick");
 	if (user ||
 	    (idle_cpu(cpu) && rcu_scheduler_active &&
 	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -1308,6 +1311,7 @@ void rcu_check_callbacks(int cpu, int user)
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
 		invoke_rcu_core();
+	trace_rcu_utilization("End scheduler-tick");
 }
 
 #ifdef CONFIG_SMP
@@ -1369,10 +1373,14 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
-	if (!rcu_gp_in_progress(rsp))
+	trace_rcu_utilization("Start fqs");
+	if (!rcu_gp_in_progress(rsp)) {
+		trace_rcu_utilization("End fqs");
 		return; /* No grace period in progress, nothing to force. */
+	}
 	if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
 		rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
+		trace_rcu_utilization("End fqs");
 		return; /* Someone else is already on the job. */
 	}
 	if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
@@ -1421,11 +1429,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 		raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
 		rsp->fqs_need_gp = 0;
 		rcu_start_gp(rsp, flags); /* releases rnp->lock */
+		trace_rcu_utilization("End fqs");
 		return;
 	}
 	raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 unlock_fqs_ret:
 	raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
+	trace_rcu_utilization("End fqs");
 }
 
 #else /* #ifdef CONFIG_SMP */
@@ -1481,6 +1491,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
+	trace_rcu_utilization("Start RCU core");
 	__rcu_process_callbacks(&rcu_sched_state,
 				&__get_cpu_var(rcu_sched_data));
 	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
@@ -1488,6 +1499,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 
 	/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
 	rcu_needs_cpu_flush();
+	trace_rcu_utilization("End RCU core");
 }
 
 /*
@@ -1910,6 +1922,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;
 
+	trace_rcu_utilization("Start CPU hotplug");
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
@@ -1945,6 +1958,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	default:
 		break;
 	}
+	trace_rcu_utilization("End CPU hotplug");
 	return NOTIFY_OK;
 }
 
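Usage note: each instrumented region is bracketed by a "Start <phase>"/"End <phase>" pair, so a postprocessor can estimate RCU's CPU utilization by summing the End-minus-Start deltas and dividing by the traced interval. The sketch below is a hypothetical consumer, not part of this patch; it assumes an earlier extraction step has already reduced the per-CPU ftrace output to lines of the form "<timestamp> Start <phase>" / "<timestamp> End <phase>":

/* Hypothetical postprocessing sketch: estimate the fraction of trace
 * time spent between rcu_utilization Start/End markers. Assumes the
 * input covers a single CPU and that Start/End pairs do not nest,
 * which matches how the markers are placed in this patch.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256], marker[64];
	double t, start = 0.0, busy = 0.0, first = -1.0, last = 0.0;
	int in_rcu = 0;

	while (fgets(line, sizeof(line), stdin) != NULL) {
		/* Parse "<timestamp> Start ..." or "<timestamp> End ...". */
		if (sscanf(line, "%lf %63s", &t, marker) != 2)
			continue;	/* skip malformed lines */
		if (first < 0.0)
			first = t;	/* remember start of trace */
		last = t;		/* remember end of trace */
		if (strcmp(marker, "Start") == 0) {
			start = t;
			in_rcu = 1;
		} else if (strcmp(marker, "End") == 0 && in_rcu) {
			busy += t - start;	/* time spent in RCU */
			in_rcu = 0;
		}
	}
	if (last > first)
		printf("approximate RCU CPU utilization: %.2f%%\n",
		       100.0 * busy / (last - first));
	return 0;
}

Fed with lines such as "1000.000 Start fqs" and "1000.002 End fqs", it reports the percentage of the traced interval spent inside the marked regions, give or take the event-trace overhead noted in the commit message.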