author     Paul E. McKenney <paul.mckenney@linaro.org>  2011-11-22 17:58:03 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-12-11 13:32:00 -0500
commit     433cdddcd9ac5558068edd7f8d4707a70f7710f5 (patch)
tree       7f3686eb64c9ee3a239e5920df588c70837c5637 /kernel/rcutree_plugin.h
parent     045fb9315a2129023d70a0eecf0942e18fca4fcd (diff)
rcu: Add tracing for RCU_FAST_NO_HZ
This commit adds trace_rcu_prep_idle(), which is invoked from rcu_prepare_for_idle() and rcu_wake_cpu() to trace RCU's attempts to force CPUs into dyntick-idle mode.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
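For context, trace_rcu_prep_idle() is the call generated by a TRACE_EVENT() declaration; the declaration itself lands in a companion change to include/trace/events/rcu.h, so what follows is only a sketch of the shape such a declaration takes, assuming the event carries nothing but a reason string:

/*
 * Sketch of a TRACE_EVENT() declaration matching the calls in the
 * diff below; illustrative only, not the committed declaration.
 */
TRACE_EVENT(rcu_prep_idle,

	/* One human-readable reason string per call site. */
	TP_PROTO(char *reason),

	TP_ARGS(reason),

	TP_STRUCT__entry(
		__field(char *, reason)	/* Points at a static string. */
	),

	TP_fast_assign(
		__entry->reason = reason;
	),

	/* Emit just the reason string into the trace buffer. */
	TP_printk("%s", __entry->reason)
);

Each call site in the diff passes a short static string ("No callbacks", "In holdoff", and so on), so a trace reader can see why RCU did or did not let the CPU enter dyntick-idle mode.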
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h | 18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index b70ca8cc52e1..6467f5669ab7 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2031,10 +2031,13 @@ static void rcu_prepare_for_idle(int cpu)
 	/* If no callbacks or in the holdoff period, enter dyntick-idle. */
 	if (!rcu_cpu_has_callbacks(cpu)) {
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+		trace_rcu_prep_idle("No callbacks");
 		return;
 	}
-	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
+	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+		trace_rcu_prep_idle("In holdoff");
 		return;
+	}
 
 	/* Check and update the rcu_dyntick_drain sequencing. */
 	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
@@ -2044,9 +2047,11 @@ static void rcu_prepare_for_idle(int cpu)
 		/* We have hit the limit, so time to give up. */
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
 		if (!rcu_pending(cpu)) {
+			trace_rcu_prep_idle("Dyntick with callbacks");
 			per_cpu(rcu_awake_at_gp_end, cpu) = 1;
 			return; /* Nothing to do immediately. */
 		}
+		trace_rcu_prep_idle("Begin holdoff");
 		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
 		return;
 	}
@@ -2073,9 +2078,15 @@ static void rcu_prepare_for_idle(int cpu)
 		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
 	}
 
-	/* If RCU callbacks are still pending, RCU still needs this CPU. */
-	if (c)
+	/*
+	 * If RCU callbacks are still pending, RCU still needs this CPU.
+	 * So try forcing the callbacks through the grace period.
+	 */
+	if (c) {
+		trace_rcu_prep_idle("More callbacks");
 		invoke_rcu_core();
+	} else
+		trace_rcu_prep_idle("Callbacks drained");
 }
 
 /*
@@ -2085,6 +2096,7 @@ static void rcu_prepare_for_idle(int cpu)
  */
 static void rcu_wake_cpu(void *unused)
 {
+	trace_rcu_prep_idle("CPU awakened at GP end");
 	invoke_rcu_core();
 }
 
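As a usage sketch, and assuming the TRACE_EVENT() declaration outlined above: the tracepoint machinery also generates register_trace_rcu_prep_idle() and unregister_trace_rcu_prep_idle() hooks, so a debugging module could attach its own probe (the my_-prefixed names here are hypothetical):

#include <linux/module.h>
#include <linux/init.h>
#include <trace/events/rcu.h>

/* Hypothetical probe: log every rcu_prep_idle event to the kernel log. */
static void my_prep_idle_probe(void *ignore, char *reason)
{
	pr_info("rcu_prep_idle: %s\n", reason);
}

static int __init my_probe_init(void)
{
	/* Registration hook generated by TRACE_EVENT(rcu_prep_idle, ...). */
	return register_trace_rcu_prep_idle(my_prep_idle_probe, NULL);
}

static void __exit my_probe_exit(void)
{
	unregister_trace_rcu_prep_idle(my_prep_idle_probe, NULL);
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");

No module is required just to see the events, though: enabling the event under /sys/kernel/debug/tracing/events/rcu/ makes the reason strings appear in the normal ftrace buffer.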