about summary refs log tree commit diff stats
path: root/kernel/rcutree_plugin.h
diff options
context:
space:
mode:
authorPaul E. McKenney <paul.mckenney@linaro.org>2012-02-28 14:02:21 -0500
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-04-24 23:55:20 -0400
commitc57afe80db4e169135eb675acc2d241e26cc064e (patch)
treeec011600725a2884efdca6f4c187ea7e3fa4d277 /kernel/rcutree_plugin.h
parent2ee3dc80660ac8285a37e662fd91b2e45c46f06a (diff)
rcu: Make RCU_FAST_NO_HZ account for pauses out of idle
Both Steven Rostedt's new idle-capable trace macros and the RCU_NONIDLE() macro can cause RCU to momentarily pause out of idle without the rest of the system being involved. This can cause rcu_prepare_for_idle() to run through its state machine too quickly, which can in turn result in needless scheduling-clock interrupts. This commit therefore adds code to enable rcu_prepare_for_idle() to distinguish between an initial entry to idle on the one hand (which needs to advance the rcu_prepare_for_idle() state machine) and an idle reentry due to idle-capable trace macros and RCU_NONIDLE() on the other hand (which should avoid advancing the rcu_prepare_for_idle() state machine). Additional state is maintained to allow the timer to be correctly reposted when returning after a momentary pause out of idle, and even more state is maintained to detect when new non-lazy callbacks have been enqueued (which may require re-evaluation of the approach to idleness). Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--kernel/rcutree_plugin.h57
1 file changed, 53 insertions, 4 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0f007b363dba..50c17975d4f4 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1938,6 +1938,14 @@ static void rcu_prepare_for_idle(int cpu)
1938{ 1938{
1939} 1939}
1940 1940
1941/*
1942 * Don't bother keeping a running count of the number of RCU callbacks
1943 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1944 */
1945static void rcu_idle_count_callbacks_posted(void)
1946{
1947}
1948
1941#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 1949#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1942 1950
1943/* 1951/*
@@ -1981,6 +1989,10 @@ static void rcu_prepare_for_idle(int cpu)
1981static DEFINE_PER_CPU(int, rcu_dyntick_drain); 1989static DEFINE_PER_CPU(int, rcu_dyntick_drain);
1982static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); 1990static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
1983static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer); 1991static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
1992static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
1993static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
1994static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
1995static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
1984 1996
1985/* 1997/*
1986 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no 1998 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
@@ -1993,6 +2005,8 @@ static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
1993 */ 2005 */
1994int rcu_needs_cpu(int cpu) 2006int rcu_needs_cpu(int cpu)
1995{ 2007{
2008 /* Flag a new idle sojourn to the idle-entry state machine. */
2009 per_cpu(rcu_idle_first_pass, cpu) = 1;
1996 /* If no callbacks, RCU doesn't need the CPU. */ 2010 /* If no callbacks, RCU doesn't need the CPU. */
1997 if (!rcu_cpu_has_callbacks(cpu)) 2011 if (!rcu_cpu_has_callbacks(cpu))
1998 return 0; 2012 return 0;
@@ -2096,6 +2110,26 @@ static void rcu_cleanup_after_idle(int cpu)
2096static void rcu_prepare_for_idle(int cpu) 2110static void rcu_prepare_for_idle(int cpu)
2097{ 2111{
2098 /* 2112 /*
2113 * If this is an idle re-entry, for example, due to use of
2114 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
2115 * loop, then don't take any state-machine actions, unless the
2116 * momentary exit from idle queued additional non-lazy callbacks.
2117 * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
2118 * pending.
2119 */
2120 if (!per_cpu(rcu_idle_first_pass, cpu) &&
2121 (per_cpu(rcu_nonlazy_posted, cpu) ==
2122 per_cpu(rcu_nonlazy_posted_snap, cpu))) {
2123 if (rcu_cpu_has_callbacks(cpu))
2124 mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
2125 per_cpu(rcu_idle_gp_timer_expires, cpu));
2126 return;
2127 }
2128 per_cpu(rcu_idle_first_pass, cpu) = 0;
2129 per_cpu(rcu_nonlazy_posted_snap, cpu) =
2130 per_cpu(rcu_nonlazy_posted, cpu) - 1;
2131
2132 /*
2099 * If there are no callbacks on this CPU, enter dyntick-idle mode. 2133 * If there are no callbacks on this CPU, enter dyntick-idle mode.
2100 * Also reset state to avoid prejudicing later attempts. 2134 * Also reset state to avoid prejudicing later attempts.
2101 */ 2135 */
@@ -2127,11 +2161,15 @@ static void rcu_prepare_for_idle(int cpu)
2127 per_cpu(rcu_dyntick_drain, cpu) = 0; 2161 per_cpu(rcu_dyntick_drain, cpu) = 0;
2128 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; 2162 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
2129 if (rcu_cpu_has_nonlazy_callbacks(cpu)) 2163 if (rcu_cpu_has_nonlazy_callbacks(cpu))
2130 mod_timer(&per_cpu(rcu_idle_gp_timer, cpu), 2164 per_cpu(rcu_idle_gp_timer_expires, cpu) =
2131 jiffies + RCU_IDLE_GP_DELAY); 2165 jiffies + RCU_IDLE_GP_DELAY;
2132 else 2166 else
2133 mod_timer(&per_cpu(rcu_idle_gp_timer, cpu), 2167 per_cpu(rcu_idle_gp_timer_expires, cpu) =
2134 jiffies + RCU_IDLE_LAZY_GP_DELAY); 2168 jiffies + RCU_IDLE_LAZY_GP_DELAY;
2169 mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
2170 per_cpu(rcu_idle_gp_timer_expires, cpu));
2171 per_cpu(rcu_nonlazy_posted_snap, cpu) =
2172 per_cpu(rcu_nonlazy_posted, cpu);
2135 return; /* Nothing more to do immediately. */ 2173 return; /* Nothing more to do immediately. */
2136 } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { 2174 } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
2137 /* We have hit the limit, so time to give up. */ 2175 /* We have hit the limit, so time to give up. */
@@ -2171,6 +2209,17 @@ static void rcu_prepare_for_idle(int cpu)
2171 trace_rcu_prep_idle("Callbacks drained"); 2209 trace_rcu_prep_idle("Callbacks drained");
2172} 2210}
2173 2211
2212/*
2213 * Keep a running count of callbacks posted so that rcu_prepare_for_idle()
2214 * can detect when something out of the idle loop posts a callback.
2215 * Of course, it had better do so either from a trace event designed to
2216 * be called from idle or from within RCU_NONIDLE().
2217 */
2218static void rcu_idle_count_callbacks_posted(void)
2219{
2220 __this_cpu_add(rcu_nonlazy_posted, 1);
2221}
2222
2174#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 2223#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
2175 2224
2176#ifdef CONFIG_RCU_CPU_STALL_INFO 2225#ifdef CONFIG_RCU_CPU_STALL_INFO