author		Paul E. McKenney <paul.mckenney@linaro.org>	2012-02-28 14:02:21 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-04-24 23:55:20 -0400
commit		c57afe80db4e169135eb675acc2d241e26cc064e (patch)
tree		ec011600725a2884efdca6f4c187ea7e3fa4d277 /kernel
parent		2ee3dc80660ac8285a37e662fd91b2e45c46f06a (diff)
rcu: Make RCU_FAST_NO_HZ account for pauses out of idle
Both Steven Rostedt's new idle-capable trace macros and the RCU_NONIDLE()
macro can cause RCU to momentarily pause out of idle without the rest
of the system being involved. This can cause rcu_prepare_for_idle()
to run through its state machine too quickly, which can in turn result
in needless scheduling-clock interrupts.
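For context, RCU_NONIDLE() wraps a single statement and tells RCU that the CPU
is non-idle for just that statement's duration. A minimal usage sketch follows;
RCU_NONIDLE() and smp_processor_id() are the real kernel APIs, while the trace
call and its caller are hypothetical:

	#include <linux/rcupdate.h>
	#include <linux/smp.h>

	static void example_idle_hook(void)	/* hypothetical idle-loop caller */
	{
		/*
		 * RCU considers this CPU idle here.  RCU_NONIDLE() marks it
		 * non-idle around the wrapped statement so that statement may
		 * legally use RCU; the exit/re-entry it performs is exactly
		 * the "momentary pause out of idle" described above.
		 * trace_my_idle_event() is a hypothetical trace call.
		 */
		RCU_NONIDLE(trace_my_idle_event(smp_processor_id()));
	}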
This commit therefore adds code to enable rcu_prepare_for_idle() to
distinguish between an initial entry to idle on the one hand (which needs
to advance the rcu_prepare_for_idle() state machine) and an idle reentry
due to idle-capable trace macros and RCU_NONIDLE() on the other hand
(which should avoid advancing the rcu_prepare_for_idle() state machine).
Additional state is maintained to allow the timer to be correctly reposted
when returning after a momentary pause out of idle, and even more state
is maintained to detect when new non-lazy callbacks have been enqueued
(which may require re-evaluation of the approach to idleness).
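Condensed from the rcutree_plugin.h hunk below (not a verbatim excerpt), the
resulting check at the top of rcu_prepare_for_idle() has this shape:

	/* Skip the state machine on idle re-entry, unless the momentary
	 * exit from idle posted new non-lazy callbacks. */
	if (!per_cpu(rcu_idle_first_pass, cpu) &&
	    per_cpu(rcu_nonlazy_posted, cpu) ==
	    per_cpu(rcu_nonlazy_posted_snap, cpu)) {
		/* Re-entry, no new work: repost the timer at its saved expiry. */
		if (rcu_cpu_has_callbacks(cpu))
			mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
				  per_cpu(rcu_idle_gp_timer_expires, cpu));
		return;
	}
	/* Initial pass (or new callbacks): advance the state machine. */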
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcutree.c	 2
-rw-r--r--	kernel/rcutree.h	 1
-rw-r--r--	kernel/rcutree_plugin.h	57
3 files changed, 56 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1050d6d3922c..403306b86e78 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1829,6 +1829,8 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp->qlen++;
 	if (lazy)
 		rdp->qlen_lazy++;
+	else
+		rcu_idle_count_callbacks_posted();
 
 	if (__is_kfree_rcu_offset((unsigned long)func))
 		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
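The new hook fires only for non-lazy callbacks: in this kernel, lazy callbacks
are those posted via kfree_rcu(), which merely free memory and therefore need
not hurry a grace period along. A hedged illustration of which call sites bump
the new counter (struct foo, foo_free_cb(), and retire_foo() are hypothetical):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {				/* hypothetical structure */
		int payload;
		struct rcu_head rh;
	};

	static void foo_free_cb(struct rcu_head *rhp)	/* hypothetical */
	{
		kfree(container_of(rhp, struct foo, rh));
	}

	static void retire_foo(struct foo *p, bool run_code)
	{
		if (run_code)
			/* Function callback: non-lazy, bumps rcu_nonlazy_posted. */
			call_rcu(&p->rh, foo_free_cb);
		else
			/* kfree_rcu() is lazy: no bump, so by itself it cannot
			 * restart the idle-entry state machine. */
			kfree_rcu(p, rh);
	}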
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index cdd1be0a4072..36ca28ecedc6 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -471,6 +471,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu);
 static void rcu_prepare_for_idle_init(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
 static void rcu_prepare_for_idle(int cpu);
+static void rcu_idle_count_callbacks_posted(void);
 static void print_cpu_stall_info_begin(void);
 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0f007b363dba..50c17975d4f4 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1938,6 +1938,14 @@ static void rcu_prepare_for_idle(int cpu)
 {
 }
 
+/*
+ * Don't bother keeping a running count of the number of RCU callbacks
+ * posted because CONFIG_RCU_FAST_NO_HZ=n.
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+}
+
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 /*
@@ -1981,6 +1989,10 @@ static void rcu_prepare_for_idle(int cpu)
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
 static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
+static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
+static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
+static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
+static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
 
 /*
  * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
@@ -1993,6 +2005,8 @@ static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
  */
 int rcu_needs_cpu(int cpu)
 {
+	/* Flag a new idle sojourn to the idle-entry state machine. */
+	per_cpu(rcu_idle_first_pass, cpu) = 1;
 	/* If no callbacks, RCU doesn't need the CPU. */
 	if (!rcu_cpu_has_callbacks(cpu))
 		return 0;
@@ -2096,6 +2110,26 @@ static void rcu_cleanup_after_idle(int cpu)
 static void rcu_prepare_for_idle(int cpu)
 {
 	/*
+	 * If this is an idle re-entry, for example, due to use of
+	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
+	 * loop, then don't take any state-machine actions, unless the
+	 * momentary exit from idle queued additional non-lazy callbacks.
+	 * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks
+	 * pending.
+	 */
+	if (!per_cpu(rcu_idle_first_pass, cpu) &&
+	    (per_cpu(rcu_nonlazy_posted, cpu) ==
+	     per_cpu(rcu_nonlazy_posted_snap, cpu))) {
+		if (rcu_cpu_has_callbacks(cpu))
+			mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
+				  per_cpu(rcu_idle_gp_timer_expires, cpu));
+		return;
+	}
+	per_cpu(rcu_idle_first_pass, cpu) = 0;
+	per_cpu(rcu_nonlazy_posted_snap, cpu) =
+		per_cpu(rcu_nonlazy_posted, cpu) - 1;
+
+	/*
 	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
 	 * Also reset state to avoid prejudicing later attempts.
 	 */
@@ -2127,11 +2161,15 @@ static void rcu_prepare_for_idle(int cpu)
 		per_cpu(rcu_dyntick_drain, cpu) = 0;
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
 		if (rcu_cpu_has_nonlazy_callbacks(cpu))
-			mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-				  jiffies + RCU_IDLE_GP_DELAY);
+			per_cpu(rcu_idle_gp_timer_expires, cpu) =
+				jiffies + RCU_IDLE_GP_DELAY;
 		else
-			mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
-				  jiffies + RCU_IDLE_LAZY_GP_DELAY);
+			per_cpu(rcu_idle_gp_timer_expires, cpu) =
+				jiffies + RCU_IDLE_LAZY_GP_DELAY;
+		mod_timer(&per_cpu(rcu_idle_gp_timer, cpu),
+			  per_cpu(rcu_idle_gp_timer_expires, cpu));
+		per_cpu(rcu_nonlazy_posted_snap, cpu) =
+			per_cpu(rcu_nonlazy_posted, cpu);
 		return; /* Nothing more to do immediately. */
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
@@ -2171,6 +2209,17 @@ static void rcu_prepare_for_idle(int cpu)
 		trace_rcu_prep_idle("Callbacks drained");
 }
 
+/*
+ * Keep a running count of callbacks posted so that rcu_prepare_for_idle()
+ * can detect when something out of the idle loop posts a callback.
+ * Of course, it had better do so either from a trace event designed to
+ * be called from idle or from within RCU_NONIDLE().
+ */
+static void rcu_idle_count_callbacks_posted(void)
+{
+	__this_cpu_add(rcu_nonlazy_posted, 1);
+}
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #ifdef CONFIG_RCU_CPU_STALL_INFO
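A closing note on the timer handling: the @@ -2127,11 +2161,15 @@ hunk computes
the deadline once and saves it in rcu_idle_gp_timer_expires, so that an idle
re-entry can repost the timer at the same absolute time rather than recomputing
jiffies + delay and silently pushing the wakeup later. A generic sketch of that
pattern, with hypothetical names (mod_timer(), jiffies, and the per-CPU
accessors are the real kernel APIs):

	#include <linux/timer.h>
	#include <linux/percpu.h>
	#include <linux/jiffies.h>

	static DEFINE_PER_CPU(struct timer_list, my_timer);	/* hypothetical */
	static DEFINE_PER_CPU(unsigned long, my_deadline);	/* hypothetical */

	/* Initial arming: compute the absolute deadline once and save it. */
	static void my_arm(int cpu, unsigned long delay)
	{
		per_cpu(my_deadline, cpu) = jiffies + delay;
		mod_timer(&per_cpu(my_timer, cpu), per_cpu(my_deadline, cpu));
	}

	/* Re-arming after a momentary exit from idle: reuse the saved
	 * deadline so the timeout does not drift later on every repost. */
	static void my_rearm(int cpu)
	{
		mod_timer(&per_cpu(my_timer, cpu), per_cpu(my_deadline, cpu));
	}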