diff options
author | Paul E. McKenney <paul.mckenney@linaro.org> | 2012-05-03 18:38:10 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2012-05-09 17:26:57 -0400 |
commit | 98248a0e24327bc64eb7518145c44bff7bebebc3 (patch) | |
tree | 5ec88465d949558e42e83fd8321c725614c00269 /kernel | |
parent | 21e52e15666323078b8517a4312712579176b56f (diff) |
rcu: Explicitly initialize RCU_FAST_NO_HZ per-CPU variables
The current initialization of the RCU_FAST_NO_HZ per-CPU variables makes
needless and fragile assumptions about the initial value of things like
the jiffies counter. This commit therefore explicitly initializes all of
them that are better started with a non-zero value. It also adds some
comments describing the per-CPU state variables.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/rcutree_plugin.h | 20 |
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index bbb43cad755e..7082ea93566f 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -1986,12 +1986,19 @@ static void rcu_idle_count_callbacks_posted(void) | |||
1986 | #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ | 1986 | #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ |
1987 | #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ | 1987 | #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ |
1988 | 1988 | ||
1989 | /* Loop counter for rcu_prepare_for_idle(). */ | ||
1989 | static DEFINE_PER_CPU(int, rcu_dyntick_drain); | 1990 | static DEFINE_PER_CPU(int, rcu_dyntick_drain); |
1991 | /* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */ | ||
1990 | static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); | 1992 | static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); |
1993 | /* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */ | ||
1991 | static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer); | 1994 | static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer); |
1995 | /* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */ | ||
1992 | static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires); | 1996 | static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires); |
1997 | /* Enable special processing on first attempt to enter dyntick-idle mode. */ | ||
1993 | static DEFINE_PER_CPU(bool, rcu_idle_first_pass); | 1998 | static DEFINE_PER_CPU(bool, rcu_idle_first_pass); |
1999 | /* Running count of non-lazy callbacks posted, never decremented. */ | ||
1994 | static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted); | 2000 | static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted); |
2001 | /* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */ | ||
1995 | static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap); | 2002 | static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap); |
1996 | 2003 | ||
1997 | /* | 2004 | /* |
@@ -2092,8 +2099,11 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in) | |||
2092 | */ | 2099 | */ |
2093 | static void rcu_prepare_for_idle_init(int cpu) | 2100 | static void rcu_prepare_for_idle_init(int cpu) |
2094 | { | 2101 | { |
2102 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; | ||
2095 | setup_timer(&per_cpu(rcu_idle_gp_timer, cpu), | 2103 | setup_timer(&per_cpu(rcu_idle_gp_timer, cpu), |
2096 | rcu_idle_gp_timer_func, cpu); | 2104 | rcu_idle_gp_timer_func, cpu); |
2105 | per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1; | ||
2106 | per_cpu(rcu_idle_first_pass, cpu) = 1; | ||
2097 | } | 2107 | } |
2098 | 2108 | ||
2099 | /* | 2109 | /* |
@@ -2232,10 +2242,12 @@ static void rcu_prepare_for_idle(int cpu) | |||
2232 | } | 2242 | } |
2233 | 2243 | ||
2234 | /* | 2244 | /* |
2235 | * Keep a running count of callbacks posted so that rcu_prepare_for_idle() | 2245 | * Keep a running count of the number of non-lazy callbacks posted |
2236 | * can detect when something out of the idle loop posts a callback. | 2246 | * on this CPU. This running counter (which is never decremented) allows |
2237 | * Of course, it had better do so either from a trace event designed to | 2247 | * rcu_prepare_for_idle() to detect when something out of the idle loop |
2238 | * be called from idle or from within RCU_NONIDLE(). | 2248 | * posts a callback, even if an equal number of callbacks are invoked. |
2249 | * Of course, callbacks should only be posted from within a trace event | ||
2250 | * designed to be called from idle or from within RCU_NONIDLE(). | ||
2239 | */ | 2251 | */ |
2240 | static void rcu_idle_count_callbacks_posted(void) | 2252 | static void rcu_idle_count_callbacks_posted(void) |
2241 | { | 2253 | { |