diff options
author | Paul E. McKenney <paul.mckenney@linaro.org> | 2012-02-14 13:12:54 -0500 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2012-02-21 12:42:11 -0500 |
commit | c3ce910b1456a45fa88959af3735bd6b285e54af (patch) | |
tree | d0e74991e637dddbeb3a8ab70ecd983325133893 /kernel/rcutree_plugin.h | |
parent | 8a2ecf474d3ee8dd5d001490349e422cec52f39f (diff) |
rcu: Eliminate softirq-mediated RCU_FAST_NO_HZ idle-entry loop
If a softirq is pending, the current CPU has RCU callbacks pending,
and RCU does not immediately need anything from this CPU, then the
current code resets the RCU_FAST_NO_HZ state machine. This means that
upon exit from the subsequent softirq handler, RCU_FAST_NO_HZ will
try really hard to force RCU into dyntick-idle mode. And if the same
conditions hold after a few tries (determined by RCU_IDLE_OPT_FLUSHES),
the same situation can repeat, possibly endlessly. This scenario is
not particularly good for battery lifetime.
This commit therefore suppresses the early exit from the RCU_FAST_NO_HZ
state machine in the case where there is a softirq pending. This change
forces the state machine to retain its memory, and to enter holdoff if
this condition persists.
Reported-by: "Abou Gazala, Neven M" <neven.m.abou.gazala@intel.com>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r-- | kernel/rcutree_plugin.h | 3 |
1 file changed, 2 insertions, 1 deletion
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index f7ceadf4986e..392a65136a72 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -2133,7 +2133,8 @@ static void rcu_prepare_for_idle(int cpu) | |||
2133 | /* First time through, initialize the counter. */ | 2133 | /* First time through, initialize the counter. */ |
2134 | per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES; | 2134 | per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES; |
2135 | } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES && | 2135 | } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES && |
2136 | !rcu_pending(cpu)) { | 2136 | !rcu_pending(cpu) && |
2137 | !local_softirq_pending()) { | ||
2137 | /* Can we go dyntick-idle despite still having callbacks? */ | 2138 | /* Can we go dyntick-idle despite still having callbacks? */ |
2138 | trace_rcu_prep_idle("Dyntick with callbacks"); | 2139 | trace_rcu_prep_idle("Dyntick with callbacks"); |
2139 | per_cpu(rcu_dyntick_drain, cpu) = 0; | 2140 | per_cpu(rcu_dyntick_drain, cpu) = 0; |