author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2010-04-26 00:04:29 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2010-05-10 14:08:35 -0400
commit     77e38ed347162423c6b72e06c865a121081c2bb6
tree       328b746af71b47c4af97e6ea06dc24b12528a955 /kernel/rcutree_plugin.h
parent     d14aada8e20bdf81ffd43f433b123972cf575b32
rcu: RCU_FAST_NO_HZ must check RCU dyntick state
The current version of RCU_FAST_NO_HZ reproduces the old CLASSIC_RCU
dyntick-idle bug, as it fails to detect CPUs that have interrupted
or NMIed out of dyntick-idle mode. Fix this by making rcu_needs_cpu()
check the state in the per-CPU rcu_dynticks variables, thus correctly
detecting the dyntick-idle state from an RCU perspective.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
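Background for the check the patch adds: in this era of Tree RCU, a CPU's per-CPU rcu_dynticks ->dynticks counter is incremented on every entry to and exit from dyntick-idle mode, so an odd value means the CPU is currently not in dyntick-idle; ->dynticks_nmi is likewise odd while the CPU is inside an NMI taken from dyntick-idle. The patch samples both counters for each other online CPU and falls back to rcu_needs_cpu_quick_check() if either is odd, because such a CPU may still be running RCU read-side critical sections, something the old nohz_cpu_mask-based test could not see. The following stand-alone user-space sketch illustrates only that odd/even test; the struct, the helper cpu_is_non_idle(), and the sample counter values are illustrative assumptions, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative snapshot of the two per-CPU counters the patch samples. */
struct rcu_dynticks_snap {
        int dynticks;      /* odd => CPU is not in dyntick-idle */
        int dynticks_nmi;  /* odd => CPU is in an NMI taken from dyntick-idle */
};

/*
 * Same predicate as the patch's
 * "((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)":
 * true if the CPU might still execute RCU read-side critical sections.
 */
static bool cpu_is_non_idle(const struct rcu_dynticks_snap *s)
{
        return ((s->dynticks & 0x1) != 0) || ((s->dynticks_nmi & 0x1) != 0);
}

int main(void)
{
        struct rcu_dynticks_snap idle     = { .dynticks = 4, .dynticks_nmi = 0 };
        struct rcu_dynticks_snap running  = { .dynticks = 5, .dynticks_nmi = 0 };
        struct rcu_dynticks_snap nmi_idle = { .dynticks = 4, .dynticks_nmi = 1 };

        printf("idle CPU non-idle?          %d\n", cpu_is_non_idle(&idle));     /* 0 */
        printf("running CPU non-idle?       %d\n", cpu_is_non_idle(&running));  /* 1 */
        printf("NMI-from-idle CPU non-idle? %d\n", cpu_is_non_idle(&nmi_idle)); /* 1 */
        return 0;
}

This is also why the loop in the diff below switches from walking the complement of nohz_cpu_mask to for_each_online_cpu(): per the commit message, a CPU that has interrupted or NMIed out of dyntick-idle is not reflected in the mask, but it does show up as an odd value in its per-CPU counters.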
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--   kernel/rcutree_plugin.h | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0ae2339ab04d..9b18227e86e8 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1051,6 +1051,8 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
 int rcu_needs_cpu(int cpu)
 {
         int c = 0;
+        int snap;
+        int snap_nmi;
         int thatcpu;
 
         /* Check for being in the holdoff period. */
@@ -1058,12 +1060,18 @@ int rcu_needs_cpu(int cpu)
                 return rcu_needs_cpu_quick_check(cpu);
 
         /* Don't bother unless we are the last non-dyntick-idle CPU. */
-        for_each_cpu_not(thatcpu, nohz_cpu_mask)
-                if (cpu_online(thatcpu) && thatcpu != cpu) {
+        for_each_online_cpu(thatcpu) {
+                if (thatcpu == cpu)
+                        continue;
+                snap = per_cpu(rcu_dynticks, thatcpu)->dynticks;
+                snap_nmi = per_cpu(rcu_dynticks, thatcpu)->dynticks_nmi;
+                smp_mb(); /* Order sampling of snap with end of grace period. */
+                if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
                         per_cpu(rcu_dyntick_drain, cpu) = 0;
                         per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
                         return rcu_needs_cpu_quick_check(cpu);
                 }
+        }
 
         /* Check and update the rcu_dyntick_drain sequencing. */
         if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {