author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2009-11-13 22:51:38 -0500
committer	Ingo Molnar <mingo@elte.hu>			2009-11-14 04:31:42 -0500
commit		560d4bc0df9a5e63b980432282d8c2bd3559ec74 (patch)
tree		1e44bf34973b217977d0b4d57d2b1ee1d4209f08 /kernel/rcutree.c
parent		8e9aa8f067d2dcd9457980ced618e1cffbcfba46 (diff)
rcu: Further cleanups of use of lastcomp
Now that a copy of the rsp->completed flag is available in all
rcu_node structures, make full use of it. It is still
legitimate to access rsp->completed while holding the root
rcu_node structure's lock, however.
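As an illustration of the pattern this enables, the sketch below uses simplified, hypothetical stand-ins for the rcu_node structure (not this patch's code, and not the kernel's actual definitions) to show a quiescent-state report comparing its snapshot against the per-node copy while holding only that rcu_node's lock:

/*
 * Illustrative sketch only: a simplified stand-in for struct rcu_node,
 * showing the per-node check that replaces reads of rsp->completed in
 * the common case.
 */
struct rnp_sketch {
	long completed;			/* per-node copy of ->completed */
	/* ->lock, ->qsmask, etc. omitted */
};

/*
 * Caller is assumed to hold the rcu_node lock, so a plain load of
 * rnp->completed suffices (no ACCESS_ONCE() needed).  Returns nonzero
 * if the grace period the caller snapshotted has already ended, in
 * which case the quiescent-state report should simply be dropped.
 */
static int gp_already_ended(struct rnp_sketch *rnp, long lastcomp)
{
	return rnp->completed != lastcomp;
}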
Also, tighten up force_quiescent_state()'s checks for end of
current grace period.
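The tightened check amounts to requiring both that the snapshotted grace period is still the current one and that it has not yet completed before ->signaled is advanced. A hedged restatement of that condition, again using simplified hypothetical types rather than the kernel's own:

/* Illustrative sketch only: simplified stand-in for struct rcu_state. */
struct rsp_sketch {
	long gpnum;			/* current grace-period number */
	long completed;			/* last completed grace period */
};

/*
 * Nonzero when the grace period snapshotted as 'lastcomp' (which was
 * rsp->gpnum - 1 at snapshot time) is still the one in progress, i.e.
 * it has neither completed nor been succeeded by a new grace period.
 */
static int snapshot_still_current(struct rsp_sketch *rsp, long lastcomp)
{
	return lastcomp + 1 == rsp->gpnum &&	/* same GP as snapshotted... */
	       lastcomp == rsp->completed;	/* ...and not yet completed. */
}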
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1258170699933-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	13
1 files changed, 6 insertions, 7 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3df04381ea3e..24bbf2ce0605 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -817,7 +817,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
 
 	rnp = rdp->mynode;
 	spin_lock_irqsave(&rnp->lock, flags);
-	if (lastcomp != ACCESS_ONCE(rsp->completed)) {
+	if (lastcomp != rnp->completed) {
 
 		/*
 		 * Someone beat us to it for this grace period, so leave.
@@ -935,7 +935,6 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
 static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
-	long lastcomp;
 	unsigned long mask;
 	struct rcu_data *rdp = rsp->rda[cpu];
 	struct rcu_node *rnp;
@@ -971,7 +970,6 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		spin_unlock(&rnp->lock); /* irqs remain disabled. */
 		rnp = rnp->parent;
 	} while (rnp != NULL);
-	lastcomp = rsp->completed;
 
 	spin_unlock_irqrestore(&rsp->onofflock, flags);
 
@@ -1145,7 +1143,7 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
 	rcu_for_each_leaf_node(rsp, rnp) {
 		mask = 0;
 		spin_lock_irqsave(&rnp->lock, flags);
-		if (rsp->completed != lastcomp) {
+		if (rnp->completed != lastcomp) {
 			spin_unlock_irqrestore(&rnp->lock, flags);
 			return 1;
 		}
@@ -1159,7 +1157,7 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
 			if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
 				mask |= bit;
 		}
-		if (mask != 0 && rsp->completed == lastcomp) {
+		if (mask != 0 && rnp->completed == lastcomp) {
 
 			/* cpu_quiet_msk() releases rnp->lock. */
 			cpu_quiet_msk(mask, rsp, rnp, flags);
@@ -1196,7 +1194,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	lastcomp = rsp->gpnum - 1;
 	signaled = rsp->signaled;
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-	if (lastcomp == rsp->gpnum) {
+	if(!rcu_gp_in_progress(rsp)) {
 		rsp->n_force_qs_ngp++;
 		spin_unlock(&rnp->lock);
 		goto unlock_ret; /* no GP in progress, time updated. */
@@ -1224,7 +1222,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 		/* Update state, record completion counter. */
 		forcenow = 0;
 		spin_lock(&rnp->lock);
-		if (lastcomp == rsp->completed &&
+		if (lastcomp + 1 == rsp->gpnum &&
+		    lastcomp == rsp->completed &&
 		    rsp->signaled == signaled) {
 			rsp->signaled = RCU_FORCE_QS;
 			rsp->completed_fqs = lastcomp;