diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2010-01-04 18:09:02 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-01-13 03:06:02 -0500 |
commit | 07079d5357a4d53c2b13126c4a38fb40e6e04966 (patch) | |
tree | 1a97552a220a9bbdfceb1cda01c1ee5b92ce75bd /kernel/rcutree.c | |
parent | 559569acf94f538b56bd6eead80b439d6a78cdff (diff) |
rcu: Prohibit starting new grace periods while forcing quiescent states
Reduce the number and variety of race conditions by prohibiting
the start of a new grace period while force_quiescent_state() is
active. A new fqs_active flag in the rcu_state structure is used
to trace whether or not force_quiescent_state() is active, and
this new flag is tested by rcu_start_gp(). If the CPU that
closed out the last grace period needs another grace period,
this new grace period may be delayed up to one scheduling-clock
tick, but it will eventually get started.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <126264655052-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r-- | kernel/rcutree.c | 31 |
1 file changed, 17 insertions, 14 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index d42ad30c4d70..41688ff60e07 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -659,7 +659,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
659 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | 659 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; |
660 | struct rcu_node *rnp = rcu_get_root(rsp); | 660 | struct rcu_node *rnp = rcu_get_root(rsp); |
661 | 661 | ||
662 | if (!cpu_needs_another_gp(rsp, rdp)) { | 662 | if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) { |
663 | if (rnp->completed == rsp->completed) { | 663 | if (rnp->completed == rsp->completed) { |
664 | spin_unlock_irqrestore(&rnp->lock, flags); | 664 | spin_unlock_irqrestore(&rnp->lock, flags); |
665 | return; | 665 | return; |
@@ -1195,6 +1195,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1195 | struct rcu_node *rnp = rcu_get_root(rsp); | 1195 | struct rcu_node *rnp = rcu_get_root(rsp); |
1196 | u8 signaled; | 1196 | u8 signaled; |
1197 | u8 forcenow; | 1197 | u8 forcenow; |
1198 | u8 gpdone; | ||
1198 | 1199 | ||
1199 | if (!rcu_gp_in_progress(rsp)) | 1200 | if (!rcu_gp_in_progress(rsp)) |
1200 | return; /* No grace period in progress, nothing to force. */ | 1201 | return; /* No grace period in progress, nothing to force. */ |
@@ -1206,15 +1207,16 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1206 | (long)(rsp->jiffies_force_qs - jiffies) >= 0) | 1207 | (long)(rsp->jiffies_force_qs - jiffies) >= 0) |
1207 | goto unlock_fqs_ret; /* no emergency and done recently. */ | 1208 | goto unlock_fqs_ret; /* no emergency and done recently. */ |
1208 | rsp->n_force_qs++; | 1209 | rsp->n_force_qs++; |
1209 | spin_lock(&rnp->lock); | 1210 | spin_lock(&rnp->lock); /* irqs already disabled */ |
1210 | lastcomp = rsp->gpnum - 1; | 1211 | lastcomp = rsp->gpnum - 1; |
1211 | signaled = rsp->signaled; | 1212 | signaled = rsp->signaled; |
1212 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 1213 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
1213 | if(!rcu_gp_in_progress(rsp)) { | 1214 | if(!rcu_gp_in_progress(rsp)) { |
1214 | rsp->n_force_qs_ngp++; | 1215 | rsp->n_force_qs_ngp++; |
1215 | spin_unlock(&rnp->lock); | 1216 | spin_unlock(&rnp->lock); /* irqs remain disabled */ |
1216 | goto unlock_fqs_ret; /* no GP in progress, time updated. */ | 1217 | goto unlock_fqs_ret; /* no GP in progress, time updated. */ |
1217 | } | 1218 | } |
1219 | rsp->fqs_active = 1; | ||
1218 | switch (signaled) { | 1220 | switch (signaled) { |
1219 | case RCU_GP_IDLE: | 1221 | case RCU_GP_IDLE: |
1220 | case RCU_GP_INIT: | 1222 | case RCU_GP_INIT: |
@@ -1223,15 +1225,16 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1223 | 1225 | ||
1224 | case RCU_SAVE_DYNTICK: | 1226 | case RCU_SAVE_DYNTICK: |
1225 | 1227 | ||
1226 | spin_unlock(&rnp->lock); | 1228 | spin_unlock(&rnp->lock); /* irqs remain disabled */ |
1227 | if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) | 1229 | if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) |
1228 | break; /* So gcc recognizes the dead code. */ | 1230 | break; /* So gcc recognizes the dead code. */ |
1229 | 1231 | ||
1230 | /* Record dyntick-idle state. */ | 1232 | /* Record dyntick-idle state. */ |
1231 | if (rcu_process_dyntick(rsp, lastcomp, | 1233 | gpdone = rcu_process_dyntick(rsp, lastcomp, |
1232 | dyntick_save_progress_counter)) | 1234 | dyntick_save_progress_counter); |
1233 | goto unlock_fqs_ret; | 1235 | spin_lock(&rnp->lock); /* irqs already disabled */ |
1234 | spin_lock(&rnp->lock); | 1236 | if (gpdone) |
1237 | break; | ||
1235 | /* fall into next case. */ | 1238 | /* fall into next case. */ |
1236 | 1239 | ||
1237 | case RCU_SAVE_COMPLETED: | 1240 | case RCU_SAVE_COMPLETED: |
@@ -1252,17 +1255,17 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1252 | case RCU_FORCE_QS: | 1255 | case RCU_FORCE_QS: |
1253 | 1256 | ||
1254 | /* Check dyntick-idle state, send IPI to laggarts. */ | 1257 | /* Check dyntick-idle state, send IPI to laggarts. */ |
1255 | spin_unlock(&rnp->lock); | 1258 | spin_unlock(&rnp->lock); /* irqs remain disabled */ |
1256 | if (rcu_process_dyntick(rsp, rsp->completed_fqs, | 1259 | gpdone = rcu_process_dyntick(rsp, rsp->completed_fqs, |
1257 | rcu_implicit_dynticks_qs)) | 1260 | rcu_implicit_dynticks_qs); |
1258 | goto unlock_fqs_ret; | ||
1259 | 1261 | ||
1260 | /* Leave state in case more forcing is required. */ | 1262 | /* Leave state in case more forcing is required. */ |
1261 | 1263 | ||
1262 | spin_lock(&rnp->lock); | 1264 | spin_lock(&rnp->lock); /* irqs already disabled */ |
1263 | break; | 1265 | break; |
1264 | } | 1266 | } |
1265 | spin_unlock(&rnp->lock); | 1267 | rsp->fqs_active = 0; |
1268 | spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
1266 | unlock_fqs_ret: | 1269 | unlock_fqs_ret: |
1267 | spin_unlock_irqrestore(&rsp->fqslock, flags); | 1270 | spin_unlock_irqrestore(&rsp->fqslock, flags); |
1268 | } | 1271 | } |