author:    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2009-11-02 16:52:27 -0500
committer: Ingo Molnar <mingo@elte.hu>  2009-11-09 22:11:17 -0500
commit:    281d150c5f8892f158747594ab49ce2823fd8b8c (patch)
tree:      e46ed3e84545ec33f99f58f4b7211121bbfe3755 /kernel/rcutree.c
parent:    7e1a2766e67a529f62c8cfba0a47d63fc4f7fa8a (diff)
rcu: Prepare for synchronization fixes: clean up for non-NO_HZ handling of ->completed counter
Impose a clear locking design on non-NO_HZ handling of the
->completed counter. This increases the distance between the
RCU and the CPU-hotplug mechanisms.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
Cc: <stable@kernel.org> # .32.x
LKML-Reference: <12571987491353-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  67
1 file changed, 30 insertions, 37 deletions
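For orientation before the patch body: prior to this change, dyntick_record_completed() and dyntick_recall_completed() existed in two copies, one pair under CONFIG_NO_HZ and a trivial pair under its #else branch (where recall simply returned rsp->completed). The patch defines a single pair outside that conditional, so both configurations store and recall the snapshot in rsp->dynticks_completed under the same locking rules. The sketch below is a minimal userspace approximation of that record/recall pattern, not kernel code; rcu_state_sketch and its pthread mutex are stand-in names for the kernel's rcu_state structure and the root rcu_node ->lock.

/*
 * Illustrative userspace sketch of the consolidated record/recall helpers.
 * "rcu_state_sketch" and its pthread mutex are stand-ins for the kernel's
 * rcu_state structure and the root rcu_node ->lock; this is not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

struct rcu_state_sketch {
        long completed;                 /* number of the last completed grace period */
        long dynticks_completed;        /* snapshot validating dynticks/CPU-offline checks */
        pthread_mutex_t lock;           /* stands in for the root rcu_node ->lock */
};

/*
 * Record "comp" for later validation of dynticks-counter manipulations and
 * CPU-offline checks.  Recording "completed - 1" invalidates all such checks,
 * which is what the start of a new grace period wants.
 */
static void record_completed(struct rcu_state_sketch *rsp, long comp)
{
        rsp->dynticks_completed = comp;
}

/* Recall the previously recorded snapshot. */
static long recall_completed(struct rcu_state_sketch *rsp)
{
        return rsp->dynticks_completed;
}

int main(void)
{
        struct rcu_state_sketch s = {
                .completed = 42,
                .lock = PTHREAD_MUTEX_INITIALIZER,
        };
        long lastcomp;

        /*
         * A caller such as force_quiescent_state() samples ->completed and
         * records the snapshot while holding the lock, so the snapshot stays
         * consistent with the grace period it was taken in.
         */
        pthread_mutex_lock(&s.lock);
        lastcomp = s.completed;
        record_completed(&s, lastcomp);
        pthread_mutex_unlock(&s.lock);

        printf("recorded snapshot for grace period %ld\n", recall_completed(&s));
        return 0;
}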
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 69f4efec3449..26249abf24dc 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -178,9 +178,29 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 	return &rsp->node[0];
 }
 
+/*
+ * Record the specified "completed" value, which is later used to validate
+ * dynticks counter manipulations and CPU-offline checks.  Specify
+ * "rsp->completed - 1" to unconditionally invalidate any future dynticks
+ * manipulations and CPU-offline checks.  Such invalidation is useful at
+ * the beginning of a grace period.
+ */
+static void dyntick_record_completed(struct rcu_state *rsp, long comp)
+{
+	rsp->dynticks_completed = comp;
+}
+
 #ifdef CONFIG_SMP
 
 /*
+ * Recall the previously recorded value of the completion for dynticks.
+ */
+static long dyntick_recall_completed(struct rcu_state *rsp)
+{
+	return rsp->dynticks_completed;
+}
+
+/*
  * If the specified CPU is offline, tell the caller that it is in
  * a quiescent state.  Otherwise, whack it with a reschedule IPI.
  * Grace periods can end up waiting on an offline CPU when that
@@ -337,28 +357,9 @@ void rcu_irq_exit(void)
 	set_need_resched();
 }
 
-/*
- * Record the specified "completed" value, which is later used to validate
- * dynticks counter manipulations.  Specify "rsp->completed - 1" to
- * unconditionally invalidate any future dynticks manipulations (which is
- * useful at the beginning of a grace period).
- */
-static void dyntick_record_completed(struct rcu_state *rsp, long comp)
-{
-	rsp->dynticks_completed = comp;
-}
-
 #ifdef CONFIG_SMP
 
 /*
- * Recall the previously recorded value of the completion for dynticks.
- */
-static long dyntick_recall_completed(struct rcu_state *rsp)
-{
-	return rsp->dynticks_completed;
-}
-
-/*
  * Snapshot the specified CPU's dynticks counter so that we can later
  * credit them with an implicit quiescent state.  Return 1 if this CPU
  * is in dynticks idle mode, which is an extended quiescent state.
@@ -421,24 +422,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 
 #else /* #ifdef CONFIG_NO_HZ */
 
-static void dyntick_record_completed(struct rcu_state *rsp, long comp)
-{
-}
-
 #ifdef CONFIG_SMP
 
-/*
- * If there are no dynticks, then the only way that a CPU can passively
- * be in a quiescent state is to be offline.  Unlike dynticks idle, which
- * is a point in time during the prior (already finished) grace period,
- * an offline CPU is always in a quiescent state, and thus can be
- * unconditionally applied.  So just return the current value of completed.
- */
-static long dyntick_recall_completed(struct rcu_state *rsp)
-{
-	return rsp->completed;
-}
-
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
 	return 0;
@@ -1146,6 +1131,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	long lastcomp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	u8 signaled;
+	u8 forcenow;
 
 	if (!rcu_gp_in_progress(rsp))
 		return;  /* No grace period in progress, nothing to force. */
@@ -1182,16 +1168,23 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 		if (rcu_process_dyntick(rsp, lastcomp,
 					dyntick_save_progress_counter))
 			goto unlock_ret;
+		/* fall into next case. */
+
+	case RCU_SAVE_COMPLETED:
 
 		/* Update state, record completion counter. */
+		forcenow = 0;
 		spin_lock(&rnp->lock);
 		if (lastcomp == rsp->completed &&
-		    rsp->signaled == RCU_SAVE_DYNTICK) {
+		    rsp->signaled == signaled) {
 			rsp->signaled = RCU_FORCE_QS;
 			dyntick_record_completed(rsp, lastcomp);
+			forcenow = signaled == RCU_SAVE_COMPLETED;
 		}
 		spin_unlock(&rnp->lock);
-		break;
+		if (!forcenow)
+			break;
+		/* fall into next case. */
 
 	case RCU_FORCE_QS:
 
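A note on the last hunk: the new RCU_SAVE_COMPLETED case sits between RCU_SAVE_DYNTICK and RCU_FORCE_QS, and the forcenow flag lets a caller that entered at RCU_SAVE_COMPLETED (presumably the starting state on non-NO_HZ builds; that definition lives outside this file) record the completion counter and fall straight into the forcing case in the same invocation, while a NO_HZ caller entering at RCU_SAVE_DYNTICK still records and waits for the next call. The following userspace sketch only illustrates that fall-through-with-flag control flow; the state names mirror the kernel's, but the locking and the real per-CPU scanning are omitted.

/*
 * Userspace sketch of the fall-through-with-flag control flow added to
 * force_quiescent_state().  State names mirror the kernel's; locking and
 * per-CPU work are omitted, so this is illustration only.
 */
#include <stdio.h>

enum fqs_state { RCU_SAVE_DYNTICK, RCU_SAVE_COMPLETED, RCU_FORCE_QS };

struct state_sketch {
        enum fqs_state signaled;        /* current phase of forcing */
        long completed;                 /* current grace-period number */
        long dynticks_completed;        /* recorded snapshot */
};

static void force_qs_sketch(struct state_sketch *rsp)
{
        enum fqs_state signaled = rsp->signaled;  /* sampled once, like rsp->signaled */
        long lastcomp = rsp->completed;
        int forcenow;

        switch (signaled) {
        case RCU_SAVE_DYNTICK:
                /* NO_HZ only: snapshot the dynticks counters here, then ... */
                /* fall into next case. */

        case RCU_SAVE_COMPLETED:
                /* Update state and record the completion counter. */
                forcenow = 0;
                if (lastcomp == rsp->completed && rsp->signaled == signaled) {
                        rsp->signaled = RCU_FORCE_QS;
                        rsp->dynticks_completed = lastcomp;
                        /* Only an RCU_SAVE_COMPLETED entry forces immediately. */
                        forcenow = signaled == RCU_SAVE_COMPLETED;
                }
                if (!forcenow)
                        break;
                /* fall into next case. */

        case RCU_FORCE_QS:
                printf("forcing quiescent states against completed=%ld\n",
                       rsp->dynticks_completed);
                break;
        }
}

int main(void)
{
        /* Non-NO_HZ-style entry: start at RCU_SAVE_COMPLETED and force in one pass. */
        struct state_sketch s = { .signaled = RCU_SAVE_COMPLETED, .completed = 42 };

        force_qs_sketch(&s);
        return 0;
}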