author		Srivatsa Vaddagiri <vatsa@in.ibm.com>	2005-12-12 03:37:07 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-12-12 11:57:42 -0500
commit		c3f5902325d3053986e7359f706581d8f032e72f (patch)
tree		1d56b7626b83556982eafad69c597b180c2ef62b /kernel
parent		89d46b8778f65223f732d82c0166e0abba20fb1e (diff)
[PATCH] Fix RCU race in access of nohz_cpu_mask
Accessing nohz_cpu_mask before incrementing rcp->cur is racy. It can cause tickless idle CPUs to be included in rsp->cpumask, which will extend grace periods unnecessarily. Fix this race. It has been tested using extensions to the RCU torture module that force various CPUs to become idle.

Signed-off-by: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcupdate.c	18
1 file changed, 13 insertions, 5 deletions
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f45b91723dc6..48d3bce465b8 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -257,15 +257,23 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,
 
 	if (rcp->next_pending &&
 			rcp->completed == rcp->cur) {
-		/* Can't change, since spin lock held. */
-		cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
-
 		rcp->next_pending = 0;
-		/* next_pending == 0 must be visible in __rcu_process_callbacks()
-		 * before it can see new value of cur.
+		/*
+		 * next_pending == 0 must be visible in
+		 * __rcu_process_callbacks() before it can see new value of cur.
 		 */
 		smp_wmb();
 		rcp->cur++;
+
+		/*
+		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
+		 * Barrier  Otherwise it can cause tickless idle CPUs to be
+		 * included in rsp->cpumask, which will extend graceperiods
+		 * unnecessarily.
+		 */
+		smp_mb();
+		cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
+
 	}
 }
 
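
For readers skimming the hunk above, here is a minimal userspace sketch of the ordering the patch establishes in rcu_start_batch(). It is not kernel code: the cpumask type, the barrier macros, the struct layouts and the main() driver below are simplified stand-ins, and the spinlock that normally protects rcp/rsp is omitted.

#include <stdatomic.h>

#define smp_wmb()  atomic_thread_fence(memory_order_release)   /* stand-in for the kernel barrier */
#define smp_mb()   atomic_thread_fence(memory_order_seq_cst)   /* stand-in for the kernel barrier */

typedef unsigned long cpumask_t;          /* simplified stand-in for the kernel type  */

static cpumask_t cpu_online_map = 0xf;    /* pretend four CPUs are online             */
static cpumask_t nohz_cpu_mask;           /* bits set by CPUs entering tickless idle  */

struct rcu_ctrlblk { long cur, completed; int next_pending; };
struct rcu_state   { cpumask_t cpumask; };

static void rcu_start_batch_sketch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
{
	if (rcp->next_pending && rcp->completed == rcp->cur) {
		rcp->next_pending = 0;
		/* next_pending == 0 must be visible before the new value of cur. */
		smp_wmb();
		rcp->cur++;

		/*
		 * Full barrier before sampling nohz_cpu_mask: only after the
		 * new cur is visible may tickless idle CPUs be dropped from
		 * the set of CPUs the grace period waits on.
		 */
		smp_mb();
		rsp->cpumask = cpu_online_map & ~nohz_cpu_mask;   /* models cpus_andnot() */
	}
}

int main(void)
{
	struct rcu_ctrlblk rcp = { .cur = 0, .completed = 0, .next_pending = 1 };
	struct rcu_state   rsp = { 0 };

	rcu_start_batch_sketch(&rcp, &rsp);   /* rsp.cpumask now excludes nohz CPUs */
	return 0;
}

The removed code computed rsp->cpumask before bumping rcp->cur and without any barrier in between, which is exactly the window the commit message describes: a CPU that went tickless could still be left in rsp->cpumask and silently extend the grace period.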