diff options
author | Lai Jiangshan <laijs@cn.fujitsu.com> | 2008-07-06 05:23:55 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-07-18 10:07:32 -0400 |
commit | 3cac97cbb14aed00d83eb33d4613b0fe3aaea863 (patch) | |
tree | ba775ff62be34c2a7c17149516509d6c7d70f4d5 /kernel | |
parent | 5b664cb235e97afbf34db9c4d77f08ebd725335e (diff) |
rcu classic: simplify the next pending batch
Use a batch number (rcp->pending) instead of a flag (rcp->next_pending).
Previously, rcu_start_batch() needed to change this flag, so mb()s were
required for memory-access safety.
With this patch applied, rcu_start_batch() does not change this batch
number (rcp->pending); rcp->pending is managed by __rcu_process_callbacks()
only, and the troublesome mb()s are eliminated.
The code also looks simpler and clearer.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
Cc: Gautham Shenoy <ego@in.ibm.com>
Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/rcuclassic.c | 22 |
1 file changed, 8 insertions, 14 deletions
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c index 16eeeaa9d618..03726eb95193 100644 --- a/kernel/rcuclassic.c +++ b/kernel/rcuclassic.c | |||
@@ -60,12 +60,14 @@ EXPORT_SYMBOL_GPL(rcu_lock_map); | |||
60 | static struct rcu_ctrlblk rcu_ctrlblk = { | 60 | static struct rcu_ctrlblk rcu_ctrlblk = { |
61 | .cur = -300, | 61 | .cur = -300, |
62 | .completed = -300, | 62 | .completed = -300, |
63 | .pending = -300, | ||
63 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), | 64 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), |
64 | .cpumask = CPU_MASK_NONE, | 65 | .cpumask = CPU_MASK_NONE, |
65 | }; | 66 | }; |
66 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | 67 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { |
67 | .cur = -300, | 68 | .cur = -300, |
68 | .completed = -300, | 69 | .completed = -300, |
70 | .pending = -300, | ||
69 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), | 71 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), |
70 | .cpumask = CPU_MASK_NONE, | 72 | .cpumask = CPU_MASK_NONE, |
71 | }; | 73 | }; |
@@ -276,14 +278,8 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
276 | */ | 278 | */ |
277 | static void rcu_start_batch(struct rcu_ctrlblk *rcp) | 279 | static void rcu_start_batch(struct rcu_ctrlblk *rcp) |
278 | { | 280 | { |
279 | if (rcp->next_pending && | 281 | if (rcp->cur != rcp->pending && |
280 | rcp->completed == rcp->cur) { | 282 | rcp->completed == rcp->cur) { |
281 | rcp->next_pending = 0; | ||
282 | /* | ||
283 | * next_pending == 0 must be visible in | ||
284 | * __rcu_process_callbacks() before it can see new value of cur. | ||
285 | */ | ||
286 | smp_wmb(); | ||
287 | rcp->cur++; | 283 | rcp->cur++; |
288 | 284 | ||
289 | /* | 285 | /* |
@@ -441,16 +437,14 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, | |||
441 | 437 | ||
442 | /* determine batch number */ | 438 | /* determine batch number */ |
443 | rdp->batch = rcp->cur + 1; | 439 | rdp->batch = rcp->cur + 1; |
444 | /* see the comment and corresponding wmb() in | ||
445 | * the rcu_start_batch() | ||
446 | */ | ||
447 | smp_rmb(); | ||
448 | 440 | ||
449 | if (!rcp->next_pending) { | 441 | if (rcu_batch_after(rdp->batch, rcp->pending)) { |
450 | /* and start it/schedule start if it's a new batch */ | 442 | /* and start it/schedule start if it's a new batch */ |
451 | spin_lock(&rcp->lock); | 443 | spin_lock(&rcp->lock); |
452 | rcp->next_pending = 1; | 444 | if (rcu_batch_after(rdp->batch, rcp->pending)) { |
453 | rcu_start_batch(rcp); | 445 | rcp->pending = rdp->batch; |
446 | rcu_start_batch(rcp); | ||
447 | } | ||
454 | spin_unlock(&rcp->lock); | 448 | spin_unlock(&rcp->lock); |
455 | } | 449 | } |
456 | } | 450 | } |