author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2009-09-08 18:54:36 -0400
committer  Ingo Molnar <mingo@elte.hu>                     2009-09-17 18:04:54 -0400
commit     de078d875cc7fc709f7818f26d38389c04369826
tree       0e7ed4d9d11ae707cc3b4e1e9d511a6d78e9e29d
parent     de55a8958f6e3ef5ce5f0971b80bd44bfcac7cf1
rcu: Need to update rnp->gpnum if preemptable RCU is to be reliable
Without this patch, tasks preempted in RCU read-side critical
sections can fail to block the grace period, given that
rnp->gpnum is used to determine which rnp->blocked_tasks[]
element the preempted task is enqueued on.
Before the patch, rnp->gpnum is always zero, so preempted tasks
are always enqueued on rnp->blocked_tasks[0], which is correct
only when the current CPU has not checked into the current
grace period and the grace-period number is even, or,
similarly, if the current CPU -has- checked into the current
grace period and the grace-period number is odd.
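For illustration only, the parity rule described above can be sketched in standalone C.
The names blocked_tasks_index(), struct fake_rcu_node, and the simplified fields below
are hypothetical stand-ins, not the kernel's actual preemptible-RCU code; the sketch
assumes that "checked in" means the CPU's bit has been cleared from rnp->qsmask.

    #include <stdio.h>

    /* Illustrative stand-in for the rcu_node bookkeeping; not the real layout. */
    struct fake_rcu_node {
            unsigned long gpnum;    /* grace-period number seen by this node */
            unsigned long qsmask;   /* CPUs still needing to report a QS     */
    };

    /*
     * Pick which blocked_tasks[] element a preempted reader belongs on.
     * Per the commit message, element 0 is correct only when the CPU has
     * not yet checked in and gpnum is even, or when it has checked in and
     * gpnum is odd -- i.e. the index is the XOR of the two conditions.
     */
    static int blocked_tasks_index(struct fake_rcu_node *rnp, unsigned long grpmask)
    {
            int checked_in = !(rnp->qsmask & grpmask);
            int gp_is_odd  = (int)(rnp->gpnum & 0x1);

            return checked_in ^ gp_is_odd;
    }

    int main(void)
    {
            struct fake_rcu_node rnp = { .gpnum = 0, .qsmask = 0x1 };

            /* Stale gpnum of zero (even), CPU not yet checked in: index 0. */
            printf("stale gpnum, not checked in:   %d\n",
                   blocked_tasks_index(&rnp, 0x1));

            /* With rnp->gpnum updated (odd) as rcu_start_gp() now does,
             * the same preempted task lands on element 1 instead. */
            rnp.gpnum = 1;
            printf("updated gpnum, not checked in: %d\n",
                   blocked_tasks_index(&rnp, 0x1));
            return 0;
    }

The point of the sketch: with rnp->gpnum stuck at zero, the index is computed from a
stale parity, so a task that should block the current (odd-numbered) grace period can
be queued on the wrong list and fail to hold that grace period up.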
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josht@linux.vnet.ibm.com
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
LKML-Reference: <12524504771622-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  kernel/rcutree.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6b11b07cfe7f..c634a92d1217 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -632,6 +632,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
         /* Special-case the common single-level case. */
         if (NUM_RCU_NODES == 1) {
                 rnp->qsmask = rnp->qsmaskinit;
+                rnp->gpnum = rsp->gpnum;
                 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
                 spin_unlock_irqrestore(&rnp->lock, flags);
                 return;
@@ -657,8 +658,10 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
          */
 
         rnp_end = rsp->level[NUM_RCU_LVLS - 1];
-        for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
+        for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) {
                 rnp_cur->qsmask = rnp_cur->qsmaskinit;
+                rnp->gpnum = rsp->gpnum;
+        }
 
         /*
          * Now set up the leaf nodes.  Here we must be careful.  First,
@@ -679,6 +682,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
         for (; rnp_cur < rnp_end; rnp_cur++) {
                 spin_lock(&rnp_cur->lock);      /* irqs already disabled. */
                 rnp_cur->qsmask = rnp_cur->qsmaskinit;
+                rnp->gpnum = rsp->gpnum;
                 spin_unlock(&rnp_cur->lock);    /* irqs already disabled. */
         }
 