author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2009-11-13 01:35:03 -0500
committer Ingo Molnar <mingo@elte.hu>                      2009-11-13 04:18:36 -0500
commit    b32e9eb6ad29572b4451847d0e8227c9be2b6d69
tree      d697f2c9dcf59d2defb6693866bf33cc45307dca /kernel/rcutree.c
parent    0e0fc1c23e04c15e814763f2b366e92d87d8b95d
rcu: Accelerate callback processing on CPUs not detecting GP end
An earlier fix for a race resulted in a situation where the CPUs other
than the CPU that detected the end of the grace period would not process
their callbacks until the next grace period started.  This means that
these other CPUs would unnecessarily demand that an extra grace period
be started.  This patch eliminates this extra grace period and speeds
callback processing by propagating rsp->completed to the rcu_node
structures in the case where the CPU detecting the end of the grace
period sees no reason to start a new grace period.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1258094104417-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--    kernel/rcutree.c    18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d8024192c73b..b4efb9e36680 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -676,7 +676,23 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	if (!cpu_needs_another_gp(rsp, rdp)) {
-		spin_unlock_irqrestore(&rnp->lock, flags);
+		if (rnp->completed == rsp->completed) {
+			spin_unlock_irqrestore(&rnp->lock, flags);
+			return;
+		}
+		spin_unlock(&rnp->lock);	 /* irqs remain disabled. */
+
+		/*
+		 * Propagate new ->completed value to rcu_node structures
+		 * so that other CPUs don't have to wait until the start
+		 * of the next grace period to process their callbacks.
+		 */
+		rcu_for_each_node_breadth_first(rsp, rnp) {
+			spin_lock(&rnp->lock);	 /* irqs already disabled. */
+			rnp->completed = rsp->completed;
+			spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		}
+		local_irq_restore(flags);
 		return;
 	}
 
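To make the locking pattern in this hunk easier to follow outside the
kernel, here is a minimal userspace sketch of the same idea: a shared
->completed counter is pushed down to every tree node under each node's
own lock, so CPUs hanging off any node observe the grace period's end
without demanding that a new grace period be started.  This is an
illustration only, not the kernel code: struct gp_state, struct gp_node,
and propagate_completed() are hypothetical stand-ins, and pthread
mutexes replace the kernel's irq-disabling spinlocks.

/*
 * Simplified model of propagating a completed-grace-period counter
 * breadth-first through a tree of nodes.  Hypothetical names; pthread
 * mutexes stand in for irq-disabling spinlocks.
 * Build with: cc -o gp gp.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

#define NNODES 4

struct gp_node {
	pthread_mutex_t lock;
	unsigned long completed;	/* last GP this node saw end */
};

struct gp_state {
	struct gp_node node[NNODES];	/* node[0] is the root */
	unsigned long completed;	/* last GP known to have ended */
};

/*
 * Called with the root node's lock (node[0].lock) held.  If the root
 * already agrees with the global ->completed value there is nothing to
 * do; otherwise push the new value to every node, taking each node's
 * lock in breadth-first order, mirroring the hunk above.
 */
static void propagate_completed(struct gp_state *sp)
{
	int i;

	if (sp->node[0].completed == sp->completed) {
		pthread_mutex_unlock(&sp->node[0].lock);
		return;
	}
	pthread_mutex_unlock(&sp->node[0].lock);

	for (i = 0; i < NNODES; i++) {	/* breadth-first order */
		pthread_mutex_lock(&sp->node[i].lock);
		sp->node[i].completed = sp->completed;
		pthread_mutex_unlock(&sp->node[i].lock);
	}
}

int main(void)
{
	struct gp_state s = { .completed = 42 };
	int i;

	for (i = 0; i < NNODES; i++) {
		pthread_mutex_init(&s.node[i].lock, NULL);
		s.node[i].completed = 41;	/* nodes lag behind */
	}
	pthread_mutex_lock(&s.node[0].lock);
	propagate_completed(&s);
	for (i = 0; i < NNODES; i++)
		printf("node %d completed = %lu\n", i, s.node[i].completed);
	return 0;
}

The design point the sketch preserves is that the fast path (counters
already equal) drops the root lock and returns immediately; only the
slow path pays for walking the tree, and it never holds more than one
node lock at a time.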