author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-06-25 11:41:11 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-09-23 10:41:54 -0400
commit     b4be093fee0200789df59b6c90e2d099a20f55b3 (patch)
tree       5eb8367be67bde57671240bfeb149c4c670ffd1c
parent     4cdfc175c25c89eedc08460b5e6239c2ec67fcb6 (diff)
rcu: Allow RCU quiescent-state forcing to be preempted
RCU quiescent-state forcing is currently carried out without preemption
points, which can result in excessive latency spikes on large systems
(many hundreds or thousands of CPUs). This patch therefore inserts
a voluntary preemption point into force_qs_rnp(), which should greatly
reduce the magnitude of these spikes.
Reported-by: Mike Galbraith <mgalbraith@suse.de>
Reported-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
 kernel/rcutree.c | 1 +
 1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6182686de4a6..723e2e723074 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1767,6 +1767,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 	struct rcu_node *rnp;
 
 	rcu_for_each_leaf_node(rsp, rnp) {
+		cond_resched();
 		mask = 0;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		if (!rcu_gp_in_progress(rsp)) {
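
For context, the change follows a common kernel pattern: a long scan over many per-node structures gets a voluntary preemption point at the top of each iteration so the scan cannot monopolize a CPU on very large systems. The sketch below is a minimal, hypothetical illustration of that pattern, not the actual RCU code; the structure, list, and function names (scan_node, scan_all_nodes) are invented for this example. Note that cond_resched() may sleep, so it is placed before the irq-disabling lock is taken, exactly as in the hunk above.

	#include <linux/sched.h>	/* cond_resched() */
	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* Hypothetical per-node structure, standing in for struct rcu_node. */
	struct scan_node {
		spinlock_t lock;
		unsigned long mask;
		struct list_head entry;
	};

	/*
	 * Hypothetical scan mirroring the shape of force_qs_rnp(): on large
	 * systems the list may hold thousands of entries, so yield once per
	 * iteration.  cond_resched() is called with no locks held and
	 * interrupts enabled, which is why it sits above the locked region.
	 */
	static void scan_all_nodes(struct list_head *nodes)
	{
		struct scan_node *node;
		unsigned long flags;

		list_for_each_entry(node, nodes, entry) {
			cond_resched();		/* voluntary preemption point */

			spin_lock_irqsave(&node->lock, flags);
			node->mask = 0;		/* per-node work goes here */
			spin_unlock_irqrestore(&node->lock, flags);
		}
	}

The placement is the key design point: a preemption point inside the irq-disabled critical section would be a bug, since scheduling is not permitted while holding a spinlock or with interrupts off, so the yield happens between iterations where the loop holds no locks.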