about summary refs log tree commit diff stats
path: root/kernel/rcutree.c
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-06-21 11:19:05 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-09-23 10:41:53 -0400
commitc856bafae7f5b3f59ac1d99279a9b99b3b36ad12 (patch)
tree2bc16cb69d397440c5082fd451d64db6efa67017 /kernel/rcutree.c
parentcabc49c1ff51baaf1958d501a7a616ce91245c93 (diff)
rcu: Allow RCU grace-period cleanup to be preempted
RCU grace-period cleanup is currently carried out with interrupts disabled, which can result in excessive latency spikes on large systems (many hundreds or thousands of CPUs). This patch therefore makes the RCU grace-period cleanup be preemptible, including voluntary preemption points, which should eliminate those latency spikes. Similar spikes from forcing of quiescent states will be dealt with similarly by later patches.

Updated to replace uses of spin_lock_irqsave() with spin_lock_irq(), as suggested by Peter Zijlstra.

Reported-by: Mike Galbraith <mgalbraith@suse.de>
Reported-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--kernel/rcutree.c11
1 file changed, 5 insertions, 6 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 52c3102dc5f7..ddc6acc85d26 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1151,7 +1151,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
	 * completed.
	 */
	if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_irq(&rnp->lock);

		/*
		 * Propagate new ->completed value to rcu_node
@@ -1160,14 +1160,13 @@ static int __noreturn rcu_gp_kthread(void *arg)
	 * to process their callbacks.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
-		/* irqs already disabled. */
-		raw_spin_lock(&rnp->lock);
+		raw_spin_lock_irq(&rnp->lock);
		rnp->completed = rsp->gpnum;
-		/* irqs remain disabled. */
-		raw_spin_unlock(&rnp->lock);
+		raw_spin_unlock_irq(&rnp->lock);
+		cond_resched();
	}
	rnp = rcu_get_root(rsp);
-	raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+	raw_spin_lock_irq(&rnp->lock);
	}

	rsp->completed = rsp->gpnum; /* Declare grace period done. */