author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2010-04-14 20:39:26 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2010-05-10 14:08:35 -0400
commit		d21670acab9fcb4bc74a40b68a6941059234c55c
tree		6a4c054bc4dbadf0524b4e221889a8da558dbdaf	/kernel/rcutree.c
parent		4a90a0681cf6cd21cd444184302aa045156486b3
rcu: reduce the number of spurious RCU_SOFTIRQ invocations
Lai Jiangshan noted that up to 10% of the RCU_SOFTIRQ invocations are
spurious, and traced this down to the fact that the current grace-period
machinery will uselessly raise RCU_SOFTIRQ when a given CPU needs to go
through a quiescent state, but has not yet done so.  In this situation,
there might well be nothing that RCU_SOFTIRQ can do, and the overhead
can be worth worrying about in the ksoftirqd case.  This patch therefore
avoids raising RCU_SOFTIRQ in this situation.

Changes since v1 (http://lkml.org/lkml/2010/3/30/122 from Lai Jiangshan):

o	Omit the rcu_qs_pending() prechecks, as they aren't that much
	less expensive than the quiescent-state checks.

o	Merge with the set_need_resched() patch that reduces IPIs.

o	Add the new n_rp_report_qs field to the rcu_pending tracing output.

o	Update the tracing documentation accordingly.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
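The gist of the change, as a standalone sketch (userspace C; the names
cpu_state and work_pending() are hypothetical stand-ins for the kernel's
rcu_data and __rcu_pending(), not the kernel code itself): distinguish
"this CPU still owes a quiescent state" (nothing for the softirq to do)
from "this CPU has passed one that must be reported" (real softirq
work), and only signal in the second case.

	/* Hypothetical userspace sketch of the patch's idea. */
	#include <stdbool.h>
	#include <stdio.h>

	struct cpu_state {
		bool qs_pending;		/* core is waiting on this CPU */
		bool passed_quiesc;		/* CPU has passed a quiescent state */
		unsigned long n_rp_qs_pending;	/* trace: still owes a QS */
		unsigned long n_rp_report_qs;	/* trace: QS ready to report */
	};

	/* Return true only when a softirq handler would have work to do. */
	static bool work_pending(struct cpu_state *s)
	{
		if (s->qs_pending && !s->passed_quiesc) {
			s->n_rp_qs_pending++;	/* owes a QS: nothing to do yet */
			return false;
		} else if (s->qs_pending && s->passed_quiesc) {
			s->n_rp_report_qs++;	/* QS passed: must be reported */
			return true;
		}
		return false;
	}

	int main(void)
	{
		struct cpu_state s = { .qs_pending = true, .passed_quiesc = false };

		if (work_pending(&s))		/* skipped: would be spurious */
			printf("raise softirq\n");
		s.passed_quiesc = true;
		if (work_pending(&s))		/* taken: real work exists */
			printf("raise softirq\n");
		return 0;
	}

Before the patch, rcu_check_callbacks() raised RCU_SOFTIRQ
unconditionally, so the "owes a quiescent state" case produced a softirq
invocation with nothing to do; gating the raise on the pending check, as
in the diff below, is what removes those spurious invocations.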
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index c60fd74e7ec9..ba6996943e28 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1161,8 +1161,6 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
-	if (!rcu_pending(cpu))
-		return; /* if nothing for RCU to do. */
 	if (user ||
 	    (idle_cpu(cpu) && rcu_scheduler_active &&
 	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -1194,7 +1192,8 @@ void rcu_check_callbacks(int cpu, int user)
 		rcu_bh_qs(cpu);
 	}
 	rcu_preempt_check_callbacks(cpu);
-	raise_softirq(RCU_SOFTIRQ);
+	if (rcu_pending(cpu))
+		raise_softirq(RCU_SOFTIRQ);
 }
 
 #ifdef CONFIG_SMP
@@ -1534,18 +1533,20 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	check_cpu_stall(rsp, rdp);
 
 	/* Is the RCU core waiting for a quiescent state from this CPU? */
-	if (rdp->qs_pending) {
+	if (rdp->qs_pending && !rdp->passed_quiesc) {
 
 		/*
 		 * If force_quiescent_state() coming soon and this CPU
 		 * needs a quiescent state, and this is either RCU-sched
 		 * or RCU-bh, force a local reschedule.
 		 */
+		rdp->n_rp_qs_pending++;
 		if (!rdp->preemptable &&
 		    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
 				 jiffies))
 			set_need_resched();
-		rdp->n_rp_qs_pending++;
+	} else if (rdp->qs_pending && rdp->passed_quiesc) {
+		rdp->n_rp_report_qs++;
 		return 1;
 	}
 