aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcutree_plugin.h
diff options
context:
space:
mode:
authorPaul E. McKenney <paul.mckenney@linaro.org>2011-11-23 00:08:13 -0500
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2011-12-11 13:32:03 -0500
commit3ad0decf98d97b9039d8ed47cee287366b929cdf (patch)
tree3d6c02ceb148b55b9632422346978bf369e790e9 /kernel/rcutree_plugin.h
parentf535a607c13c7b674e0788ca5765779aa74a01c3 (diff)
rcu: Reduce latency of rcu_prepare_for_idle()
Re-enable interrupts across calls to quiescent-state functions and also across force_quiescent_state() to reduce latency.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--kernel/rcutree_plugin.h12
1 file changed, 7 insertions, 5 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3d84dbc113d6..42ca5a400ae3 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2009,7 +2009,6 @@ int rcu_needs_cpu(int cpu)
  */
 static void rcu_prepare_for_idle(int cpu)
 {
-	int c = 0;
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -2055,27 +2054,30 @@ static void rcu_prepare_for_idle(int cpu)
 	 */
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
+		local_irq_restore(flags);
 		rcu_preempt_qs(cpu);
 		force_quiescent_state(&rcu_preempt_state, 0);
-		c = c || per_cpu(rcu_preempt_data, cpu).nxtlist;
+		local_irq_save(flags);
 	}
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
+		local_irq_restore(flags);
 		rcu_sched_qs(cpu);
 		force_quiescent_state(&rcu_sched_state, 0);
-		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
+		local_irq_save(flags);
 	}
 	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
+		local_irq_restore(flags);
 		rcu_bh_qs(cpu);
 		force_quiescent_state(&rcu_bh_state, 0);
-		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
+		local_irq_save(flags);
 	}
 
 	/*
 	 * If RCU callbacks are still pending, RCU still needs this CPU.
 	 * So try forcing the callbacks through the grace period.
 	 */
-	if (c) {
+	if (rcu_cpu_has_callbacks(cpu)) {
 		local_irq_restore(flags);
 		trace_rcu_prep_idle("More callbacks");
 		invoke_rcu_core();