author		Paul E. McKenney <paul.mckenney@linaro.org>	2012-01-23 20:23:35 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-02-21 12:06:05 -0500
commit		c0cfbbb0d4fca14b828a7635a59784adfba8989d (patch)
tree		a6201565b275d63b17df2a256e9b30c91f94cb6d /kernel
parent		3d3b7db0a22085cfc05c3318b9874f7fb8266d18 (diff)
rcu: No interrupt disabling for rcu_prepare_for_idle()
The rcu_prepare_for_idle() function is always called with interrupts disabled, so there is no reason to disable interrupts again within rcu_prepare_for_idle(). Therefore, this commit removes all of the interrupt disabling, also removing a latent interrupt-disabling-unbalance bug.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
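For intuition, here is a minimal user-space sketch of why the removed pattern was redundant: local_irq_save()/local_irq_restore() nest, so when the caller has already disabled interrupts, an inner save/restore pair merely records and then re-establishes the "disabled" state. This is a simulation, not kernel code; the names sim_irqs_disabled, sim_irq_save, sim_irq_restore, and prepare_for_idle are hypothetical stand-ins.

/*
 * Minimal user-space simulation of nested "interrupt" disabling.
 * All names here are hypothetical stand-ins, not the kernel's
 * implementation.
 */
#include <assert.h>
#include <stdio.h>

static int sim_irqs_disabled;		/* 1 = "interrupts" off */

static void sim_irq_save(unsigned long *flags)
{
	*flags = sim_irqs_disabled;	/* remember previous state */
	sim_irqs_disabled = 1;		/* disable "interrupts" */
}

static void sim_irq_restore(unsigned long flags)
{
	sim_irqs_disabled = (int)flags;	/* put previous state back */
}

/* Calling convention: "interrupts" are already disabled on entry. */
static void prepare_for_idle(void)
{
	unsigned long flags;

	sim_irq_save(&flags);		/* redundant: already disabled */
	assert(flags == 1);		/* the saved state is "disabled" */
	/* ... idle-preparation work ... */
	sim_irq_restore(flags);		/* no-op: restores "disabled" */
}

int main(void)
{
	sim_irqs_disabled = 1;		/* caller disables "interrupts" */
	prepare_for_idle();
	printf("still disabled after return: %d\n", sim_irqs_disabled);
	return 0;
}

Removing the inner pair therefore changes nothing on any path, which is exactly what the diff below does to the real function.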
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcutree_plugin.h	18
1 file changed, 1 insertion(+), 17 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 98ce17cf1fb5..5e25ee327ccb 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2096,10 +2096,6 @@ static void rcu_cleanup_after_idle(int cpu)
  */
 static void rcu_prepare_for_idle(int cpu)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
 	/*
 	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
 	 * Also reset state to avoid prejudicing later attempts.
@@ -2107,7 +2103,6 @@ static void rcu_prepare_for_idle(int cpu)
 	if (!rcu_cpu_has_callbacks(cpu)) {
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 		per_cpu(rcu_dyntick_drain, cpu) = 0;
-		local_irq_restore(flags);
 		trace_rcu_prep_idle("No callbacks");
 		return;
 	}
@@ -2117,7 +2112,6 @@ static void rcu_prepare_for_idle(int cpu)
 	 * refrained from disabling the scheduling-clock tick.
 	 */
 	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
-		local_irq_restore(flags);
 		trace_rcu_prep_idle("In holdoff");
 		return;
 	}
@@ -2142,7 +2136,6 @@ static void rcu_prepare_for_idle(int cpu)
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-		local_irq_restore(flags);
 		trace_rcu_prep_idle("Begin holdoff");
 		invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
 		return;
@@ -2154,23 +2147,17 @@ static void rcu_prepare_for_idle(int cpu)
 	 */
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
-		local_irq_restore(flags);
 		rcu_preempt_qs(cpu);
 		force_quiescent_state(&rcu_preempt_state, 0);
-		local_irq_save(flags);
 	}
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
-		local_irq_restore(flags);
 		rcu_sched_qs(cpu);
 		force_quiescent_state(&rcu_sched_state, 0);
-		local_irq_save(flags);
 	}
 	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
-		local_irq_restore(flags);
 		rcu_bh_qs(cpu);
 		force_quiescent_state(&rcu_bh_state, 0);
-		local_irq_save(flags);
 	}
 
 	/*
@@ -2178,13 +2165,10 @@ static void rcu_prepare_for_idle(int cpu)
 	 * So try forcing the callbacks through the grace period.
 	 */
 	if (rcu_cpu_has_callbacks(cpu)) {
-		local_irq_restore(flags);
 		trace_rcu_prep_idle("More callbacks");
 		invoke_rcu_core();
-	} else {
-		local_irq_restore(flags);
+	} else
 		trace_rcu_prep_idle("Callbacks drained");
-	}
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
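With the inner save/restore pairs gone, rcu_prepare_for_idle() relies on every caller honoring the interrupts-disabled convention. One hedged way to make such a convention self-checking, continuing the simulation above, is a debug assertion on entry; this commit itself adds no such check, and the in-kernel analogue would be along the lines of WARN_ON_ONCE(!irqs_disabled()).

/* Hypothetical precondition check, continuing the simulation above. */
static void prepare_for_idle_checked(void)
{
	assert(sim_irqs_disabled);	/* caller must disable "irqs" first */
	/* ... idle-preparation work, no inner save/restore needed ... */
}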