author     Nicolas Pitre <nicolas.pitre@linaro.org>    2011-08-01 17:25:06 -0400
committer  Nicolas Pitre <nico@fluxnic.net>            2012-01-20 18:55:05 -0500
commit     4fa20439a80c008d33f2865b0db94dcb5da467e2 (patch)
tree       b11eb45155ffe12131716e7c4c8742276187fdf9 /arch/arm/kernel/process.c
parent     3c0b2cef913c8f92b15a5a1fe7b611836f7f80bf (diff)
ARM: clean up idle handlers
Let's factor out the need_resched() check instead of having it duplicated
in every pm_idle implementation, to avoid inconsistencies (omap2_pm_idle
is missing it already).
The forceful re-enablement of IRQs after pm_idle has returned can go.
The warning certainly doesn't trigger for existing users.
To get rid of the pm_idle calling convention oddity, let's introduce
arm_pm_idle(), allowing the local_irq_enable() call to be factored out
of SoC-specific implementations. The default pm_idle function becomes
a wrapper for arm_pm_idle() and takes care of enabling IRQs closer to
where they are initially disabled.
Finally, move the comment explaining why IRQs are turned off to a more
appropriate location.
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Acked-and-tested-by: Jamie Iles <jamie@jamieiles.com>
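
For illustration, here is a minimal sketch (not part of this patch) of what the change means for an SoC idle hook. The names mysoc_old_idle(), mysoc_do_idle() and mysoc_pm_init() are hypothetical, cpu_do_idle() is used merely as a generic WFI entry point, and the local extern declaration of arm_pm_idle stands in for whatever header the real tree provides. Under the old pm_idle convention each implementation duplicated the need_resched() check and re-enabled IRQs itself; under the new arm_pm_idle() convention the hook only enters the low-power state, while cpu_idle() performs the need_resched() check and default_idle() re-enables IRQs.

#include <linux/init.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/irqflags.h>	/* local_irq_enable() */
#include <asm/proc-fns.h>	/* cpu_do_idle() */

/* Defined by this patch in arch/arm/kernel/process.c; the header
 * carrying the extern declaration is not shown in this hunk. */
extern void (*arm_pm_idle)(void);

/* Hypothetical hook under the old pm_idle convention. */
static void mysoc_old_idle(void)
{
	if (!need_resched())
		cpu_do_idle();		/* enter WFI with IRQs disabled */
	local_irq_enable();		/* boilerplate repeated everywhere */
}

/* Hypothetical equivalent under the new convention: no need_resched()
 * check and no local_irq_enable(); default_idle() enables IRQs after
 * this returns. */
static void mysoc_do_idle(void)
{
	cpu_do_idle();
}

static int __init mysoc_pm_init(void)
{
	arm_pm_idle = mysoc_do_idle;
	return 0;
}
arch_initcall(mysoc_pm_init);

With this split, a hook that forgets the check (as omap2_pm_idle did) can no longer cause a missed reschedule, since the core idle loop now performs it unconditionally.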
Diffstat (limited to 'arch/arm/kernel/process.c')
-rw-r--r--  arch/arm/kernel/process.c  23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 971d65c253a9..ba9e7ef92bec 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -181,12 +181,16 @@ void cpu_idle_wait(void)
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
 /*
- * This is our default idle handler. We need to disable
- * interrupts here to ensure we don't miss a wakeup call.
+ * This is our default idle handler.
  */
+
+void (*arm_pm_idle)(void);
+
 static void default_idle(void)
 {
-	if (!need_resched())
+	if (arm_pm_idle)
+		arm_pm_idle();
+	else
 		arch_idle();
 	local_irq_enable();
 }
@@ -215,6 +219,10 @@ void cpu_idle(void)
 			cpu_die();
 #endif
 
+			/*
+			 * We need to disable interrupts here
+			 * to ensure we don't miss a wakeup call.
+			 */
 			local_irq_disable();
 #ifdef CONFIG_PL310_ERRATA_769419
 			wmb();
@@ -222,19 +230,18 @@ void cpu_idle(void)
 			if (hlt_counter) {
 				local_irq_enable();
 				cpu_relax();
-			} else {
+			} else if (!need_resched()) {
 				stop_critical_timings();
 				if (cpuidle_idle_call())
 					pm_idle();
 				start_critical_timings();
 				/*
-				 * This will eventually be removed - pm_idle
-				 * functions should always return with IRQs
-				 * enabled.
+				 * pm_idle functions must always
+				 * return with IRQs enabled.
 				 */
 				WARN_ON(irqs_disabled());
+			} else
 				local_irq_enable();
-			}
 		}
 		leds_event(led_idle_end);
 		rcu_idle_exit();