author     Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>   2006-12-06 20:14:13 -0500
committer  Andi Kleen <andi@basil.nowhere.org>                   2006-12-06 20:14:13 -0500
commit     d331e739f5ad2aaa9d8553891ba6ca823bdbce37 (patch)
tree       5da7042a022dd119c9b920ceb4101e7203bccb44
parent     a0429d0d7a6116dedcb71d9128da904bf135f189 (diff)
[PATCH] x86-64: Fix interrupt race in idle callback (3rd try)
Idle callbacks have some races: after enter_idle() sets isidle, interrupts can still happen on that CPU before it actually goes to idle. Due to this, an IDLE_END can get called before IDLE_START. To avoid these races, disable interrupts before enter_idle() and make sure that all idle routines do not enable interrupts before entering idle.

Note that poll_idle() still has this race, as it has to enable interrupts before going to idle. All other idle routines have the race fixed.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
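For orientation, a minimal sketch of the ordering the fix establishes in cpu_idle(). This is illustrative only, not the patch itself: the loop structure is simplified and idle_entry_sketch() is an invented name, while need_resched(), enter_idle(), __exit_idle() and local_irq_disable() are the real symbols the patch touches.

/* Illustrative sketch only: interrupts are turned off before enter_idle()
 * and stay off until the idle routine re-enables them atomically with the
 * halt (e.g. "sti; hlt" or "sti; mwait"). */
static void idle_entry_sketch(void (*idle)(void))
{
	while (!need_resched()) {
		local_irq_disable();	/* no interrupt can run past this point */
		enter_idle();		/* so IDLE_START is guaranteed to come first */
		idle();			/* must not enable IRQs before actually idling */
		__exit_idle();		/* IDLE_END can no longer precede IDLE_START */
	}
}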
-rw-r--r--  arch/x86_64/kernel/process.c    | 19
-rw-r--r--  include/asm-x86_64/processor.h  |  8
2 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 0b7b4caa4f74..a418ee4c8c62 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -127,6 +127,7 @@ static void default_idle(void)
  */
 static void poll_idle (void)
 {
+	local_irq_enable();
 	cpu_relax();
 }
 
@@ -208,6 +209,12 @@ void cpu_idle (void)
 				idle = default_idle;
 			if (cpu_is_offline(smp_processor_id()))
 				play_dead();
+			/*
+			 * Idle routines should keep interrupts disabled
+			 * from here on, until they go to idle.
+			 * Otherwise, idle callbacks can misfire.
+			 */
+			local_irq_disable();
 			enter_idle();
 			idle();
 			/* In many cases the interrupt that ended idle
@@ -245,8 +252,16 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
-	local_irq_enable();
-	mwait_idle_with_hints(0,0);
+	if (!need_resched()) {
+		__monitor((void *)&current_thread_info()->flags, 0, 0);
+		smp_mb();
+		if (!need_resched())
+			__sti_mwait(0, 0);
+		else
+			local_irq_enable();
+	} else {
+		local_irq_enable();
+	}
 }
 
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index cef17e0f828c..76552d72804c 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -475,6 +475,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
 		: :"a" (eax), "c" (ecx));
 }
 
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax,%ecx;" */
+	asm volatile(
+		"sti; .byte 0x0f,0x01,0xc9;"
+		: :"a" (eax), "c" (ecx));
+}
+
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 #define stack_current() \
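An explanatory aside, not part of the commit: __sti_mwait() works because on x86 an STI does not allow interrupts to be serviced until the instruction following it has completed, so placing the MWAIT opcode (.byte 0x0f,0x01,0xc9) right after "sti" enters the idle state with interrupts enabled but with no window in which an interrupt could fire in between. The HLT-based default_idle() path relies on the same property via safe_halt(), which boils down to roughly:

	/* enable interrupts and halt atomically; the one-instruction
	 * interrupt shadow of "sti" covers the following "hlt" */
	asm volatile("sti; hlt" : : : "memory");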