Diffstat (limited to 'arch')
 arch/x86/kernel/smpboot.c | 13 +++++++++++++
 1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33a0c11797de..9fd3137230d4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	x86_platform.nmi_init();
 
+	/*
+	 * Wait until the cpu which brought this one up marked it
+	 * online before enabling interrupts. If we don't do that then
+	 * we can end up waking up the softirq thread before this cpu
+	 * reached the active state, which makes the scheduler unhappy
+	 * and schedule the softirq thread on the wrong cpu. This is
+	 * only observable with forced threaded interrupts, but in
+	 * theory it could also happen w/o them. It's just way harder
+	 * to achieve.
+	 */
+	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
+		cpu_relax();
+
 	/* enable local interrupts */
 	local_irq_enable();
 
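The added hunk is a publish-then-spin handshake: the secondary CPU busy-waits until the CPU that brought it up has marked it active in cpu_active_mask, and only then enables interrupts. A minimal user-space sketch of that same pattern, assuming a C11 toolchain with <threads.h> and using hypothetical names (worker_active, worker) in place of the kernel's cpumask helpers and cpu_relax():

/*
 * Illustrative only: a worker thread stands in for the secondary CPU,
 * the main thread for the CPU performing the bring-up.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_bool worker_active = false;

static int worker(void *arg)
{
	(void)arg;

	/* Analogue of the cpumask_test_cpu()/cpu_relax() loop. */
	while (!atomic_load_explicit(&worker_active, memory_order_acquire))
		thrd_yield();	/* stand-in for cpu_relax() */

	/* Only now is it safe to do work that assumes the "active" state. */
	puts("worker: marked active, proceeding");
	return 0;
}

int main(void)
{
	thrd_t t;

	thrd_create(&t, worker, NULL);

	/* The coordinator publishes the flag, like the bringing-up CPU
	 * marking the secondary CPU active before it may proceed. */
	atomic_store_explicit(&worker_active, true, memory_order_release);

	thrd_join(t, NULL);
	return 0;
}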