 arch/arm/kernel/smp.c     |  7 -------
 arch/hexagon/kernel/smp.c |  2 --
 arch/s390/kernel/smp.c    |  6 ------
 arch/x86/kernel/smpboot.c | 13 -------------
 kernel/sched/core.c       |  2 +-
 5 files changed, 1 insertion(+), 29 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index cdeb727527d3..d616ed51e7a7 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -295,13 +295,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	 */
 	percpu_timer_setup();
 
-	while (!cpu_active(cpu))
-		cpu_relax();
-
-	/*
-	 * cpu_active bit is set, so it's safe to enalbe interrupts
-	 * now.
-	 */
 	local_irq_enable();
 	local_fiq_enable();
 
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index c871a2cffaef..0123c63e9a3a 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -179,8 +179,6 @@ void __cpuinit start_secondary(void)
 	printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);
 
 	set_cpu_online(cpu, true);
-	while (!cpumask_test_cpu(cpu, cpu_active_mask))
-		cpu_relax();
 	local_irq_enable();
 
 	cpu_idle();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2398ce6b15ae..b0e28c47ab83 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -550,12 +550,6 @@ int __cpuinit start_secondary(void *cpuvoid)
 	S390_lowcore.restart_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
 	__ctl_set_bit(0, 28); /* Enable lowcore protection */
-	/*
-	 * Wait until the cpu which brought this one up marked it
-	 * active before enabling interrupts.
-	 */
-	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
-		cpu_relax();
 	local_irq_enable();
 	/* cpu_idle will call schedule for us */
 	cpu_idle();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 66d250c00d11..58f78165d308 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -291,19 +291,6 @@ notrace static void __cpuinit start_secondary(void *unused)
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	x86_platform.nmi_init();
 
-	/*
-	 * Wait until the cpu which brought this one up marked it
-	 * online before enabling interrupts. If we don't do that then
-	 * we can end up waking up the softirq thread before this cpu
-	 * reached the active state, which makes the scheduler unhappy
-	 * and schedule the softirq thread on the wrong cpu. This is
-	 * only observable with forced threaded interrupts, but in
-	 * theory it could also happen w/o them. It's just way harder
-	 * to achieve.
-	 */
-	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
-		cpu_relax();
-
 	/* enable local interrupts */
 	local_irq_enable();
 
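The comment removed above documents the race this patch eliminates: the scheduler only places tasks on CPUs present in cpu_active_mask, so a wakeup arriving before the new CPU is marked active gets bounced elsewhere. A minimal sketch of that placement decision, loosely modeled on the scheduler's fallback path (sketch_pick_wakeup_cpu() is a hypothetical helper, not code from this patch):

static int sketch_pick_wakeup_cpu(int preferred_cpu)
{
	/* An active CPU can take the task directly. */
	if (cpumask_test_cpu(preferred_cpu, cpu_active_mask))
		return preferred_cpu;

	/*
	 * Not active yet: fall back to some other active CPU. For a
	 * per-cpu thread like the softirq thread, that is the "wrong
	 * cpu" the removed comment warns about.
	 */
	return cpumask_any(cpu_active_mask);
}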
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 95545126be1c..b1ccce819ce2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5410,7 +5410,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
+	case CPU_STARTING:
 	case CPU_DOWN_FAILED:
 		set_cpu_active((long)hcpu, true);
 		return NOTIFY_OK;
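This one-line change is what makes the arch-side wait loops removable: sched_cpu_active() now reacts to CPU_STARTING, and CPU_STARTING notifiers run on the incoming CPU itself, via notify_cpu_starting(), with interrupts still disabled. By the time the secondary CPU reaches local_irq_enable(), it is therefore already in cpu_active_mask. A sketch of the resulting bring-up order on the incoming CPU, assuming the usual notify_cpu_starting() path (simplified; the real per-arch code does more):

static void sketch_secondary_bringup(unsigned int cpu)
{
	/*
	 * CPU_STARTING notifiers run here, on the incoming CPU, with
	 * interrupts off. After this patch they include
	 * sched_cpu_active(), which calls set_cpu_active(cpu, true).
	 */
	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	/* Already active: no need to spin on cpu_active_mask. */
	local_irq_enable();
}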