 arch/x86/include/asm/cpu.h |  2
 arch/x86/include/asm/smp.h |  2
 arch/x86/kernel/smpboot.c  | 39
 arch/x86/xen/smp.c         | 46
 4 files changed, 44 insertions(+), 45 deletions(-)
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index d2b12988d2ed..bf2caa1dedc5 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -34,8 +34,6 @@ extern int _debug_hotplug_cpu(int cpu, int action);
 #endif
 #endif
 
-DECLARE_PER_CPU(int, cpu_state);
-
 int mwait_usable(const struct cpuinfo_x86 *);
 
 #endif /* _ASM_X86_CPU_H */
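
The per-CPU cpu_state variable removed here is superseded by the generic
CPU hotplug state machine in kernel/smpboot.c. For orientation, the
helpers the rest of this patch switches to are reached through
<linux/cpu.h> roughly as follows (a simplified sketch of that era's
interface, not a verbatim copy):

	/* Sketch of the generic hotplug-state helpers (simplified). */
	int cpu_check_up_prepare(int cpu);   /* vet/reset state before bringup */
	void cpu_set_state_online(int cpu);  /* mark CPU online (full barrier) */
	int cpu_report_state(int cpu);       /* read the per-CPU state */
	#ifdef CONFIG_HOTPLUG_CPU
	bool cpu_wait_death(unsigned int cpu, int seconds); /* survivor waits */
	bool cpu_report_death(void);         /* dying CPU acks its own death */
	#endif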
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 8cd1cc3bc835..a5cb4f6e9492 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -150,12 +150,12 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 }
 
 void cpu_disable_common(void);
-void cpu_die_common(unsigned int cpu);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_disable(void);
+int common_cpu_die(unsigned int cpu);
 void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index febc6aabc72e..c8fa34963ead 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -77,9 +77,6 @@
 #include <asm/realmode.h>
 #include <asm/misc.h>
 
-/* State of each CPU */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
@@ -257,7 +254,7 @@ static void notrace start_secondary(void *unused)
 	lock_vector_lock();
 	set_cpu_online(smp_processor_id(), true);
 	unlock_vector_lock();
-	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+	cpu_set_state_online(smp_processor_id());
 	x86_platform.nmi_init();
 
 	/* enable local interrupts */
@@ -948,7 +945,10 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 	 */
 	mtrr_save_state();
 
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+	/* x86 CPUs take themselves offline, so delayed offline is OK. */
+	err = cpu_check_up_prepare(cpu);
+	if (err && err != -EBUSY)
+		return err;
 
 	/* the FPU context is blank, nobody can own it */
 	__cpu_disable_lazy_restore(cpu);
@@ -1191,7 +1191,7 @@ void __init native_smp_prepare_boot_cpu(void)
 	switch_to_new_gdt(me);
 	/* already set me in cpu_online_mask in boot_cpu_init() */
 	cpumask_set_cpu(me, cpu_callout_mask);
-	per_cpu(cpu_state, me) = CPU_ONLINE;
+	cpu_set_state_online(me);
 }
 
 void __init native_smp_cpus_done(unsigned int max_cpus)
@@ -1318,14 +1318,10 @@ static void __ref remove_cpu_from_maps(int cpu)
 	numa_remove_cpu(cpu);
 }
 
-static DEFINE_PER_CPU(struct completion, die_complete);
-
 void cpu_disable_common(void)
 {
 	int cpu = smp_processor_id();
 
-	init_completion(&per_cpu(die_complete, smp_processor_id()));
-
 	remove_siblinginfo(cpu);
 
 	/* It's now safe to remove this processor from the online map */
@@ -1349,24 +1345,27 @@ int native_cpu_disable(void)
 	return 0;
 }
 
-void cpu_die_common(unsigned int cpu)
+int common_cpu_die(unsigned int cpu)
 {
-	wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
-}
+	int ret = 0;
 
-void native_cpu_die(unsigned int cpu)
-{
 	/* We don't do anything here: idle task is faking death itself. */
 
-	cpu_die_common(cpu);
-
 	/* They ack this in play_dead() by setting CPU_DEAD */
-	if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+	if (cpu_wait_death(cpu, 5)) {
 		if (system_state == SYSTEM_RUNNING)
 			pr_info("CPU %u is now offline\n", cpu);
 	} else {
 		pr_err("CPU %u didn't die...\n", cpu);
+		ret = -1;
 	}
+
+	return ret;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+	common_cpu_die(cpu);
 }
 
 void play_dead_common(void)
@@ -1375,10 +1374,8 @@ void play_dead_common(void)
 	reset_lazy_tlbstate();
 	amd_e400_remove_cpu(raw_smp_processor_id());
 
-	mb();
 	/* Ack it */
-	__this_cpu_write(cpu_state, CPU_DEAD);
-	complete(&per_cpu(die_complete, smp_processor_id()));
+	(void)cpu_report_death();
 
 	/*
 	 * With physical CPU hotplug, we should halt the cpu
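
The die_complete completion removed above is replaced by a handshake
through the shared per-CPU state. Roughly, the two sides now pair up as
in the sketch below (illustrative only; wait_side_sketch() and
dying_side_sketch() are names invented here, and the bodies condense
what kernel/smpboot.c does):

	/* Surviving CPU, as called from common_cpu_die(). */
	static int wait_side_sketch(unsigned int cpu)
	{
		if (cpu_wait_death(cpu, 5))  /* saw CPU_DEAD; marks CPU_POST_DEAD */
			return 0;
		return -1;                   /* timed out; CPU left marked broken */
	}

	/* Dying CPU, as called from play_dead_common(). */
	static void dying_side_sketch(void)
	{
		/* Atomic state update supplies the barrier the old mb() gave. */
		(void)cpu_report_death();    /* moves this CPU's state to CPU_DEAD */
	}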
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 08e8489c47f1..1c5e760f34ca 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -90,14 +90,10 @@ static void cpu_bringup(void)
 
 	set_cpu_online(cpu, true);
 
-	this_cpu_write(cpu_state, CPU_ONLINE);
-
-	wmb();
+	cpu_set_state_online(cpu);	/* Implies full memory barrier. */
 
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
-
-	wmb(); /* make sure everything is out */
 }
 
 /*
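
Both wmb() calls could go because the state update itself is now a
full-barrier atomic operation. If memory serves, cpu_set_state_online()
amounts to the following (sketch, assuming the kernel/smpboot.c
implementation of the time):

	void cpu_set_state_online(int cpu)
	{
		/* atomic_xchg() implies a full memory barrier. */
		(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
	}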
@@ -459,7 +455,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	xen_setup_timer(cpu);
 	xen_init_lock_cpu(cpu);
 
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+	/*
+	 * PV VCPUs are always successfully taken down (see 'while' loop
+	 * in xen_cpu_die()), so -EBUSY is an error.
+	 */
+	rc = cpu_check_up_prepare(cpu);
+	if (rc)
+		return rc;
 
 	/* make sure interrupts start blocked */
 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
@@ -479,10 +481,8 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 	BUG_ON(rc);
 
-	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
+	while (cpu_report_state(cpu) != CPU_ONLINE)
 		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
-		barrier();
-	}
 
 	return 0;
 }
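
The error handling in xen_cpu_up() is deliberately stricter than in
native_cpu_up() above: cpu_check_up_prepare() reports -EBUSY when an
earlier offline timed out and the CPU is still marked CPU_DEAD_FROZEN.
Native x86 tolerates that, because the CPU takes itself offline
eventually, whereas a PV VCPU is guaranteed down by xen_cpu_die()'s
wait loop, so any error is fatal. Condensed sketch of the decision
(simplified from kernel/smpboot.c; the real function handles more
states):

	switch (cpu_report_state(cpu)) {
	case CPU_POST_DEAD:      /* clean previous offline */
		return 0;        /* after resetting state to CPU_UP_PREPARE */
	case CPU_DEAD_FROZEN:    /* previous offline timed out */
		return -EBUSY;   /* caller decides whether this is fatal */
	default:
		return -EIO;     /* unexpected state */
	}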
@@ -511,11 +511,11 @@ static void xen_cpu_die(unsigned int cpu)
 		schedule_timeout(HZ/10);
 	}
 
-	cpu_die_common(cpu);
-
-	xen_smp_intr_free(cpu);
-	xen_uninit_lock_cpu(cpu);
-	xen_teardown_timer(cpu);
+	if (common_cpu_die(cpu) == 0) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+		xen_teardown_timer(cpu);
+	}
 }
 
 static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
@@ -747,6 +747,16 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
+
+	/*
+	 * This can happen if CPU was offlined earlier and
+	 * offlining timed out in common_cpu_die().
+	 */
+	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+	}
+
 	/*
 	 * xen_smp_intr_init() needs to run before native_cpu_up()
 	 * so that IPI vectors are set up on the booting CPU before
@@ -768,12 +778,6 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 	return rc;
 }
 
-static void xen_hvm_cpu_die(unsigned int cpu)
-{
-	xen_cpu_die(cpu);
-	native_cpu_die(cpu);
-}
-
 void __init xen_hvm_smp_init(void)
 {
 	if (!xen_have_vector_callback)
@@ -781,7 +785,7 @@ void __init xen_hvm_smp_init(void)
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
 	smp_ops.cpu_up = xen_hvm_cpu_up;
-	smp_ops.cpu_die = xen_hvm_cpu_die;
+	smp_ops.cpu_die = xen_cpu_die;
 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
 	smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
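
Why the HVM wrapper could be dropped: after this patch native_cpu_die()
is just a call to common_cpu_die(), and xen_cpu_die() now ends in
common_cpu_die() itself, so the old chain would have waited for the same
CPU twice. Schematically (xen_hvm_cpu_die_old() is an illustrative name,
not code from the tree):

	static void xen_hvm_cpu_die_old(unsigned int cpu)
	{
		xen_cpu_die(cpu);     /* now ends in common_cpu_die(cpu) */
		native_cpu_die(cpu);  /* now just common_cpu_die(cpu) again */
	}
	/* After: smp_ops.cpu_die = xen_cpu_die; exactly one wait. */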