Diffstat (limited to 'arch/x86/kernel/smpboot.c')
-rw-r--r--	arch/x86/kernel/smpboot.c	| 86
1 file changed, 53 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 565ebc65920e..763d815e27a0 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -48,6 +48,8 @@
 #include <linux/err.h>
 #include <linux/nmi.h>
 #include <linux/tboot.h>
+#include <linux/stackprotector.h>
+#include <linux/gfp.h>
 
 #include <asm/acpi.h>
 #include <asm/desc.h>
@@ -67,6 +69,7 @@
 #include <linux/mc146818rtc.h>
 
 #include <asm/smpboot_hooks.h>
+#include <asm/i8259.h>
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
@@ -240,7 +243,10 @@ static void __cpuinit smp_callin(void)
 	end_local_APIC_setup();
 	map_cpu_to_logical_apicid();
 
-	notify_cpu_starting(cpuid);
+	/*
+	 * Need to setup vector mappings before we enable interrupts.
+	 */
+	setup_vector_irq(smp_processor_id());
 	/*
 	 * Get our bogomips.
 	 *
@@ -257,6 +263,8 @@ static void __cpuinit smp_callin(void)
 	 */
 	smp_store_cpu_info(cpuid);
 
+	notify_cpu_starting(cpuid);
+
 	/*
 	 * Allow the master to continue.
 	 */
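Taken together, the two smp_callin() hunks above reorder early AP bring-up: vector mappings are now established before this CPU can ever enable interrupts, and notify_cpu_starting() moves after smp_store_cpu_info(), so that CPU_STARTING notifiers run with this CPU's cpu_data already filled in. A condensed sketch of the resulting order, with most of the function elided:

	static void __cpuinit smp_callin(void)
	{
		int cpuid = smp_processor_id();

		/* ... APIC setup elided ... */
		setup_vector_irq(cpuid);	/* before interrupts can be taken */
		calibrate_delay();		/* the "Get our bogomips" step */
		smp_store_cpu_info(cpuid);
		notify_cpu_starting(cpuid);	/* now sees valid cpu_data */
		cpumask_set_cpu(cpuid, cpu_callin_mask);	/* allow the master to continue */
	}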
@@ -286,9 +294,9 @@ notrace static void __cpuinit start_secondary(void *unused)
 	check_tsc_sync_target();
 
 	if (nmi_watchdog == NMI_IO_APIC) {
-		disable_8259A_irq(0);
+		legacy_pic->chip->mask(0);
 		enable_NMI_through_LVT0();
-		enable_8259A_irq(0);
+		legacy_pic->chip->unmask(0);
 	}
 
 #ifdef CONFIG_X86_32
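The direct 8259A calls are gone: masking legacy IRQ0 around NMI watchdog setup now goes through the legacy_pic abstraction (hence the new <asm/i8259.h> include above), which lets PIC-less platforms substitute a no-op implementation. A rough sketch of the interface as this series shapes it; everything beyond ->chip is approximate:

	struct legacy_pic {
		int nr_legacy_irqs;
		struct irq_chip *chip;	/* ->mask(0) / ->unmask(0) used here */
		/* further init/mask-all/probe hooks elided */
	};

	extern struct legacy_pic *legacy_pic;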
@@ -315,15 +323,18 @@ notrace static void __cpuinit start_secondary(void *unused)
 	 */
 	ipi_call_lock();
 	lock_vector_lock();
-	__setup_vector_irq(smp_processor_id());
 	set_cpu_online(smp_processor_id(), true);
 	unlock_vector_lock();
 	ipi_call_unlock();
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+	x86_platform.nmi_init();
 
 	/* enable local interrupts */
 	local_irq_enable();
 
+	/* to prevent fake stack check failure in clock setup */
+	boot_init_stack_canary();
+
 	x86_cpuinit.setup_percpu_clockev();
 
 	wmb();
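Three more start_secondary() changes: __setup_vector_irq() disappears from the onlining sequence (smp_callin() now covers it, per the hunk above), the new x86_platform.nmi_init() hook runs once cpu_state is CPU_ONLINE, and boot_init_stack_canary() gives this CPU a valid stack-protector canary before the clock-event setup that the new comment says could otherwise trip a fake stack-check failure. A hypothetical platform override of the hook, names invented for illustration:

	static void myplat_nmi_init(void)
	{
		/* e.g. route or unmask this platform's NMI source for the new CPU */
	}

	/* in platform setup code: */
	x86_platform.nmi_init = myplat_nmi_init;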
@@ -671,6 +682,26 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
 	complete(&c_idle->done);
 }
 
+/* reduce the number of lines printed when booting a large cpu count system */
+static void __cpuinit announce_cpu(int cpu, int apicid)
+{
+	static int current_node = -1;
+	int node = cpu_to_node(cpu);
+
+	if (system_state == SYSTEM_BOOTING) {
+		if (node != current_node) {
+			if (current_node > (-1))
+				pr_cont(" Ok.\n");
+			current_node = node;
+			pr_info("Booting Node %3d, Processors ", node);
+		}
+		pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
+		return;
+	} else
+		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
+			node, cpu, apicid);
+}
+
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
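announce_cpu() condenses boot-time output to one line per node while system_state == SYSTEM_BOOTING; CPUs hotplugged later still get the verbose one-line-per-CPU form. On a hypothetical two-node, 16-CPU machine (the BSP, CPU 0, never passes through do_boot_cpu()), the boot log would read roughly:

	Booting Node   0, Processors  #1 #2 #3 #4 #5 #6 #7 Ok.
	Booting Node   1, Processors  #8 #9 #10 #11 #12 #13 #14 #15 Ok.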
@@ -687,7 +718,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
 
-	INIT_WORK(&c_idle.work, do_fork_idle);
+	INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
 
 	alternatives_smp_switch(1);
@@ -713,6 +744,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
 	if (IS_ERR(c_idle.idle)) {
 		printk("failed fork for CPU %d\n", cpu);
+		destroy_work_on_stack(&c_idle.work);
 		return PTR_ERR(c_idle.idle);
 	}
 
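INIT_WORK_ON_STACK() and the destroy_work_on_stack() calls added in this function (one on the early-error return above, one before the normal return below) are the required pairing for a work item living in a stack frame: under CONFIG_DEBUG_OBJECTS_WORK the init registers the object with debugobjects and the destroy must unregister it on every exit path. A minimal sketch of the pattern, independent of smpboot.c:

	#include <linux/workqueue.h>

	static void my_work_fn(struct work_struct *work)
	{
		/* work body */
	}

	static void run_work_from_stack(void)
	{
		struct work_struct w;

		INIT_WORK_ON_STACK(&w, my_work_fn);
		schedule_work(&w);
		flush_work(&w);			/* w must not die before the work runs */
		destroy_work_on_stack(&w);	/* required on every exit path */
	}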
@@ -736,9 +768,8 @@ do_rest:
 	/* start_ip had better be page-aligned! */
 	start_ip = setup_trampoline();
 
 	/* So we see what's up */
-	printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
-		cpu, apicid, start_ip);
+	announce_cpu(cpu, apicid);
 
 	/*
 	 * This grunge runs the startup process for
@@ -787,21 +818,17 @@ do_rest:
 			udelay(100);
 		}
 
-		if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
-			/* number CPUs logically, starting from 1 (BSP is 0) */
-			pr_debug("OK.\n");
-			printk(KERN_INFO "CPU%d: ", cpu);
-			print_cpu_info(&cpu_data(cpu));
-			pr_debug("CPU has booted.\n");
-		} else {
+		if (cpumask_test_cpu(cpu, cpu_callin_mask))
+			pr_debug("CPU%d: has booted.\n", cpu);
+		else {
 			boot_error = 1;
 			if (*((volatile unsigned char *)trampoline_base)
 					== 0xA5)
 				/* trampoline started but...? */
-				printk(KERN_ERR "Stuck ??\n");
+				pr_err("CPU%d: Stuck ??\n", cpu);
 			else
 				/* trampoline code not run */
-				printk(KERN_ERR "Not responding.\n");
+				pr_err("CPU%d: Not responding.\n", cpu);
 			if (apic->inquire_remote_apic)
 				apic->inquire_remote_apic(apicid);
 		}
@@ -831,6 +858,7 @@ do_rest:
 		smpboot_restore_warm_reset_vector();
 	}
 
+	destroy_work_on_stack(&c_idle.work);
 	return boot_error;
 }
 
@@ -1066,9 +1094,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	set_cpu_sibling_map(0);
 
 	enable_IR_x2apic();
-#ifdef CONFIG_X86_64
 	default_setup_apic_routing();
-#endif
 
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");
@@ -1196,11 +1222,12 @@ __init void prefill_possible_map(void)
 
 	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
 
-	if (possible > CONFIG_NR_CPUS) {
+	/* nr_cpu_ids could be reduced via nr_cpus= */
+	if (possible > nr_cpu_ids) {
 		printk(KERN_WARNING
 			"%d Processors exceeds NR_CPUS limit of %d\n",
-			possible, CONFIG_NR_CPUS);
-		possible = CONFIG_NR_CPUS;
+			possible, nr_cpu_ids);
+		possible = nr_cpu_ids;
 	}
 
 	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
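The clamp now compares against nr_cpu_ids rather than the compile-time CONFIG_NR_CPUS because, as the new comment notes, nr_cpu_ids can be reduced at boot via the nr_cpus= parameter (the warning text still says "NR_CPUS limit" but now prints nr_cpu_ids). For example, booting a 16-processor box with

	nr_cpus=4

on the kernel command line leaves nr_cpu_ids at 4 here, so possible is clamped to 4 regardless of how large CONFIG_NR_CPUS was at build time.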
@@ -1250,16 +1277,7 @@ static void __ref remove_cpu_from_maps(int cpu)
 void cpu_disable_common(void)
 {
 	int cpu = smp_processor_id();
-	/*
-	 * HACK:
-	 * Allow any queued timer interrupts to get serviced
-	 * This is only a temporary solution until we cleanup
-	 * fixup_irqs as we do for IA64.
-	 */
-	local_irq_enable();
-	mdelay(1);
 
-	local_irq_disable();
 	remove_siblinginfo(cpu);
 
 	/* It's now safe to remove this processor from the online map */
@@ -1300,14 +1318,16 @@ void native_cpu_die(unsigned int cpu)
 	for (i = 0; i < 10; i++) {
 		/* They ack this in play_dead by setting CPU_DEAD */
 		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-			printk(KERN_INFO "CPU %d is now offline\n", cpu);
+			if (system_state == SYSTEM_RUNNING)
+				pr_info("CPU %u is now offline\n", cpu);
+
 			if (1 == num_online_cpus())
 				alternatives_smp_switch(0);
 			return;
 		}
 		msleep(100);
 	}
-	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+	pr_err("CPU %u didn't die...\n", cpu);
 }
 
 void play_dead_common(void)