-rw-r--r--   arch/x86/boot/header.S       |   3
-rw-r--r--   arch/x86/kernel/cpu/common.c |  27
-rw-r--r--   arch/x86/kernel/irq.c        |  16
-rw-r--r--   arch/x86/kernel/smpboot.c    | 104
-rw-r--r--   arch/x86/platform/efi/efi.c  |   3
5 files changed, 64 insertions, 89 deletions
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 0ca9a5c362bc..84c223479e3c 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -375,8 +375,7 @@ xloadflags:
 # define XLF0 0
 #endif
 
-#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64) && \
-	!defined(CONFIG_EFI_MIXED)
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64)
 /* kernel/boot_param/ramdisk could be loaded above 4g */
 # define XLF1 XLF_CAN_BE_LOADED_ABOVE_4G
 #else
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a135239badb7..a4bcbacdbe0b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1221,6 +1221,17 @@ static void dbg_restore_debug_regs(void)
 #define dbg_restore_debug_regs()
 #endif /* ! CONFIG_KGDB */
 
+static void wait_for_master_cpu(int cpu)
+{
+	/*
+	 * wait for ACK from master CPU before continuing
+	 * with AP initialization
+	 */
+	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
+	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
+		cpu_relax();
+}
+
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
@@ -1236,16 +1247,17 @@ void cpu_init(void)
 	struct task_struct *me;
 	struct tss_struct *t;
 	unsigned long v;
-	int cpu;
+	int cpu = stack_smp_processor_id();
 	int i;
 
+	wait_for_master_cpu(cpu);
+
 	/*
 	 * Load microcode on this cpu if a valid microcode is available.
 	 * This is early microcode loading procedure.
 	 */
 	load_ucode_ap();
 
-	cpu = stack_smp_processor_id();
 	t = &per_cpu(init_tss, cpu);
 	oist = &per_cpu(orig_ist, cpu);
 
@@ -1257,9 +1269,6 @@ void cpu_init(void)
 
 	me = current;
 
-	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
-		panic("CPU#%d already initialized!\n", cpu);
-
 	pr_debug("Initializing CPU#%d\n", cpu);
 
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
@@ -1336,13 +1345,9 @@ void cpu_init(void)
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
-	show_ucode_info_early();
+	wait_for_master_cpu(cpu);
 
-	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
-		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-		for (;;)
-			local_irq_enable();
-	}
+	show_ucode_info_early();
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 283a76a9cc40..11ccfb0a63e7 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -17,6 +17,7 @@
 #include <asm/idle.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
+#include <asm/desc.h>
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/irq_vectors.h>
@@ -334,10 +335,17 @@ int check_irq_vectors_for_cpu_disable(void)
 	for_each_online_cpu(cpu) {
 		if (cpu == this_cpu)
 			continue;
-		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
-		     vector++) {
-			if (per_cpu(vector_irq, cpu)[vector] < 0)
-				count++;
+		/*
+		 * We scan from FIRST_EXTERNAL_VECTOR to first system
+		 * vector. If the vector is marked in the used vectors
+		 * bitmap or an irq is assigned to it, we don't count
+		 * it as available.
+		 */
+		for (vector = FIRST_EXTERNAL_VECTOR;
+		     vector < first_system_vector; vector++) {
+			if (!test_bit(vector, used_vectors) &&
+			    per_cpu(vector_irq, cpu)[vector] < 0)
+				count++;
 		}
 	}
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 34826934d4a7..bc52fac39dd3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -111,7 +111,6 @@ atomic_t init_deasserted;
 static void smp_callin(void)
 {
 	int cpuid, phys_id;
-	unsigned long timeout;
 
 	/*
 	 * If waken up by an INIT in an 82489DX configuration
@@ -130,37 +129,6 @@ static void smp_callin(void)
 	 * (This works even if the APIC is not enabled.)
 	 */
 	phys_id = read_apic_id();
-	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
-		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
-					phys_id, cpuid);
-	}
-	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
-
-	/*
-	 * STARTUP IPIs are fragile beasts as they might sometimes
-	 * trigger some glue motherboard logic. Complete APIC bus
-	 * silence for 1 second, this overestimates the time the
-	 * boot CPU is spending to send the up to 2 STARTUP IPIs
-	 * by a factor of two. This should be enough.
-	 */
-
-	/*
-	 * Waiting 2s total for startup (udelay is not yet working)
-	 */
-	timeout = jiffies + 2*HZ;
-	while (time_before(jiffies, timeout)) {
-		/*
-		 * Has the boot CPU finished it's STARTUP sequence?
-		 */
-		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
-			break;
-		cpu_relax();
-	}
-
-	if (!time_before(jiffies, timeout)) {
-		panic("%s: CPU%d started up but did not get a callout!\n",
-			__func__, cpuid);
-	}
 
 	/*
 	 * the boot CPU has finished the init stage and is spinning
@@ -750,8 +718,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 	unsigned long start_ip = real_mode_header->trampoline_start;
 
 	unsigned long boot_error = 0;
-	int timeout;
 	int cpu0_nmi_registered = 0;
+	unsigned long timeout;
 
 	/* Just in case we booted with a single CPU. */
 	alternatives_enable_smp();
@@ -799,6 +767,15 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 	}
 
 	/*
+	 * AP might wait on cpu_callout_mask in cpu_init() with
+	 * cpu_initialized_mask set if previous attempt to online
+	 * it timed-out. Clear cpu_initialized_mask so that after
+	 * INIT/SIPI it could start with a clean state.
+	 */
+	cpumask_clear_cpu(cpu, cpu_initialized_mask);
+	smp_mb();
+
+	/*
 	 * Wake up a CPU in difference cases:
 	 * - Use the method in the APIC driver if it's defined
 	 * Otherwise,
@@ -810,58 +787,41 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
 						     &cpu0_nmi_registered);
 
+
 	if (!boot_error) {
 		/*
-		 * allow APs to start initializing.
+		 * Wait 10s total for a response from AP
 		 */
-		pr_debug("Before Callout %d\n", cpu);
-		cpumask_set_cpu(cpu, cpu_callout_mask);
-		pr_debug("After Callout %d\n", cpu);
+		boot_error = -1;
+		timeout = jiffies + 10*HZ;
+		while (time_before(jiffies, timeout)) {
+			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
+				/*
+				 * Tell AP to proceed with initialization
+				 */
+				cpumask_set_cpu(cpu, cpu_callout_mask);
+				boot_error = 0;
+				break;
+			}
+			udelay(100);
+			schedule();
+		}
+	}
 
+	if (!boot_error) {
 		/*
-		 * Wait 5s total for a response
+		 * Wait till AP completes initial initialization
 		 */
-		for (timeout = 0; timeout < 50000; timeout++) {
-			if (cpumask_test_cpu(cpu, cpu_callin_mask))
-				break;	/* It has booted */
-			udelay(100);
+		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
 			/*
 			 * Allow other tasks to run while we wait for the
 			 * AP to come online. This also gives a chance
 			 * for the MTRR work(triggered by the AP coming online)
 			 * to be completed in the stop machine context.
 			 */
+			udelay(100);
 			schedule();
 		}
-
-		if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
-			print_cpu_msr(&cpu_data(cpu));
-			pr_debug("CPU%d: has booted.\n", cpu);
-		} else {
-			boot_error = 1;
-			if (*trampoline_status == 0xA5A5A5A5)
-				/* trampoline started but...? */
-				pr_err("CPU%d: Stuck ??\n", cpu);
-			else
-				/* trampoline code not run */
-				pr_err("CPU%d: Not responding\n", cpu);
-			if (apic->inquire_remote_apic)
-				apic->inquire_remote_apic(apicid);
-		}
-	}
-
-	if (boot_error) {
-		/* Try to put things back the way they were before ... */
-		numa_remove_cpu(cpu); /* was set by numa_add_cpu */
-
-		/* was set by do_boot_cpu() */
-		cpumask_clear_cpu(cpu, cpu_callout_mask);
-
-		/* was set by cpu_init() */
-		cpumask_clear_cpu(cpu, cpu_initialized_mask);
-
-		set_cpu_present(cpu, false);
-		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 	}
 
 	/* mark "stuck" area as not stuck */
@@ -921,7 +881,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	err = do_boot_cpu(apicid, cpu, tidle);
 	if (err) {
-		pr_debug("do_boot_cpu failed %d\n", err);
+		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
 		return -EIO;
 	}
 
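
For context, the handshake introduced above (the AP announces itself via cpu_initialized_mask in wait_for_master_cpu(), the boot CPU acknowledges via cpu_callout_mask in do_boot_cpu(), and the AP reports completion via cpu_callin_mask) can be modelled in user space. This is only an illustrative sketch, assuming three atomic flags and a pthread stand in for the cpumasks, INIT/SIPI and the jiffies-based timeout; it is not kernel code.

/* Userspace model of the revised boot-CPU <-> AP handshake (illustration only). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int cpu_initialized;	/* AP: "I have reached cpu_init()" */
static atomic_int cpu_callout;		/* boot CPU: "proceed with initialization" */
static atomic_int cpu_callin;		/* AP: "initialization complete" */

static void *ap_thread(void *arg)
{
	(void)arg;
	/* wait_for_master_cpu(): announce ourselves, then spin until ACKed */
	atomic_store(&cpu_initialized, 1);
	while (!atomic_load(&cpu_callout))
		;	/* cpu_relax() in the kernel */

	/* ... per-CPU initialization would run here ... */
	atomic_store(&cpu_callin, 1);	/* roughly smp_callin() */
	return NULL;
}

int main(void)
{
	pthread_t ap;
	int spins;

	atomic_store(&cpu_initialized, 0);	/* cpumask_clear_cpu() + smp_mb() */
	pthread_create(&ap, NULL, ap_thread, NULL);	/* "send INIT/SIPI" */

	/* bounded wait (10s in the kernel) for the AP to show up */
	for (spins = 0; spins < 100000; spins++) {
		if (atomic_load(&cpu_initialized)) {
			atomic_store(&cpu_callout, 1);	/* tell the AP to proceed */
			break;
		}
		usleep(100);
	}

	if (!atomic_load(&cpu_callout)) {
		fprintf(stderr, "AP did not respond\n");
		return 1;
	}

	/* unbounded wait until the AP has finished (cpu_callin_mask) */
	while (!atomic_load(&cpu_callin))
		usleep(100);

	printf("AP has booted\n");
	pthread_join(ap, NULL);
	return 0;
}
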
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 3781dd39e8bd..4d36932ca4f2 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -919,6 +919,9 @@ static void __init save_runtime_map(void)
 	void *tmp, *p, *q = NULL;
 	int count = 0;
 
+	if (efi_enabled(EFI_OLD_MEMMAP))
+		return;
+
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
 