about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorGlauber de Oliveira Costa <gcosta@redhat.com>2008-03-19 13:25:51 -0400
committerIngo Molnar <mingo@elte.hu>2008-04-17 11:41:02 -0400
commit6becedbb06072c5741d4057b9facecb4b3143711 (patch)
tree113749313f8f025ec7e5c44d27b3a516785e4608
parente481fcf8563d300e7f8875cae5fdc41941d29de0 (diff)
x86: minor adjustments for do_boot_cpu
This patch provides minor adjustments for do_boot_cpus in both architectures to allow for integration. Signed-off-by: Glauber Costa <gcosta@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--arch/x86/kernel/smpboot_32.c22
-rw-r--r--arch/x86/kernel/smpboot_64.c15
2 files changed, 20 insertions, 17 deletions
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index bd2f8863efa2..5165b11d8aac 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -556,7 +556,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
556 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu. 556 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
557 */ 557 */
558{ 558{
559 unsigned long boot_error; 559 unsigned long boot_error = 0;
560 int timeout; 560 int timeout;
561 unsigned long start_eip; 561 unsigned long start_eip;
562 unsigned short nmi_high = 0, nmi_low = 0; 562 unsigned short nmi_high = 0, nmi_low = 0;
@@ -566,11 +566,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
566 }; 566 };
567 INIT_WORK(&c_idle.work, do_fork_idle); 567 INIT_WORK(&c_idle.work, do_fork_idle);
568 568
569 /* 569 alternatives_smp_switch(1);
570 * Save current MTRR state in case it was changed since early boot
571 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
572 */
573 mtrr_save_state();
574 570
575 c_idle.idle = get_idle_for_cpu(cpu); 571 c_idle.idle = get_idle_for_cpu(cpu);
576 572
@@ -607,8 +603,6 @@ do_rest:
607 /* start_eip had better be page-aligned! */ 603 /* start_eip had better be page-aligned! */
608 start_eip = setup_trampoline(); 604 start_eip = setup_trampoline();
609 605
610 alternatives_smp_switch(1);
611
612 /* So we see what's up */ 606 /* So we see what's up */
613 printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip); 607 printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
614 /* Stack for startup_32 can be just as for start_secondary onwards */ 608 /* Stack for startup_32 can be just as for start_secondary onwards */
@@ -628,6 +622,12 @@ do_rest:
628 store_NMI_vector(&nmi_high, &nmi_low); 622 store_NMI_vector(&nmi_high, &nmi_low);
629 623
630 smpboot_setup_warm_reset_vector(start_eip); 624 smpboot_setup_warm_reset_vector(start_eip);
625 /*
626 * Be paranoid about clearing APIC errors.
627 */
628 apic_write(APIC_ESR, 0);
629 apic_read(APIC_ESR);
630
631 631
632 /* 632 /*
633 * Starting actual IPI sequence... 633 * Starting actual IPI sequence...
@@ -864,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
864 return -EINVAL; 864 return -EINVAL;
865 } 865 }
866 866
867 /*
868 * Save current MTRR state in case it was changed since early boot
869 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
870 */
871 mtrr_save_state();
872
867 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; 873 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
868 874
869 __smp_prepare_cpu(cpu); 875 __smp_prepare_cpu(cpu);
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index e93fff42ec32..7d1b4cb380db 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -432,7 +432,7 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
432 */ 432 */
433static int __cpuinit do_boot_cpu(int cpu, int apicid) 433static int __cpuinit do_boot_cpu(int cpu, int apicid)
434{ 434{
435 unsigned long boot_error; 435 unsigned long boot_error = 0;
436 int timeout; 436 int timeout;
437 unsigned long start_rip; 437 unsigned long start_rip;
438 struct create_idle c_idle = { 438 struct create_idle c_idle = {
@@ -531,11 +531,6 @@ do_rest:
531 apic_read(APIC_ESR); 531 apic_read(APIC_ESR);
532 532
533 /* 533 /*
534 * Status is now clean
535 */
536 boot_error = 0;
537
538 /*
539 * Starting actual IPI sequence... 534 * Starting actual IPI sequence...
540 */ 535 */
541 boot_error = wakeup_secondary_via_INIT(apicid, start_rip); 536 boot_error = wakeup_secondary_via_INIT(apicid, start_rip);
@@ -564,7 +559,7 @@ do_rest:
564 print_cpu_info(&cpu_data(cpu)); 559 print_cpu_info(&cpu_data(cpu));
565 } else { 560 } else {
566 boot_error = 1; 561 boot_error = 1;
567 if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE)) 562 if (*((volatile unsigned char *)trampoline_base)
568 == 0xA5) 563 == 0xA5)
569 /* trampoline started but...? */ 564 /* trampoline started but...? */
570 printk("Stuck ??\n"); 565 printk("Stuck ??\n");
@@ -583,10 +578,12 @@ do_rest:
583 cpu_clear(cpu, cpu_present_map); 578 cpu_clear(cpu, cpu_present_map);
584 cpu_clear(cpu, cpu_possible_map); 579 cpu_clear(cpu, cpu_possible_map);
585 per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; 580 per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
586 return -EIO;
587 } 581 }
588 582
589 return 0; 583 /* mark "stuck" area as not stuck */
584 *((volatile unsigned long *)trampoline_base) = 0;
585
586 return boot_error;
590} 587}
591 588
592cycles_t cacheflush_time; 589cycles_t cacheflush_time;