Diffstat (limited to 'arch/i386/kernel/cpu/common.c')
-rw-r--r--  arch/i386/kernel/cpu/common.c  217
1 file changed, 61 insertions, 156 deletions
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index dcbbd0a8bfc2..794d593c47eb 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -18,15 +18,37 @@
 #include <asm/apic.h>
 #include <mach_apic.h>
 #endif
-#include <asm/pda.h>
 
 #include "cpu.h"
 
-DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
-EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
+DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
+	[GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
+	[GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
+	[GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
+	[GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
+	/*
+	 * Segments used for calling PnP BIOS have byte granularity.
+	 * The code segments and data segments have fixed 64k limits,
+	 * the transfer segment sizes are set at run time.
+	 */
+	[GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 }, /* 32-bit code */
+	[GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 }, /* 16-bit code */
+	[GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */
+	[GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 }, /* 16-bit data */
+	[GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 }, /* 16-bit data */
+	/*
+	 * The APM segments have byte granularity and their bases
+	 * are set at run time.  All have 64k limits.
+	 */
+	[GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 }, /* 32-bit code */
+	/* 16-bit code */
+	[GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
+	[GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */
+
+	[GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
+	[GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
+} };
+EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_fxsr __cpuinitdata;
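
The { a, b } pairs in the gdt_page initializer above are raw IA-32 segment descriptors split into two 32-bit words. The stand-alone sketch below is illustrative only (it is not part of this patch, and its struct/function names are invented for the example); it unpacks one entry to show where the base, limit, access byte and flag bits sit.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustration: decode the two descriptor words used in the table above. */
    struct seg_fields {
    	uint32_t base;
    	uint32_t limit;		/* in units of the granularity bit */
    	uint8_t  access;	/* e.g. 0x9a = present, DPL0, readable code */
    	uint8_t  flags;		/* e.g. 0xc = 4k granularity, 32-bit segment */
    };

    static struct seg_fields unpack(uint32_t a, uint32_t b)
    {
    	struct seg_fields f;

    	f.limit  = (a & 0xffff) | (b & 0x000f0000);
    	f.base   = (a >> 16) | ((b & 0xff) << 16) | (b & 0xff000000);
    	f.access = (b >> 8) & 0xff;
    	f.flags  = (b >> 20) & 0xf;
    	return f;
    }

    int main(void)
    {
    	/* { 0x0000ffff, 0x00cf9a00 } is GDT_ENTRY_KERNEL_CS above */
    	struct seg_fields f = unpack(0x0000ffff, 0x00cf9a00);

    	printf("base=%#x limit=%#x access=%#x flags=%#x\n",
    	       f.base, f.limit, f.access, f.flags);
    	return 0;	/* prints base=0 limit=0xfffff access=0x9a flags=0xc */
    }
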
@@ -368,7 +390,7 @@ __setup("serialnumber", x86_serial_nr_setup);
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
@@ -479,15 +501,22 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
 	/* Init Machine Check Exception if available. */
 	mcheck_init(c);
+}
 
-	if (c == &boot_cpu_data)
-		sysenter_setup();
+void __init identify_boot_cpu(void)
+{
+	identify_cpu(&boot_cpu_data);
+	sysenter_setup();
 	enable_sep_cpu();
+	mtrr_bp_init();
+}
 
-	if (c == &boot_cpu_data)
-		mtrr_bp_init();
-	else
-		mtrr_ap_init();
+void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+{
+	BUG_ON(c == &boot_cpu_data);
+	identify_cpu(c);
+	enable_sep_cpu();
+	mtrr_ap_init();
 }
 
 #ifdef CONFIG_X86_HT
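
Since identify_cpu() is now static, code outside this file is expected to go through the two new entry points instead. A minimal sketch of how the boot and secondary paths would call them (the wrapper functions and the cpu_data[] indexing here are assumptions for illustration, not part of this patch):

    /* Hypothetical call sites, illustration only. */
    void __init example_boot_path(void)
    {
    	/* boot CPU: probe boot_cpu_data, then sysenter/MTRR BP setup */
    	identify_boot_cpu();
    }

    void __cpuinit example_secondary_path(int cpu)
    {
    	struct cpuinfo_x86 *c = &cpu_data[cpu];	/* assumed per-CPU slot */

    	*c = boot_cpu_data;		/* start from the boot CPU's view */
    	identify_secondary_cpu(c);	/* re-probe, then mtrr_ap_init() */
    }
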
@@ -601,129 +630,36 @@ void __init early_cpu_init(void)
 #endif
 }
 
-/* Make sure %gs is initialized properly in idle threads */
+/* Make sure %fs is initialized properly in idle threads */
 struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
-	regs->xfs = __KERNEL_PDA;
+	regs->xfs = __KERNEL_PERCPU;
 	return regs;
 }
 
-static __cpuinit int alloc_gdt(int cpu)
+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(void)
 {
-	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-	struct desc_struct *gdt;
-	struct i386_pda *pda;
-
-	gdt = (struct desc_struct *)cpu_gdt_descr->address;
-	pda = cpu_pda(cpu);
-
-	/*
-	 * This is a horrible hack to allocate the GDT.  The problem
-	 * is that cpu_init() is called really early for the boot CPU
-	 * (and hence needs bootmem) but much later for the secondary
-	 * CPUs, when bootmem will have gone away
-	 */
-	if (NODE_DATA(0)->bdata->node_bootmem_map) {
-		BUG_ON(gdt != NULL || pda != NULL);
-
-		gdt = alloc_bootmem_pages(PAGE_SIZE);
-		pda = alloc_bootmem(sizeof(*pda));
-		/* alloc_bootmem(_pages) panics on failure, so no check */
-
-		memset(gdt, 0, PAGE_SIZE);
-		memset(pda, 0, sizeof(*pda));
-	} else {
-		/* GDT and PDA might already have been allocated if
-		   this is a CPU hotplug re-insertion. */
-		if (gdt == NULL)
-			gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
-
-		if (pda == NULL)
-			pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
-
-		if (unlikely(!gdt || !pda)) {
-			free_pages((unsigned long)gdt, 0);
-			kfree(pda);
-			return 0;
-		}
-	}
-
-	cpu_gdt_descr->address = (unsigned long)gdt;
-	cpu_pda(cpu) = pda;
-
-	return 1;
-}
+	struct Xgt_desc_struct gdt_descr;
 
-/* Initial PDA used by boot CPU */
-struct i386_pda boot_pda = {
-	._pda = &boot_pda,
-	.cpu_number = 0,
-	.pcurrent = &init_task,
-};
-
-static inline void set_kernel_fs(void)
-{
-	/* Set %fs for this CPU's PDA.  Memory clobber is to create a
-	   barrier with respect to any PDA operations, so the compiler
-	   doesn't move any before here. */
-	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_PDA) : "memory");
+	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
 }
 
-/* Initialize the CPU's GDT and PDA.  The boot CPU does this for
-   itself, but secondaries find this done for them. */
-__cpuinit int init_gdt(int cpu, struct task_struct *idle)
-{
-	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-	struct desc_struct *gdt;
-	struct i386_pda *pda;
-
-	/* For non-boot CPUs, the GDT and PDA should already have been
-	   allocated. */
-	if (!alloc_gdt(cpu)) {
-		printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu);
-		return 0;
-	}
-
-	gdt = (struct desc_struct *)cpu_gdt_descr->address;
-	pda = cpu_pda(cpu);
-
-	BUG_ON(gdt == NULL || pda == NULL);
-
-	/*
-	 * Initialize the per-CPU GDT with the boot GDT,
-	 * and set up the GDT descriptor:
-	 */
-	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
-	cpu_gdt_descr->size = GDT_SIZE - 1;
-
-	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
-			(u32 *)&gdt[GDT_ENTRY_PDA].b,
-			(unsigned long)pda, sizeof(*pda) - 1,
-			0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
-
-	memset(pda, 0, sizeof(*pda));
-	pda->_pda = pda;
-	pda->cpu_number = cpu;
-	pda->pcurrent = idle;
-
-	return 1;
-}
-
-void __cpuinit cpu_set_gdt(int cpu)
-{
-	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-
-	/* Reinit these anyway, even if they've already been done (on
-	   the boot CPU, this will transition from the boot gdt+pda to
-	   the real ones). */
-	load_gdt(cpu_gdt_descr);
-	set_kernel_fs();
-}
-
-/* Common CPU init for both boot and secondary CPUs */
-static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __cpuinit cpu_init(void)
 {
+	int cpu = smp_processor_id();
+	struct task_struct *curr = current;
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
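
switch_to_new_gdt() above relies on get_cpu_gdt_table() handing back the per-CPU copy declared by the gdt_page variable in the first hunk. A sketch of what the companion header-side definitions are assumed to look like (they are not part of this diff and are shown only to make the gdt_page/GDT relationship explicit):

    /* Assumed header-side companions, illustration only. */
    struct gdt_page {
    	struct desc_struct gdt[GDT_ENTRIES];
    } __attribute__((aligned(PAGE_SIZE)));

    DECLARE_PER_CPU(struct gdt_page, gdt_page);

    static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
    {
    	/* one page-aligned GDT per CPU, living in the per-cpu area */
    	return per_cpu(gdt_page, cpu).gdt;
    }
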
@@ -744,6 +680,7 @@ static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
 	}
 
 	load_idt(&idt_descr);
+	switch_to_new_gdt();
 
 	/*
 	 * Set up and load the per-CPU TSS and LDT
@@ -783,38 +720,6 @@ static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
 	mxcsr_feature_mask_init();
 }
 
-/* Entrypoint to initialize secondary CPU */
-void __cpuinit secondary_cpu_init(void)
-{
-	int cpu = smp_processor_id();
-	struct task_struct *curr = current;
-
-	_cpu_init(cpu, curr);
-}
-
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
- */
-void __cpuinit cpu_init(void)
-{
-	int cpu = smp_processor_id();
-	struct task_struct *curr = current;
-
-	/* Set up the real GDT and PDA, so we can transition from the
-	   boot versions. */
-	if (!init_gdt(cpu, curr)) {
-		/* failed to allocate something; not much we can do... */
-		for (;;)
-			local_irq_enable();
-	}
-
-	cpu_set_gdt(cpu);
-	_cpu_init(cpu, curr);
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 void __cpuinit cpu_uninit(void)
 {