author      Jeremy Fitzhardinge <jeremy@goop.org>    2006-12-06 20:14:02 -0500
committer   Andi Kleen <andi@basil.nowhere.org>      2006-12-06 20:14:02 -0500
commit      62111195800d80c66cdc69063ea3145878c99fbf (patch)
tree        35bc9792b3ac232e70e106ff2f4c0193c3bb72ff /arch/i386/kernel/cpu/common.c
parent      9ca36101a8d74704d78f10910f89d62de96f9dc8 (diff)
[PATCH] i386: Initialize the per-CPU data area
When a CPU is brought up, a PDA and a GDT are allocated for it. The GDT's
__KERNEL_PDA entry is pointed at the allocated PDA memory, so that all
references through this segment descriptor resolve to that CPU's PDA.
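
To illustrate the mechanism: once %gs is loaded with the __KERNEL_PDA
selector, reaching a PDA field costs a single segment-relative load. A
minimal sketch of such an access (a hypothetical helper; the real accessors
arrive with the asm/pda.h patches elsewhere in this series):

/* Hypothetical sketch: with the __KERNEL_PDA descriptor based at this
 * CPU's PDA, a read at %gs:0 returns the _pda self-pointer that
 * init_gdt() below stores as the PDA's first field. */
static inline struct i386_pda *read_pda_self(void)
{
        struct i386_pda *pda;

        asm("movl %%gs:0, %0" : "=r" (pda));
        return pda;
}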
This patch rearranges CPU initialization a bit, so that the GDT and PDA are
set up as early as possible in cpu_init(). For secondary CPUs, the GDT and
PDA are preallocated and initialized, so all a secondary CPU needs to do is
set up its LDT and load %gs. This will be important once smp_processor_id()
and current use the PDA.
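
For a sense of where this is headed, the follow-on patches can then define
both operations as one %gs-relative load each; a sketch using assumed PDA
field names (cpu_number, pcurrent), neither of which exists yet in this
patch:

/* Sketch (assumed names): once the PDA caches the CPU number and the
 * current task pointer, these no longer depend on the kernel stack. */
#define smp_processor_id()      read_pda(cpu_number)    /* assumed field */
#define current                 read_pda(pcurrent)      /* assumed field */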
In all cases, the PDA is set up in head.S, before a CPU starts running C code,
so the PDA is always available.
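
Schematically, that head.S step just loads %gs with the PDA selector before
the first C instruction; expressed here as C with inline assembly (the real
code is a couple of assembly instructions in head.S):

/* Sketch of the head.S step in C form: point %gs at the PDA segment
 * so that %gs-relative PDA references work as soon as C code runs. */
static inline void load_pda_segment(void)
{
        unsigned int sel = __KERNEL_PDA;  /* selector for GDT_ENTRY_PDA */

        asm volatile("movl %0, %%gs" : : "r" (sel));
}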
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Chuck Ebbert <76306.1226@compuserve.com>
Cc: Zachary Amsden <zach@vmware.com>
Cc: Jan Beulich <jbeulich@novell.com>
Cc: Andi Kleen <ak@suse.de>
Cc: James Bottomley <James.Bottomley@SteelEye.com>
Cc: Matt Tolentino <matthew.e.tolentino@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Diffstat (limited to 'arch/i386/kernel/cpu/common.c')
-rw-r--r--   arch/i386/kernel/cpu/common.c | 177
1 file changed, 134 insertions(+), 43 deletions(-)
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 5532fc4e1bf0..2534e25ed745 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -18,12 +18,16 @@
 #include <asm/apic.h>
 #include <mach_apic.h>
 #endif
+#include <asm/pda.h>
 
 #include "cpu.h"
 
 DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
 EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
 
+struct i386_pda *_cpu_pda[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(_cpu_pda);
+
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_fxsr __cpuinitdata;
 static int disable_x86_serial_nr __cpuinitdata = 1;
@@ -588,41 +592,16 @@ void __init early_cpu_init(void)
         disable_pse = 1;
 #endif
 }
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
- */
-void __cpuinit cpu_init(void)
+
+__cpuinit int alloc_gdt(int cpu)
 {
-        int cpu = smp_processor_id();
-        struct tss_struct * t = &per_cpu(init_tss, cpu);
-        struct thread_struct *thread = &current->thread;
-        struct desc_struct *gdt;
         struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+        struct desc_struct *gdt;
+        struct i386_pda *pda;
 
-        if (cpu_test_and_set(cpu, cpu_initialized)) {
-                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-                for (;;) local_irq_enable();
-        }
-        printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+        gdt = (struct desc_struct *)cpu_gdt_descr->address;
+        pda = cpu_pda(cpu);
 
-        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
-                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-        if (tsc_disable && cpu_has_tsc) {
-                printk(KERN_NOTICE "Disabling TSC...\n");
-                /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
-                clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-                set_in_cr4(X86_CR4_TSD);
-        }
-
-        /* The CPU hotplug case */
-        if (cpu_gdt_descr->address) {
-                gdt = (struct desc_struct *)cpu_gdt_descr->address;
-                memset(gdt, 0, PAGE_SIZE);
-                goto old_gdt;
-        }
         /*
          * This is a horrible hack to allocate the GDT.  The problem
          * is that cpu_init() is called really early for the boot CPU
@@ -630,36 +609,117 @@ void __cpuinit cpu_init(void)
          * CPUs, when bootmem will have gone away
          */
         if (NODE_DATA(0)->bdata->node_bootmem_map) {
-                gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
-                /* alloc_bootmem_pages panics on failure, so no check */
+                BUG_ON(gdt != NULL || pda != NULL);
+
+                gdt = alloc_bootmem_pages(PAGE_SIZE);
+                pda = alloc_bootmem(sizeof(*pda));
+                /* alloc_bootmem(_pages) panics on failure, so no check */
+
                 memset(gdt, 0, PAGE_SIZE);
+                memset(pda, 0, sizeof(*pda));
         } else {
-                gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
-                if (unlikely(!gdt)) {
-                        printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
-                        for (;;)
-                                local_irq_enable();
+                /* GDT and PDA might already have been allocated if
+                   this is a CPU hotplug re-insertion. */
+                if (gdt == NULL)
+                        gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+
+                if (pda == NULL)
+                        pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
+
+                if (unlikely(!gdt || !pda)) {
+                        free_pages((unsigned long)gdt, 0);
+                        kfree(pda);
+                        return 0;
                 }
         }
-old_gdt:
+
+        cpu_gdt_descr->address = (unsigned long)gdt;
+        cpu_pda(cpu) = pda;
+
+        return 1;
+}
+
+/* Initial PDA used by boot CPU */
+struct i386_pda boot_pda = {
+        ._pda = &boot_pda,
+};
+
+/* Initialize the CPU's GDT and PDA.  The boot CPU does this for
+   itself, but secondaries find this done for them. */
+__cpuinit int init_gdt(int cpu, struct task_struct *idle)
+{
+        struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+        struct desc_struct *gdt;
+        struct i386_pda *pda;
+
+        /* For non-boot CPUs, the GDT and PDA should already have been
+           allocated. */
+        if (!alloc_gdt(cpu)) {
+                printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu);
+                return 0;
+        }
+
+        gdt = (struct desc_struct *)cpu_gdt_descr->address;
+        pda = cpu_pda(cpu);
+
+        BUG_ON(gdt == NULL || pda == NULL);
+
         /*
          * Initialize the per-CPU GDT with the boot GDT,
          * and set up the GDT descriptor:
          */
         memcpy(gdt, cpu_gdt_table, GDT_SIZE);
         cpu_gdt_descr->size = GDT_SIZE - 1;
-        cpu_gdt_descr->address = (unsigned long)gdt;
 
+        pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
+                        (u32 *)&gdt[GDT_ENTRY_PDA].b,
+                        (unsigned long)pda, sizeof(*pda) - 1,
+                        0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
+
+        memset(pda, 0, sizeof(*pda));
+        pda->_pda = pda;
+
+        return 1;
+}
+
+/* Common CPU init for both boot and secondary CPUs */
+static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+{
+        struct tss_struct * t = &per_cpu(init_tss, cpu);
+        struct thread_struct *thread = &curr->thread;
+        struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+
+        /* Reinit these anyway, even if they've already been done (on
+           the boot CPU, this will transition from the boot gdt+pda to
+           the real ones). */
         load_gdt(cpu_gdt_descr);
+
+        if (cpu_test_and_set(cpu, cpu_initialized)) {
+                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+                for (;;) local_irq_enable();
+        }
+
+        printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
+                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+        if (tsc_disable && cpu_has_tsc) {
+                printk(KERN_NOTICE "Disabling TSC...\n");
+                /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+                clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+                set_in_cr4(X86_CR4_TSD);
+        }
+
         load_idt(&idt_descr);
 
         /*
          * Set up and load the per-CPU TSS and LDT
          */
         atomic_inc(&init_mm.mm_count);
-        current->active_mm = &init_mm;
-        BUG_ON(current->mm);
-        enter_lazy_tlb(&init_mm, current);
+        curr->active_mm = &init_mm;
+        if (curr->mm)
+                BUG();
+        enter_lazy_tlb(&init_mm, curr);
 
         load_esp0(t, thread);
         set_tss_desc(cpu,t);
@@ -690,6 +750,37 @@ old_gdt:
         mxcsr_feature_mask_init();
 }
 
+/* Entrypoint to initialize secondary CPU */
+void __cpuinit secondary_cpu_init(void)
+{
+        int cpu = smp_processor_id();
+        struct task_struct *curr = current;
+
+        _cpu_init(cpu, curr);
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __cpuinit cpu_init(void)
+{
+        int cpu = smp_processor_id();
+        struct task_struct *curr = current;
+
+        /* Set up the real GDT and PDA, so we can transition from the
+           boot versions. */
+        if (!init_gdt(cpu, curr)) {
+                /* failed to allocate something; not much we can do... */
+                for (;;)
+                        local_irq_enable();
+        }
+
+        _cpu_init(cpu, curr);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void __cpuinit cpu_uninit(void)
 {
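
For reference, the PDA plumbing this file now leans on comes from the
companion asm/pda.h changes in the same series; a sketch consistent with how
common.c uses it above (not the verbatim header):

/* Sketch of the asm/pda.h pieces used above. cpu_pda() must expand to
 * an lvalue, since alloc_gdt() assigns through it (cpu_pda(cpu) = pda). */
struct i386_pda
{
        struct i386_pda *_pda;          /* self-pointer, set in init_gdt() */
};

extern struct i386_pda *_cpu_pda[];     /* defined in common.c above */
#define cpu_pda(i)      (_cpu_pda[i])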