author     Rusty Russell <rusty@rustcorp.com.au>    2007-05-02 13:27:10 -0400
committer  Andi Kleen <andi@basil.nowhere.org>      2007-05-02 13:27:10 -0400
commit     ae1ee11be77f51cedb6c569887dddc70c163ab6d (patch)
tree       e579a6a6d10c6835cab9af47a2795bf40f669da6 /arch/i386/kernel/cpu/common.c
parent     8f9aeca7a081d81c4c9862be1e04f15b5ab5461f (diff)
[PATCH] i386: Use per-cpu variables for GDT, PDA
Allocating PDA and GDT at boot is a pain. Using simple per-cpu variables adds
happiness (although we need the GDT page-aligned for Xen, which we do in a
followup patch).
[akpm@linux-foundation.org: build fix]
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
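For readers who have not met the mechanism the patch switches to, the sketch below illustrates the DEFINE_PER_CPU pattern in isolation. It is not part of the patch; demo_state and init_demo_state() are invented names standing in for the GDT/PDA data. The point is that storage for every possible CPU is reserved in the per-cpu area at build time, so CPU bring-up no longer contains an allocation step that can fail.

#include <linux/percpu.h>

/* Invented example type; stands in for the real GDT/PDA data. */
struct demo_state {
        int cpu_number;
        int initialised;
};

/* Space for every possible CPU is set aside at build time, so there is
 * no bootmem-vs-slab allocation dance at CPU bring-up. */
static DEFINE_PER_CPU(struct demo_state, demo_state);

static void init_demo_state(int cpu)
{
        /* per_cpu() picks out CPU "cpu"'s copy of the variable;
         * nothing here can fail with -ENOMEM. */
        struct demo_state *s = &per_cpu(demo_state, cpu);

        s->cpu_number = cpu;
        s->initialised = 1;
}

This is the shape the patch gives cpu_gdt and _cpu_pda, which is why alloc_gdt() and its error paths disappear from the diff below.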
Diffstat (limited to 'arch/i386/kernel/cpu/common.c')
-rw-r--r--  arch/i386/kernel/cpu/common.c  94
1 file changed, 13 insertions(+), 81 deletions(-)
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index dcbbd0a8bfc2..2335f4464ead 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -25,8 +25,10 @@
 DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
 EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
 
-struct i386_pda *_cpu_pda[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
+DEFINE_PER_CPU(struct desc_struct, cpu_gdt[GDT_ENTRIES]);
+
+DEFINE_PER_CPU(struct i386_pda, _cpu_pda);
+EXPORT_PER_CPU_SYMBOL(_cpu_pda);
 
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_fxsr __cpuinitdata;
@@ -609,52 +611,6 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
         return regs;
 }
 
-static __cpuinit int alloc_gdt(int cpu)
-{
-        struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-        struct desc_struct *gdt;
-        struct i386_pda *pda;
-
-        gdt = (struct desc_struct *)cpu_gdt_descr->address;
-        pda = cpu_pda(cpu);
-
-        /*
-         * This is a horrible hack to allocate the GDT.  The problem
-         * is that cpu_init() is called really early for the boot CPU
-         * (and hence needs bootmem) but much later for the secondary
-         * CPUs, when bootmem will have gone away
-         */
-        if (NODE_DATA(0)->bdata->node_bootmem_map) {
-                BUG_ON(gdt != NULL || pda != NULL);
-
-                gdt = alloc_bootmem_pages(PAGE_SIZE);
-                pda = alloc_bootmem(sizeof(*pda));
-                /* alloc_bootmem(_pages) panics on failure, so no check */
-
-                memset(gdt, 0, PAGE_SIZE);
-                memset(pda, 0, sizeof(*pda));
-        } else {
-                /* GDT and PDA might already have been allocated if
-                   this is a CPU hotplug re-insertion. */
-                if (gdt == NULL)
-                        gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
-
-                if (pda == NULL)
-                        pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
-
-                if (unlikely(!gdt || !pda)) {
-                        free_pages((unsigned long)gdt, 0);
-                        kfree(pda);
-                        return 0;
-                }
-        }
-
-        cpu_gdt_descr->address = (unsigned long)gdt;
-        cpu_pda(cpu) = pda;
-
-        return 1;
-}
-
 /* Initial PDA used by boot CPU */
 struct i386_pda boot_pda = {
         ._pda = &boot_pda,
@@ -670,31 +626,17 @@ static inline void set_kernel_fs(void)
         asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_PDA) : "memory");
 }
 
-/* Initialize the CPU's GDT and PDA.  The boot CPU does this for
-   itself, but secondaries find this done for them. */
-__cpuinit int init_gdt(int cpu, struct task_struct *idle)
+/* Initialize the CPU's GDT and PDA.  This is either the boot CPU doing itself
+   (still using cpu_gdt_table), or a CPU doing it for a secondary which
+   will soon come up. */
+__cpuinit void init_gdt(int cpu, struct task_struct *idle)
 {
         struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
-        struct desc_struct *gdt;
-        struct i386_pda *pda;
-
-        /* For non-boot CPUs, the GDT and PDA should already have been
-           allocated. */
-        if (!alloc_gdt(cpu)) {
-                printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu);
-                return 0;
-        }
+        struct desc_struct *gdt = per_cpu(cpu_gdt, cpu);
+        struct i386_pda *pda = &per_cpu(_cpu_pda, cpu);
 
-        gdt = (struct desc_struct *)cpu_gdt_descr->address;
-        pda = cpu_pda(cpu);
-
-        BUG_ON(gdt == NULL || pda == NULL);
-
-        /*
-         * Initialize the per-CPU GDT with the boot GDT,
-         * and set up the GDT descriptor:
-         */
         memcpy(gdt, cpu_gdt_table, GDT_SIZE);
+        cpu_gdt_descr->address = (unsigned long)gdt;
         cpu_gdt_descr->size = GDT_SIZE - 1;
 
         pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
@@ -706,17 +648,12 @@ __cpuinit int init_gdt(int cpu, struct task_struct *idle)
         pda->_pda = pda;
         pda->cpu_number = cpu;
         pda->pcurrent = idle;
-
-        return 1;
 }
 
 void __cpuinit cpu_set_gdt(int cpu)
 {
         struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
-        /* Reinit these anyway, even if they've already been done (on
-           the boot CPU, this will transition from the boot gdt+pda to
-           the real ones). */
         load_gdt(cpu_gdt_descr);
         set_kernel_fs();
 }
@@ -804,13 +741,8 @@ void __cpuinit cpu_init(void)
         struct task_struct *curr = current;
 
         /* Set up the real GDT and PDA, so we can transition from the
-           boot versions. */
-        if (!init_gdt(cpu, curr)) {
-                /* failed to allocate something; not much we can do... */
-                for (;;)
-                        local_irq_enable();
-        }
-
+           boot_gdt_table & boot_pda. */
+        init_gdt(cpu, curr);
         cpu_set_gdt(cpu);
         _cpu_init(cpu, curr);
 }