diff options
author | Rusty Russell <rusty@rustcorp.com.au> | 2007-05-02 13:27:10 -0400 |
---|---|---|
committer | Andi Kleen <andi@basil.nowhere.org> | 2007-05-02 13:27:10 -0400 |
commit | ae1ee11be77f51cedb6c569887dddc70c163ab6d (patch) | |
tree | e579a6a6d10c6835cab9af47a2795bf40f669da6 /arch | |
parent | 8f9aeca7a081d81c4c9862be1e04f15b5ab5461f (diff) |
[PATCH] i386: Use per-cpu variables for GDT, PDA
Allocating PDA and GDT at boot is a pain. Using simple per-cpu variables adds
happiness (although we need the GDT page-aligned for Xen, which we do in a
followup patch).
[akpm@linux-foundation.org: build fix]
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/i386/kernel/cpu/common.c | 94 | ||||
-rw-r--r-- | arch/i386/kernel/smpboot.c | 21 | ||||
-rw-r--r-- | arch/i386/mach-voyager/voyager_smp.c | 10 |
3 files changed, 15 insertions(+), 110 deletions(-)
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index dcbbd0a8bfc2..2335f4464ead 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c | |||
@@ -25,8 +25,10 @@ | |||
25 | DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr); | 25 | DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr); |
26 | EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr); | 26 | EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr); |
27 | 27 | ||
28 | struct i386_pda *_cpu_pda[NR_CPUS] __read_mostly; | 28 | DEFINE_PER_CPU(struct desc_struct, cpu_gdt[GDT_ENTRIES]); |
29 | EXPORT_SYMBOL(_cpu_pda); | 29 | |
30 | DEFINE_PER_CPU(struct i386_pda, _cpu_pda); | ||
31 | EXPORT_PER_CPU_SYMBOL(_cpu_pda); | ||
30 | 32 | ||
31 | static int cachesize_override __cpuinitdata = -1; | 33 | static int cachesize_override __cpuinitdata = -1; |
32 | static int disable_x86_fxsr __cpuinitdata; | 34 | static int disable_x86_fxsr __cpuinitdata; |
@@ -609,52 +611,6 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs) | |||
609 | return regs; | 611 | return regs; |
610 | } | 612 | } |
611 | 613 | ||
612 | static __cpuinit int alloc_gdt(int cpu) | ||
613 | { | ||
614 | struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); | ||
615 | struct desc_struct *gdt; | ||
616 | struct i386_pda *pda; | ||
617 | |||
618 | gdt = (struct desc_struct *)cpu_gdt_descr->address; | ||
619 | pda = cpu_pda(cpu); | ||
620 | |||
621 | /* | ||
622 | * This is a horrible hack to allocate the GDT. The problem | ||
623 | * is that cpu_init() is called really early for the boot CPU | ||
624 | * (and hence needs bootmem) but much later for the secondary | ||
625 | * CPUs, when bootmem will have gone away | ||
626 | */ | ||
627 | if (NODE_DATA(0)->bdata->node_bootmem_map) { | ||
628 | BUG_ON(gdt != NULL || pda != NULL); | ||
629 | |||
630 | gdt = alloc_bootmem_pages(PAGE_SIZE); | ||
631 | pda = alloc_bootmem(sizeof(*pda)); | ||
632 | /* alloc_bootmem(_pages) panics on failure, so no check */ | ||
633 | |||
634 | memset(gdt, 0, PAGE_SIZE); | ||
635 | memset(pda, 0, sizeof(*pda)); | ||
636 | } else { | ||
637 | /* GDT and PDA might already have been allocated if | ||
638 | this is a CPU hotplug re-insertion. */ | ||
639 | if (gdt == NULL) | ||
640 | gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL); | ||
641 | |||
642 | if (pda == NULL) | ||
643 | pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu)); | ||
644 | |||
645 | if (unlikely(!gdt || !pda)) { | ||
646 | free_pages((unsigned long)gdt, 0); | ||
647 | kfree(pda); | ||
648 | return 0; | ||
649 | } | ||
650 | } | ||
651 | |||
652 | cpu_gdt_descr->address = (unsigned long)gdt; | ||
653 | cpu_pda(cpu) = pda; | ||
654 | |||
655 | return 1; | ||
656 | } | ||
657 | |||
658 | /* Initial PDA used by boot CPU */ | 614 | /* Initial PDA used by boot CPU */ |
659 | struct i386_pda boot_pda = { | 615 | struct i386_pda boot_pda = { |
660 | ._pda = &boot_pda, | 616 | ._pda = &boot_pda, |
@@ -670,31 +626,17 @@ static inline void set_kernel_fs(void) | |||
670 | asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_PDA) : "memory"); | 626 | asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_PDA) : "memory"); |
671 | } | 627 | } |
672 | 628 | ||
673 | /* Initialize the CPU's GDT and PDA. The boot CPU does this for | 629 | /* Initialize the CPU's GDT and PDA. This is either the boot CPU doing itself |
674 | itself, but secondaries find this done for them. */ | 630 | (still using cpu_gdt_table), or a CPU doing it for a secondary which |
675 | __cpuinit int init_gdt(int cpu, struct task_struct *idle) | 631 | will soon come up. */ |
632 | __cpuinit void init_gdt(int cpu, struct task_struct *idle) | ||
676 | { | 633 | { |
677 | struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); | 634 | struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); |
678 | struct desc_struct *gdt; | 635 | struct desc_struct *gdt = per_cpu(cpu_gdt, cpu); |
679 | struct i386_pda *pda; | 636 | struct i386_pda *pda = &per_cpu(_cpu_pda, cpu); |
680 | |||
681 | /* For non-boot CPUs, the GDT and PDA should already have been | ||
682 | allocated. */ | ||
683 | if (!alloc_gdt(cpu)) { | ||
684 | printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu); | ||
685 | return 0; | ||
686 | } | ||
687 | 637 | ||
688 | gdt = (struct desc_struct *)cpu_gdt_descr->address; | ||
689 | pda = cpu_pda(cpu); | ||
690 | |||
691 | BUG_ON(gdt == NULL || pda == NULL); | ||
692 | |||
693 | /* | ||
694 | * Initialize the per-CPU GDT with the boot GDT, | ||
695 | * and set up the GDT descriptor: | ||
696 | */ | ||
697 | memcpy(gdt, cpu_gdt_table, GDT_SIZE); | 638 | memcpy(gdt, cpu_gdt_table, GDT_SIZE); |
639 | cpu_gdt_descr->address = (unsigned long)gdt; | ||
698 | cpu_gdt_descr->size = GDT_SIZE - 1; | 640 | cpu_gdt_descr->size = GDT_SIZE - 1; |
699 | 641 | ||
700 | pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a, | 642 | pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a, |
@@ -706,17 +648,12 @@ __cpuinit int init_gdt(int cpu, struct task_struct *idle) | |||
706 | pda->_pda = pda; | 648 | pda->_pda = pda; |
707 | pda->cpu_number = cpu; | 649 | pda->cpu_number = cpu; |
708 | pda->pcurrent = idle; | 650 | pda->pcurrent = idle; |
709 | |||
710 | return 1; | ||
711 | } | 651 | } |
712 | 652 | ||
713 | void __cpuinit cpu_set_gdt(int cpu) | 653 | void __cpuinit cpu_set_gdt(int cpu) |
714 | { | 654 | { |
715 | struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); | 655 | struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); |
716 | 656 | ||
717 | /* Reinit these anyway, even if they've already been done (on | ||
718 | the boot CPU, this will transition from the boot gdt+pda to | ||
719 | the real ones). */ | ||
720 | load_gdt(cpu_gdt_descr); | 657 | load_gdt(cpu_gdt_descr); |
721 | set_kernel_fs(); | 658 | set_kernel_fs(); |
722 | } | 659 | } |
@@ -804,13 +741,8 @@ void __cpuinit cpu_init(void) | |||
804 | struct task_struct *curr = current; | 741 | struct task_struct *curr = current; |
805 | 742 | ||
806 | /* Set up the real GDT and PDA, so we can transition from the | 743 | /* Set up the real GDT and PDA, so we can transition from the |
807 | boot versions. */ | 744 | boot_gdt_table & boot_pda. */ |
808 | if (!init_gdt(cpu, curr)) { | 745 | init_gdt(cpu, curr); |
809 | /* failed to allocate something; not much we can do... */ | ||
810 | for (;;) | ||
811 | local_irq_enable(); | ||
812 | } | ||
813 | |||
814 | cpu_set_gdt(cpu); | 746 | cpu_set_gdt(cpu); |
815 | _cpu_init(cpu, curr); | 747 | _cpu_init(cpu, curr); |
816 | } | 748 | } |
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 7b14e88b555f..b36a5f174cc9 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
@@ -808,13 +808,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
808 | if (IS_ERR(idle)) | 808 | if (IS_ERR(idle)) |
809 | panic("failed fork for CPU %d", cpu); | 809 | panic("failed fork for CPU %d", cpu); |
810 | 810 | ||
811 | /* Pre-allocate and initialize the CPU's GDT and PDA so it | 811 | init_gdt(cpu, idle); |
812 | doesn't have to do any memory allocation during the | ||
813 | delicate CPU-bringup phase. */ | ||
814 | if (!init_gdt(cpu, idle)) { | ||
815 | printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu); | ||
816 | return -1; /* ? */ | ||
817 | } | ||
818 | 812 | ||
819 | idle->thread.eip = (unsigned long) start_secondary; | 813 | idle->thread.eip = (unsigned long) start_secondary; |
820 | /* start_eip had better be page-aligned! */ | 814 | /* start_eip had better be page-aligned! */ |
@@ -940,7 +934,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu) | |||
940 | DECLARE_COMPLETION_ONSTACK(done); | 934 | DECLARE_COMPLETION_ONSTACK(done); |
941 | struct warm_boot_cpu_info info; | 935 | struct warm_boot_cpu_info info; |
942 | int apicid, ret; | 936 | int apicid, ret; |
943 | struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); | ||
944 | 937 | ||
945 | apicid = x86_cpu_to_apicid[cpu]; | 938 | apicid = x86_cpu_to_apicid[cpu]; |
946 | if (apicid == BAD_APICID) { | 939 | if (apicid == BAD_APICID) { |
@@ -948,18 +941,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu) | |||
948 | goto exit; | 941 | goto exit; |
949 | } | 942 | } |
950 | 943 | ||
951 | /* | ||
952 | * the CPU isn't initialized at boot time, allocate gdt table here. | ||
953 | * cpu_init will initialize it | ||
954 | */ | ||
955 | if (!cpu_gdt_descr->address) { | ||
956 | cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL); | ||
957 | if (!cpu_gdt_descr->address) | ||
958 | printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu); | ||
959 | ret = -ENOMEM; | ||
960 | goto exit; | ||
961 | } | ||
962 | |||
963 | info.complete = &done; | 944 | info.complete = &done; |
964 | info.apicid = apicid; | 945 | info.apicid = apicid; |
965 | info.cpu = cpu; | 946 | info.cpu = cpu; |
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c index 74aeedf277f4..15d8132d4f5e 100644 --- a/arch/i386/mach-voyager/voyager_smp.c +++ b/arch/i386/mach-voyager/voyager_smp.c | |||
@@ -580,15 +580,7 @@ do_boot_cpu(__u8 cpu) | |||
580 | /* init_tasks (in sched.c) is indexed logically */ | 580 | /* init_tasks (in sched.c) is indexed logically */ |
581 | stack_start.esp = (void *) idle->thread.esp; | 581 | stack_start.esp = (void *) idle->thread.esp; |
582 | 582 | ||
583 | /* Pre-allocate and initialize the CPU's GDT and PDA so it | 583 | init_gdt(cpu, idle); |
584 | doesn't have to do any memory allocation during the | ||
585 | delicate CPU-bringup phase. */ | ||
586 | if (!init_gdt(cpu, idle)) { | ||
587 | printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu); | ||
588 | cpucount--; | ||
589 | return; | ||
590 | } | ||
591 | |||
592 | irq_ctx_init(cpu); | 584 | irq_ctx_init(cpu); |
593 | 585 | ||
594 | /* Note: Don't modify initial ss override */ | 586 | /* Note: Don't modify initial ss override */ |