diff options
| author | Glauber de Oliveira Costa <gcosta@redhat.com> | 2008-03-19 13:25:23 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-04-17 11:41:01 -0400 |
| commit | 4fe29a85642544503cf81e9cf251ef0f4e65b162 (patch) | |
| tree | 1fe0ed2b1e80e7c638fc12e32806e90ba0272516 | |
| parent | fbac7fcbadc54cc5d374873a2e60e924a056d198 (diff) | |
x86: use specialized routine for setup per-cpu area
We use the same routine as x86_64, now moved to setup.c.
Just with a few ifdefs inside.
Note that this routine uses prefill_possible_map().
It has the very nice side effect of allowing hotplugging of
CPUs that are marked as present but disabled by the ACPI BIOS.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | arch/x86/Kconfig | 2 | ||||
| -rw-r--r-- | arch/x86/kernel/Makefile | 2 | ||||
| -rw-r--r-- | arch/x86/kernel/setup.c | 103 | ||||
| -rw-r--r-- | arch/x86/kernel/setup64.c | 77 | ||||
| -rw-r--r-- | arch/x86/kernel/smpboot_32.c | 2 |
5 files changed, 107 insertions, 79 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index dbdd3142215c..fd27048087b8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -117,7 +117,7 @@ config ARCH_HAS_CPU_RELAX | |||
| 117 | def_bool y | 117 | def_bool y |
| 118 | 118 | ||
| 119 | config HAVE_SETUP_PER_CPU_AREA | 119 | config HAVE_SETUP_PER_CPU_AREA |
| 120 | def_bool X86_64 | 120 | def_bool X86_64 || (X86_SMP && !X86_VOYAGER) |
| 121 | 121 | ||
| 122 | config ARCH_HIBERNATION_POSSIBLE | 122 | config ARCH_HIBERNATION_POSSIBLE |
| 123 | def_bool y | 123 | def_bool y |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index c436e747f502..5d33509fd1c1 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
| @@ -18,7 +18,7 @@ CFLAGS_tsc_64.o := $(nostackp) | |||
| 18 | obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o | 18 | obj-y := process_$(BITS).o signal_$(BITS).o entry_$(BITS).o |
| 19 | obj-y += traps_$(BITS).o irq_$(BITS).o | 19 | obj-y += traps_$(BITS).o irq_$(BITS).o |
| 20 | obj-y += time_$(BITS).o ioport.o ldt.o | 20 | obj-y += time_$(BITS).o ioport.o ldt.o |
| 21 | obj-y += setup_$(BITS).o i8259_$(BITS).o | 21 | obj-y += setup_$(BITS).o i8259_$(BITS).o setup.o |
| 22 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o | 22 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o |
| 23 | obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o | 23 | obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o |
| 24 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o | 24 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o setup64.o |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c new file mode 100644 index 000000000000..1179aa06cdbf --- /dev/null +++ b/arch/x86/kernel/setup.c | |||
| @@ -0,0 +1,103 @@ | |||
| 1 | #include <linux/kernel.h> | ||
| 2 | #include <linux/module.h> | ||
| 3 | #include <linux/init.h> | ||
| 4 | #include <linux/bootmem.h> | ||
| 5 | #include <linux/percpu.h> | ||
| 6 | #include <asm/smp.h> | ||
| 7 | #include <asm/percpu.h> | ||
| 8 | #include <asm/sections.h> | ||
| 9 | #include <asm/processor.h> | ||
| 10 | #include <asm/setup.h> | ||
| 11 | #include <asm/topology.h> | ||
| 12 | |||
| 13 | #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA | ||
| 14 | /* | ||
| 15 | * Copy data used in early init routines from the initial arrays to the | ||
| 16 | * per cpu data areas. These arrays then become expendable and the | ||
| 17 | * *_early_ptr's are zeroed indicating that the static arrays are gone. | ||
| 18 | */ | ||
| 19 | static void __init setup_per_cpu_maps(void) | ||
| 20 | { | ||
| 21 | int cpu; | ||
| 22 | |||
| 23 | for_each_possible_cpu(cpu) { | ||
| 24 | #ifdef CONFIG_SMP | ||
| 25 | if (per_cpu_offset(cpu)) { | ||
| 26 | #endif | ||
| 27 | per_cpu(x86_cpu_to_apicid, cpu) = | ||
| 28 | x86_cpu_to_apicid_init[cpu]; | ||
| 29 | per_cpu(x86_bios_cpu_apicid, cpu) = | ||
| 30 | x86_bios_cpu_apicid_init[cpu]; | ||
| 31 | #ifdef CONFIG_NUMA | ||
| 32 | per_cpu(x86_cpu_to_node_map, cpu) = | ||
| 33 | x86_cpu_to_node_map_init[cpu]; | ||
| 34 | #endif | ||
| 35 | #ifdef CONFIG_SMP | ||
| 36 | } else | ||
| 37 | printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n", | ||
| 38 | cpu); | ||
| 39 | #endif | ||
| 40 | } | ||
| 41 | |||
| 42 | /* indicate the early static arrays will soon be gone */ | ||
| 43 | x86_cpu_to_apicid_early_ptr = NULL; | ||
| 44 | x86_bios_cpu_apicid_early_ptr = NULL; | ||
| 45 | #ifdef CONFIG_NUMA | ||
| 46 | x86_cpu_to_node_map_early_ptr = NULL; | ||
| 47 | #endif | ||
| 48 | } | ||
| 49 | |||
| 50 | #ifdef CONFIG_X86_32 | ||
| 51 | /* | ||
| 52 | * Great future not-so-futuristic plan: make i386 and x86_64 do it | ||
| 53 | * the same way | ||
| 54 | */ | ||
| 55 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; | ||
| 56 | EXPORT_SYMBOL(__per_cpu_offset); | ||
| 57 | #endif | ||
| 58 | |||
| 59 | /* | ||
| 60 | * Great future plan: | ||
| 61 | * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. | ||
| 62 | * Always point %gs to its beginning | ||
| 63 | */ | ||
| 64 | void __init setup_per_cpu_areas(void) | ||
| 65 | { | ||
| 66 | int i; | ||
| 67 | unsigned long size; | ||
| 68 | |||
| 69 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 70 | prefill_possible_map(); | ||
| 71 | #endif | ||
| 72 | |||
| 73 | /* Copy section for each CPU (we discard the original) */ | ||
| 74 | size = PERCPU_ENOUGH_ROOM; | ||
| 75 | |||
| 76 | printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", | ||
| 77 | size); | ||
| 78 | for_each_cpu_mask(i, cpu_possible_map) { | ||
| 79 | char *ptr; | ||
| 80 | #ifndef CONFIG_NEED_MULTIPLE_NODES | ||
| 81 | ptr = alloc_bootmem_pages(size); | ||
| 82 | #else | ||
| 83 | int node = early_cpu_to_node(i); | ||
| 84 | if (!node_online(node) || !NODE_DATA(node)) | ||
| 85 | ptr = alloc_bootmem_pages(size); | ||
| 86 | else | ||
| 87 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); | ||
| 88 | #endif | ||
| 89 | if (!ptr) | ||
| 90 | panic("Cannot allocate cpu data for CPU %d\n", i); | ||
| 91 | #ifdef CONFIG_X86_64 | ||
| 92 | cpu_pda(i)->data_offset = ptr - __per_cpu_start; | ||
| 93 | #else | ||
| 94 | __per_cpu_offset[i] = ptr - __per_cpu_start; | ||
| 95 | #endif | ||
| 96 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | ||
| 97 | } | ||
| 98 | |||
| 99 | /* setup percpu data maps early */ | ||
| 100 | setup_per_cpu_maps(); | ||
| 101 | } | ||
| 102 | |||
| 103 | #endif | ||
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c index e24c45677094..6b4e3262e8cb 100644 --- a/arch/x86/kernel/setup64.c +++ b/arch/x86/kernel/setup64.c | |||
| @@ -85,83 +85,6 @@ static int __init nonx32_setup(char *str) | |||
| 85 | } | 85 | } |
| 86 | __setup("noexec32=", nonx32_setup); | 86 | __setup("noexec32=", nonx32_setup); |
| 87 | 87 | ||
| 88 | /* | ||
| 89 | * Copy data used in early init routines from the initial arrays to the | ||
| 90 | * per cpu data areas. These arrays then become expendable and the | ||
| 91 | * *_early_ptr's are zeroed indicating that the static arrays are gone. | ||
| 92 | */ | ||
| 93 | static void __init setup_per_cpu_maps(void) | ||
| 94 | { | ||
| 95 | int cpu; | ||
| 96 | |||
| 97 | for_each_possible_cpu(cpu) { | ||
| 98 | #ifdef CONFIG_SMP | ||
| 99 | if (per_cpu_offset(cpu)) { | ||
| 100 | #endif | ||
| 101 | per_cpu(x86_cpu_to_apicid, cpu) = | ||
| 102 | x86_cpu_to_apicid_init[cpu]; | ||
| 103 | per_cpu(x86_bios_cpu_apicid, cpu) = | ||
| 104 | x86_bios_cpu_apicid_init[cpu]; | ||
| 105 | #ifdef CONFIG_NUMA | ||
| 106 | per_cpu(x86_cpu_to_node_map, cpu) = | ||
| 107 | x86_cpu_to_node_map_init[cpu]; | ||
| 108 | #endif | ||
| 109 | #ifdef CONFIG_SMP | ||
| 110 | } | ||
| 111 | else | ||
| 112 | printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n", | ||
| 113 | cpu); | ||
| 114 | #endif | ||
| 115 | } | ||
| 116 | |||
| 117 | /* indicate the early static arrays will soon be gone */ | ||
| 118 | x86_cpu_to_apicid_early_ptr = NULL; | ||
| 119 | x86_bios_cpu_apicid_early_ptr = NULL; | ||
| 120 | #ifdef CONFIG_NUMA | ||
| 121 | x86_cpu_to_node_map_early_ptr = NULL; | ||
| 122 | #endif | ||
| 123 | } | ||
| 124 | |||
| 125 | /* | ||
| 126 | * Great future plan: | ||
| 127 | * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data. | ||
| 128 | * Always point %gs to its beginning | ||
| 129 | */ | ||
| 130 | void __init setup_per_cpu_areas(void) | ||
| 131 | { | ||
| 132 | int i; | ||
| 133 | unsigned long size; | ||
| 134 | |||
| 135 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 136 | prefill_possible_map(); | ||
| 137 | #endif | ||
| 138 | |||
| 139 | /* Copy section for each CPU (we discard the original) */ | ||
| 140 | size = PERCPU_ENOUGH_ROOM; | ||
| 141 | |||
| 142 | printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size); | ||
| 143 | for_each_cpu_mask (i, cpu_possible_map) { | ||
| 144 | char *ptr; | ||
| 145 | #ifndef CONFIG_NEED_MULTIPLE_NODES | ||
| 146 | ptr = alloc_bootmem_pages(size); | ||
| 147 | #else | ||
| 148 | int node = early_cpu_to_node(i); | ||
| 149 | |||
| 150 | if (!node_online(node) || !NODE_DATA(node)) | ||
| 151 | ptr = alloc_bootmem_pages(size); | ||
| 152 | else | ||
| 153 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); | ||
| 154 | #endif | ||
| 155 | if (!ptr) | ||
| 156 | panic("Cannot allocate cpu data for CPU %d\n", i); | ||
| 157 | cpu_pda(i)->data_offset = ptr - __per_cpu_start; | ||
| 158 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | ||
| 159 | } | ||
| 160 | |||
| 161 | /* setup percpu data maps early */ | ||
| 162 | setup_per_cpu_maps(); | ||
| 163 | } | ||
| 164 | |||
| 165 | void pda_init(int cpu) | 88 | void pda_init(int cpu) |
| 166 | { | 89 | { |
| 167 | struct x8664_pda *pda = cpu_pda(cpu); | 90 | struct x8664_pda *pda = cpu_pda(cpu); |
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c index 92a5df6190b5..bf5c9e9f26c1 100644 --- a/arch/x86/kernel/smpboot_32.c +++ b/arch/x86/kernel/smpboot_32.c | |||
| @@ -665,6 +665,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) | |||
| 665 | unmap_cpu_to_logical_apicid(cpu); | 665 | unmap_cpu_to_logical_apicid(cpu); |
| 666 | cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ | 666 | cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */ |
| 667 | cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ | 667 | cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ |
| 668 | cpu_clear(cpu, cpu_possible_map); | ||
| 668 | cpucount--; | 669 | cpucount--; |
| 669 | } else { | 670 | } else { |
| 670 | per_cpu(x86_cpu_to_apicid, cpu) = apicid; | 671 | per_cpu(x86_cpu_to_apicid, cpu) = apicid; |
| @@ -743,6 +744,7 @@ EXPORT_SYMBOL(xquad_portio); | |||
| 743 | 744 | ||
| 744 | static void __init disable_smp(void) | 745 | static void __init disable_smp(void) |
| 745 | { | 746 | { |
| 747 | cpu_possible_map = cpumask_of_cpu(0); | ||
| 746 | smpboot_clear_io_apic_irqs(); | 748 | smpboot_clear_io_apic_irqs(); |
| 747 | phys_cpu_present_map = physid_mask_of_physid(0); | 749 | phys_cpu_present_map = physid_mask_of_physid(0); |
| 748 | map_cpu_to_logical_apicid(); | 750 | map_cpu_to_logical_apicid(); |
