From 4fe29a85642544503cf81e9cf251ef0f4e65b162 Mon Sep 17 00:00:00 2001
From: Glauber de Oliveira Costa
Date: Wed, 19 Mar 2008 14:25:23 -0300
Subject: x86: use specialized routine for setup per-cpu area

We use the same routine as x86_64, now moved to setup.c, just with a
few ifdefs inside.

Note that this routine uses prefill_possible_map(). It has the very
nice side effect of allowing hotplugging of cpus that are marked as
present but disabled by the ACPI BIOS.

Signed-off-by: Glauber Costa
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/setup.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 103 insertions(+)
 create mode 100644 arch/x86/kernel/setup.c

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
new file mode 100644
index 000000000000..1179aa06cdbf
--- /dev/null
+++ b/arch/x86/kernel/setup.c
@@ -0,0 +1,103 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/percpu.h>
+#include <asm/smp.h>
+#include <asm/percpu.h>
+#include <asm/sections.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/topology.h>
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+/*
+ * Copy data used in early init routines from the initial arrays to the
+ * per cpu data areas.  These arrays then become expendable and the
+ * *_early_ptr's are zeroed indicating that the static arrays are gone.
+ */
+static void __init setup_per_cpu_maps(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+#ifdef CONFIG_SMP
+		if (per_cpu_offset(cpu)) {
+#endif
+			per_cpu(x86_cpu_to_apicid, cpu) =
+						x86_cpu_to_apicid_init[cpu];
+			per_cpu(x86_bios_cpu_apicid, cpu) =
+						x86_bios_cpu_apicid_init[cpu];
+#ifdef CONFIG_NUMA
+			per_cpu(x86_cpu_to_node_map, cpu) =
+						x86_cpu_to_node_map_init[cpu];
+#endif
+#ifdef CONFIG_SMP
+		} else
+			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
+									cpu);
+#endif
+	}
+
+	/* indicate the early static arrays will soon be gone */
+	x86_cpu_to_apicid_early_ptr = NULL;
+	x86_bios_cpu_apicid_early_ptr = NULL;
+#ifdef CONFIG_NUMA
+	x86_cpu_to_node_map_early_ptr = NULL;
+#endif
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * Great future not-so-futuristic plan: make i386 and x86_64 do it
+ * the same way
+ */
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+#endif
+
+/*
+ * Great future plan:
+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+ * Always point %gs to its beginning
+ */
+void __init setup_per_cpu_areas(void)
+{
+	int i;
+	unsigned long size;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
+
+	/* Copy section for each CPU (we discard the original) */
+	size = PERCPU_ENOUGH_ROOM;
+
+	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
+			  size);
+	for_each_cpu_mask(i, cpu_possible_map) {
+		char *ptr;
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+		ptr = alloc_bootmem_pages(size);
+#else
+		int node = early_cpu_to_node(i);
+		if (!node_online(node) || !NODE_DATA(node))
+			ptr = alloc_bootmem_pages(size);
+		else
+			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+#endif
+		if (!ptr)
+			panic("Cannot allocate cpu data for CPU %d\n", i);
+#ifdef CONFIG_X86_64
+		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
+#else
+		__per_cpu_offset[i] = ptr - __per_cpu_start;
+#endif
+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+	}
+
+	/* setup percpu data maps early */
+	setup_per_cpu_maps();
+}
+
+#endif
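
For context, a minimal userspace sketch of the technique the patch applies:
one template blob of per-cpu data is copied once per possible CPU, each
copy's displacement from the template is recorded, and a per-cpu variable is
then reached as its template address plus that CPU's offset. Everything here
(NR_CPUS_DEMO, demo_per_cpu, percpu_template, ...) is an illustrative
stand-in, not kernel API: malloc() plays the role of alloc_bootmem_pages(),
and the struct plays the role of the __per_cpu_start..__per_cpu_end section.
Build with gcc (it uses the typeof extension).

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define NR_CPUS_DEMO 4	/* stand-in for NR_CPUS */

	/* stand-in for the __per_cpu_start..__per_cpu_end section template */
	static struct {
		long counter;
		char name[16];
	} percpu_template = { 0, "boot" };

	/* stand-in for __per_cpu_offset[] / cpu_pda(i)->data_offset */
	static long demo_per_cpu_offset[NR_CPUS_DEMO];

	static void demo_setup_per_cpu_areas(void)
	{
		size_t size = sizeof(percpu_template);
		int i;

		for (i = 0; i < NR_CPUS_DEMO; i++) {
			char *ptr = malloc(size);	/* alloc_bootmem_pages() stand-in */

			if (!ptr) {
				fprintf(stderr, "cannot allocate cpu data for CPU %d\n", i);
				exit(1);
			}
			/* record this copy's displacement from the template ... */
			demo_per_cpu_offset[i] = ptr - (char *)&percpu_template;
			/* ... then copy the section for each CPU */
			memcpy(ptr, &percpu_template, size);
		}
	}

	/* per_cpu() analogue: template address shifted by that CPU's offset */
	#define demo_per_cpu(field, cpu) \
		(*(typeof(percpu_template.field) *) \
		 ((char *)&percpu_template.field + demo_per_cpu_offset[cpu]))

	int main(void)
	{
		int cpu;

		demo_setup_per_cpu_areas();
		for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
			demo_per_cpu(counter, cpu) = 100 * cpu;
		for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
			printf("cpu%d: counter=%ld name=%s\n", cpu,
			       demo_per_cpu(counter, cpu), demo_per_cpu(name, cpu));
		return 0;
	}

The pointer subtraction between the allocated copy and the template mirrors
what the patch does with ptr - __per_cpu_start: once the offset array is
filled in, the same accessor expression works for every CPU, which is why
setup_per_cpu_maps() can move the early static arrays into per-cpu storage
and discard them.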