Diffstat (limited to 'arch/x86/kernel/setup.c')
-rw-r--r--  arch/x86/kernel/setup.c | 113
1 file changed, 113 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
new file mode 100644
index 000000000000..ed157c90412e
--- /dev/null
+++ b/arch/x86/kernel/setup.c
@@ -0,0 +1,113 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/topology.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>

unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);

physid_mask_t phys_cpu_present_map;

DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;

#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP)
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
		per_cpu(x86_bios_cpu_apicid, cpu) =
						x86_bios_cpu_apicid_init[cpu];
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
						x86_cpu_to_node_map_init[cpu];
#endif
	}

	/* indicate the early static arrays will soon be gone */
	x86_cpu_to_apicid_early_ptr = NULL;
	x86_bios_cpu_apicid_early_ptr = NULL;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = NULL;
#endif
}

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	int i;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;
	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
			  size);

	for_each_possible_cpu(i) {
		char *ptr;
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = alloc_bootmem_pages(size);
#else
		int node = early_cpu_to_node(i);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = alloc_bootmem_pages(size);
			printk(KERN_INFO
			       "cpu %d has no node or node-local memory\n", i);
		}
		else
			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
#endif
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
#ifdef CONFIG_X86_64
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
#else
		__per_cpu_offset[i] = ptr - __per_cpu_start;
#endif
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();
}

#endif
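
The loop in setup_per_cpu_areas() records, for each possible CPU, the distance between its bootmem-allocated copy of the per-cpu section and __per_cpu_start (stored in cpu_pda(i)->data_offset on x86_64, in __per_cpu_offset[i] on i386). The user-space sketch below illustrates that offset bookkeeping under simplified assumptions; the names fake_percpu_section, percpu_copies, setup_areas and my_per_cpu_ptr are illustrative stand-ins, not kernel symbols, and this is not the kernel's actual per_cpu() implementation.

/*
 * Minimal sketch of the per-cpu offset idea: every CPU gets its own copy
 * of a "prototype" data section, and a per-cpu variable is reached as
 * "prototype address + that CPU's offset".
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NR_CPUS 4

static char fake_percpu_section[64];		/* stand-in for __per_cpu_start.. */
#define FAKE_PERCPU_SIZE sizeof(fake_percpu_section)	/* ..__per_cpu_end */

static char percpu_copies[NR_CPUS][FAKE_PERCPU_SIZE];
static uintptr_t percpu_offset[NR_CPUS];	/* analogue of __per_cpu_offset[] */

/* analogue of the copy loop in setup_per_cpu_areas() */
static void setup_areas(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		char *ptr = percpu_copies[cpu];

		percpu_offset[cpu] = (uintptr_t)ptr -
				     (uintptr_t)fake_percpu_section;
		memcpy(ptr, fake_percpu_section, FAKE_PERCPU_SIZE);
	}
}

/* analogue of per_cpu(var, cpu): prototype address + per-cpu offset */
#define my_per_cpu_ptr(addr, cpu) \
	((void *)((uintptr_t)(addr) + percpu_offset[(cpu)]))

int main(void)
{
	int *proto = (int *)fake_percpu_section;	/* a "per-cpu" int */

	*proto = 42;			/* value in the prototype section */
	setup_areas();

	/* each CPU now sees its own copy, seeded from the prototype */
	*(int *)my_per_cpu_ptr(proto, 1) = 100;
	printf("cpu0: %d, cpu1: %d\n",
	       *(int *)my_per_cpu_ptr(proto, 0),
	       *(int *)my_per_cpu_ptr(proto, 1));
	return 0;
}

After setup_areas() runs, the prototype section is no longer needed for lookups, which mirrors why the kernel can discard the original section contents ("we discard the original") once every CPU's copy and offset are in place.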