Diffstat (limited to 'arch/x86/kernel/setup64.c')
-rw-r--r--  arch/x86/kernel/setup64.c  81
1 file changed, 4 insertions(+), 77 deletions(-)
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index e24c45677094..4be499cd6a0d 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -23,6 +23,7 @@
 #include <asm/proto.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/genapic.h>
 
 #ifndef CONFIG_DEBUG_BOOT_PARAMS
 struct boot_params __initdata boot_params;
@@ -85,83 +86,6 @@ static int __init nonx32_setup(char *str)
 }
 __setup("noexec32=", nonx32_setup);
 
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas. These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
- */
-static void __init setup_per_cpu_maps(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-#ifdef CONFIG_SMP
-		if (per_cpu_offset(cpu)) {
-#endif
-			per_cpu(x86_cpu_to_apicid, cpu) =
-						x86_cpu_to_apicid_init[cpu];
-			per_cpu(x86_bios_cpu_apicid, cpu) =
-						x86_bios_cpu_apicid_init[cpu];
-#ifdef CONFIG_NUMA
-			per_cpu(x86_cpu_to_node_map, cpu) =
-						x86_cpu_to_node_map_init[cpu];
-#endif
-#ifdef CONFIG_SMP
-		}
-		else
-			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
-									cpu);
-#endif
-	}
-
-	/* indicate the early static arrays will soon be gone */
-	x86_cpu_to_apicid_early_ptr = NULL;
-	x86_bios_cpu_apicid_early_ptr = NULL;
-#ifdef CONFIG_NUMA
-	x86_cpu_to_node_map_early_ptr = NULL;
-#endif
-}
-
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
-void __init setup_per_cpu_areas(void)
-{
-	int i;
-	unsigned long size;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	prefill_possible_map();
-#endif
-
-	/* Copy section for each CPU (we discard the original) */
-	size = PERCPU_ENOUGH_ROOM;
-
-	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
-	for_each_cpu_mask (i, cpu_possible_map) {
-		char *ptr;
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-		ptr = alloc_bootmem_pages(size);
-#else
-		int node = early_cpu_to_node(i);
-
-		if (!node_online(node) || !NODE_DATA(node))
-			ptr = alloc_bootmem_pages(size);
-		else
-			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
-#endif
-		if (!ptr)
-			panic("Cannot allocate cpu data for CPU %d\n", i);
-		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-	}
-
-	/* setup percpu data maps early */
-	setup_per_cpu_maps();
-}
-
 void pda_init(int cpu)
 {
 	struct x8664_pda *pda = cpu_pda(cpu);
@@ -341,4 +265,7 @@ void __cpuinit cpu_init (void)
 	fpu_init();
 
 	raw_local_save_flags(kernel_eflags);
+
+	if (is_uv_system())
+		uv_cpu_init();
 }
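
The removed block is x86-64's early per-cpu bootstrap: setup_per_cpu_areas() gives every possible CPU a private copy of the __per_cpu_start..__per_cpu_end template section (allocated on the CPU's home node when the NUMA topology is already known) and records the copy's offset in that CPU's PDA, after which setup_per_cpu_maps() migrates the early APIC and node arrays into those areas and NULLs the *_early_ptr markers. The diff itself shows only the removal, presumably relocating the code as part of the 32/64-bit unification; the surviving hunks instead wire up the SGI UV hook, pulling in <asm/genapic.h> and calling uv_cpu_init() at the end of cpu_init(), guarded by a runtime is_uv_system() check so non-UV machines are unaffected.

As a minimal userspace sketch of the offset-based per-cpu pattern the removed code implements (NR_CPUS, struct pcpu_data, percpu_template, pcpu_offset, and per_cpu_ptr() below are illustrative stand-ins, not the kernel's symbols, and malloc() replaces the node-aware bootmem allocators):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

/* Stand-in for the per-cpu "template" data section. */
static struct pcpu_data {
	long counter;
	char name[16];
} percpu_template = { 0, "template" };

/* Analogue of cpu_pda(i)->data_offset in the removed code. */
static ptrdiff_t pcpu_offset[NR_CPUS];

/* Address of a given CPU's private copy of the template variable. */
static struct pcpu_data *per_cpu_ptr(int cpu)
{
	return (struct pcpu_data *)((char *)&percpu_template + pcpu_offset[cpu]);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		char *copy = malloc(sizeof(percpu_template));

		if (!copy) {
			fprintf(stderr, "cannot allocate cpu data for CPU %d\n", cpu);
			return 1;
		}
		/* Copy the template, then remember how far away the copy is;
		 * all later per-cpu accesses are template address + offset. */
		memcpy(copy, &percpu_template, sizeof(percpu_template));
		pcpu_offset[cpu] = copy - (char *)&percpu_template;

		per_cpu_ptr(cpu)->counter = cpu;	/* touch the private copy */
	}

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: counter=%ld, offset=%td\n",
		       cpu, per_cpu_ptr(cpu)->counter, pcpu_offset[cpu]);
	return 0;
}

As with the kernel's ptr - __per_cpu_start computation, the offset arithmetic subtracts pointers into distinct objects, which ISO C leaves undefined but any flat-address-space ABI supports. The real code additionally skips CPUs whose per_cpu_offset() is still zero under CONFIG_SMP and panics on allocation failure, since there is no recovery path that early in boot.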