about summary refs log tree commit diff stats
path: root/arch/x86/kernel/setup_percpu.c
diff options
context:
space:
mode:
authorBrian Gerst <brgerst@gmail.com>2009-01-26 22:56:47 -0500
committerTejun Heo <tj@kernel.org>2009-01-26 22:56:47 -0500
commit0d77e7f04d5da160307f4f5c030a171e004f602b (patch)
tree7c76b29b6947a1ca88d2d45df960a97f4c7dc9ab /arch/x86/kernel/setup_percpu.c
parent5a611268b69f05262936dd177205acbce4471358 (diff)
x86: merge setup_per_cpu_maps() into setup_per_cpu_areas()
Impact: minor optimization.

Eliminates the need for two loops over possible cpus.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
-rw-r--r--arch/x86/kernel/setup_percpu.c48
1 file changed, 19 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 90b8e154bb53..d0b1476490a7 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -97,33 +97,6 @@ static inline void setup_cpu_local_masks(void)
97#endif /* CONFIG_X86_32 */ 97#endif /* CONFIG_X86_32 */
98 98
99#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA 99#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
100/*
101 * Copy data used in early init routines from the initial arrays to the
102 * per cpu data areas. These arrays then become expendable and the
103 * *_early_ptr's are zeroed indicating that the static arrays are gone.
104 */
105static void __init setup_per_cpu_maps(void)
106{
107 int cpu;
108
109 for_each_possible_cpu(cpu) {
110 per_cpu(x86_cpu_to_apicid, cpu) =
111 early_per_cpu_map(x86_cpu_to_apicid, cpu);
112 per_cpu(x86_bios_cpu_apicid, cpu) =
113 early_per_cpu_map(x86_bios_cpu_apicid, cpu);
114#ifdef X86_64_NUMA
115 per_cpu(x86_cpu_to_node_map, cpu) =
116 early_per_cpu_map(x86_cpu_to_node_map, cpu);
117#endif
118 }
119
120 /* indicate the early static arrays will soon be gone */
121 early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
122 early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
123#ifdef X86_64_NUMA
124 early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
125#endif
126}
127 100
128#ifdef CONFIG_X86_64 101#ifdef CONFIG_X86_64
129unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { 102unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
@@ -181,6 +154,19 @@ void __init setup_per_cpu_areas(void)
181 per_cpu_offset(cpu) = ptr - __per_cpu_start; 154 per_cpu_offset(cpu) = ptr - __per_cpu_start;
182 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); 155 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
183 per_cpu(cpu_number, cpu) = cpu; 156 per_cpu(cpu_number, cpu) = cpu;
157 /*
158 * Copy data used in early init routines from the initial arrays to the
159 * per cpu data areas. These arrays then become expendable and the
160 * *_early_ptr's are zeroed indicating that the static arrays are gone.
161 */
162 per_cpu(x86_cpu_to_apicid, cpu) =
163 early_per_cpu_map(x86_cpu_to_apicid, cpu);
164 per_cpu(x86_bios_cpu_apicid, cpu) =
165 early_per_cpu_map(x86_bios_cpu_apicid, cpu);
166#ifdef X86_64_NUMA
167 per_cpu(x86_cpu_to_node_map, cpu) =
168 early_per_cpu_map(x86_cpu_to_node_map, cpu);
169#endif
184#ifdef CONFIG_X86_64 170#ifdef CONFIG_X86_64
185 per_cpu(irq_stack_ptr, cpu) = 171 per_cpu(irq_stack_ptr, cpu) =
186 per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64; 172 per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
@@ -195,8 +181,12 @@ void __init setup_per_cpu_areas(void)
195 DBG("PERCPU: cpu %4d %p\n", cpu, ptr); 181 DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
196 } 182 }
197 183
198 /* Setup percpu data maps */ 184 /* indicate the early static arrays will soon be gone */
199 setup_per_cpu_maps(); 185 early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
186 early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
187#ifdef X86_64_NUMA
188 early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
189#endif
200 190
201 /* Setup node to cpumask map */ 191 /* Setup node to cpumask map */
202 setup_node_to_cpumask_map(); 192 setup_node_to_cpumask_map();