about summary refs log tree commit diff stats
path: root/arch/x86/kernel/setup_percpu.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
-rw-r--r--  arch/x86/kernel/setup_percpu.c  53
1 file changed, 22 insertions, 31 deletions
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index cac68430d31f..ae0c0d3bb770 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -80,24 +80,6 @@ static void __init setup_per_cpu_maps(void)
80#endif 80#endif
81} 81}
82 82
83#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
84cpumask_t *cpumask_of_cpu_map __read_mostly;
85EXPORT_SYMBOL(cpumask_of_cpu_map);
86
87/* requires nr_cpu_ids to be initialized */
88static void __init setup_cpumask_of_cpu(void)
89{
90 int i;
91
92 /* alloc_bootmem zeroes memory */
93 cpumask_of_cpu_map = alloc_bootmem_low(sizeof(cpumask_t) * nr_cpu_ids);
94 for (i = 0; i < nr_cpu_ids; i++)
95 cpu_set(i, cpumask_of_cpu_map[i]);
96}
97#else
98static inline void setup_cpumask_of_cpu(void) { }
99#endif
100
101#ifdef CONFIG_X86_32 83#ifdef CONFIG_X86_32
102/* 84/*
103 * Great future not-so-futuristic plan: make i386 and x86_64 do it 85 * Great future not-so-futuristic plan: make i386 and x86_64 do it
@@ -158,35 +140,47 @@ static void __init setup_cpu_pda_map(void)
158 */ 140 */
159void __init setup_per_cpu_areas(void) 141void __init setup_per_cpu_areas(void)
160{ 142{
161 ssize_t size = PERCPU_ENOUGH_ROOM; 143 ssize_t size, old_size;
162 char *ptr; 144 char *ptr;
163 int cpu; 145 int cpu;
146 unsigned long align = 1;
164 147
165 /* Setup cpu_pda map */ 148 /* Setup cpu_pda map */
166 setup_cpu_pda_map(); 149 setup_cpu_pda_map();
167 150
168 /* Copy section for each CPU (we discard the original) */ 151 /* Copy section for each CPU (we discard the original) */
169 size = PERCPU_ENOUGH_ROOM; 152 old_size = PERCPU_ENOUGH_ROOM;
153 align = max_t(unsigned long, PAGE_SIZE, align);
154 size = roundup(old_size, align);
170 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", 155 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
171 size); 156 size);
172 157
173 for_each_possible_cpu(cpu) { 158 for_each_possible_cpu(cpu) {
174#ifndef CONFIG_NEED_MULTIPLE_NODES 159#ifndef CONFIG_NEED_MULTIPLE_NODES
175 ptr = alloc_bootmem_pages(size); 160 ptr = __alloc_bootmem(size, align,
161 __pa(MAX_DMA_ADDRESS));
176#else 162#else
177 int node = early_cpu_to_node(cpu); 163 int node = early_cpu_to_node(cpu);
178 if (!node_online(node) || !NODE_DATA(node)) { 164 if (!node_online(node) || !NODE_DATA(node)) {
179 ptr = alloc_bootmem_pages(size); 165 ptr = __alloc_bootmem(size, align,
166 __pa(MAX_DMA_ADDRESS));
180 printk(KERN_INFO 167 printk(KERN_INFO
181 "cpu %d has no node %d or node-local memory\n", 168 "cpu %d has no node %d or node-local memory\n",
182 cpu, node); 169 cpu, node);
170 if (ptr)
171 printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n",
172 cpu, __pa(ptr));
173 }
174 else {
175 ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
176 __pa(MAX_DMA_ADDRESS));
177 if (ptr)
178 printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
179 cpu, node, __pa(ptr));
183 } 180 }
184 else
185 ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
186#endif 181#endif
187 per_cpu_offset(cpu) = ptr - __per_cpu_start; 182 per_cpu_offset(cpu) = ptr - __per_cpu_start;
188 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 183 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
189
190 } 184 }
191 185
192 printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n", 186 printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
@@ -197,9 +191,6 @@ void __init setup_per_cpu_areas(void)
197 191
198 /* Setup node to cpumask map */ 192 /* Setup node to cpumask map */
199 setup_node_to_cpumask_map(); 193 setup_node_to_cpumask_map();
200
201 /* Setup cpumask_of_cpu map */
202 setup_cpumask_of_cpu();
203} 194}
204 195
205#endif 196#endif
@@ -227,8 +218,8 @@ static void __init setup_node_to_cpumask_map(void)
227 /* allocate the map */ 218 /* allocate the map */
228 map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t)); 219 map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
229 220
230 Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n", 221 pr_debug("Node to cpumask map at %p for %d nodes\n",
231 map, nr_node_ids); 222 map, nr_node_ids);
232 223
233 /* node_to_cpumask() will now work */ 224 /* node_to_cpumask() will now work */
234 node_to_cpumask_map = map; 225 node_to_cpumask_map = map;
@@ -248,7 +239,7 @@ void __cpuinit numa_set_node(int cpu, int node)
248 per_cpu(x86_cpu_to_node_map, cpu) = node; 239 per_cpu(x86_cpu_to_node_map, cpu) = node;
249 240
250 else 241 else
251 Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu); 242 pr_debug("Setting node for non-present cpu %d\n", cpu);
252} 243}
253 244
254void __cpuinit numa_clear_node(int cpu) 245void __cpuinit numa_clear_node(int cpu)