author    Thomas Gleixner <tglx@linutronix.de>    2008-01-30 07:30:37 -0500
committer Ingo Molnar <mingo@elte.hu>             2008-01-30 07:30:37 -0500
commit    e3cfe529dd87dd3354789546676fef2e808822e4 (patch)
tree      72ec6c9c1d0bdb0ebfcc082a237853773b1cae6d
parent    e1d9197862ff4d950dab45669b7f37b5ec3219d8 (diff)

x86: cleanup numa_64.c

Clean it up before applying more patches.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

-rw-r--r--  arch/x86/mm/numa_64.c | 213
1 file changed, 111 insertions(+), 102 deletions(-)
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index e7f3f4e9ec85..4faed6a8f3ae 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -1,7 +1,7 @@
 /*
  * Generic VM initialization for x86-64 NUMA setups.
  * Copyright 2002,2003 Andi Kleen, SuSE Labs.
  */
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/string.h>
@@ -24,6 +24,8 @@
 #endif
 
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+
 bootmem_data_t plat_node_bdata[MAX_NUMNODES];
 
 struct memnode memnode;
@@ -31,16 +33,19 @@ struct memnode memnode;
 unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
 	[0 ... NR_CPUS-1] = NUMA_NO_NODE
 };
+EXPORT_SYMBOL(cpu_to_node);
+
 unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
+
 cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_to_cpumask);
 
 int numa_off __initdata;
 unsigned long __initdata nodemap_addr;
 unsigned long __initdata nodemap_size;
 
-
 /*
  * Given a shift value, try to populate memnodemap[]
  * Returns :
@@ -48,12 +53,11 @@ unsigned long __initdata nodemap_size;
  * 0 if memnodmap[] too small (of shift too small)
  * -1 if node overlap or lost ram (shift too big)
  */
-static int __init
-populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
+static int __init populate_memnodemap(const struct bootnode *nodes,
+				      int numnodes, int shift)
 {
-	int i;
-	int res = -1;
 	unsigned long addr, end;
+	int i, res = -1;
 
 	memset(memnodemap, 0xff, memnodemapsize);
 	for (i = 0; i < numnodes; i++) {
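
For readers new to this file: memnodemap[] is a flat physical-address-to-node lookup table; populate_memnodemap() stamps each node's id into every slot its address range covers, and a lookup is then just memnodemap[addr >> shift]. A minimal user-space sketch of that idea, with a made-up 16 MB granularity and table size (fake_* names are illustrative, not kernel APIs):

    #include <stdio.h>

    #define FAKE_SHIFT 24              /* 16 MB granularity, illustrative only */

    static unsigned char fake_map[64]; /* 64 slots * 16 MB = 1 GB covered */

    /* Stamp node id 'nid' into every slot covered by [start, end). */
    static void fake_populate(unsigned long start, unsigned long end, int nid)
    {
            unsigned long addr;

            for (addr = start; addr < end; addr += 1UL << FAKE_SHIFT)
                    fake_map[addr >> FAKE_SHIFT] = nid;
    }

    int main(void)
    {
            fake_populate(0x00000000UL, 0x20000000UL, 0); /* node 0: first 512 MB */
            fake_populate(0x20000000UL, 0x40000000UL, 1); /* node 1: next 512 MB */

            /* Lookup is one shift and one index: prints "node 1". */
            printf("0x25000000 is on node %d\n",
                   fake_map[0x25000000UL >> FAKE_SHIFT]);
            return 0;
    }

The real function's 0/-1/1 return values just report whether this shift makes the map too small, loses or overlaps nodes, or works.
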
@@ -70,7 +74,7 @@ populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
 			addr += (1UL << shift);
 		} while (addr < end);
 		res = 1;
 	}
 	return res;
 }
 
@@ -105,8 +109,8 @@ static int __init allocate_cachealigned_memnodemap(void)
  * The LSB of all start and end addresses in the node map is the value of the
  * maximum possible shift.
  */
-static int __init
-extract_lsb_from_nodes (const struct bootnode *nodes, int numnodes)
+static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
+					 int numnodes)
 {
 	int i, nodes_used = 0;
 	unsigned long start, end;
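
The LSB trick above: if every node start and end is a multiple of 2^k, a shift of k can never put two different nodes into the same memnodemap slot, so the smallest set bit across all boundaries gives the largest safe shift. A toy computation under that assumption (lowest_set_bit is a stand-in for the kernel's bit helpers):

    #include <stdio.h>

    /* Position of the lowest set bit of a nonzero value, like __ffs(). */
    static int lowest_set_bit(unsigned long v)
    {
            int bit = 0;

            while (!(v & 1UL)) {
                    v >>= 1;
                    bit++;
            }
            return bit;
    }

    int main(void)
    {
            /* Two nodes: [0, 1 GB) and [1 GB, 3 GB); 0 contributes no bits. */
            unsigned long bounds[] = { 0x40000000UL, 0xC0000000UL };
            int i, shift = 63;

            for (i = 0; i < 2; i++) {
                    int b = lowest_set_bit(bounds[i]);
                    if (b < shift)
                            shift = b;
            }
            /* Both bounds are 1 GB aligned, so bit 30 is the limit: prints 30. */
            printf("max usable memnode_shift: %d\n", shift);
            return 0;
    }
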
@@ -141,10 +145,9 @@ int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
 			 shift);
 
 	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
-		printk(KERN_INFO
-	"Your memory is not aligned you need to rebuild your kernel "
-	"with a bigger NODEMAPSIZE shift=%d\n",
-			shift);
+		printk(KERN_INFO "Your memory is not aligned you need to "
+		       "rebuild your kernel with a bigger NODEMAPSIZE "
+		       "shift=%d\n", shift);
 		return -1;
 	}
 	return shift;
@@ -157,35 +160,37 @@ int early_pfn_to_nid(unsigned long pfn)
 }
 #endif
 
-static void * __init
-early_node_mem(int nodeid, unsigned long start, unsigned long end,
-	       unsigned long size)
+static void * __init early_node_mem(int nodeid, unsigned long start,
+				    unsigned long end, unsigned long size)
 {
 	unsigned long mem = find_e820_area(start, end, size);
 	void *ptr;
+
 	if (mem != -1L)
 		return __va(mem);
 	ptr = __alloc_bootmem_nopanic(size,
 				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 	if (ptr == NULL) {
 		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
 		       size, nodeid);
 		return NULL;
 	}
 	return ptr;
 }
 
 /* Initialize bootmem allocator for a node */
-void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
+void __init setup_node_bootmem(int nodeid, unsigned long start,
+			       unsigned long end)
 {
-	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
-	unsigned long nodedata_phys;
+	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size;
+	unsigned long bootmap_start, nodedata_phys;
 	void *bootmap;
 	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
 
 	start = round_up(start, ZONE_ALIGN);
 
-	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);
+	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
+	       start, end);
 
 	start_pfn = start >> PAGE_SHIFT;
 	end_pfn = end >> PAGE_SHIFT;
@@ -201,75 +206,81 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
 	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
 
 	/* Find a place for the bootmem map */
 	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
 	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
 	bootmap = early_node_mem(nodeid, bootmap_start, end,
				 bootmap_pages<<PAGE_SHIFT);
 	if (bootmap == NULL) {
 		if (nodedata_phys < start || nodedata_phys >= end)
-			free_bootmem((unsigned long)node_data[nodeid],pgdat_size);
+			free_bootmem((unsigned long)node_data[nodeid],
+				     pgdat_size);
 		node_data[nodeid] = NULL;
 		return;
 	}
 	bootmap_start = __pa(bootmap);
 	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);
 
 	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap_start >> PAGE_SHIFT,
					 start_pfn, end_pfn);
 
 	free_bootmem_with_active_regions(nodeid, end);
 
 	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
-	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
+	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
+			     bootmap_pages<<PAGE_SHIFT);
 #ifdef CONFIG_ACPI_NUMA
 	srat_reserve_add_area(nodeid);
 #endif
 	node_set_online(nodeid);
 }
 
 /* Initialize final allocator for a zone */
 void __init setup_node_zones(int nodeid)
 {
 	unsigned long start_pfn, end_pfn, memmapsize, limit;
 
 	start_pfn = node_start_pfn(nodeid);
 	end_pfn = node_end_pfn(nodeid);
 
 	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
 		nodeid, start_pfn, end_pfn);
 
-	/* Try to allocate mem_map at end to not fill up precious <4GB
-	   memory. */
+	/*
+	 * Try to allocate mem_map at end to not fill up precious <4GB
+	 * memory.
+	 */
 	memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
 	limit = end_pfn << PAGE_SHIFT;
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 	NODE_DATA(nodeid)->node_mem_map =
 		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
 				memmapsize, SMP_CACHE_BYTES,
 				round_down(limit - memmapsize, PAGE_SIZE),
 				limit);
 #endif
 }
 
+/*
+ * There are unfortunately some poorly designed mainboards around that
+ * only connect memory to a single CPU. This breaks the 1:1 cpu->node
+ * mapping. To avoid this fill in the mapping for all possible CPUs,
+ * as the number of CPUs is not known yet. We round robin the existing
+ * nodes.
+ */
 void __init numa_init_array(void)
 {
 	int rr, i;
-	/* There are unfortunately some poorly designed mainboards around
-	   that only connect memory to a single CPU. This breaks the 1:1 cpu->node
-	   mapping. To avoid this fill in the mapping for all possible
-	   CPUs, as the number of CPUs is not known yet.
-	   We round robin the existing nodes. */
+
 	rr = first_node(node_online_map);
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_to_node(i) != NUMA_NO_NODE)
 			continue;
 		numa_set_node(i, rr);
 		rr = next_node(rr, node_online_map);
 		if (rr == MAX_NUMNODES)
 			rr = first_node(node_online_map);
 	}
-
 }
 
 #ifdef CONFIG_NUMA_EMU
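
The comment block moved above numa_init_array() documents what its loop does; in isolation the round-robin assignment looks like this toy version (plain C, with small arrays standing in for cpu_to_node[] and node_online_map):

    #include <stdio.h>

    #define TOY_NR_CPUS 8
    #define TOY_NO_NODE (-1)

    int main(void)
    {
            int cpu_node[TOY_NR_CPUS];
            int online_nodes[] = { 0, 1 }; /* stand-in for node_online_map */
            int rr = 0, i;

            /* CPUs 0 and 1 already got nodes from the firmware tables. */
            for (i = 0; i < TOY_NR_CPUS; i++)
                    cpu_node[i] = (i < 2) ? i : TOY_NO_NODE;

            /* Round robin the rest over the online nodes, like the loop above. */
            for (i = 0; i < TOY_NR_CPUS; i++) {
                    if (cpu_node[i] != TOY_NO_NODE)
                            continue;
                    cpu_node[i] = online_nodes[rr];
                    rr = (rr + 1) % 2; /* next_node() wrapping to first_node() */
            }

            for (i = 0; i < TOY_NR_CPUS; i++)
                    printf("cpu %d -> node %d\n", i, cpu_node[i]);
            return 0;
    }
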
@@ -277,15 +288,17 @@ void __init numa_init_array(void)
 char *cmdline __initdata;
 
 /*
- * Setups up nid to range from addr to addr + size. If the end boundary is
- * greater than max_addr, then max_addr is used instead. The return value is 0
- * if there is additional memory left for allocation past addr and -1 otherwise.
- * addr is adjusted to be at the end of the node.
+ * Setups up nid to range from addr to addr + size. If the end
+ * boundary is greater than max_addr, then max_addr is used instead.
+ * The return value is 0 if there is additional memory left for
+ * allocation past addr and -1 otherwise. addr is adjusted to be at
+ * the end of the node.
  */
 static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
				   u64 size, u64 max_addr)
 {
 	int ret = 0;
+
 	nodes[nid].start = *addr;
 	*addr += size;
 	if (*addr >= max_addr) {
@@ -336,6 +349,7 @@ static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
 
 	for (i = node_start; i < num_nodes + node_start; i++) {
 		u64 end = *addr + size;
+
 		if (i < big)
 			end += FAKE_NODE_MIN_SIZE;
 		/*
@@ -381,14 +395,9 @@ static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
 static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct bootnode nodes[MAX_NUMNODES];
-	u64 addr = start_pfn << PAGE_SHIFT;
+	u64 size, addr = start_pfn << PAGE_SHIFT;
 	u64 max_addr = end_pfn << PAGE_SHIFT;
-	int num_nodes = 0;
-	int coeff_flag;
-	int coeff = -1;
-	int num = 0;
-	u64 size;
-	int i;
+	int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;
 
 	memset(&nodes, 0, sizeof(nodes));
 	/*
@@ -396,8 +405,9 @@ static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
 	 * system RAM into N fake nodes.
 	 */
 	if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
-		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0,
-						simple_strtol(cmdline, NULL, 0));
+		long n = simple_strtol(cmdline, NULL, 0);
+
+		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0, n);
 		if (num_nodes < 0)
 			return num_nodes;
 		goto out;
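
Context for this hunk: when the numa=fake= argument is a bare number (no '*' or ',' coefficients), the whole physical range is split into that many equal nodes. A rough user-space rendering of that split, ignoring the FAKE_NODE_MIN_SIZE rounding the real split_nodes_equally() performs (all names and the 4 GB figure are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *cmdline = "4"; /* as if the user booted with numa=fake=4 */
            unsigned long long addr = 0, max_addr = 0x100000000ULL; /* 4 GB RAM */
            long n = strtol(cmdline, NULL, 0); /* simple_strtol() analogue */
            unsigned long long size = (max_addr - addr) / n;
            long i;

            for (i = 0; i < n; i++) {
                    printf("fake node %ld: %#llx-%#llx\n", i, addr, addr + size);
                    addr += size;
            }
            return 0;
    }
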
@@ -484,46 +494,47 @@ out:
 	for_each_node_mask(i, node_possible_map) {
 		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						nodes[i].end >> PAGE_SHIFT);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
 	acpi_fake_nodes(nodes, num_nodes);
 	numa_init_array();
 	return 0;
 }
 #endif /* CONFIG_NUMA_EMU */
 
 void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {
 	int i;
 
 	nodes_clear(node_possible_map);
 
 #ifdef CONFIG_NUMA_EMU
 	if (cmdline && !numa_emulation(start_pfn, end_pfn))
 		return;
 	nodes_clear(node_possible_map);
 #endif
 
 #ifdef CONFIG_ACPI_NUMA
 	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
					  end_pfn << PAGE_SHIFT))
 		return;
 	nodes_clear(node_possible_map);
 #endif
 
 #ifdef CONFIG_K8_NUMA
-	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
+	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
					end_pfn<<PAGE_SHIFT))
 		return;
 	nodes_clear(node_possible_map);
 #endif
 	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
 
 	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       end_pfn << PAGE_SHIFT);
 	/* setup dummy node covering all memory */
 	memnode_shift = 63;
 	memnodemap = memnode.embedded_map;
 	memnodemap[0] = 0;
 	nodes_clear(node_online_map);
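
The dummy-node fallback above leans on the memnodemap lookup convention: with memnode_shift = 63, every physical address below 2^63 shifts down to index 0, and memnodemap[0] = 0 routes all of memory to node 0. Spelled out as a toy (assumes 64-bit unsigned long, as on x86-64):

    #include <stdio.h>

    int main(void)
    {
            /* Mirrors: memnode_shift = 63; memnodemap = memnode.embedded_map;
             * memnodemap[0] = 0;  -- every lookup lands in slot 0. */
            unsigned char map_stub[1] = { 0 };
            int shift = 63;
            unsigned long addrs[] = { 0x0UL, 0x100000UL, 0xffff0000UL };
            int i;

            for (i = 0; i < 3; i++)
                    printf("%#lx -> node %d\n", addrs[i],
                           map_stub[addrs[i] >> shift]);
            return 0;
    }
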
@@ -539,7 +550,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 __cpuinit void numa_add_cpu(int cpu)
 {
 	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
 }
 
 void __cpuinit numa_set_node(int cpu, int node)
 {
@@ -547,20 +558,22 @@ void __cpuinit numa_set_node(int cpu, int node)
 	cpu_to_node(cpu) = node;
 }
 
 unsigned long __init numa_free_all_bootmem(void)
 {
-	int i;
 	unsigned long pages = 0;
-	for_each_online_node(i) {
+	int i;
+
+	for_each_online_node(i)
 		pages += free_all_bootmem_node(NODE_DATA(i));
-	}
+
 	return pages;
 }
 
 void __init paging_init(void)
 {
-	int i;
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	int i;
+
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
@@ -569,32 +582,30 @@ void __init paging_init(void)
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
 
-	for_each_online_node(i) {
+	for_each_online_node(i)
 		setup_node_zones(i);
-	}
 
 	free_area_init_nodes(max_zone_pfns);
 }
 
 static __init int numa_setup(char *opt)
 {
 	if (!opt)
 		return -EINVAL;
-	if (!strncmp(opt,"off",3))
+	if (!strncmp(opt, "off", 3))
 		numa_off = 1;
 #ifdef CONFIG_NUMA_EMU
 	if (!strncmp(opt, "fake=", 5))
 		cmdline = opt + 5;
 #endif
 #ifdef CONFIG_ACPI_NUMA
-	if (!strncmp(opt,"noacpi",6))
+	if (!strncmp(opt, "noacpi", 6))
 		acpi_numa = -1;
-	if (!strncmp(opt,"hotadd=", 7))
+	if (!strncmp(opt, "hotadd=", 7))
 		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
 #endif
 	return 0;
 }
-
 early_param("numa", numa_setup);
 
 /*
@@ -612,20 +623,18 @@ early_param("numa", numa_setup);
 void __init init_cpu_to_node(void)
 {
 	int i;
-	for (i = 0; i < NR_CPUS; i++) {
+
+	for (i = 0; i < NR_CPUS; i++) {
 		u8 apicid = x86_cpu_to_apicid_init[i];
+
 		if (apicid == BAD_APICID)
 			continue;
 		if (apicid_to_node[apicid] == NUMA_NO_NODE)
 			continue;
-		numa_set_node(i,apicid_to_node[apicid]);
+		numa_set_node(i, apicid_to_node[apicid]);
 	}
 }
 
-EXPORT_SYMBOL(cpu_to_node);
-EXPORT_SYMBOL(node_to_cpumask);
-EXPORT_SYMBOL(node_data);
-
 #ifdef CONFIG_DISCONTIGMEM
 /*
  * Functions to convert PFNs from/to per node page addresses.
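
The DISCONTIGMEM helpers this comment introduces sit on the same scheme used throughout the file: a PFN is converted to a physical address, which the memnodemap then resolves to a node, essentially memnodemap[(pfn << PAGE_SHIFT) >> memnode_shift]. A toy composition of the two steps (toy_* names are illustrative, not the kernel's):

    #include <stdio.h>

    #define TOY_PAGE_SHIFT 12 /* 4 KB pages */

    static int toy_shift = 24;        /* illustrative memnode_shift */
    static unsigned char toy_map[64]; /* illustrative memnodemap */

    /* pfn -> physical address -> node, the shape of a phys_to_nid()-style lookup. */
    static int toy_pfn_to_nid(unsigned long pfn)
    {
            return toy_map[(pfn << TOY_PAGE_SHIFT) >> toy_shift];
    }

    int main(void)
    {
            toy_map[1] = 1; /* pretend 16-32 MB belongs to node 1 */

            /* pfn 0x1800 is the page at 24 MB: (0x1800 << 12) >> 24 == 1 */
            printf("pfn 0x1800 -> node %d\n", toy_pfn_to_nid(0x1800UL));
            return 0;
    }
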