Diffstat (limited to 'arch/x86/mm/numa_64.c')
-rw-r--r--  arch/x86/mm/numa_64.c | 82
1 file changed, 48 insertions(+), 34 deletions(-)
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 681bc0d59db5..c490448d716a 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -46,8 +46,6 @@ static unsigned long __initdata nodemap_size;
 
 static struct numa_meminfo numa_meminfo __initdata;
 
-struct bootnode numa_nodes[MAX_NUMNODES] __initdata;
-
 /*
  * Given a shift value, try to populate memnodemap[]
  * Returns :
@@ -349,17 +347,17 @@ static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
  * Sanity check to catch more bad NUMA configurations (they are amazingly
  * common). Make sure the nodes cover all memory.
  */
-static int __init nodes_cover_memory(const struct bootnode *nodes)
+static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
 {
 	unsigned long numaram, e820ram;
 	int i;
 
 	numaram = 0;
-	for_each_node_mask(i, mem_nodes_parsed) {
-		unsigned long s = nodes[i].start >> PAGE_SHIFT;
-		unsigned long e = nodes[i].end >> PAGE_SHIFT;
+	for (i = 0; i < mi->nr_blks; i++) {
+		unsigned long s = mi->blk[i].start >> PAGE_SHIFT;
+		unsigned long e = mi->blk[i].end >> PAGE_SHIFT;
 		numaram += e - s;
-		numaram -= __absent_pages_in_range(i, s, e);
+		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
 		if ((long)numaram < 0)
 			numaram = 0;
 	}
@@ -371,14 +369,14 @@ static int __init nodes_cover_memory(const struct bootnode *nodes)
 		printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n",
 		       (numaram << PAGE_SHIFT) >> 20,
 		       (e820ram << PAGE_SHIFT) >> 20);
-		return 0;
+		return false;
 	}
-	return 1;
+	return true;
 }
 
 static int __init numa_register_memblks(struct numa_meminfo *mi)
 {
-	int i;
+	int i, j, nid;
 
 	/* Account for nodes with cpus and no memory */
 	nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed);
@@ -398,23 +396,34 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 
 	/* for out of order entries */
 	sort_node_map();
-	if (!nodes_cover_memory(numa_nodes))
+	if (!numa_meminfo_cover_memory(mi))
 		return -EINVAL;
 
 	init_memory_mapping_high();
 
-	/* Finally register nodes. */
-	for_each_node_mask(i, node_possible_map)
-		setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);
-
 	/*
-	 * Try again in case setup_node_bootmem missed one due to missing
-	 * bootmem.
+	 * Finally register nodes. Do it twice in case setup_node_bootmem
+	 * missed one due to missing bootmem.
 	 */
-	for_each_node_mask(i, node_possible_map)
-		if (!node_online(i))
-			setup_node_bootmem(i, numa_nodes[i].start,
-					   numa_nodes[i].end);
+	for (i = 0; i < 2; i++) {
+		for_each_node_mask(nid, node_possible_map) {
+			u64 start = (u64)max_pfn << PAGE_SHIFT;
+			u64 end = 0;
+
+			if (node_online(nid))
+				continue;
+
+			for (j = 0; j < mi->nr_blks; j++) {
+				if (nid != mi->blk[j].nid)
+					continue;
+				start = min(mi->blk[j].start, start);
+				end = max(mi->blk[j].end, end);
+			}
+
+			if (start < end)
+				setup_node_bootmem(nid, start, end);
+		}
+	}
 
 	return 0;
 }
@@ -432,33 +441,41 @@ void __init numa_emu_cmdline(char *str)
 
 int __init find_node_by_addr(unsigned long addr)
 {
-	int ret = NUMA_NO_NODE;
+	const struct numa_meminfo *mi = &numa_meminfo;
 	int i;
 
-	for_each_node_mask(i, mem_nodes_parsed) {
+	for (i = 0; i < mi->nr_blks; i++) {
 		/*
 		 * Find the real node that this emulated node appears on. For
 		 * the sake of simplicity, we only use a real node's starting
 		 * address to determine which emulated node it appears on.
 		 */
-		if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) {
-			ret = i;
-			break;
-		}
+		if (addr >= mi->blk[i].start && addr < mi->blk[i].end)
+			return mi->blk[i].nid;
 	}
-	return ret;
+	return NUMA_NO_NODE;
 }
 
 static int __init setup_physnodes(unsigned long start, unsigned long end)
 {
+	const struct numa_meminfo *mi = &numa_meminfo;
 	int ret = 0;
 	int i;
 
 	memset(physnodes, 0, sizeof(physnodes));
 
-	for_each_node_mask(i, mem_nodes_parsed) {
-		physnodes[i].start = numa_nodes[i].start;
-		physnodes[i].end = numa_nodes[i].end;
+	for (i = 0; i < mi->nr_blks; i++) {
+		int nid = mi->blk[i].nid;
+
+		if (physnodes[nid].start == physnodes[nid].end) {
+			physnodes[nid].start = mi->blk[i].start;
+			physnodes[nid].end = mi->blk[i].end;
+		} else {
+			physnodes[nid].start = min(physnodes[nid].start,
+						   mi->blk[i].start);
+			physnodes[nid].end = max(physnodes[nid].end,
+						 mi->blk[i].end);
+		}
 	}
 
 	/*
@@ -809,8 +826,6 @@ static int dummy_numa_init(void)
 	node_set(0, cpu_nodes_parsed);
 	node_set(0, mem_nodes_parsed);
 	numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);
-	numa_nodes[0].start = 0;
-	numa_nodes[0].end = (u64)max_pfn << PAGE_SHIFT;
 
 	return 0;
 }
@@ -841,7 +856,6 @@ void __init initmem_init(void)
 		nodes_clear(node_possible_map);
 		nodes_clear(node_online_map);
 		memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-		memset(numa_nodes, 0, sizeof(numa_nodes));
 		remove_all_active_ranges();
 
 		if (numa_init[i]() < 0)
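Editor's aside, not part of the commit: the new registration loop in numa_register_memblks() recovers each offline node's [start, end) span by scanning the numa_meminfo blocks with min()/max(), seeded with start = max_pfn << PAGE_SHIFT and end = 0 so a node with no blocks fails the start < end check and is skipped. The sketch below is a standalone, hypothetical illustration of that scan in plain C (made-up block values and types, not the kernel's).

/* Illustrative only: derive each node's span from its blocks the same
 * way the patched loop does; data and types are hypothetical. */
#include <stdio.h>
#include <stdint.h>

struct memblk { int nid; uint64_t start, end; };

int main(void)
{
	/* Hypothetical parsed blocks; node 0 is split into two ranges. */
	struct memblk blk[] = {
		{ 0, 0x00000000ULL, 0x20000000ULL },
		{ 1, 0x20000000ULL, 0x40000000ULL },
		{ 0, 0x40000000ULL, 0x50000000ULL },
	};
	int nr_blks = sizeof(blk) / sizeof(blk[0]);
	uint64_t max_addr = 0x50000000ULL;	/* stands in for max_pfn << PAGE_SHIFT */

	for (int nid = 0; nid < 2; nid++) {
		uint64_t start = max_addr;	/* seed high ... */
		uint64_t end = 0;		/* ... and low, so min/max converge */

		for (int j = 0; j < nr_blks; j++) {
			if (blk[j].nid != nid)
				continue;
			if (blk[j].start < start)
				start = blk[j].start;
			if (blk[j].end > end)
				end = blk[j].end;
		}

		if (start < end)	/* a node with no blocks is skipped */
			printf("node %d: [%#llx, %#llx)\n", nid,
			       (unsigned long long)start,
			       (unsigned long long)end);
	}
	return 0;
}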