author     Ingo Molnar <mingo@elte.hu>  2011-03-05 01:32:45 -0500
committer  Ingo Molnar <mingo@elte.hu>  2011-03-05 01:32:45 -0500
commit     ca764aaf025d2c83054191895b366fa81a9ccf48 (patch)
tree       e8e3bc880f4b269c924ccdf51ba8d6f3ff33b765 /arch/x86
parent     d04c579f971bf7d995db1ef7a7161c0143068859 (diff)
parent     078a198906c796981f93ff100c210506e91aade5 (diff)
Merge branch 'x86-mm' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into x86/mm
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/page_types.h |  2
-rw-r--r--  arch/x86/kernel/setup.c           |  8
-rw-r--r--  arch/x86/mm/init_64.c             | 54
-rw-r--r--  arch/x86/mm/numa_64.c             | 96
-rw-r--r--  arch/x86/mm/numa_emulation.c      | 20
5 files changed, 79 insertions, 101 deletions
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 97e6007e4edd..bce688d54c12 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -54,8 +54,6 @@ static inline phys_addr_t get_max_mapped(void)
 extern unsigned long init_memory_mapping(unsigned long start,
					 unsigned long end);
 
-void init_memory_mapping_high(void);
-
 extern void initmem_init(void);
 extern void free_initmem(void);
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 46e684f85b36..c3a606c41ce0 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -963,6 +963,14 @@ void __init setup_arch(char **cmdline_p)
 	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
 	max_pfn_mapped = max_low_pfn_mapped;
 
+#ifdef CONFIG_X86_64
+	if (max_pfn > max_low_pfn) {
+		max_pfn_mapped = init_memory_mapping(1UL<<32,
+						     max_pfn<<PAGE_SHIFT);
+		/* can we preseve max_low_pfn ?*/
+		max_low_pfn = max_pfn;
+	}
+#endif
 	memblock.current_limit = get_max_mapped();
 
 	/*
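The block added to setup_arch() direct-maps all RAM above the 4 GiB boundary as soon as the low range is mapped, instead of deferring it to init_memory_mapping_high(). The arithmetic is plain pfn-to-address shifting; a minimal standalone sketch of the same condition and range (ordinary userspace C with a hard-coded PAGE_SHIFT of 12 and made-up memory sizes, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

int main(void)
{
	uint64_t max_low_pfn = 1ULL << (32 - PAGE_SHIFT);	/* first pfn at the 4 GiB mark */
	uint64_t max_pfn     = 6ULL << (30 - PAGE_SHIFT);	/* pretend the box has 6 GiB of RAM */

	if (max_pfn > max_low_pfn)
		/* same shape as the added branch: map [4 GiB, top of RAM) */
		printf("map 0x%llx - 0x%llx\n",
		       (unsigned long long)(1ULL << 32),
		       (unsigned long long)(max_pfn << PAGE_SHIFT));
	return 0;
}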
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 470cc4704a9a..c8813aa39740 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -606,63 +606,9 @@ kernel_physical_mapping_init(unsigned long start,
 void __init initmem_init(void)
 {
 	memblock_x86_register_active_regions(0, 0, max_pfn);
-	init_memory_mapping_high();
 }
 #endif
 
-struct mapping_work_data {
-	unsigned long start;
-	unsigned long end;
-	unsigned long pfn_mapped;
-};
-
-static int __init_refok
-mapping_work_fn(unsigned long start_pfn, unsigned long end_pfn, void *datax)
-{
-	struct mapping_work_data *data = datax;
-	unsigned long pfn_mapped;
-	unsigned long final_start, final_end;
-
-	final_start = max_t(unsigned long, start_pfn<<PAGE_SHIFT, data->start);
-	final_end = min_t(unsigned long, end_pfn<<PAGE_SHIFT, data->end);
-
-	if (final_end <= final_start)
-		return 0;
-
-	pfn_mapped = init_memory_mapping(final_start, final_end);
-
-	if (pfn_mapped > data->pfn_mapped)
-		data->pfn_mapped = pfn_mapped;
-
-	return 0;
-}
-
-static unsigned long __init_refok
-init_memory_mapping_active_regions(unsigned long start, unsigned long end)
-{
-	struct mapping_work_data data;
-
-	data.start = start;
-	data.end = end;
-	data.pfn_mapped = 0;
-
-	work_with_active_regions(MAX_NUMNODES, mapping_work_fn, &data);
-
-	return data.pfn_mapped;
-}
-
-void __init_refok init_memory_mapping_high(void)
-{
-	if (max_pfn > max_low_pfn) {
-		max_pfn_mapped = init_memory_mapping_active_regions(1UL<<32,
-							max_pfn<<PAGE_SHIFT);
-		/* can we preserve max_low_pfn ? */
-		max_low_pfn = max_pfn;
-
-		memblock.current_limit = get_max_mapped();
-	}
-}
-
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
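The deleted helpers walked every active memory region and clamped it into the window above 4 GiB before mapping it; with the setup_arch() change above, that whole range is mapped in one call and the per-region walk goes away. The clamp itself is just a max/min intersection of two half-open ranges; a small userspace sketch of that pattern (invented example ranges, not the kernel helpers):

#include <stdio.h>
#include <stdint.h>

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* window of interest: 4 GiB up to 6 GiB */
	uint64_t win_start = 1ULL << 32, win_end = 6ULL << 30;
	/* one example memory region: 2 GiB up to 5 GiB */
	uint64_t reg_start = 2ULL << 30, reg_end = 5ULL << 30;

	/* clamp the region into the window, as mapping_work_fn() did */
	uint64_t final_start = max_u64(reg_start, win_start);
	uint64_t final_end   = min_u64(reg_end, win_end);

	if (final_end <= final_start)
		printf("no overlap, nothing to map\n");
	else
		printf("map 0x%llx - 0x%llx\n",
		       (unsigned long long)final_start,
		       (unsigned long long)final_end);
	return 0;
}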
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 74064e8ae79f..9ec0f209a6a4 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -543,8 +543,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 	if (!numa_meminfo_cover_memory(mi))
 		return -EINVAL;
 
-	init_memory_mapping_high();
-
 	/* Finally register nodes. */
 	for_each_node_mask(nid, node_possible_map) {
 		u64 start = (u64)max_pfn << PAGE_SHIFT;
@@ -564,6 +562,15 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 	return 0;
 }
 
+/**
+ * dummy_numma_init - Fallback dummy NUMA init
+ *
+ * Used if there's no underlying NUMA architecture, NUMA initialization
+ * fails, or NUMA is disabled on the command line.
+ *
+ * Must online at least one node and add memory blocks that cover all
+ * allowed memory. This function must not fail.
+ */
 static int __init dummy_numa_init(void)
 {
 	printk(KERN_INFO "%s\n",
@@ -577,57 +584,64 @@ static int __init dummy_numa_init(void)
 	return 0;
 }
 
-void __init initmem_init(void)
+static int __init numa_init(int (*init_func)(void))
 {
-	int (*numa_init[])(void) = { [2] = dummy_numa_init };
-	int i, j;
+	int i;
+	int ret;
 
-	if (!numa_off) {
-#ifdef CONFIG_ACPI_NUMA
-		numa_init[0] = x86_acpi_numa_init;
-#endif
-#ifdef CONFIG_AMD_NUMA
-		numa_init[1] = amd_numa_init;
-#endif
-	}
+	for (i = 0; i < MAX_LOCAL_APIC; i++)
+		set_apicid_to_node(i, NUMA_NO_NODE);
 
-	for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
-		if (!numa_init[i])
-			continue;
+	nodes_clear(numa_nodes_parsed);
+	nodes_clear(node_possible_map);
+	nodes_clear(node_online_map);
+	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+	remove_all_active_ranges();
+	numa_reset_distance();
 
-		for (j = 0; j < MAX_LOCAL_APIC; j++)
-			set_apicid_to_node(j, NUMA_NO_NODE);
+	ret = init_func();
+	if (ret < 0)
+		return ret;
+	ret = numa_cleanup_meminfo(&numa_meminfo);
+	if (ret < 0)
+		return ret;
 
-		nodes_clear(numa_nodes_parsed);
-		nodes_clear(node_possible_map);
-		nodes_clear(node_online_map);
-		memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-		remove_all_active_ranges();
-		numa_reset_distance();
+	numa_emulation(&numa_meminfo, numa_distance_cnt);
 
-		if (numa_init[i]() < 0)
-			continue;
+	ret = numa_register_memblks(&numa_meminfo);
+	if (ret < 0)
+		return ret;
 
-		if (numa_cleanup_meminfo(&numa_meminfo) < 0)
-			continue;
+	for (i = 0; i < nr_cpu_ids; i++) {
+		int nid = early_cpu_to_node(i);
 
-		numa_emulation(&numa_meminfo, numa_distance_cnt);
-
-		if (numa_register_memblks(&numa_meminfo) < 0)
+		if (nid == NUMA_NO_NODE)
 			continue;
+		if (!node_online(nid))
+			numa_clear_node(i);
+	}
+	numa_init_array();
+	return 0;
+}
 
-		for (j = 0; j < nr_cpu_ids; j++) {
-			int nid = early_cpu_to_node(j);
+void __init initmem_init(void)
+{
+	int ret;
 
-			if (nid == NUMA_NO_NODE)
-				continue;
-			if (!node_online(nid))
-				numa_clear_node(j);
-		}
-		numa_init_array();
-		return;
+	if (!numa_off) {
+#ifdef CONFIG_ACPI_NUMA
+		ret = numa_init(x86_acpi_numa_init);
+		if (!ret)
+			return;
+#endif
+#ifdef CONFIG_AMD_NUMA
+		ret = numa_init(amd_numa_init);
+		if (!ret)
+			return;
+#endif
 	}
-	BUG();
+
+	numa_init(dummy_numa_init);
 }
 
 unsigned long __init numa_free_all_bootmem(void)
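The rewrite above replaces the numa_init[] function-pointer array and its open-coded retry loop with a numa_init() wrapper that resets all NUMA state, runs one detection routine, and propagates its return value, while initmem_init() tries ACPI first, then AMD, then the dummy fallback that cannot fail. A compact userspace sketch of that try-in-order control flow (the detector functions here are invented stand-ins, not the kernel ones):

#include <stdio.h>

/* stand-in detectors: the first two "fail", the dummy always succeeds */
static int acpi_detect(void)  { return -1; }
static int amd_detect(void)   { return -1; }
static int dummy_detect(void) { printf("falling back to one fake node\n"); return 0; }

/* reset state, run one detector, report its status -- the numa_init() shape */
static int numa_setup(int (*init_func)(void))
{
	/* (per-attempt state reset would go here) */
	return init_func();
}

int main(void)
{
	if (!numa_setup(acpi_detect))
		return 0;
	if (!numa_setup(amd_detect))
		return 0;
	numa_setup(dummy_detect);	/* last resort, must not fail */
	return 0;
}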
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index aeecea93820f..3696be0c2204 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -301,6 +301,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 	const u64 max_addr = max_pfn << PAGE_SHIFT;
 	u8 *phys_dist = NULL;
 	size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]);
+	int dfl_phys_nid;
 	int i, j, ret;
 
 	if (!emu_cmdline)
@@ -357,6 +358,19 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
				node_distance(i, j);
 	}
 
+	/* determine the default phys nid to use for unmapped nodes */
+	dfl_phys_nid = NUMA_NO_NODE;
+	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) {
+		if (emu_nid_to_phys[i] != NUMA_NO_NODE) {
+			dfl_phys_nid = emu_nid_to_phys[i];
+			break;
+		}
+	}
+	if (dfl_phys_nid == NUMA_NO_NODE) {
+		pr_warning("NUMA: Warning: can't determine default physical node, disabling emulation\n");
+		goto no_emu;
+	}
+
 	/* commit */
 	*numa_meminfo = ei;
 
@@ -377,7 +391,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 	/* make sure all emulated nodes are mapped to a physical node */
 	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
 		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
-			emu_nid_to_phys[i] = 0;
+			emu_nid_to_phys[i] = dfl_phys_nid;
 
 	/*
 	 * Transform distance table. numa_set_distance() ignores all
@@ -417,9 +431,7 @@ void __cpuinit numa_add_cpu(int cpu)
 {
 	int physnid, nid;
 
-	nid = numa_cpu_node(cpu);
-	if (nid == NUMA_NO_NODE)
-		nid = early_cpu_to_node(cpu);
+	nid = early_cpu_to_node(cpu);
 	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
 
 	physnid = emu_nid_to_phys[nid];
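The numa_emulation() change stops hard-coding physical node 0 for emulated nodes that end up unmapped: it now picks the first entry of emu_nid_to_phys[] that already points at a real node as the default and aborts emulation if no such entry exists. A small userspace sketch of that selection and fallback (array size and contents are invented for the example):

#include <stdio.h>

#define NO_NODE		(-1)
#define NR_EMU_NIDS	8

int main(void)
{
	/* example emulated-to-physical map; most entries unmapped */
	int emu_to_phys[NR_EMU_NIDS] = { NO_NODE, NO_NODE, 1, NO_NODE, 1, 0, NO_NODE, NO_NODE };
	int dfl = NO_NODE;
	int i;

	/* first mapped entry becomes the default */
	for (i = 0; i < NR_EMU_NIDS; i++) {
		if (emu_to_phys[i] != NO_NODE) {
			dfl = emu_to_phys[i];
			break;
		}
	}
	if (dfl == NO_NODE) {
		printf("no physical node mapped, emulation would be disabled\n");
		return 1;
	}
	/* unmapped entries inherit the default instead of hard-coded node 0 */
	for (i = 0; i < NR_EMU_NIDS; i++)
		if (emu_to_phys[i] == NO_NODE)
			emu_to_phys[i] = dfl;

	for (i = 0; i < NR_EMU_NIDS; i++)
		printf("emu nid %d -> phys nid %d\n", i, emu_to_phys[i]);
	return 0;
}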