Diffstat (limited to 'arch/powerpc/mm/numa.c')
-rw-r--r--  arch/powerpc/mm/numa.c  37
1 file changed, 20 insertions, 17 deletions
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index aabf30175eb5..9ec9939f9fb0 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -822,23 +822,28 @@ static void __init dump_numa_memory_topology(void)
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
  *
- * Returns the physical address of the memory.
+ * Returns the virtual address of the memory.
  */
 static void __init *careful_allocation(int nid, unsigned long size,
 				       unsigned long align,
 				       unsigned long end_pfn)
 {
+	void *ret;
 	int new_nid;
-	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	unsigned long ret_paddr;
+
+	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
-	if (!ret)
-		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+	if (!ret_paddr)
+		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
 
-	if (!ret)
+	if (!ret_paddr)
 		panic("numa.c: cannot allocate %lu bytes for node %d",
 		      size, nid);
 
+	ret = __va(ret_paddr);
+
 	/*
 	 * We initialize the nodes in numeric order: 0, 1, 2...
 	 * and hand over control from the LMB allocator to the
@@ -851,17 +856,15 @@ static void __init *careful_allocation(int nid, unsigned long size,
 	 * instead of the LMB. We don't free the LMB memory
 	 * since it would be useless.
 	 */
-	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
+	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
 	if (new_nid < nid) {
-		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
+		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
 				size, align, 0);
 
-		ret = __pa(ret);
-
-		dbg("alloc_bootmem %lx %lx\n", ret, size);
+		dbg("alloc_bootmem %p %lx\n", ret, size);
 	}
 
-	return (void *)ret;
+	return ret;
 }
 
 static struct notifier_block __cpuinitdata ppc64_numa_nb = {
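
Taken together, the two hunks above make careful_allocation() always return a virtual address: the LMB result is converted once with __va(), and the bootmem fallback path no longer converts back with __pa() before returning. The sketch below is a minimal userspace model of that flow, not kernel code; model_lmb_alloc(), model_va(), and the dram[] array are hypothetical stand-ins for __lmb_alloc_base(), __va(), and physical memory.

/*
 * Minimal userspace model (NOT kernel code) of the post-patch
 * careful_allocation() flow: try an allocation below the node's top of
 * memory, retry over all of "DRAM", bail out if both attempts fail, and
 * always hand the caller a virtual address.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SHIFT	12
#define DRAM_BYTES	(1UL << 20)

static unsigned char dram[DRAM_BYTES];		/* fake physical memory */
static unsigned long next_free = DRAM_BYTES;	/* simple top-down bump */

/* Stand-in for __lmb_alloc_base(): returns a "physical address"
 * (an offset into dram[]), or 0 on failure. */
static unsigned long model_lmb_alloc(unsigned long size, unsigned long limit)
{
	unsigned long top = next_free < limit ? next_free : limit;

	if (top < size)
		return 0;
	next_free = top - size;
	return next_free;
}

/* Stand-in for __va(): a linear phys-to-virt mapping. */
static void *model_va(unsigned long paddr)
{
	return dram + paddr;
}

static void *careful_allocation_model(int nid, unsigned long size,
				      unsigned long end_pfn)
{
	unsigned long ret_paddr;

	ret_paddr = model_lmb_alloc(size, end_pfn << PAGE_SHIFT);
	if (!ret_paddr)			/* retry over all memory */
		ret_paddr = model_lmb_alloc(size, DRAM_BYTES);
	if (!ret_paddr) {
		fprintf(stderr, "cannot allocate %lu bytes for node %d\n",
			size, nid);
		exit(1);
	}
	return model_va(ret_paddr);	/* always a virtual address */
}

int main(void)
{
	void *node_data = careful_allocation_model(0, 4096, 64);

	memset(node_data, 0, 4096);	/* caller uses it directly, no __va() */
	printf("node 0 data at %p\n", node_data);
	return 0;
}

With the virtual address returned from one place, callers such as do_init_bootmem() below can drop their own __va() calls, which is what the remaining hunks do.
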
@@ -956,7 +959,7 @@ void __init do_init_bootmem(void)
 
 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn;
-		unsigned long bootmem_paddr;
+		void *bootmem_vaddr;
 		unsigned long bootmap_pages;
 
 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
@@ -971,7 +974,6 @@ void __init do_init_bootmem(void)
 		NODE_DATA(nid) = careful_allocation(nid,
 					sizeof(struct pglist_data),
 					SMP_CACHE_BYTES, end_pfn);
-		NODE_DATA(nid) = __va(NODE_DATA(nid));
 		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
 		dbg("node %d\n", nid);
@@ -988,14 +990,15 @@ void __init do_init_bootmem(void)
 		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
 
 		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-		bootmem_paddr = (unsigned long)careful_allocation(nid,
+		bootmem_vaddr = careful_allocation(nid,
 					bootmap_pages << PAGE_SHIFT,
 					PAGE_SIZE, end_pfn);
-		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);
+		memset(bootmem_vaddr, 0, bootmap_pages << PAGE_SHIFT);
 
-		dbg("bootmap_paddr = %lx\n", bootmem_paddr);
+		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
 
-		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
+		init_bootmem_node(NODE_DATA(nid),
+				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
 				  start_pfn, end_pfn);
 
 		free_bootmem_with_active_regions(nid, end_pfn);
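
On the caller side, the do_init_bootmem() hunks drop the __va()/__pa() juggling: NODE_DATA(nid) and bootmem_vaddr are now used directly as virtual addresses, and the only conversion left is producing the page frame number that init_bootmem_node() expects, via __pa(bootmem_vaddr) >> PAGE_SHIFT. The short sketch below models just that conversion under an assumed linear kernel mapping on a 64-bit build; KERNEL_OFFSET and model_pa() are hypothetical stand-ins for the real linear map and __pa().

/*
 * Sketch of the one address conversion left in the caller: turning the
 * virtual address returned by careful_allocation() into the page frame
 * number passed to init_bootmem_node(), i.e. __pa(vaddr) >> PAGE_SHIFT.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define KERNEL_OFFSET	0xc000000000000000UL	/* assumed linear-map base */

static unsigned long model_pa(const void *vaddr)	/* stand-in for __pa() */
{
	return (unsigned long)vaddr - KERNEL_OFFSET;
}

int main(void)
{
	/* Pretend careful_allocation() handed back this virtual address. */
	void *bootmem_vaddr = (void *)(KERNEL_OFFSET + 0x2345000UL);
	unsigned long bootmap_pfn = model_pa(bootmem_vaddr) >> PAGE_SHIFT;

	printf("bootmap_vaddr = %p, bootmap pfn = 0x%lx\n",
	       bootmem_vaddr, bootmap_pfn);
	return 0;
}

Under a linear mapping the virtual-to-pfn conversion is a subtraction and a shift, which is why the caller no longer needs to carry a separate bootmem_paddr variable at all.
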