Diffstat (limited to 'arch/powerpc/mm/numa.c')
 arch/powerpc/mm/numa.c | 62 ++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 34 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index cf81049e1e51..7393bd76d698 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -822,42 +822,50 @@ static void __init dump_numa_memory_topology(void)
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
  *
- * Returns the physical address of the memory.
+ * Returns the virtual address of the memory.
  */
-static void __init *careful_allocation(int nid, unsigned long size,
+static void __init *careful_zallocation(int nid, unsigned long size,
 				       unsigned long align,
 				       unsigned long end_pfn)
 {
+	void *ret;
 	int new_nid;
-	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	unsigned long ret_paddr;
+
+	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
-	if (!ret)
-		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+	if (!ret_paddr)
+		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
 
-	if (!ret)
-		panic("numa.c: cannot allocate %lu bytes on node %d",
+	if (!ret_paddr)
+		panic("numa.c: cannot allocate %lu bytes for node %d",
 		      size, nid);
 
+	ret = __va(ret_paddr);
+
 	/*
-	 * If the memory came from a previously allocated node, we must
-	 * retry with the bootmem allocator.
+	 * We initialize the nodes in numeric order: 0, 1, 2...
+	 * and hand over control from the LMB allocator to the
+	 * bootmem allocator.  If this function is called for
+	 * node 5, then we know that all nodes <5 are using the
+	 * bootmem allocator instead of the LMB allocator.
+	 *
+	 * So, check the nid from which this allocation came
+	 * and double check to see if we need to use bootmem
+	 * instead of the LMB.  We don't free the LMB memory
+	 * since it would be useless.
 	 */
-	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
+	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
 	if (new_nid < nid) {
-		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
+		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
 				size, align, 0);
 
-		if (!ret)
-			panic("numa.c: cannot allocate %lu bytes on node %d",
-			      size, new_nid);
-
-		ret = __pa(ret);
-
-		dbg("alloc_bootmem %lx %lx\n", ret, size);
+		dbg("alloc_bootmem %p %lx\n", ret, size);
 	}
 
-	return (void *)ret;
+	memset(ret, 0, size);
+	return ret;
 }
 
 static struct notifier_block __cpuinitdata ppc64_numa_nb = {
@@ -952,7 +960,7 @@ void __init do_init_bootmem(void)
 
 	for_each_online_node(nid) {
 		unsigned long start_pfn, end_pfn;
-		unsigned long bootmem_paddr;
+		void *bootmem_vaddr;
 		unsigned long bootmap_pages;
 
 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
@@ -964,11 +972,9 @@ void __init do_init_bootmem(void)
 		 * previous nodes' bootmem to be initialized and have
 		 * all reserved areas marked.
 		 */
-		NODE_DATA(nid) = careful_allocation(nid,
+		NODE_DATA(nid) = careful_zallocation(nid,
 					sizeof(struct pglist_data),
 					SMP_CACHE_BYTES, end_pfn);
-		NODE_DATA(nid) = __va(NODE_DATA(nid));
-		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
 		dbg("node %d\n", nid);
 		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
@@ -984,20 +990,20 @@ void __init do_init_bootmem(void)
 		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
 
 		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-		bootmem_paddr = (unsigned long)careful_allocation(nid,
+		bootmem_vaddr = careful_zallocation(nid,
 					bootmap_pages << PAGE_SHIFT,
 					PAGE_SIZE, end_pfn);
-		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);
 
-		dbg("bootmap_paddr = %lx\n", bootmem_paddr);
+		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
 
-		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
+		init_bootmem_node(NODE_DATA(nid),
+				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
 				  start_pfn, end_pfn);
 
 		free_bootmem_with_active_regions(nid, end_pfn);
 		/*
 		 * Be very careful about moving this around.  Future
-		 * calls to careful_allocation() depend on this getting
+		 * calls to careful_zallocation() depend on this getting
 		 * done correctly.
 		 */
 		mark_reserved_regions_for_nid(nid);
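
For reference, the net effect of this patch at a call site, reconstructed from the do_init_bootmem() hunks above (an illustration only, not part of the patch itself):

	/* Before: careful_allocation() returned a physical address and did
	 * not zero the buffer, so each caller had to convert and clear it: */
	NODE_DATA(nid) = careful_allocation(nid, sizeof(struct pglist_data),
					    SMP_CACHE_BYTES, end_pfn);
	NODE_DATA(nid) = __va(NODE_DATA(nid));
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	/* After: careful_zallocation() returns an already-zeroed virtual
	 * address, so the call site collapses to a single assignment: */
	NODE_DATA(nid) = careful_zallocation(nid, sizeof(struct pglist_data),
					     SMP_CACHE_BYTES, end_pfn);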