aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm/numa.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/mm/numa.c')
-rw-r--r--arch/powerpc/mm/numa.c130
1 file changed, 82 insertions(+), 48 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index eb505ad34a85..cf81049e1e51 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -865,10 +865,77 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
865 .priority = 1 /* Must run before sched domains notifier. */ 865 .priority = 1 /* Must run before sched domains notifier. */
866}; 866};
867 867
868static void mark_reserved_regions_for_nid(int nid)
869{
870 struct pglist_data *node = NODE_DATA(nid);
871 int i;
872
873 for (i = 0; i < lmb.reserved.cnt; i++) {
874 unsigned long physbase = lmb.reserved.region[i].base;
875 unsigned long size = lmb.reserved.region[i].size;
876 unsigned long start_pfn = physbase >> PAGE_SHIFT;
877 unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
878 struct node_active_region node_ar;
879 unsigned long node_end_pfn = node->node_start_pfn +
880 node->node_spanned_pages;
881
882 /*
883 * Check to make sure that this lmb.reserved area is
884 * within the bounds of the node that we care about.
885 * Checking the nid of the start and end points is not
886 * sufficient because the reserved area could span the
887 * entire node.
888 */
889 if (end_pfn <= node->node_start_pfn ||
890 start_pfn >= node_end_pfn)
891 continue;
892
893 get_node_active_region(start_pfn, &node_ar);
894 while (start_pfn < end_pfn &&
895 node_ar.start_pfn < node_ar.end_pfn) {
896 unsigned long reserve_size = size;
897 /*
898 * if reserved region extends past active region
899 * then trim size to active region
900 */
901 if (end_pfn > node_ar.end_pfn)
902 reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
903 - (start_pfn << PAGE_SHIFT);
904 /*
905 * Only worry about *this* node, others may not
906 * yet have valid NODE_DATA().
907 */
908 if (node_ar.nid == nid) {
909 dbg("reserve_bootmem %lx %lx nid=%d\n",
910 physbase, reserve_size, node_ar.nid);
911 reserve_bootmem_node(NODE_DATA(node_ar.nid),
912 physbase, reserve_size,
913 BOOTMEM_DEFAULT);
914 }
915 /*
916 * if reserved region is contained in the active region
917 * then done.
918 */
919 if (end_pfn <= node_ar.end_pfn)
920 break;
921
922 /*
923 * reserved region extends past the active region
924 * get next active region that contains this
925 * reserved region
926 */
927 start_pfn = node_ar.end_pfn;
928 physbase = start_pfn << PAGE_SHIFT;
929 size = size - reserve_size;
930 get_node_active_region(start_pfn, &node_ar);
931 }
932 }
933}
934
935
868void __init do_init_bootmem(void) 936void __init do_init_bootmem(void)
869{ 937{
870 int nid; 938 int nid;
871 unsigned int i;
872 939
873 min_low_pfn = 0; 940 min_low_pfn = 0;
874 max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; 941 max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -890,7 +957,13 @@ void __init do_init_bootmem(void)
890 957
891 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 958 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
892 959
893 /* Allocate the node structure node local if possible */ 960 /*
961 * Allocate the node structure node local if possible
962 *
963 * Be careful moving this around, as it relies on all
964 * previous nodes' bootmem to be initialized and have
965 * all reserved areas marked.
966 */
894 NODE_DATA(nid) = careful_allocation(nid, 967 NODE_DATA(nid) = careful_allocation(nid,
895 sizeof(struct pglist_data), 968 sizeof(struct pglist_data),
896 SMP_CACHE_BYTES, end_pfn); 969 SMP_CACHE_BYTES, end_pfn);
@@ -922,53 +995,14 @@ void __init do_init_bootmem(void)
922 start_pfn, end_pfn); 995 start_pfn, end_pfn);
923 996
924 free_bootmem_with_active_regions(nid, end_pfn); 997 free_bootmem_with_active_regions(nid, end_pfn);
925 } 998 /*
926 999 * Be very careful about moving this around. Future
927 /* Mark reserved regions */ 1000 * calls to careful_allocation() depend on this getting
928 for (i = 0; i < lmb.reserved.cnt; i++) { 1001 * done correctly.
929 unsigned long physbase = lmb.reserved.region[i].base; 1002 */
930 unsigned long size = lmb.reserved.region[i].size; 1003 mark_reserved_regions_for_nid(nid);
931 unsigned long start_pfn = physbase >> PAGE_SHIFT;
932 unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
933 struct node_active_region node_ar;
934
935 get_node_active_region(start_pfn, &node_ar);
936 while (start_pfn < end_pfn &&
937 node_ar.start_pfn < node_ar.end_pfn) {
938 unsigned long reserve_size = size;
939 /*
940 * if reserved region extends past active region
941 * then trim size to active region
942 */
943 if (end_pfn > node_ar.end_pfn)
944 reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
945 - (start_pfn << PAGE_SHIFT);
946 dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
947 reserve_size, node_ar.nid);
948 reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
949 reserve_size, BOOTMEM_DEFAULT);
950 /*
951 * if reserved region is contained in the active region
952 * then done.
953 */
954 if (end_pfn <= node_ar.end_pfn)
955 break;
956
957 /*
958 * reserved region extends past the active region
959 * get next active region that contains this
960 * reserved region
961 */
962 start_pfn = node_ar.end_pfn;
963 physbase = start_pfn << PAGE_SHIFT;
964 size = size - reserve_size;
965 get_node_active_region(start_pfn, &node_ar);
966 }
967
968 }
969
970 for_each_online_node(nid)
971 sparse_memory_present_with_active_regions(nid); 1004 sparse_memory_present_with_active_regions(nid);
1005 }
972} 1006}
973 1007
974void __init paging_init(void) 1008void __init paging_init(void)