author	Linus Torvalds <torvalds@linux-foundation.org>	2008-11-30 19:44:18 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-11-30 19:44:18 -0500
commit	03cfdb86ac66677dbe76accae3f22c374a15b814 (patch)
tree	86c2f6cf5afbd85fdc183fcadab12bf142e9659c /arch/powerpc/mm
parent	4ec8f077e4dd51f713984669781e7b568b8c41e2 (diff)
parent	ab598b6680f1e74c267d1547ee352f3e1e530f89 (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  powerpc: Fix system calls on Cell entered with XER.SO=1
  powerpc/cell: Fix GDB watchpoints, again
  powerpc/mpic: Don't reset affinity for secondary MPIC on boot
  powerpc/cell/axon-msi: Retry on missing interrupt
  powerpc: Fix boot freeze on machine with empty memory node
  powerpc: Fix IRQ assignment for some PCIe devices
  powerpc/spufs: Fix spinning in spufs_ps_fault on signal
  powerpc/mpc832x_rdb: fix swapped ethernet ids
  powerpc: Use generic PHY driver for Marvell 88E1111 PHY on GE Fanuc SBC610
  powerpc/85xx: L2 cache size wrong in 8572DS dts
  powerpc/virtex: Update defconfigs
  powerpc/52xx: update defconfigs
  xsysace: Fix driver to use resource_size_t instead of unsigned long
  powerpc/virtex: fix various format/casting printk mismatches
  powerpc/mpc5200: fix bestcomm Kconfig dependencies
  powerpc/44x: Fix 460EX/460GT machine check handling
  powerpc/40x: Limit allocable DRAM during early mapping
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/40x_mmu.c	 16
-rw-r--r--	arch/powerpc/mm/numa.c	122
2 files changed, 89 insertions(+), 49 deletions(-)
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index cecbbc76f624..29954dc28942 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -93,7 +93,7 @@ void __init MMU_init_hw(void)
 
 unsigned long __init mmu_mapin_ram(void)
 {
-	unsigned long v, s;
+	unsigned long v, s, mapped;
 	phys_addr_t p;
 
 	v = KERNELBASE;
@@ -130,5 +130,17 @@ unsigned long __init mmu_mapin_ram(void)
 		s -= LARGE_PAGE_SIZE_4M;
 	}
 
-	return total_lowmem - s;
+	mapped = total_lowmem - s;
+
+	/* If the size of RAM is not an exact power of two, we may not
+	 * have covered RAM in its entirety with 16 and 4 MiB
+	 * pages. Consequently, restrict the top end of RAM currently
+	 * allocable so that calls to the LMB to allocate PTEs for "tail"
+	 * coverage with normal-sized pages (or other reasons) do not
+	 * attempt to allocate outside the allowed range.
+	 */
+
+	__initial_memory_limit_addr = memstart_addr + mapped;
+
+	return mapped;
 }
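
The effect of the new cap can be seen in a small hedged sketch below; it is user-space C, not kernel code, with an invented 126 MiB RAM size, and only the constant names mirror the kernel's LARGE_PAGE_SIZE_* values. It mimics mmu_mapin_ram()'s mapping loop to show the unmapped tail and the resulting early allocation cap.

/*
 * Hedged sketch, not kernel code: user-space C with an invented
 * 126 MiB total; only the constant names mirror the kernel's.
 */
#include <stdio.h>

#define LARGE_PAGE_SIZE_16M	(16UL << 20)
#define LARGE_PAGE_SIZE_4M	(4UL << 20)

int main(void)
{
	unsigned long total_lowmem = 126UL << 20;	/* not a 16/4 MiB sum */
	unsigned long s = total_lowmem;
	unsigned long mapped;

	while (s >= LARGE_PAGE_SIZE_16M)
		s -= LARGE_PAGE_SIZE_16M;	/* "map" a 16 MiB page */
	while (s >= LARGE_PAGE_SIZE_4M)
		s -= LARGE_PAGE_SIZE_4M;	/* "map" a 4 MiB page */

	mapped = total_lowmem - s;
	printf("mapped %lu MiB, unmapped tail %lu MiB\n",
	       mapped >> 20, s >> 20);
	/* the patch caps __initial_memory_limit_addr at base + mapped */
	printf("early allocations limited to base + %lu MiB\n", mapped >> 20);
	return 0;
}

With 126 MiB, seven 16 MiB pages and three 4 MiB pages cover 124 MiB, so without the cap an early LMB allocation could be handed memory in the 2 MiB tail before any PTE maps it.
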
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index eb505ad34a85..a8397bbad3d4 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -865,6 +865,67 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
 	.priority = 1 /* Must run before sched domains notifier. */
 };
 
+static void mark_reserved_regions_for_nid(int nid)
+{
+	struct pglist_data *node = NODE_DATA(nid);
+	int i;
+
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		unsigned long physbase = lmb.reserved.region[i].base;
+		unsigned long size = lmb.reserved.region[i].size;
+		unsigned long start_pfn = physbase >> PAGE_SHIFT;
+		unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
+		struct node_active_region node_ar;
+		unsigned long node_end_pfn = node->node_start_pfn +
+					     node->node_spanned_pages;
+
+		/*
+		 * Check to make sure that this lmb.reserved area is
+		 * within the bounds of the node that we care about.
+		 * Checking the nid of the start and end points is not
+		 * sufficient because the reserved area could span the
+		 * entire node.
+		 */
+		if (end_pfn <= node->node_start_pfn ||
+		    start_pfn >= node_end_pfn)
+			continue;
+
+		get_node_active_region(start_pfn, &node_ar);
+		while (start_pfn < end_pfn &&
+			node_ar.start_pfn < node_ar.end_pfn) {
+			unsigned long reserve_size = size;
+			/*
+			 * if reserved region extends past active region
+			 * then trim size to active region
+			 */
+			if (end_pfn > node_ar.end_pfn)
+				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
+					- (start_pfn << PAGE_SHIFT);
+			dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
+				reserve_size, node_ar.nid);
+			reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
+				reserve_size, BOOTMEM_DEFAULT);
+			/*
+			 * if reserved region is contained in the active region
+			 * then done.
+			 */
+			if (end_pfn <= node_ar.end_pfn)
+				break;
+
+			/*
+			 * reserved region extends past the active region
+			 * get next active region that contains this
+			 * reserved region
+			 */
+			start_pfn = node_ar.end_pfn;
+			physbase = start_pfn << PAGE_SHIFT;
+			size = size - reserve_size;
+			get_node_active_region(start_pfn, &node_ar);
+		}
+	}
+}
+
+
 void __init do_init_bootmem(void)
 {
 	int nid;
@@ -890,7 +951,13 @@ void __init do_init_bootmem(void)
 
 		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
-		/* Allocate the node structure node local if possible */
+		/*
+		 * Allocate the node structure node local if possible
+		 *
+		 * Be careful moving this around, as it relies on all
+		 * previous nodes' bootmem to be initialized and have
+		 * all reserved areas marked.
+		 */
 		NODE_DATA(nid) = careful_allocation(nid,
 					sizeof(struct pglist_data),
 					SMP_CACHE_BYTES, end_pfn);
@@ -922,53 +989,14 @@ void __init do_init_bootmem(void)
 				  start_pfn, end_pfn);
 
 		free_bootmem_with_active_regions(nid, end_pfn);
-	}
-
-	/* Mark reserved regions */
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long physbase = lmb.reserved.region[i].base;
-		unsigned long size = lmb.reserved.region[i].size;
-		unsigned long start_pfn = physbase >> PAGE_SHIFT;
-		unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
-		struct node_active_region node_ar;
-
-		get_node_active_region(start_pfn, &node_ar);
-		while (start_pfn < end_pfn &&
-			node_ar.start_pfn < node_ar.end_pfn) {
-			unsigned long reserve_size = size;
-			/*
-			 * if reserved region extends past active region
-			 * then trim size to active region
-			 */
-			if (end_pfn > node_ar.end_pfn)
-				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
-					- (start_pfn << PAGE_SHIFT);
-			dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
-				reserve_size, node_ar.nid);
-			reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
-				reserve_size, BOOTMEM_DEFAULT);
-			/*
-			 * if reserved region is contained in the active region
-			 * then done.
-			 */
-			if (end_pfn <= node_ar.end_pfn)
-				break;
-
-			/*
-			 * reserved region extends past the active region
-			 * get next active region that contains this
-			 * reserved region
-			 */
-			start_pfn = node_ar.end_pfn;
-			physbase = start_pfn << PAGE_SHIFT;
-			size = size - reserve_size;
-			get_node_active_region(start_pfn, &node_ar);
-		}
-
-	}
-
-	for_each_online_node(nid)
+		/*
+		 * Be very careful about moving this around.  Future
+		 * calls to careful_allocation() depend on this getting
+		 * done correctly.
+		 */
+		mark_reserved_regions_for_nid(nid);
 		sparse_memory_present_with_active_regions(nid);
+	}
 }
 
 void __init paging_init(void)
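
The heart of the numa.c change is the trimming loop now isolated in mark_reserved_regions_for_nid(). The sketch below is a hedged standalone illustration, not kernel code: the two-node layout and the fixed array are invented, where the kernel instead walks lmb.reserved and calls get_node_active_region(). It shows a reserved PFN interval being clipped against successive active regions so each piece is reserved on the node that owns it, with holes between regions skipped rather than reserved.

/*
 * Hedged sketch, not kernel code: an invented two-node layout with a
 * hole, standing in for lmb.reserved and get_node_active_region().
 */
#include <stdio.h>

struct active_region { unsigned long start_pfn, end_pfn; int nid; };

int main(void)
{
	/* two active regions on two nodes, with a hole in between */
	struct active_region regions[] = {
		{ .start_pfn = 0x000, .end_pfn = 0x100, .nid = 0 },
		{ .start_pfn = 0x180, .end_pfn = 0x300, .nid = 1 },
	};
	unsigned long start_pfn = 0x0c0, end_pfn = 0x200; /* spans both */
	unsigned int i = 0;

	while (start_pfn < end_pfn &&
	       i < sizeof(regions) / sizeof(regions[0])) {
		struct active_region *ar = &regions[i];
		unsigned long piece_end = end_pfn < ar->end_pfn ?
					  end_pfn : ar->end_pfn;

		if (start_pfn < ar->start_pfn)
			start_pfn = ar->start_pfn;	/* skip the hole */
		if (start_pfn < piece_end)
			printf("reserve pfns [%#lx, %#lx) on nid %d\n",
			       start_pfn, piece_end, ar->nid);
		start_pfn = ar->end_pfn;	/* advance to the next region */
		i++;
	}
	return 0;
}

For this layout the output is [0xc0, 0x100) on nid 0 and [0x180, 0x200) on nid 1: a reserved region that crosses a node boundary now produces a reserve_bootmem_node() call against each owning node, which is what lets the boot proceed on machines with an empty memory node.
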