author    David S. Miller <davem@sunset.davemloft.net>  2006-12-29 00:00:23 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2006-12-31 17:06:04 -0500
commit    6fc5bae797a6632bbccdd49a1b6a96121368a4b9 (patch)
tree      b9bb72c869cd993d7b40794402effce3cfa902e0 /arch/sparc64
parent    bfff6e92a33dce6121a3d83ef3809e9063b2734e (diff)
[SPARC64]: Fix "mem=xxx" handling.
We were not being careful enough.  When we trim the physical memory areas,
we have to make sure we don't remove the kernel image or initial ramdisk
image ranges.

Signed-off-by: David S. Miller <davem@davemloft.net>
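For illustration only, a minimal user-space sketch of the rule this patch enforces follows: shrink an available-memory region to honor a size limit while never cutting into a protected range (kernel image or initrd) and never shrinking the region to zero. The names here (struct region, trim_one, the hard-coded PAGE_SIZE and the sample addresses) are invented for the example and are not the kernel's pavail[] machinery.

/* trim_sketch.c -- illustrative only; not the kernel's pavail[] code. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct region { unsigned long start, size; };

/* Trim up to *to_trim bytes from one region without touching the
 * protected range [avoid_start, avoid_end) and without emptying it. */
static void trim_one(struct region *r, unsigned long *to_trim,
                     unsigned long avoid_start, unsigned long avoid_end)
{
        unsigned long start = r->start, end = start + r->size;
        /* Overlap of the protected range with this region. */
        unsigned long keep_lo = avoid_start > start ? avoid_start : start;
        unsigned long keep_hi = avoid_end < end ? avoid_end : end;
        unsigned long n;

        if (keep_lo >= keep_hi) {
                /* Nothing protected here: trim from the front, keep one page. */
                n = r->size > PAGE_SIZE ? r->size - PAGE_SIZE : 0;
                if (n > *to_trim)
                        n = *to_trim;
                r->start += n;
                r->size -= n;
                *to_trim -= n;
                return;
        }

        /* Trim the slice below the protected range from the front ... */
        n = keep_lo - start;
        if (n > *to_trim)
                n = *to_trim;
        r->start += n;
        r->size -= n;
        *to_trim -= n;

        /* ... then the slice above it from the back. */
        n = end - keep_hi;
        if (n > *to_trim)
                n = *to_trim;
        r->size -= n;
        *to_trim -= n;
}

int main(void)
{
        /* One 64 MB region with a pretend kernel image at [8 MB, 12 MB). */
        struct region r = { 0, 64UL << 20 };
        unsigned long to_trim = 60UL << 20;     /* roughly "mem=4M" */

        trim_one(&r, &to_trim, 8UL << 20, 12UL << 20);
        printf("region now [%#lx, %#lx), %lu bytes still to trim\n",
               r.start, r.start + r.size, to_trim);
        return 0;
}

Built with any C compiler, the example trims the 64 MB region down to the protected [8 MB, 12 MB) window, taking slack first from below and then from above it, which mirrors how trim_pavail() below avoids the kernel and ramdisk images.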
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/mm/init.c  147
1 file changed, 124 insertions(+), 23 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index a8e8802eed4d..054822a3e05e 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -872,6 +872,115 @@ static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
 	prom_halt();
 }
 
+static void __init trim_pavail(unsigned long *cur_size_p,
+			       unsigned long *end_of_phys_p)
+{
+	unsigned long to_trim = *cur_size_p - cmdline_memory_size;
+	unsigned long avoid_start, avoid_end;
+	int i;
+
+	to_trim = PAGE_ALIGN(to_trim);
+
+	avoid_start = avoid_end = 0;
+#ifdef CONFIG_BLK_DEV_INITRD
+	avoid_start = initrd_start;
+	avoid_end = PAGE_ALIGN(initrd_end);
+#endif
+
+	/* Trim some pavail[] entries in order to satisfy the
+	 * requested "mem=xxx" kernel command line specification.
+	 *
+	 * We must not trim off the kernel image area nor the
+	 * initial ramdisk range (if any).  Also, we must not trim
+	 * any pavail[] entry down to zero in order to preserve
+	 * the invariant that all pavail[] entries have a non-zero
+	 * size which is assumed by all of the code in here.
+	 */
+	for (i = 0; i < pavail_ents; i++) {
+		unsigned long start, end, kern_end;
+		unsigned long trim_low, trim_high, n;
+
+		kern_end = PAGE_ALIGN(kern_base + kern_size);
+
+		trim_low = start = pavail[i].phys_addr;
+		trim_high = end = start + pavail[i].reg_size;
+
+		if (kern_base >= start &&
+		    kern_base < end) {
+			trim_low = kern_base;
+			if (kern_end >= end)
+				continue;
+		}
+		if (kern_end >= start &&
+		    kern_end < end) {
+			trim_high = kern_end;
+		}
+		if (avoid_start &&
+		    avoid_start >= start &&
+		    avoid_start < end) {
+			if (trim_low > avoid_start)
+				trim_low = avoid_start;
+			if (avoid_end >= end)
+				continue;
+		}
+		if (avoid_end &&
+		    avoid_end >= start &&
+		    avoid_end < end) {
+			if (trim_high < avoid_end)
+				trim_high = avoid_end;
+		}
+
+		if (trim_high <= trim_low)
+			continue;
+
+		if (trim_low == start && trim_high == end) {
+			/* Whole chunk is available for trimming.
+			 * Trim all except one page, in order to keep
+			 * entry non-empty.
+			 */
+			n = (end - start) - PAGE_SIZE;
+			if (n > to_trim)
+				n = to_trim;
+
+			if (n) {
+				pavail[i].phys_addr += n;
+				pavail[i].reg_size -= n;
+				to_trim -= n;
+			}
+		} else {
+			n = (trim_low - start);
+			if (n > to_trim)
+				n = to_trim;
+
+			if (n) {
+				pavail[i].phys_addr += n;
+				pavail[i].reg_size -= n;
+				to_trim -= n;
+			}
+			if (to_trim) {
+				n = end - trim_high;
+				if (n > to_trim)
+					n = to_trim;
+				if (n) {
+					pavail[i].reg_size -= n;
+					to_trim -= n;
+				}
+			}
+		}
+
+		if (!to_trim)
+			break;
+	}
+
+	/* Recalculate. */
+	*cur_size_p = 0UL;
+	for (i = 0; i < pavail_ents; i++) {
+		*end_of_phys_p = pavail[i].phys_addr +
+			pavail[i].reg_size;
+		*cur_size_p += pavail[i].reg_size;
+	}
+}
+
 static unsigned long __init bootmem_init(unsigned long *pages_avail,
 					 unsigned long phys_base)
 {
@@ -889,31 +998,13 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
 		end_of_phys_memory = pavail[i].phys_addr +
 			pavail[i].reg_size;
 		bytes_avail += pavail[i].reg_size;
-		if (cmdline_memory_size) {
-			if (bytes_avail > cmdline_memory_size) {
-				unsigned long slack = bytes_avail - cmdline_memory_size;
-
-				bytes_avail -= slack;
-				end_of_phys_memory -= slack;
-
-				pavail[i].reg_size -= slack;
-				if ((long)pavail[i].reg_size <= 0L) {
-					pavail[i].phys_addr = 0xdeadbeefUL;
-					pavail[i].reg_size = 0UL;
-					pavail_ents = i;
-				} else {
-					pavail[i+1].reg_size = 0Ul;
-					pavail[i+1].phys_addr = 0xdeadbeefUL;
-					pavail_ents = i + 1;
-				}
-				break;
-			}
-		}
 	}
 
-	*pages_avail = bytes_avail >> PAGE_SHIFT;
-
-	end_pfn = end_of_phys_memory >> PAGE_SHIFT;
+	/* Determine the location of the initial ramdisk before trying
+	 * to honor the "mem=xxx" command line argument.  We must know
+	 * where the kernel image and the ramdisk image are so that we
+	 * do not trim those two areas from the physical memory map.
+	 */
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
@@ -932,6 +1023,16 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
 		}
 	}
 #endif
+
+	if (cmdline_memory_size &&
+	    bytes_avail > cmdline_memory_size)
+		trim_pavail(&bytes_avail,
+			    &end_of_phys_memory);
+
+	*pages_avail = bytes_avail >> PAGE_SHIFT;
+
+	end_pfn = end_of_phys_memory >> PAGE_SHIFT;
+
 	/* Initialize the boot-time allocator. */
 	max_pfn = max_low_pfn = end_pfn;
 	min_low_pfn = (phys_base >> PAGE_SHIFT);