author    Yinghai Lu <yinghai@kernel.org>    2009-03-04 04:24:04 -0500
committer Ingo Molnar <mingo@elte.hu>        2009-03-04 14:55:03 -0500
commit    b68adb16f29c8ea02f21f5ebf65bcabffe217e9f (patch)
tree      a4f8383f47a250dbcb229a9302148ccd0b9355dd /arch
parent    a71edd1f46c8a599509bda478fb4eea27fb0da63 (diff)
x86: make 32-bit init_memory_mapping range change more like 64-bit
Impact: cleanup

make code more readable and more like 64-bit

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
LKML-Reference: <49AE48B4.8010907@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
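[The patch replaces the open-coded head/middle/tail calls to kernel_physical_mapping_init() with the map_range/save_mr() scheme the 64-bit code already uses: collect the ranges first, merge adjacent ones, then map everything in one loop. A minimal userspace sketch of the split-and-record step is below; the loop bodies mirror the patch, but the harness itself is hypothetical, and PAGE_SHIFT/PMD_SHIFT/PMD_SIZE are assumed 32-bit non-PAE values (4 KB pages, 4 MB large pages), with page_size_mask simplified to a 0/1 flag instead of the PG_LEVEL_2M bit:

#include <stdio.h>
#include <string.h>

/* Assumed geometry for 32-bit non-PAE x86: 4 KB pages, 4 MB large pages. */
#define PAGE_SHIFT	12
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;	/* simplified: 0 = 4k, 1 = big page */
};

#define NR_RANGE_MR 3

/* Same shape as the patch's save_mr(): record a non-empty pfn range. */
static int save_mr(struct map_range *mr, int nr_range,
		   unsigned long start_pfn, unsigned long end_pfn,
		   unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		mr[nr_range].start = start_pfn << PAGE_SHIFT;
		mr[nr_range].end = end_pfn << PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}
	return nr_range;
}

int main(void)
{
	/* Hypothetical region: starts at 0, ends off a 4 MB boundary. */
	unsigned long start = 0, end = 0x20123000UL;
	unsigned long start_pfn, end_pfn, pos;
	struct map_range mr[NR_RANGE_MR];
	int nr_range = 0, i;

	memset(mr, 0, sizeof(mr));

	/* Head: up to the first 2/4 MB boundary stays 4k (MTRR safety). */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
	if (pos == 0)
		end_pfn = 1 << (PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
				<< (PMD_SHIFT - PAGE_SHIFT);
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* Middle: the PMD-aligned stretch gets big pages. */
	start_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 1);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* Tail: whatever is left below 'end' falls back to 4k pages. */
	nr_range = save_mr(mr, nr_range, pos >> PAGE_SHIFT,
			   end >> PAGE_SHIFT, 0);

	for (i = 0; i < nr_range; i++)
		printf(" %08lx - %08lx page %s\n", mr[i].start, mr[i].end,
		       mr[i].page_size_mask ? "big page" : "4k");
	return 0;
}

Run against an end address that is not 4 MB aligned, this records three ranges (4k head, big-page middle, 4k tail), matching the KERN_DEBUG output format the patch adds.]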
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/mm/init_32.c  126
1 file changed, 94 insertions(+), 32 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 67bdb59d4e10..37aeaf366d5f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -885,29 +885,55 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 		(table_start << PAGE_SHIFT) + tables);
 }
 
+struct map_range {
+	unsigned long start;
+	unsigned long end;
+	unsigned page_size_mask;
+};
+
+#define NR_RANGE_MR 3
+
+static int save_mr(struct map_range *mr, int nr_range,
+		   unsigned long start_pfn, unsigned long end_pfn,
+		   unsigned long page_size_mask)
+{
+	if (start_pfn < end_pfn) {
+		if (nr_range >= NR_RANGE_MR)
+			panic("run out of range for init_memory_mapping\n");
+		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
+		mr[nr_range].end = end_pfn<<PAGE_SHIFT;
+		mr[nr_range].page_size_mask = page_size_mask;
+		nr_range++;
+	}
+
+	return nr_range;
+}
+
 unsigned long __init_refok init_memory_mapping(unsigned long start,
 					       unsigned long end)
 {
 	pgd_t *pgd_base = swapper_pg_dir;
+	unsigned long page_size_mask = 0;
 	unsigned long start_pfn, end_pfn;
-	unsigned long big_page_start;
+	unsigned long pos;
+
+	struct map_range mr[NR_RANGE_MR];
+	int nr_range, i;
+	int use_pse;
+
+	printk(KERN_INFO "init_memory_mapping: %08lx-%08lx\n", start, end);
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	int use_pse = 0;
+	use_pse = 0;
 #else
-	int use_pse = cpu_has_pse;
+	use_pse = cpu_has_pse;
 #endif
 
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 */
-	if (!after_init_bootmem)
-		find_early_table_space(end, use_pse);
-
 #ifdef CONFIG_X86_PAE
 	set_nx();
 	if (nx_enabled)
@@ -924,45 +950,81 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	}
 
+	memset(mr, 0, sizeof(mr));
+	nr_range = 0;
+
+	if (use_pse)
+		page_size_mask |= 1 << PG_LEVEL_2M;
+
 	/*
 	 * Don't use a large page for the first 2/4MB of memory
 	 * because there are often fixed size MTRRs in there
 	 * and overlapping MTRRs into large pages can cause
 	 * slowdowns.
 	 */
-	big_page_start = PMD_SIZE;
-
-	if (start < big_page_start) {
-		start_pfn = start >> PAGE_SHIFT;
-		end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
-	} else {
-		/* head is not big page alignment ? */
-		start_pfn = start >> PAGE_SHIFT;
-		end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+	/* head could not be big page alignment ? */
+	start_pfn = start >> PAGE_SHIFT;
+	pos = start_pfn << PAGE_SHIFT;
+	if (pos == 0)
+		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
+	else
+		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 				 << (PMD_SHIFT - PAGE_SHIFT);
+	if (end_pfn > (end>>PAGE_SHIFT))
+		end_pfn = end>>PAGE_SHIFT;
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+		pos = end_pfn << PAGE_SHIFT;
 	}
-	if (start_pfn < end_pfn)
-		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);
 
 	/* big page range */
-	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 			 << (PMD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < (big_page_start >> PAGE_SHIFT))
-		start_pfn = big_page_start >> PAGE_SHIFT;
 	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn)
-		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
-					     use_pse);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				   page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* tail is not big page alignment ? */
-	start_pfn = end_pfn;
-	if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
-		end_pfn = end >> PAGE_SHIFT;
-		if (start_pfn < end_pfn)
-			kernel_physical_mapping_init(pgd_base, start_pfn,
-						     end_pfn, 0);
+	start_pfn = pos>>PAGE_SHIFT;
+	end_pfn = end>>PAGE_SHIFT;
+	if (start_pfn < end_pfn)
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+
+	/* try to merge same page size and continuous */
+	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
+		unsigned long old_start;
+		if (mr[i].end != mr[i+1].start ||
+		    mr[i].page_size_mask != mr[i+1].page_size_mask)
+			continue;
+		/* move it */
+		old_start = mr[i].start;
+		memmove(&mr[i], &mr[i+1],
+			(nr_range - 1 - i) * sizeof(struct map_range));
+		mr[i--].start = old_start;
+		nr_range--;
 	}
 
+	for (i = 0; i < nr_range; i++)
+		printk(KERN_DEBUG " %08lx - %08lx page %s\n",
+		       mr[i].start, mr[i].end,
+		       (mr[i].page_size_mask & (1<<PG_LEVEL_2M)) ?
+		       "big page" : "4k");
+
+	/*
+	 * Find space for the kernel direct mapping tables.
+	 */
+	if (!after_init_bootmem)
+		find_early_table_space(end, use_pse);
+
+	for (i = 0; i < nr_range; i++)
+		kernel_physical_mapping_init(pgd_base,
+					     mr[i].start >> PAGE_SHIFT,
+					     mr[i].end >> PAGE_SHIFT,
+					     mr[i].page_size_mask == (1<<PG_LEVEL_2M));
+
 	early_ioremap_page_table_range_init(pgd_base);
 
 	load_cr3(swapper_pg_dir);
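[For completeness, the "try to merge same page size and continuous" pass above only coalesces entries that are both adjacent and mapped with the same page size. A small standalone model of that memmove()-based compaction follows; the loop body matches the patch, but the merge_ranges() wrapper and the example values are ours, not the kernel's:

#include <stdio.h>
#include <string.h>

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

/* Coalesce adjacent entries with identical page_size_mask; returns count. */
static int merge_ranges(struct map_range *mr, int nr_range)
{
	int i;

	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;

		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* Fold mr[i] into mr[i+1], keeping the earlier start,
		 * then retry from the same slot (i--). */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}
	return nr_range;
}

int main(void)
{
	/* Two touching 4k ranges collapse; the big-page range does not. */
	struct map_range mr[3] = {
		{ 0x000000, 0x200000, 0 },
		{ 0x200000, 0x400000, 0 },
		{ 0x400000, 0x800000, 1 },
	};
	int i, n = merge_ranges(mr, 3);

	for (i = 0; i < n; i++)
		printf(" %08lx - %08lx page %s\n", mr[i].start, mr[i].end,
		       mr[i].page_size_mask ? "big page" : "4k");
	return 0;
}

In the kernel this matters when the head or tail degenerates and save_mr() records back-to-back ranges with the same page size; after merging, each surviving entry becomes exactly one kernel_physical_mapping_init() call.]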