author		Pekka Enberg <penberg@cs.helsinki.fi>	2009-03-05 07:55:05 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-05 08:17:17 -0500
commit		f765090a2617b8d9cb73b71e0aa850c29460d8be
tree		b23dff6b8639a0f74f99a5206e6b7c9def588f6f /arch/x86/mm/init_32.c
parent		0c0f756fd679d9747d52dad51fce3a5bb362eec3
x86: move init_memory_mapping() to common mm/init.c

Impact: cleanup

This patch moves the init_memory_mapping() function to common mm/init.c.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1236257708-27269-14-git-send-email-penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--	arch/x86/mm/init_32.c	| 308
1 file changed, 7 insertions(+), 301 deletions(-)
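The diff below makes three formerly file-local symbols shared: the table_start/table_end/table_top bookkeeping variables become extern, and kernel_physical_mapping_init() and early_ioremap_page_table_range_init() lose their static qualifier so that the relocated init_memory_mapping() in mm/init.c can reach them. As a rough sketch, the common side would build against 32-bit declarations like the following (where exactly the series places these declarations is an assumption here, not shown in this diff):

	#include <linux/init.h>		/* __init, __initdata, __meminitdata */

	/* page-table bookkeeping now shared with arch/x86/mm/init.c */
	extern unsigned long __initdata table_start;
	extern unsigned long __meminitdata table_end;
	extern unsigned long __meminitdata table_top;

	/* formerly static in init_32.c, now visible to the common code */
	void __init kernel_physical_mapping_init(unsigned long start_pfn,
						 unsigned long end_pfn,
						 int use_pse);
	void __init early_ioremap_page_table_range_init(void);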
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index cd3c24b490a1..187522a0c66b 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -59,13 +59,9 @@ unsigned long highstart_pfn, highend_pfn;
 static noinline int do_test_wp_bit(void);
 
 
-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
-static unsigned long __meminitdata table_top;
-
-int after_bootmem;
-
-int direct_gbpages;
+extern unsigned long __initdata table_start;
+extern unsigned long __meminitdata table_end;
+extern unsigned long __meminitdata table_top;
 
 static __init void *alloc_low_page(void)
 {
@@ -227,9 +223,9 @@ static inline int is_kernel_text(unsigned long addr)
  * of max_low_pfn pages, by creating page tables starting from address
  * PAGE_OFFSET:
  */
-static void __init kernel_physical_mapping_init(unsigned long start_pfn,
-						unsigned long end_pfn,
-						int use_pse)
+void __init kernel_physical_mapping_init(unsigned long start_pfn,
+					 unsigned long end_pfn,
+					 int use_pse)
 {
 	pgd_t *pgd_base = swapper_pg_dir;
 	int pgd_idx, pmd_idx, pte_ofs;
@@ -509,7 +505,7 @@ void __init native_pagetable_setup_done(pgd_t *base)
  * be partially populated, and so it avoids stomping on any existing
  * mappings.
  */
-static void __init early_ioremap_page_table_range_init(void)
+void __init early_ioremap_page_table_range_init(void)
 {
 	pgd_t *pgd_base = swapper_pg_dir;
 	unsigned long vaddr, end;
@@ -834,296 +830,6 @@ void __init setup_bootmem_allocator(void)
 	after_bootmem = 1;
 }
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-					  int use_gbpages)
-{
-	unsigned long puds, pmds, ptes, tables, start;
-
-	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-
-	if (use_gbpages) {
-		unsigned long extra;
-
-		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-	} else
-		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-
-	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-	if (use_pse) {
-		unsigned long extra;
-
-		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
-#ifdef CONFIG_X86_32
-		extra += PMD_SIZE;
-#endif
-		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	} else
-		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-
-#ifdef CONFIG_X86_32
-	/* for fixmap */
-	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-#endif
-
-	/*
-	 * RED-PEN putting page tables only on node 0 could
-	 * cause a hotspot and fill up ZONE_DMA. The page tables
-	 * need roughly 0.5KB per GB.
-	 */
-#ifdef CONFIG_X86_32
-	start = 0x7000;
-	table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
-					tables, PAGE_SIZE);
-#else /* CONFIG_X86_64 */
-	start = 0x8000;
-	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
-#endif
-	if (table_start == -1UL)
-		panic("Cannot find space for the kernel page tables");
-
-	table_start >>= PAGE_SHIFT;
-	table_end = table_start;
-	table_top = table_start + (tables >> PAGE_SHIFT);
-
-	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
-}
-
-struct map_range {
-	unsigned long start;
-	unsigned long end;
-	unsigned page_size_mask;
-};
-
-#ifdef CONFIG_X86_32
-#define NR_RANGE_MR 3
-#else /* CONFIG_X86_64 */
-#define NR_RANGE_MR 5
-#endif
-
-static int save_mr(struct map_range *mr, int nr_range,
-		   unsigned long start_pfn, unsigned long end_pfn,
-		   unsigned long page_size_mask)
-{
-	if (start_pfn < end_pfn) {
-		if (nr_range >= NR_RANGE_MR)
-			panic("run out of range for init_memory_mapping\n");
-		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
-		mr[nr_range].end = end_pfn<<PAGE_SHIFT;
-		mr[nr_range].page_size_mask = page_size_mask;
-		nr_range++;
-	}
-
-	return nr_range;
-}
-
-static inline void init_gbpages(void)
-{
-}
-
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start,
-					       unsigned long end)
-{
-	unsigned long page_size_mask = 0;
-	unsigned long start_pfn, end_pfn;
-	unsigned long pos;
-	unsigned long ret;
-
-	struct map_range mr[NR_RANGE_MR];
-	int nr_range, i;
-	int use_pse, use_gbpages;
-
-	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
-
-	if (!after_bootmem)
-		init_gbpages();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-	 * This will simplify cpa(), which otherwise needs to support splitting
-	 * large pages into small in interrupt context, etc.
-	 */
-	use_pse = use_gbpages = 0;
-#else
-	use_pse = cpu_has_pse;
-	use_gbpages = direct_gbpages;
-#endif
-
-#ifdef CONFIG_X86_32
-#ifdef CONFIG_X86_PAE
-	set_nx();
-	if (nx_enabled)
-		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
-#endif
-
-	/* Enable PSE if available */
-	if (cpu_has_pse)
-		set_in_cr4(X86_CR4_PSE);
-
-	/* Enable PGE if available */
-	if (cpu_has_pge) {
-		set_in_cr4(X86_CR4_PGE);
-		__supported_pte_mask |= _PAGE_GLOBAL;
-	}
-#endif
-
-	if (use_gbpages)
-		page_size_mask |= 1 << PG_LEVEL_1G;
-	if (use_pse)
-		page_size_mask |= 1 << PG_LEVEL_2M;
-
-	memset(mr, 0, sizeof(mr));
-	nr_range = 0;
-
-	/* head if not big page alignment ? */
-	start_pfn = start >> PAGE_SHIFT;
-	pos = start_pfn << PAGE_SHIFT;
-#ifdef CONFIG_X86_32
-	/*
-	 * Don't use a large page for the first 2/4MB of memory
-	 * because there are often fixed size MTRRs in there
-	 * and overlapping MTRRs into large pages can cause
-	 * slowdowns.
-	 */
-	if (pos == 0)
-		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
-	else
-		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-				 << (PMD_SHIFT - PAGE_SHIFT);
-#else /* CONFIG_X86_64 */
-	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
-			<< (PMD_SHIFT - PAGE_SHIFT);
-#endif
-	if (end_pfn > (end >> PAGE_SHIFT))
-		end_pfn = end >> PAGE_SHIFT;
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-	/* big page (2M) range */
-	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-			 << (PMD_SHIFT - PAGE_SHIFT);
-#ifdef CONFIG_X86_32
-	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-#else /* CONFIG_X86_64 */
-	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-			 << (PUD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
-		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
-#endif
-
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-				page_size_mask & (1<<PG_LEVEL_2M));
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-#ifdef CONFIG_X86_64
-	/* big page (1G) range */
-	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-			 << (PUD_SHIFT - PAGE_SHIFT);
-	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-				page_size_mask &
-				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-		pos = end_pfn << PAGE_SHIFT;
-	}
-
-	/* tail is not big page (1G) alignment */
-	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn) {
-		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-				page_size_mask & (1<<PG_LEVEL_2M));
-		pos = end_pfn << PAGE_SHIFT;
-	}
-#endif
-
-	/* tail is not big page (2M) alignment */
-	start_pfn = pos>>PAGE_SHIFT;
-	end_pfn = end>>PAGE_SHIFT;
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-
-	/* try to merge same page size and continuous */
-	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
-		unsigned long old_start;
-		if (mr[i].end != mr[i+1].start ||
-		    mr[i].page_size_mask != mr[i+1].page_size_mask)
-			continue;
-		/* move it */
-		old_start = mr[i].start;
-		memmove(&mr[i], &mr[i+1],
-			(nr_range - 1 - i) * sizeof(struct map_range));
-		mr[i--].start = old_start;
-		nr_range--;
-	}
-
-	for (i = 0; i < nr_range; i++)
-		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
-				mr[i].start, mr[i].end,
-			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
-
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 *
-	 * Later we should allocate these tables in the local node of the
-	 * memory mapped. Unfortunately this is done currently before the
-	 * nodes are discovered.
-	 */
-	if (!after_bootmem)
-		find_early_table_space(end, use_pse, use_gbpages);
-
-#ifdef CONFIG_X86_32
-	for (i = 0; i < nr_range; i++)
-		kernel_physical_mapping_init(
-				mr[i].start >> PAGE_SHIFT,
-				mr[i].end >> PAGE_SHIFT,
-				mr[i].page_size_mask == (1<<PG_LEVEL_2M));
-	ret = end;
-#else /* CONFIG_X86_64 */
-	for (i = 0; i < nr_range; i++)
-		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
-						   mr[i].page_size_mask);
-#endif
-
-#ifdef CONFIG_X86_32
-	early_ioremap_page_table_range_init();
-
-	load_cr3(swapper_pg_dir);
-#endif
-
-#ifdef CONFIG_X86_64
-	if (!after_bootmem)
-		mmu_cr4_features = read_cr4();
-#endif
-	__flush_tlb_all();
-
-	if (!after_bootmem && table_end > table_start)
-		reserve_early(table_start << PAGE_SHIFT,
-			table_end << PAGE_SHIFT, "PGTABLE");
-
-	if (!after_bootmem)
-		early_memtest(start, end);
-
-	return ret >> PAGE_SHIFT;
-}
-
-
 /*
  * paging_init() sets up the page tables - note that the first 8MB are
  * already mapped by head.S.