about summary refs log tree commit diff stats
path: root/arch/x86/mm/init_32.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--  arch/x86/mm/init_32.c  84
1 files changed, 74 insertions, 10 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 6b9a9358b330..c3789bb19308 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -195,11 +195,30 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
195 pgd_t *pgd; 195 pgd_t *pgd;
196 pmd_t *pmd; 196 pmd_t *pmd;
197 pte_t *pte; 197 pte_t *pte;
198 unsigned pages_2m = 0, pages_4k = 0; 198 unsigned pages_2m, pages_4k;
199 int mapping_iter;
200
201 /*
202 * First iteration will setup identity mapping using large/small pages
203 * based on use_pse, with other attributes same as set by
204 * the early code in head_32.S
205 *
206 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
207 * as desired for the kernel identity mapping.
208 *
209 * This two pass mechanism conforms to the TLB app note which says:
210 *
211 * "Software should not write to a paging-structure entry in a way
212 * that would change, for any linear address, both the page size
213 * and either the page frame or attributes."
214 */
215 mapping_iter = 1;
199 216
200 if (!cpu_has_pse) 217 if (!cpu_has_pse)
201 use_pse = 0; 218 use_pse = 0;
202 219
220repeat:
221 pages_2m = pages_4k = 0;
203 pfn = start_pfn; 222 pfn = start_pfn;
204 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); 223 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
205 pgd = pgd_base + pgd_idx; 224 pgd = pgd_base + pgd_idx;
@@ -225,6 +244,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
225 if (use_pse) { 244 if (use_pse) {
226 unsigned int addr2; 245 unsigned int addr2;
227 pgprot_t prot = PAGE_KERNEL_LARGE; 246 pgprot_t prot = PAGE_KERNEL_LARGE;
247 /*
248 * first pass will use the same initial
249 * identity mapping attribute + _PAGE_PSE.
250 */
251 pgprot_t init_prot =
252 __pgprot(PTE_IDENT_ATTR |
253 _PAGE_PSE);
228 254
229 addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + 255 addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
230 PAGE_OFFSET + PAGE_SIZE-1; 256 PAGE_OFFSET + PAGE_SIZE-1;
@@ -234,7 +260,10 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
234 prot = PAGE_KERNEL_LARGE_EXEC; 260 prot = PAGE_KERNEL_LARGE_EXEC;
235 261
236 pages_2m++; 262 pages_2m++;
237 set_pmd(pmd, pfn_pmd(pfn, prot)); 263 if (mapping_iter == 1)
264 set_pmd(pmd, pfn_pmd(pfn, init_prot));
265 else
266 set_pmd(pmd, pfn_pmd(pfn, prot));
238 267
239 pfn += PTRS_PER_PTE; 268 pfn += PTRS_PER_PTE;
240 continue; 269 continue;
@@ -246,17 +275,43 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
246 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; 275 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
247 pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { 276 pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
248 pgprot_t prot = PAGE_KERNEL; 277 pgprot_t prot = PAGE_KERNEL;
278 /*
279 * first pass will use the same initial
280 * identity mapping attribute.
281 */
282 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
249 283
250 if (is_kernel_text(addr)) 284 if (is_kernel_text(addr))
251 prot = PAGE_KERNEL_EXEC; 285 prot = PAGE_KERNEL_EXEC;
252 286
253 pages_4k++; 287 pages_4k++;
254 set_pte(pte, pfn_pte(pfn, prot)); 288 if (mapping_iter == 1)
289 set_pte(pte, pfn_pte(pfn, init_prot));
290 else
291 set_pte(pte, pfn_pte(pfn, prot));
255 } 292 }
256 } 293 }
257 } 294 }
258 update_page_count(PG_LEVEL_2M, pages_2m); 295 if (mapping_iter == 1) {
259 update_page_count(PG_LEVEL_4K, pages_4k); 296 /*
297 * update direct mapping page count only in the first
298 * iteration.
299 */
300 update_page_count(PG_LEVEL_2M, pages_2m);
301 update_page_count(PG_LEVEL_4K, pages_4k);
302
303 /*
304 * local global flush tlb, which will flush the previous
305 * mappings present in both small and large page TLB's.
306 */
307 __flush_tlb_all();
308
309 /*
310 * Second iteration will set the actual desired PTE attributes.
311 */
312 mapping_iter = 2;
313 goto repeat;
314 }
260} 315}
261 316
262/* 317/*
@@ -719,7 +774,7 @@ void __init setup_bootmem_allocator(void)
719 after_init_bootmem = 1; 774 after_init_bootmem = 1;
720} 775}
721 776
722static void __init find_early_table_space(unsigned long end) 777static void __init find_early_table_space(unsigned long end, int use_pse)
723{ 778{
724 unsigned long puds, pmds, ptes, tables, start; 779 unsigned long puds, pmds, ptes, tables, start;
725 780
@@ -729,7 +784,7 @@ static void __init find_early_table_space(unsigned long end)
729 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; 784 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
730 tables += PAGE_ALIGN(pmds * sizeof(pmd_t)); 785 tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
731 786
732 if (cpu_has_pse) { 787 if (use_pse) {
733 unsigned long extra; 788 unsigned long extra;
734 789
735 extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT); 790 extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
@@ -769,12 +824,22 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
769 pgd_t *pgd_base = swapper_pg_dir; 824 pgd_t *pgd_base = swapper_pg_dir;
770 unsigned long start_pfn, end_pfn; 825 unsigned long start_pfn, end_pfn;
771 unsigned long big_page_start; 826 unsigned long big_page_start;
827#ifdef CONFIG_DEBUG_PAGEALLOC
828 /*
829 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
830 * This will simplify cpa(), which otherwise needs to support splitting
831 * large pages into small in interrupt context, etc.
832 */
833 int use_pse = 0;
834#else
835 int use_pse = cpu_has_pse;
836#endif
772 837
773 /* 838 /*
774 * Find space for the kernel direct mapping tables. 839 * Find space for the kernel direct mapping tables.
775 */ 840 */
776 if (!after_init_bootmem) 841 if (!after_init_bootmem)
777 find_early_table_space(end); 842 find_early_table_space(end, use_pse);
778 843
779#ifdef CONFIG_X86_PAE 844#ifdef CONFIG_X86_PAE
780 set_nx(); 845 set_nx();
@@ -820,7 +885,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
820 end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); 885 end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
821 if (start_pfn < end_pfn) 886 if (start_pfn < end_pfn)
822 kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 887 kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
823 cpu_has_pse); 888 use_pse);
824 889
825 /* tail is not big page alignment ? */ 890 /* tail is not big page alignment ? */
826 start_pfn = end_pfn; 891 start_pfn = end_pfn;
@@ -983,7 +1048,6 @@ void __init mem_init(void)
983 if (boot_cpu_data.wp_works_ok < 0) 1048 if (boot_cpu_data.wp_works_ok < 0)
984 test_wp_bit(); 1049 test_wp_bit();
985 1050
986 cpa_init();
987 save_pg_dir(); 1051 save_pg_dir();
988 zap_low_mappings(); 1052 zap_low_mappings();
989} 1053}