author     Ian Campbell <ijc@hellion.org.uk>        2008-02-09 17:24:09 -0500
committer  Thomas Gleixner <tglx@linutronix.de>     2008-02-09 17:24:09 -0500
commit     551889a6e2a24a9c06fd453ea03b57b7746ffdc0 (patch)
tree       d906bbc4e4a96e243a14416bf02feb7a4ffd4d7a /arch/x86/mm/init_32.c
parent     185c045c245f46485ad8bbd8cc1100e986ff3f13 (diff)
x86: construct 32-bit boot time page tables in native format.
Specifically, the boot-time page tables in a CONFIG_X86_PAE=y kernel are
built in PAE format.
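For background, here is a minimal user-space sketch of what the format change
means for address translation. The 10/10/12 and 2/9/9/12 index splits are the
standard i386 and PAE page-table layouts; the program and its example address
are illustrative only and not part of the patch.

```c
/*
 * Illustration only (not from the patch): how a 32-bit virtual address
 * is split under the classic 2-level i386 layout versus the 3-level
 * PAE layout that the boot page tables now use.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t va = 0xc1234567u;	/* arbitrary kernel-space virtual address */

	/* Non-PAE: 10-bit directory index, 10-bit table index, 12-bit offset. */
	printf("2-level: pgd=%u pte=%u offset=0x%03x\n",
	       (unsigned)((va >> 22) & 0x3ff),
	       (unsigned)((va >> 12) & 0x3ff),
	       (unsigned)(va & 0xfff));

	/* PAE: 2-bit PDPT index, 9-bit directory index, 9-bit table index. */
	printf("PAE:     pdpt=%u pmd=%u pte=%u offset=0x%03x\n",
	       (unsigned)((va >> 30) & 0x3),
	       (unsigned)((va >> 21) & 0x1ff),
	       (unsigned)((va >> 12) & 0x1ff),
	       (unsigned)(va & 0xfff));

	return 0;
}
```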
early_ioremap is updated to use the standard page table accessors.
Clear any mappings beyond max_low_pfn from the boot page tables in
native_pagetable_setup_start because the initial mappings can extend
beyond the range of physical memory and into the vmalloc area.
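The real walk added by this patch is in the diff below and uses the kernel's
pgd/pud/pmd/pte accessors. As a rough stand-alone model of the same idea, the
sketch that follows uses a toy two-level table indexed directly by pfn; the
function names, table shape, and numbers are invented for illustration.

```c
/*
 * Hypothetical user-space model, not kernel code: for each page frame
 * past the last one backed by RAM, descend the (toy, two-level) page
 * table and clear the leaf entry, stopping as soon as a level is not
 * present.
 */
#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_TABLE	512		/* entries per level, PAE-style */
#define PRESENT		0x1ULL

static uint64_t pmd[PTRS_PER_TABLE];
static uint64_t pte[PTRS_PER_TABLE][PTRS_PER_TABLE];

static void clear_mappings_beyond(unsigned long max_low_pfn,
				  unsigned long last_mapped_pfn)
{
	for (unsigned long pfn = max_low_pfn + 1; pfn <= last_mapped_pfn; pfn++) {
		unsigned long i = pfn / PTRS_PER_TABLE;	/* pmd index */
		unsigned long j = pfn % PTRS_PER_TABLE;	/* pte index */

		if (!(pmd[i] & PRESENT))
			break;		/* nothing mapped from here on */
		if (!(pte[i][j] & PRESENT))
			break;
		pte[i][j] = 0;		/* drop the mapping past end of RAM */
	}
}

int main(void)
{
	/* Pretend the boot code mapped pfns 0..1023, but RAM ends at pfn 767. */
	for (unsigned long pfn = 0; pfn < 1024; pfn++) {
		pmd[pfn / PTRS_PER_TABLE] |= PRESENT;
		pte[pfn / PTRS_PER_TABLE][pfn % PTRS_PER_TABLE] = PRESENT;
	}

	clear_mappings_beyond(767, 1023);

	printf("pfn 768 still mapped? %s\n",
	       (pte[768 / PTRS_PER_TABLE][768 % PTRS_PER_TABLE] & PRESENT) ? "yes" : "no");
	return 0;
}
```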
Derived from patches by Eric Biederman and H. Peter Anvin.
[ jeremy@goop.org: PAE swapper_pg_dir needs to be page-sized fix ]
Signed-off-by: Ian Campbell <ijc@hellion.org.uk>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Mika Penttilä <mika.penttila@kolumbus.fi>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--   arch/x86/mm/init_32.c   72
1 file changed, 27 insertions(+), 45 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index d1bc04006d16..54aba3cf9efe 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -46,6 +46,7 @@
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
 #include <asm/paravirt.h>
+#include <asm/setup.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
@@ -328,44 +329,38 @@ pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
 
 void __init native_pagetable_setup_start(pgd_t *base)
 {
-#ifdef CONFIG_X86_PAE
-	int i;
+	unsigned long pfn, va;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
 
 	/*
-	 * Init entries of the first-level page table to the
-	 * zero page, if they haven't already been set up.
-	 *
-	 * In a normal native boot, we'll be running on a
-	 * pagetable rooted in swapper_pg_dir, but not in PAE
-	 * mode, so this will end up clobbering the mappings
-	 * for the lower 24Mbytes of the address space,
-	 * without affecting the kernel address space.
+	 * Remove any mappings which extend past the end of physical
+	 * memory from the boot time page table:
 	 */
-	for (i = 0; i < USER_PTRS_PER_PGD; i++)
-		set_pgd(&base[i],
-			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
-
-	/* Make sure kernel address space is empty so that a pagetable
-	   will be allocated for it. */
-	memset(&base[USER_PTRS_PER_PGD], 0,
-	       KERNEL_PGD_PTRS * sizeof(pgd_t));
-#else
+	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
+		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
+		pgd = base + pgd_index(va);
+		if (!pgd_present(*pgd))
+			break;
+
+		pud = pud_offset(pgd, va);
+		pmd = pmd_offset(pud, va);
+		if (!pmd_present(*pmd))
+			break;
+
+		pte = pte_offset_kernel(pmd, va);
+		if (!pte_present(*pte))
+			break;
+
+		pte_clear(NULL, va, pte);
+	}
 	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
-#endif
 }
 
 void __init native_pagetable_setup_done(pgd_t *base)
 {
-#ifdef CONFIG_X86_PAE
-	/*
-	 * Add low memory identity-mappings - SMP needs it when
-	 * starting up on an AP from real-mode. In the non-PAE
-	 * case we already have these mappings through head.S.
-	 * All user-space mappings are explicitly cleared after
-	 * SMP startup.
-	 */
-	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
-#endif
 }
 
 /*
@@ -374,9 +369,8 @@ void __init native_pagetable_setup_done(pgd_t *base)
  * the boot process.
  *
  * If we're booting on native hardware, this will be a pagetable
- * constructed in arch/i386/kernel/head.S, and not running in PAE mode
- * (even if we'll end up running in PAE).  The root of the pagetable
- * will be swapper_pg_dir.
+ * constructed in arch/x86/kernel/head_32.S.  The root of the
+ * pagetable will be swapper_pg_dir.
  *
  * If we're booting paravirtualized under a hypervisor, then there are
  * more options: we may already be running PAE, and the pagetable may
@@ -537,14 +531,6 @@ void __init paging_init(void)
 
 	load_cr3(swapper_pg_dir);
 
-#ifdef CONFIG_X86_PAE
-	/*
-	 * We will bail out later - printk doesn't work right now so
-	 * the user would just see a hanging kernel.
-	 */
-	if (cpu_has_pae)
-		set_in_cr4(X86_CR4_PAE);
-#endif
 	__flush_tlb_all();
 
 	kmap_init();
@@ -675,10 +661,6 @@ void __init mem_init(void)
 	BUG_ON((unsigned long)high_memory > VMALLOC_START);
 #endif /* double-sanity-check paranoia */
 
-#ifdef CONFIG_X86_PAE
-	if (!cpu_has_pae)
-		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
-#endif
 	if (boot_cpu_data.wp_works_ok < 0)
 		test_wp_bit();
 