aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm/init_32.c
diff options
context:
space:
mode:
authorYinghai Lu <yinghai@kernel.org>2012-11-16 22:39:11 -0500
committerH. Peter Anvin <hpa@linux.intel.com>2012-11-17 14:59:39 -0500
commit11ed9e927d573d78beda6e6a166612666ae97064 (patch)
treea45dc48e2b113027fba23c31fb666aa48fa74c64 /arch/x86/mm/init_32.c
parentc8dcdb9ce463ad4a660099a74a850f4f6fc81c41 (diff)
x86, mm: Add check before clear pte above max_low_pfn on 32bit
While testing a patch that adjusts page_size_mask to map a small RAM range with a big page size, it was found that the page table is set up wrongly for 32-bit, and native_pagetable_init wrongly clears the pte for a pmd with large-page support. 1. Add more comments about why we are expecting a pte. 2. Add BUG checking, so next time we can find the problem earlier if we mess up page table setup again. 3. max_low_pfn is not an included boundary for the low memory mapping; we should check from max_low_pfn instead of max_low_pfn + 1. 4. Add a printout when some pte really gets cleared — or should we use WARN() to find out why memory above max_low_pfn got mapped, so we could fix it? Signed-off-by: Yinghai Lu <yinghai@kernel.org> Link: http://lkml.kernel.org/r/1353123563-3103-35-git-send-email-yinghai@kernel.org Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--arch/x86/mm/init_32.c18
1 files changed, 16 insertions, 2 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 322ee56ea1fe..19ef9f018012 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -480,9 +480,14 @@ void __init native_pagetable_init(void)
480 480
481 /* 481 /*
482 * Remove any mappings which extend past the end of physical 482 * Remove any mappings which extend past the end of physical
483 * memory from the boot time page table: 483 * memory from the boot time page table.
484 * In virtual address space, we should have at least two pages
485 * from VMALLOC_END to pkmap or fixmap according to VMALLOC_END
486 * definition. And max_low_pfn is set to VMALLOC_END physical
487 * address. If initial memory mapping is doing right job, we
488 * should have pte used near max_low_pfn or one pmd is not present.
484 */ 489 */
485 for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) { 490 for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
486 va = PAGE_OFFSET + (pfn<<PAGE_SHIFT); 491 va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
487 pgd = base + pgd_index(va); 492 pgd = base + pgd_index(va);
488 if (!pgd_present(*pgd)) 493 if (!pgd_present(*pgd))
@@ -493,10 +498,19 @@ void __init native_pagetable_init(void)
493 if (!pmd_present(*pmd)) 498 if (!pmd_present(*pmd))
494 break; 499 break;
495 500
501 /* should not be large page here */
502 if (pmd_large(*pmd)) {
503 pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
504 pfn, pmd, __pa(pmd));
505 BUG_ON(1);
506 }
507
496 pte = pte_offset_kernel(pmd, va); 508 pte = pte_offset_kernel(pmd, va);
497 if (!pte_present(*pte)) 509 if (!pte_present(*pte))
498 break; 510 break;
499 511
512 printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
513 pfn, pmd, __pa(pmd), pte, __pa(pte));
500 pte_clear(NULL, va, pte); 514 pte_clear(NULL, va, pte);
501 } 515 }
502 paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT); 516 paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);