aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYinghai Lu <yinghai@kernel.org>2012-11-16 22:38:56 -0500
committerH. Peter Anvin <hpa@linux.intel.com>2012-11-17 14:59:17 -0500
commiteceb3632ac85bc08fc27f7fc9ab85672681b2635 (patch)
tree381e44a45a46068b1500a33dec5aabb732cc6aac
parentaeebe84cc96cde4181807bc67c300c550d0ef123 (diff)
x86, mm: Don't clear page table if range is ram
After we add code that uses a buffer in BRK to pre-map the buffer for page tables in the following patch: x86, mm: setup page table in top-down it should be safe to remove early_memmap for page table accessing. Instead we got a panic with that. It turns out that we clear the initial page table wrongly for the next range that is separated by holes. And it only happens when we are trying to map ram ranges one by one. We need to check if the range is ram before clearing the page table. We change the loop structure to remove the extra little loop and use one loop only, and in that loop will calculate next at first, and check if [addr,next) is covered by E820_RAM. -v2: E820_RESERVED_KERN is treated as E820_RAM. The EFI code changes some E820_RAM to that, so the next kernel by kexec will know that range is used already. Signed-off-by: Yinghai Lu <yinghai@kernel.org> Link: http://lkml.kernel.org/r/1353123563-3103-20-git-send-email-yinghai@kernel.org Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--arch/x86/mm/init_64.c40
1 files changed, 19 insertions, 21 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 869372a5d3cf..fa28e3e29741 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -363,20 +363,20 @@ static unsigned long __meminit
363phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end, 363phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
364 pgprot_t prot) 364 pgprot_t prot)
365{ 365{
366 unsigned pages = 0; 366 unsigned long pages = 0, next;
367 unsigned long last_map_addr = end; 367 unsigned long last_map_addr = end;
368 int i; 368 int i;
369 369
370 pte_t *pte = pte_page + pte_index(addr); 370 pte_t *pte = pte_page + pte_index(addr);
371 371
372 for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) { 372 for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
373 373 next = (addr & PAGE_MASK) + PAGE_SIZE;
374 if (addr >= end) { 374 if (addr >= end) {
375 if (!after_bootmem) { 375 if (!after_bootmem &&
376 for(; i < PTRS_PER_PTE; i++, pte++) 376 !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
377 set_pte(pte, __pte(0)); 377 !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
378 } 378 set_pte(pte, __pte(0));
379 break; 379 continue;
380 } 380 }
381 381
382 /* 382 /*
@@ -419,16 +419,15 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
419 pte_t *pte; 419 pte_t *pte;
420 pgprot_t new_prot = prot; 420 pgprot_t new_prot = prot;
421 421
422 next = (address & PMD_MASK) + PMD_SIZE;
422 if (address >= end) { 423 if (address >= end) {
423 if (!after_bootmem) { 424 if (!after_bootmem &&
424 for (; i < PTRS_PER_PMD; i++, pmd++) 425 !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
425 set_pmd(pmd, __pmd(0)); 426 !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
426 } 427 set_pmd(pmd, __pmd(0));
427 break; 428 continue;
428 } 429 }
429 430
430 next = (address & PMD_MASK) + PMD_SIZE;
431
432 if (pmd_val(*pmd)) { 431 if (pmd_val(*pmd)) {
433 if (!pmd_large(*pmd)) { 432 if (!pmd_large(*pmd)) {
434 spin_lock(&init_mm.page_table_lock); 433 spin_lock(&init_mm.page_table_lock);
@@ -497,13 +496,12 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
497 pmd_t *pmd; 496 pmd_t *pmd;
498 pgprot_t prot = PAGE_KERNEL; 497 pgprot_t prot = PAGE_KERNEL;
499 498
500 if (addr >= end)
501 break;
502
503 next = (addr & PUD_MASK) + PUD_SIZE; 499 next = (addr & PUD_MASK) + PUD_SIZE;
504 500 if (addr >= end) {
505 if (!after_bootmem && !e820_any_mapped(addr, next, 0)) { 501 if (!after_bootmem &&
506 set_pud(pud, __pud(0)); 502 !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
503 !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
504 set_pud(pud, __pud(0));
507 continue; 505 continue;
508 } 506 }
509 507