author    Yinghai Lu <yhlu.kernel@gmail.com>       2008-07-09 23:15:02 -0400
committer Ingo Molnar <mingo@elte.hu>              2008-07-10 02:46:40 -0400
commit    7b16eb8930d1e2a7ce5c7f35c87d62252ecc91f2 (patch)
tree      3d7175d24ac617c23da7ebb9766222638080b327 /arch/x86/mm
parent    4fb3dc2729c22ed1b023475fe28b720460251de1 (diff)
x86: overmapped fix when 4K pages on tail, 64-bit
Fix phys_pmd_init() to make sure it does not return a value bigger than end.
Also print out the range split (1G/2M/4K) in init_memory_mapping().
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
 arch/x86/mm/init_64.c | 106
 1 file changed, 72 insertions(+), 34 deletions(-)
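
Why the old code could over-report: phys_pmd_init() walked the PMD slots in
whole 2M steps and returned its loop cursor, which rounds up past any end
address that is not 2M-aligned. The patch instead returns last_map_addr,
accumulated from the entries actually written. A minimal user-space sketch of
just that arithmetic (the end address is hypothetical, and the two loops stand
in for the real page-table walk):

    #include <stdio.h>

    #define PAGE_SIZE 0x1000UL        /* 4K, as on x86-64 */
    #define PMD_SIZE  0x200000UL      /* 2M */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        /* hypothetical end: 4K-aligned but not 2M-aligned */
        unsigned long end = 0x7ff83000UL;
        unsigned long address, last_map_addr = 0;

        /* old behaviour: the 2M walk returns its cursor, which overshoots */
        for (address = 0; address < end; address += PMD_SIZE)
            ;
        printf("old return: %#lx (end was %#lx)\n", address, end);

        /* new behaviour: remember the end of the last page actually
         * written, as phys_pte_init() now does for the 4K tail */
        for (address = 0; address < end; address += PAGE_SIZE)
            last_map_addr = (address & PAGE_MASK) + PAGE_SIZE;
        printf("new return: %#lx\n", last_map_addr);

        return 0;
    }

For this end value the old walk reports 0x80000000 even though nothing past
0x7ff83000 was requested; the new return stops exactly at end.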
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 51f69b39b752..48548ef7ddf8 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -302,11 +302,13 @@ static __meminit void unmap_low_page(void *adr)
 	early_iounmap(adr, PAGE_SIZE);
 }
 
-static void __meminit
+static unsigned long __meminit
 phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
 {
 	unsigned pages = 0;
+	unsigned long last_map_addr = end;
 	int i;
+
 	pte_t *pte = pte_page + pte_index(addr);
 
 	for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {
@@ -326,17 +328,20 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
 		printk(" pte=%p addr=%lx pte=%016lx\n",
 		       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
+		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
 		pages++;
 	}
 	update_page_count(PG_LEVEL_4K, pages);
+
+	return last_map_addr;
 }
 
-static void __meminit
+static unsigned long __meminit
 phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
 {
 	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
 
-	phys_pte_init(pte, address, end);
+	return phys_pte_init(pte, address, end);
 }
 
 static unsigned long __meminit
@@ -344,6 +349,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 	     unsigned long page_size_mask)
 {
 	unsigned long pages = 0;
+	unsigned long last_map_addr = end;
 
 	int i = pmd_index(address);
 
@@ -362,7 +368,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 
 		if (pmd_val(*pmd)) {
 			if (!pmd_large(*pmd))
-				phys_pte_update(pmd, address, end);
+				last_map_addr = phys_pte_update(pmd, address,
+								end);
 			continue;
 		}
 
@@ -370,17 +377,18 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			pages++;
 			set_pte((pte_t *)pmd,
 				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
 			continue;
 		}
 
 		pte = alloc_low_page(&pte_phys);
-		phys_pte_init(pte, address, end);
+		last_map_addr = phys_pte_init(pte, address, end);
 		unmap_low_page(pte);
 
 		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
 	}
 	update_page_count(PG_LEVEL_2M, pages);
-	return address;
+	return last_map_addr;
 }
 
 static unsigned long __meminit
@@ -659,6 +667,32 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
 
 	return last_map_addr;
 }
+
+struct map_range {
+	unsigned long start;
+	unsigned long end;
+	unsigned page_size_mask;
+};
+
+#define NR_RANGE_MR 5
+
+static int save_mr(struct map_range *mr, int nr_range,
+		   unsigned long start_pfn, unsigned long end_pfn,
+		   unsigned long page_size_mask)
+{
+
+	if (start_pfn < end_pfn) {
+		if (nr_range >= NR_RANGE_MR)
+			panic("run out of range for init_memory_mapping\n");
+		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
+		mr[nr_range].end = end_pfn<<PAGE_SHIFT;
+		mr[nr_range].page_size_mask = page_size_mask;
+		nr_range++;
+	}
+
+	return nr_range;
+}
+
 /*
  * Setup the direct mapping of the physical memory at PAGE_OFFSET.
  * This runs before bootmem is initialized and gets pages directly from
@@ -667,10 +701,13 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
 unsigned long __init_refok init_memory_mapping(unsigned long start,
 					       unsigned long end)
 {
-	unsigned long last_map_addr = end;
+	unsigned long last_map_addr = 0;
 	unsigned long page_size_mask = 0;
 	unsigned long start_pfn, end_pfn;
 
+	struct map_range mr[NR_RANGE_MR];
+	int nr_range, i;
+
 	printk(KERN_INFO "init_memory_mapping\n");
 
 	/*
@@ -680,24 +717,22 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * memory mapped. Unfortunately this is done currently before the
 	 * nodes are discovered.
 	 */
-	if (!after_bootmem) {
+	if (!after_bootmem)
 		init_gbpages();
-		find_early_table_space(end);
-	}
 
 	if (direct_gbpages)
 		page_size_mask |= 1 << PG_LEVEL_1G;
 	if (cpu_has_pse)
 		page_size_mask |= 1 << PG_LEVEL_2M;
 
-	/* head if not big page aligment ?*/
+	memset(mr, 0, sizeof(mr));
+	nr_range = 0;
+
+	/* head if not big page alignment ?*/
 	start_pfn = start >> PAGE_SHIFT;
 	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
 			<< (PMD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn)
-		last_map_addr = kernel_physical_mapping_init(
-					start_pfn<<PAGE_SHIFT,
-					end_pfn<<PAGE_SHIFT, 0);
+	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
 	/* big page (2M) range*/
 	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
@@ -706,37 +741,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 			 << (PUD_SHIFT - PAGE_SHIFT);
 	if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
 		end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
-	if (start_pfn < end_pfn)
-		last_map_addr = kernel_physical_mapping_init(
-					start_pfn<<PAGE_SHIFT,
-					end_pfn<<PAGE_SHIFT,
-					page_size_mask & (1<<PG_LEVEL_2M));
+	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+			   page_size_mask & (1<<PG_LEVEL_2M));
 
 	/* big page (1G) range */
 	start_pfn = end_pfn;
 	end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn)
-		last_map_addr = kernel_physical_mapping_init(
-					start_pfn<<PAGE_SHIFT,
-					end_pfn<<PAGE_SHIFT,
-					page_size_mask & ((1<<PG_LEVEL_2M)
-						| (1<<PG_LEVEL_1G)));
+	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+			   page_size_mask &
+			   ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
 
 	/* tail is not big page (1G) alignment */
 	start_pfn = end_pfn;
 	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	if (start_pfn < end_pfn)
-		last_map_addr = kernel_physical_mapping_init(
-					start_pfn<<PAGE_SHIFT,
-					end_pfn<<PAGE_SHIFT,
-					page_size_mask & (1<<PG_LEVEL_2M));
+	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+			   page_size_mask & (1<<PG_LEVEL_2M));
+
 	/* tail is not big page (2M) alignment */
 	start_pfn = end_pfn;
 	end_pfn = end>>PAGE_SHIFT;
-	if (start_pfn < end_pfn)
+	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+
+	for (i = 0; i < nr_range; i++)
+		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
+				mr[i].start, mr[i].end,
+			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
+			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
+
+	if (!after_bootmem)
+		find_early_table_space(end);
+
+	for (i = 0; i < nr_range; i++)
 		last_map_addr = kernel_physical_mapping_init(
-					start_pfn<<PAGE_SHIFT,
-					end_pfn<<PAGE_SHIFT, 0);
+						mr[i].start, mr[i].end,
+						mr[i].page_size_mask);
 
 	if (!after_bootmem)
 		mmu_cr4_features = read_cr4();
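
To see the new split-then-map flow end to end, here is a user-space sketch
mirroring save_mr() and the KERN_DEBUG loop added above. The start/end values
are hypothetical, and the PG_LEVEL_* values are stand-ins that only follow the
ordering of the kernel's enum pg_level, not the real definitions:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21
    #define PUD_SHIFT  30
    #define PMD_SIZE   (1UL << PMD_SHIFT)
    #define PUD_SIZE   (1UL << PUD_SHIFT)
    #define PG_LEVEL_2M 2              /* illustrative stand-ins */
    #define PG_LEVEL_1G 3
    #define NR_RANGE_MR 5

    struct map_range {
        unsigned long start;
        unsigned long end;
        unsigned page_size_mask;
    };

    static int save_mr(struct map_range *mr, int nr_range,
                       unsigned long start_pfn, unsigned long end_pfn,
                       unsigned long page_size_mask)
    {
        if (start_pfn < end_pfn) {
            /* the kernel panic()s when nr_range hits NR_RANGE_MR; elided */
            mr[nr_range].start = start_pfn << PAGE_SHIFT;
            mr[nr_range].end = end_pfn << PAGE_SHIFT;
            mr[nr_range].page_size_mask = page_size_mask;
            nr_range++;
        }
        return nr_range;
    }

    int main(void)
    {
        unsigned long start = 0, end = 0x47ff83000UL; /* hypothetical */
        unsigned long page_size_mask = (1 << PG_LEVEL_2M) | (1 << PG_LEVEL_1G);
        unsigned long start_pfn, end_pfn;
        struct map_range mr[NR_RANGE_MR];
        int nr_range = 0, i;

        memset(mr, 0, sizeof(mr));

        /* head if not big page alignment: 4K up to the first 2M boundary */
        start_pfn = start >> PAGE_SHIFT;
        end_pfn = ((start + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

        /* big page (2M) range, up to the first 1G boundary */
        start_pfn = end_pfn;
        end_pfn = ((start + PUD_SIZE - 1) >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        if (end_pfn > ((end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT)))
            end_pfn = ((end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT));
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                           page_size_mask & (1 << PG_LEVEL_2M));

        /* big page (1G) range */
        start_pfn = end_pfn;
        end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                           page_size_mask & ((1 << PG_LEVEL_2M) | (1 << PG_LEVEL_1G)));

        /* 2M tail down to the last 2M boundary */
        start_pfn = end_pfn;
        end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                           page_size_mask & (1 << PG_LEVEL_2M));

        /* 4K tail up to end */
        start_pfn = end_pfn;
        end_pfn = end >> PAGE_SHIFT;
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

        for (i = 0; i < nr_range; i++)
            printf(" %010lx - %010lx page %s\n", mr[i].start, mr[i].end,
                   (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) ? "1G" :
                   (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) ? "2M" : "4k");
        return 0;
    }

For this range the head and the first 2M slice come out empty, so three
entries survive: a 1G-mapped body, a 2M-mapped tail down to the last 2M
boundary, and a 4K-mapped remainder. That per-range breakdown is exactly what
the new printk(KERN_DEBUG ...) loop prints at boot.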