author		Pekka Enberg <penberg@cs.helsinki.fi>	2009-03-05 07:54:52 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-05 08:17:10 -0500
commit		c3f5d2d8b5fa6eb0cc1c47fd162bf6432f206f42 (patch)
tree		34f71a17caea552a7c6e8bf5dd48133bcddca063 /arch/x86/mm/init_64.c
parent		fc5efe3941c47c0278fe1bbcf8cc02a03a74fcda (diff)
x86: init_memory_mapping() trivial cleanups
Impact: cleanup

To reduce the diff between the 32-bit and 64-bit versions of
init_memory_mapping(), fix up all trivial issues.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1236257708-27269-1-git-send-email-penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--	arch/x86/mm/init_64.c	26
1 file changed, 15 insertions, 11 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d325186dd32b..cdb3be1c41f1 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -554,20 +554,25 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
 	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+
 	if (use_gbpages) {
 		unsigned long extra;
+
 		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
 		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
 	} else
 		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+
 	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 
 	if (use_pse) {
 		unsigned long extra;
+
 		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
 	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
 	/*
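This hunk only adds blank lines after declarations, but the surrounding arithmetic is what find_early_table_space() is for: estimating how many bytes of page tables are needed to map [0, end). Below is a standalone sketch of that estimate. The constants mirror x86-64 (4 KiB pages, 2 MiB PMDs, 1 GiB PUDs) and sizeof(unsigned long) stands in for sizeof(pud_t) and friends; everything is defined locally as an assumption so the example builds outside the kernel tree.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SIZE	(1UL << PUD_SHIFT)

static unsigned long roundup(unsigned long x, unsigned long y)
{
	return (x + y - 1) / y * y;	/* y is a power of two here */
}

int main(void)
{
	unsigned long end = 4UL << 30;		/* map the first 4 GiB */
	int use_pse = 1, use_gbpages = 0;	/* 2 MiB pages, no 1 GiB pages */
	unsigned long puds, pmds, ptes, tables;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = roundup(puds * sizeof(unsigned long), PAGE_SIZE);

	if (use_gbpages) {
		/* only the tail below the last 1 GiB boundary needs PMDs */
		unsigned long extra = end - ((end >> PUD_SHIFT) << PUD_SHIFT);

		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	} else
		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables += roundup(pmds * sizeof(unsigned long), PAGE_SIZE);

	if (use_pse) {
		/* only the tail below the last 2 MiB boundary needs PTEs */
		unsigned long extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);

		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	tables += roundup(ptes * sizeof(unsigned long), PAGE_SIZE);

	printf("puds=%lu pmds=%lu ptes=%lu -> %lu bytes of tables\n",
	       puds, pmds, ptes, tables);
	return 0;
}

With these inputs it prints puds=4 pmds=2048 ptes=0 and 20480 bytes: one page of PUD entries, four pages of PMD entries, and no PTE pages, since 4 GiB is already 2 MiB aligned.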
@@ -647,7 +652,6 @@ static int save_mr(struct map_range *mr, int nr_range,
 			     unsigned long start_pfn, unsigned long end_pfn,
 			     unsigned long page_size_mask)
 {
-
 	if (start_pfn < end_pfn) {
 		if (nr_range >= NR_RANGE_MR)
 			panic("run out of range for init_memory_mapping\n");
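For orientation: save_mr() appends a pfn range plus its page-size mask to the fixed map_range array, and the hunk merely drops a stray blank line after the opening brace. Below is a rough self-contained sketch of the function's behavior. The struct layout and the NR_RANGE_MR value of 5 mirror the 64-bit file around this commit but should be treated as assumptions, and the sketch returns -1 where the kernel panics.

#define PAGE_SHIFT	12
#define NR_RANGE_MR	5	/* slot count in the 64-bit file */

struct map_range {
	unsigned long start;	/* byte addresses, not pfns */
	unsigned long end;
	unsigned page_size_mask;
};

static int save_mr(struct map_range *mr, int nr_range,
		   unsigned long start_pfn, unsigned long end_pfn,
		   unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			return -1;	/* the kernel panics here instead */
		mr[nr_range].start = start_pfn << PAGE_SHIFT;
		mr[nr_range].end = end_pfn << PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}
	return nr_range;
}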
@@ -679,13 +683,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 *
-	 * Later we should allocate these tables in the local node of the
-	 * memory mapped. Unfortunately this is done currently before the
-	 * nodes are discovered.
-	 */
 	if (!after_bootmem)
 		init_gbpages();
 
@@ -709,7 +706,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	memset(mr, 0, sizeof(mr));
 	nr_range = 0;
 
-	/* head if not big page alignment ?*/
+	/* head if not big page alignment ? */
 	start_pfn = start >> PAGE_SHIFT;
 	pos = start_pfn << PAGE_SHIFT;
 	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
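The head computation at the end of this hunk rounds pos up to the next 2 MiB boundary and expresses the result in pfn units. A worked example, with local constants standing in for the x86-64 values and an arbitrary start address chosen to be 4 KiB aligned but not 2 MiB aligned:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)

int main(void)
{
	unsigned long start = 0x100000;		/* 1 MiB */
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long pos = start_pfn << PAGE_SHIFT;
	/* round pos up to the next 2 MiB boundary, in pfn units */
	unsigned long end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
				<< (PMD_SHIFT - PAGE_SHIFT);

	printf("head: pfn 0x%lx-0x%lx\n", start_pfn, end_pfn);
	return 0;
}

This prints head: pfn 0x100-0x200: the 256 pages between 1 MiB and 2 MiB get 4 KiB mappings so that big-page mappings can start on a properly aligned boundary.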
@@ -721,7 +718,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		pos = end_pfn << PAGE_SHIFT;
 	}
 
-	/* big page (2M) range*/
+	/* big page (2M) range */
 	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 			 << (PMD_SHIFT - PAGE_SHIFT);
 	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
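Continuing the same worked example: after the head, pos sits at 2 MiB, and the 2M range runs from the 2 MiB round-up of pos to its 1 GiB round-up. The end_pfn continuation line is cut off by the hunk boundary above; the sketch completes it with << (PUD_SHIFT - PAGE_SHIFT), matching the pattern of the start_pfn line, though that completion is an assumption here.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SIZE	(1UL << PUD_SHIFT)

int main(void)
{
	unsigned long pos = 2UL << 20;		/* head ended at 2 MiB */
	unsigned long start_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
				  << (PMD_SHIFT - PAGE_SHIFT);
	unsigned long end_pfn = ((pos + (PUD_SIZE - 1)) >> PUD_SHIFT)
				<< (PUD_SHIFT - PAGE_SHIFT);

	printf("2M range: pfn 0x%lx-0x%lx\n", start_pfn, end_pfn);
	return 0;
}

This prints 2M range: pfn 0x200-0x40000, i.e. 2 MiB mappings from 2 MiB up to the first 1 GiB boundary, after which 1 GiB pages could take over.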
@@ -769,7 +766,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		/* move it */
 		old_start = mr[i].start;
 		memmove(&mr[i], &mr[i+1],
-			(nr_range - 1 - i) * sizeof (struct map_range));
+			(nr_range - 1 - i) * sizeof(struct map_range));
 		mr[i--].start = old_start;
 		nr_range--;
 	}
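The context of this hunk is the merge pass: when adjacent entries end up with the same page_size_mask, the tail of the array is slid down one slot and the surviving entry keeps the lower range's start. A minimal sketch of that single step; merge_with_next() is a hypothetical name for illustration, and struct map_range repeats the earlier sketch.

#include <string.h>

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

/*
 * Merge entry i into entry i+1: slide mr[i+1..nr_range-1] down one
 * slot, then restore the lower range's start -- the same memmove
 * pattern as in the hunk. Returns the new range count.
 */
static int merge_with_next(struct map_range *mr, int nr_range, int i)
{
	unsigned long old_start = mr[i].start;

	memmove(&mr[i], &mr[i + 1],
		(nr_range - 1 - i) * sizeof(struct map_range));
	mr[i].start = old_start;
	return nr_range - 1;
}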
@@ -780,6 +777,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
 			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
+	/*
+	 * Find space for the kernel direct mapping tables.
+	 *
+	 * Later we should allocate these tables in the local node of the
+	 * memory mapped. Unfortunately this is done currently before the
+	 * nodes are discovered.
+	 */
 	if (!after_bootmem)
 		find_early_table_space(end, use_pse, use_gbpages);
 