author		Pekka Enberg <penberg@cs.helsinki.fi>	2009-03-05 07:54:52 -0500
committer	Ingo Molnar <mingo@elte.hu>		2009-03-05 08:17:10 -0500
commit		c3f5d2d8b5fa6eb0cc1c47fd162bf6432f206f42
tree		34f71a17caea552a7c6e8bf5dd48133bcddca063 /arch
parent		fc5efe3941c47c0278fe1bbcf8cc02a03a74fcda
x86: init_memory_mapping() trivial cleanups
Impact: cleanup
To reduce the diff between the 32-bit and 64-bit versions of
init_memory_mapping(), fix up all trivial issues.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1236257708-27269-1-git-send-email-penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
 arch/x86/mm/init_32.c | 42
 arch/x86/mm/init_64.c | 26
 2 files changed, 40 insertions(+), 28 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c351456d06dc..ad4e03c2d4df 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -868,11 +868,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 
 	table_start >>= PAGE_SHIFT;
 	table_end = table_start;
-	table_top = table_start + (tables>>PAGE_SHIFT);
+	table_top = table_start + (tables >> PAGE_SHIFT);
 
 	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, table_start << PAGE_SHIFT,
-		(table_start << PAGE_SHIFT) + tables);
+		end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
 }
 
 struct map_range {
@@ -899,8 +898,13 @@ static int save_mr(struct map_range *mr, int nr_range,
 	return nr_range;
 }
 
+/*
+ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them they are temporarily mapped.
+ */
 unsigned long __init_refok init_memory_mapping(unsigned long start,
 					       unsigned long end)
 {
 	pgd_t *pgd_base = swapper_pg_dir;
 	unsigned long page_size_mask = 0;
@@ -911,7 +915,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	int nr_range, i;
 	int use_pse;
 
-	printk(KERN_INFO "init_memory_mapping: %08lx-%08lx\n", start, end);
+	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	/*
@@ -940,19 +944,19 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	}
 
-	memset(mr, 0, sizeof(mr));
-	nr_range = 0;
-
 	if (use_pse)
 		page_size_mask |= 1 << PG_LEVEL_2M;
 
+	memset(mr, 0, sizeof(mr));
+	nr_range = 0;
+
 	/*
 	 * Don't use a large page for the first 2/4MB of memory
 	 * because there are often fixed size MTRRs in there
 	 * and overlapping MTRRs into large pages can cause
 	 * slowdowns.
 	 */
-	/* head could not be big page alignment ? */
+	/* head if not big page alignment ? */
 	start_pfn = start >> PAGE_SHIFT;
 	pos = start_pfn << PAGE_SHIFT;
 	if (pos == 0)
@@ -960,14 +964,14 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	else
 		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 				 << (PMD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > (end>>PAGE_SHIFT))
-		end_pfn = end>>PAGE_SHIFT;
+	if (end_pfn > (end >> PAGE_SHIFT))
+		end_pfn = end >> PAGE_SHIFT;
 	if (start_pfn < end_pfn) {
 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 		pos = end_pfn << PAGE_SHIFT;
 	}
 
-	/* big page range */
+	/* big page (2M) range */
 	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 			 << (PMD_SHIFT - PAGE_SHIFT);
 	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
@@ -977,7 +981,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		pos = end_pfn << PAGE_SHIFT;
 	}
 
-	/* tail is not big page alignment ? */
+	/* tail is not big page (2M) alignment */
 	start_pfn = pos>>PAGE_SHIFT;
 	end_pfn = end>>PAGE_SHIFT;
 	if (start_pfn < end_pfn)
@@ -998,13 +1002,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	}
 
 	for (i = 0; i < nr_range; i++)
-		printk(KERN_DEBUG " %08lx - %08lx page %s\n",
+		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
 				mr[i].start, mr[i].end,
-			(mr[i].page_size_mask & (1<<PG_LEVEL_2M)) ?
-				"big page" : "4k");
+			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
+			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
 	/*
 	 * Find space for the kernel direct mapping tables.
+	 *
+	 * Later we should allocate these tables in the local node of the
+	 * memory mapped. Unfortunately this is done currently before the
+	 * nodes are discovered.
 	 */
 	if (!after_init_bootmem)
 		find_early_table_space(end, use_pse);
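
Both the 32-bit and 64-bit versions carve the [start, end) range the same way before mapping it: an unaligned head mapped with 4k pages, a large-page-aligned middle mapped with 2M (or 1G) pages, and an unaligned tail mapped with 4k pages again. The standalone sketch below illustrates that splitting with simplified constants and made-up example addresses; it omits the pos == 0 and empty-range handling the real code does and only shows the shape of the calculation, not the kernel code itself.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* one 2M large page per pmd entry */
#define PMD_SIZE	(1UL << PMD_SHIFT)

/* Round a byte address up/down to a 2M boundary, expressed as a page frame number. */
static unsigned long pfn_up_2m(unsigned long addr)
{
	return ((addr + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
}

static unsigned long pfn_down_2m(unsigned long addr)
{
	return (addr >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
}

int main(void)
{
	/* Hypothetical example range: starts at 1 MB, ends at an address that is not 2M aligned. */
	unsigned long start = 0x00100000;
	unsigned long end   = 0x7fe56000;

	unsigned long head_start = start >> PAGE_SHIFT;
	unsigned long head_end   = pfn_up_2m(start);	/* 4k pages up to the first 2M boundary */

	unsigned long big_start  = head_end;
	unsigned long big_end    = pfn_down_2m(end);	/* 2M pages for the aligned middle */

	unsigned long tail_start = big_end;
	unsigned long tail_end   = end >> PAGE_SHIFT;	/* 4k pages for the unaligned tail */

	printf("head: %010lx - %010lx page 4k\n",
	       head_start << PAGE_SHIFT, head_end << PAGE_SHIFT);
	printf("big:  %010lx - %010lx page 2M\n",
	       big_start << PAGE_SHIFT, big_end << PAGE_SHIFT);
	printf("tail: %010lx - %010lx page 4k\n",
	       tail_start << PAGE_SHIFT, tail_end << PAGE_SHIFT);
	return 0;
}

With these example addresses the head covers 1 MB-2 MB with 4k pages, the middle covers 2 MB up to the last 2M boundary with large pages, and the remainder falls back to 4k pages, matching the "head / big page (2M) range / tail" comments in the hunks above.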
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d325186dd32b..cdb3be1c41f1 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -554,20 +554,25 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
 	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+
 	if (use_gbpages) {
 		unsigned long extra;
+
 		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
 		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
 	} else
 		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+
 	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 
 	if (use_pse) {
 		unsigned long extra;
+
 		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
 	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
 	/*
@@ -647,7 +652,6 @@ static int save_mr(struct map_range *mr, int nr_range,
 			     unsigned long start_pfn, unsigned long end_pfn,
 			     unsigned long page_size_mask)
 {
-
 	if (start_pfn < end_pfn) {
 		if (nr_range >= NR_RANGE_MR)
 			panic("run out of range for init_memory_mapping\n");
@@ -679,13 +683,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
-	/*
-	 * Find space for the kernel direct mapping tables.
-	 *
-	 * Later we should allocate these tables in the local node of the
-	 * memory mapped. Unfortunately this is done currently before the
-	 * nodes are discovered.
-	 */
 	if (!after_bootmem)
 		init_gbpages();
 
@@ -709,7 +706,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	memset(mr, 0, sizeof(mr));
 	nr_range = 0;
 
-	/* head if not big page alignment ?*/
+	/* head if not big page alignment ? */
 	start_pfn = start >> PAGE_SHIFT;
 	pos = start_pfn << PAGE_SHIFT;
 	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
@@ -721,7 +718,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		pos = end_pfn << PAGE_SHIFT;
 	}
 
-	/* big page (2M) range*/
+	/* big page (2M) range */
 	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 			 << (PMD_SHIFT - PAGE_SHIFT);
 	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
@@ -769,7 +766,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		/* move it */
 		old_start = mr[i].start;
 		memmove(&mr[i], &mr[i+1],
-			(nr_range - 1 - i) * sizeof (struct map_range));
+			(nr_range - 1 - i) * sizeof(struct map_range));
 		mr[i--].start = old_start;
 		nr_range--;
 	}
@@ -780,6 +777,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
 			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
 
+	/*
+	 * Find space for the kernel direct mapping tables.
+	 *
+	 * Later we should allocate these tables in the local node of the
+	 * memory mapped. Unfortunately this is done currently before the
+	 * nodes are discovered.
+	 */
 	if (!after_bootmem)
 		find_early_table_space(end, use_pse, use_gbpages);
 
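
For context on the find_early_table_space() hunks: the function estimates, before any memory allocator exists, how many pages of pud/pmd/pte entries the direct mapping will consume, so a contiguous block can be reserved for them up front. Below is a rough standalone sketch of that estimate for the worst case (no PSE and no gbpages, so every level is fully populated), using assumed x86-64 constants and a local roundup_page() helper standing in for the kernel's roundup(); it is an illustration, not the kernel routine.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_SHIFT	12
#define PMD_SHIFT	21	/* one pmd entry maps 2 MB */
#define PUD_SHIFT	30	/* one pud entry maps 1 GB */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SIZE	(1UL << PUD_SHIFT)

/* On x86-64, pud_t, pmd_t and pte_t entries are all 8 bytes. */
#define ENTRY_SIZE	8UL

/* Stand-in for the kernel's roundup(x, PAGE_SIZE). */
static unsigned long roundup_page(unsigned long bytes)
{
	return (bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	/* Example: map the first 4 GB (assumes 64-bit unsigned long). */
	unsigned long end = 4UL << 30;

	/* One pud entry per GB, one pmd entry per 2 MB, one pte per 4 KB. */
	unsigned long puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	unsigned long pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	unsigned long ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Worst case: everything mapped with 4k pages, so all three levels are full. */
	unsigned long tables = roundup_page(puds * ENTRY_SIZE) +
			       roundup_page(pmds * ENTRY_SIZE) +
			       roundup_page(ptes * ENTRY_SIZE);

	printf("mapping up to %lx needs at most %lu KB of page tables\n",
	       end, tables >> 10);
	return 0;
}

For 4 GB this works out to roughly 8 MB, dominated by the pte pages; when 2M or 1G pages are usable, the real code only counts pmd/pte entries for the unaligned leftovers, which is why the use_gbpages and use_pse branches shrink the estimate so dramatically.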