aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPekka Enberg <penberg@cs.helsinki.fi>2009-03-05 07:55:02 -0500
committerIngo Molnar <mingo@elte.hu>2009-03-05 08:17:15 -0500
commitc338d6f60fc29dfc74bd82b91526ef43ba992bab (patch)
tree7f47351226b03af0b6b4290a62828c0aa55d11bf
parent01ced9ec14ad1b4f8a533c2f2b5a4fe4c92c1099 (diff)
x86: ifdef 32-bit and 64-bit pfn setup in init_memory_mapping()
Impact: cleanup

To reduce the diff between the 32-bit and 64-bit versions of init_memory_mapping(), ifdef configuration-specific pfn setup code in the function.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1236257708-27269-11-git-send-email-penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--arch/x86/mm/init_32.c42
-rw-r--r--arch/x86/mm/init_64.c21
2 files changed, 60 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 3f91bdc20971..34760e483972 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -975,20 +975,25 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
975 memset(mr, 0, sizeof(mr)); 975 memset(mr, 0, sizeof(mr));
976 nr_range = 0; 976 nr_range = 0;
977 977
978 /* head if not big page alignment ? */
979 start_pfn = start >> PAGE_SHIFT;
980 pos = start_pfn << PAGE_SHIFT;
981#ifdef CONFIG_X86_32
978 /* 982 /*
979 * Don't use a large page for the first 2/4MB of memory 983 * Don't use a large page for the first 2/4MB of memory
980 * because there are often fixed size MTRRs in there 984 * because there are often fixed size MTRRs in there
981 * and overlapping MTRRs into large pages can cause 985 * and overlapping MTRRs into large pages can cause
982 * slowdowns. 986 * slowdowns.
983 */ 987 */
984 /* head if not big page alignment ? */
985 start_pfn = start >> PAGE_SHIFT;
986 pos = start_pfn << PAGE_SHIFT;
987 if (pos == 0) 988 if (pos == 0)
988 end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT); 989 end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
989 else 990 else
990 end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT) 991 end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
991 << (PMD_SHIFT - PAGE_SHIFT); 992 << (PMD_SHIFT - PAGE_SHIFT);
993#else /* CONFIG_X86_64 */
994 end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
995 << (PMD_SHIFT - PAGE_SHIFT);
996#endif
992 if (end_pfn > (end >> PAGE_SHIFT)) 997 if (end_pfn > (end >> PAGE_SHIFT))
993 end_pfn = end >> PAGE_SHIFT; 998 end_pfn = end >> PAGE_SHIFT;
994 if (start_pfn < end_pfn) { 999 if (start_pfn < end_pfn) {
@@ -999,12 +1004,43 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
999 /* big page (2M) range */ 1004 /* big page (2M) range */
1000 start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT) 1005 start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
1001 << (PMD_SHIFT - PAGE_SHIFT); 1006 << (PMD_SHIFT - PAGE_SHIFT);
1007#ifdef CONFIG_X86_32
1002 end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT); 1008 end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
1009#else /* CONFIG_X86_64 */
1010 end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
1011 << (PUD_SHIFT - PAGE_SHIFT);
1012 if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
1013 end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
1014#endif
1015
1016 if (start_pfn < end_pfn) {
1017 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
1018 page_size_mask & (1<<PG_LEVEL_2M));
1019 pos = end_pfn << PAGE_SHIFT;
1020 }
1021
1022#ifdef CONFIG_X86_64
1023 /* big page (1G) range */
1024 start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
1025 << (PUD_SHIFT - PAGE_SHIFT);
1026 end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
1027 if (start_pfn < end_pfn) {
1028 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
1029 page_size_mask &
1030 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
1031 pos = end_pfn << PAGE_SHIFT;
1032 }
1033
1034 /* tail is not big page (1G) alignment */
1035 start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
1036 << (PMD_SHIFT - PAGE_SHIFT);
1037 end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
1003 if (start_pfn < end_pfn) { 1038 if (start_pfn < end_pfn) {
1004 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 1039 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
1005 page_size_mask & (1<<PG_LEVEL_2M)); 1040 page_size_mask & (1<<PG_LEVEL_2M));
1006 pos = end_pfn << PAGE_SHIFT; 1041 pos = end_pfn << PAGE_SHIFT;
1007 } 1042 }
1043#endif
1008 1044
1009 /* tail is not big page (2M) alignment */ 1045 /* tail is not big page (2M) alignment */
1010 start_pfn = pos>>PAGE_SHIFT; 1046 start_pfn = pos>>PAGE_SHIFT;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5ecb23a57d2f..d99bc6ac4884 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -741,8 +741,22 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
741 /* head if not big page alignment ? */ 741 /* head if not big page alignment ? */
742 start_pfn = start >> PAGE_SHIFT; 742 start_pfn = start >> PAGE_SHIFT;
743 pos = start_pfn << PAGE_SHIFT; 743 pos = start_pfn << PAGE_SHIFT;
744#ifdef CONFIG_X86_32
745 /*
746 * Don't use a large page for the first 2/4MB of memory
747 * because there are often fixed size MTRRs in there
748 * and overlapping MTRRs into large pages can cause
749 * slowdowns.
750 */
751 if (pos == 0)
752 end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
753 else
754 end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
755 << (PMD_SHIFT - PAGE_SHIFT);
756#else /* CONFIG_X86_64 */
744 end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) 757 end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
745 << (PMD_SHIFT - PAGE_SHIFT); 758 << (PMD_SHIFT - PAGE_SHIFT);
759#endif
746 if (end_pfn > (end >> PAGE_SHIFT)) 760 if (end_pfn > (end >> PAGE_SHIFT))
747 end_pfn = end >> PAGE_SHIFT; 761 end_pfn = end >> PAGE_SHIFT;
748 if (start_pfn < end_pfn) { 762 if (start_pfn < end_pfn) {
@@ -753,16 +767,22 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
753 /* big page (2M) range */ 767 /* big page (2M) range */
754 start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT) 768 start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
755 << (PMD_SHIFT - PAGE_SHIFT); 769 << (PMD_SHIFT - PAGE_SHIFT);
770#ifdef CONFIG_X86_32
771 end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
772#else /* CONFIG_X86_64 */
756 end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT) 773 end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
757 << (PUD_SHIFT - PAGE_SHIFT); 774 << (PUD_SHIFT - PAGE_SHIFT);
758 if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT))) 775 if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
759 end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)); 776 end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
777#endif
778
760 if (start_pfn < end_pfn) { 779 if (start_pfn < end_pfn) {
761 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 780 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
762 page_size_mask & (1<<PG_LEVEL_2M)); 781 page_size_mask & (1<<PG_LEVEL_2M));
763 pos = end_pfn << PAGE_SHIFT; 782 pos = end_pfn << PAGE_SHIFT;
764 } 783 }
765 784
785#ifdef CONFIG_X86_64
766 /* big page (1G) range */ 786 /* big page (1G) range */
767 start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT) 787 start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
768 << (PUD_SHIFT - PAGE_SHIFT); 788 << (PUD_SHIFT - PAGE_SHIFT);
@@ -783,6 +803,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
783 page_size_mask & (1<<PG_LEVEL_2M)); 803 page_size_mask & (1<<PG_LEVEL_2M));
784 pos = end_pfn << PAGE_SHIFT; 804 pos = end_pfn << PAGE_SHIFT;
785 } 805 }
806#endif
786 807
787 /* tail is not big page (2M) alignment */ 808 /* tail is not big page (2M) alignment */
788 start_pfn = pos>>PAGE_SHIFT; 809 start_pfn = pos>>PAGE_SHIFT;