author		Pekka Enberg <penberg@cs.helsinki.fi>	2009-03-05 07:54:53 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-05 08:17:11 -0500
commit		4bbd4fa03832208f0e6e0b9e73a0ffa2620a626a (patch)
tree		c98e50e0f07b6406a49e7e068612b6f07c0d2bb0 /arch/x86
parent		c3f5d2d8b5fa6eb0cc1c47fd162bf6432f206f42 (diff)
x86: add gbpages support to 32-bit init_memory_mapping()
Impact: cleanup

To reduce the diff between the 32-bit and 64-bit versions of
init_memory_mapping(), add gbpages support to the 32-bit version.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1236257708-27269-2-git-send-email-penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/mm/init_32.c	23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ad4e03c2d4df..5fad0f95d5a3 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -65,6 +65,8 @@ static unsigned long __meminitdata table_top;
 
 static int __initdata after_init_bootmem;
 
+int direct_gbpages;
+
 static __init void *alloc_low_page(void)
 {
 	unsigned long pfn = table_end++;
@@ -831,14 +833,22 @@ void __init setup_bootmem_allocator(void)
 	after_init_bootmem = 1;
 }
 
-static void __init find_early_table_space(unsigned long end, int use_pse)
+static void __init find_early_table_space(unsigned long end, int use_pse,
+					  int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
 	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
 
-	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+	if (use_gbpages) {
+		unsigned long extra;
+
+		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
+		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+	} else
+		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+
 	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 
 	if (use_pse) {
@@ -913,7 +923,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	struct map_range mr[NR_RANGE_MR];
 	int nr_range, i;
-	int use_pse;
+	int use_pse, use_gbpages;
 
 	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
@@ -923,9 +933,10 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * This will simplify cpa(), which otherwise needs to support splitting
 	 * large pages into small in interrupt context, etc.
 	 */
-	use_pse = 0;
+	use_pse = use_gbpages = 0;
 #else
 	use_pse = cpu_has_pse;
+	use_gbpages = direct_gbpages;
 #endif
 
 #ifdef CONFIG_X86_PAE
@@ -944,6 +955,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	}
 
+	if (use_gbpages)
+		page_size_mask |= 1 << PG_LEVEL_1G;
 	if (use_pse)
 		page_size_mask |= 1 << PG_LEVEL_2M;
 
@@ -1015,7 +1028,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * nodes are discovered.
 	 */
 	if (!after_init_bootmem)
-		find_early_table_space(end, use_pse);
+		find_early_table_space(end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
 		kernel_physical_mapping_init(pgd_base,
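
For illustration only, below is a minimal standalone userspace sketch (not kernel code) of the PMD estimate this patch adds to find_early_table_space(): when gbpages are in use, whole 1 GB chunks are covered by PUD entries and only the tail past the last 1 GB boundary still needs PMD entries; otherwise the entire range does. The PUD_SHIFT/PMD_SHIFT values below are assumptions for a 1 GB PUD / 2 MB PMD geometry, chosen just to make the arithmetic concrete; the kernel takes the real values from its pgtable headers.

/* sketch of the pmds estimate in find_early_table_space(), illustrative only */
#include <stdio.h>

#define PUD_SHIFT 30			/* assumed: 1 GB per PUD entry */
#define PMD_SHIFT 21			/* assumed: 2 MB per PMD entry */
#define PMD_SIZE  (1UL << PMD_SHIFT)

/* Number of PMD entries needed to map [0, end) */
static unsigned long pmds_needed(unsigned long end, int use_gbpages)
{
	if (use_gbpages) {
		/* only the tail past the last 1 GB boundary needs PMDs */
		unsigned long extra = end - ((end >> PUD_SHIFT) << PUD_SHIFT);
		return (extra + PMD_SIZE - 1) >> PMD_SHIFT;
	}
	/* without gbpages, every 2 MB chunk of [0, end) needs a PMD entry */
	return (end + PMD_SIZE - 1) >> PMD_SHIFT;
}

int main(void)
{
	unsigned long end = (3UL << 30) + (10UL << 20);	/* 3 GB + 10 MB */

	printf("pmds without gbpages: %lu\n", pmds_needed(end, 0));	/* 1541 */
	printf("pmds with gbpages:    %lu\n", pmds_needed(end, 1));	/* 5 */
	return 0;
}

With gbpages enabled, the table-space reservation for the example range shrinks from 1541 PMD entries to 5, which is the point of computing "extra" relative to the last PUD boundary rather than using the full range.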