author	Ingo Molnar <mingo@kernel.org>	2015-03-05 02:25:01 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-03-05 03:23:12 -0500
commit	e61980a70245715ab39cbee2b9d6e6afc1ec37d4 (patch)
tree	a893a3d3ceee14cdd5e6a0e644f8942686e41550
parent	10971ab269bbf22120edac95fcfa3c873a549bea (diff)
x86/mm: Simplify probe_page_size_mask()
Now that we've simplified the gbpages config space, move the
'page_size_mask' initialization into probe_page_size_mask(), right
next to the PSE and PGE enablement lines.

Cc: Luis R. Rodriguez <mcgrof@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Dexuan Cui <decui@microsoft.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: JBeulich@suse.com
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Lindgren <tony@atomide.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: julia.lawall@lip6.fr
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/mm/init.c	23
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8704153f2675..6dc85d51cd98 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -131,29 +131,18 @@ void __init early_alloc_pgt_buf(void)
 
 int after_bootmem;
 
-static int page_size_mask;
-
 early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);
 
-static void __init init_gbpages(void)
-{
-	if (direct_gbpages && cpu_has_gbpages) {
-		printk(KERN_INFO "Using GB pages for direct mapping\n");
-		page_size_mask |= 1 << PG_LEVEL_1G;
-	} else
-		direct_gbpages = 0;
-}
-
 struct map_range {
 	unsigned long start;
 	unsigned long end;
 	unsigned page_size_mask;
 };
 
+static int page_size_mask;
+
 static void __init probe_page_size_mask(void)
 {
-	init_gbpages();
-
 #if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
 	/*
 	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
@@ -173,6 +162,14 @@ static void __init probe_page_size_mask(void)
 		cr4_set_bits_and_update_boot(X86_CR4_PGE);
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	}
+
+	/* Enable 1 GB linear kernel mappings if available: */
+	if (direct_gbpages && cpu_has_gbpages) {
+		printk(KERN_INFO "Using GB pages for direct mapping\n");
+		page_size_mask |= 1 << PG_LEVEL_1G;
+	} else {
+		direct_gbpages = 0;
+	}
 }
 
 #ifdef CONFIG_X86_32
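
For readability, a rough sketch of how probe_page_size_mask() reads after this patch, reconstructed only from the hunks above; the 2M/PSE handling and the exact guard around the PGE block are not shown in the diff context and are abbreviated or assumed here (the cpu_has_pge check is an assumption, not taken from this patch):

/*
 * Sketch, not the full file: probe_page_size_mask() after this patch,
 * pieced together from the hunks above.
 */
static int page_size_mask;

static void __init probe_page_size_mask(void)
{
#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * (2M/PSE setup is unchanged by this patch and elided here.)
	 */
#endif

	/* Enable the GLOBAL bit; guard assumed, not shown in this diff: */
	if (cpu_has_pge) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/* Enable 1 GB linear kernel mappings if available: */
	if (direct_gbpages && cpu_has_gbpages) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else {
		direct_gbpages = 0;
	}
}

The net effect is that page_size_mask is declared and fully initialized in one place, next to the PSE and PGE enablement, and the separate init_gbpages() helper goes away.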